diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 325cb31af1c..9e3ff403eb5 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -14,4 +14,3 @@ updates:
     schedule:
       interval: "weekly"
     labels: ["skip news", "C: dependencies", "T: documentation"]
-    reviewers: ["ichard26"]
diff --git a/.github/workflows/diff_shades.yml b/.github/workflows/diff_shades.yml
index 0e1aab00e34..51a448a12a5 100644
--- a/.github/workflows/diff_shades.yml
+++ b/.github/workflows/diff_shades.yml
@@ -26,7 +26,7 @@ jobs:
 
       - name: Install diff-shades and support dependencies
         run: |
-          python -m pip install 'click==8.1.3' packaging urllib3
+          python -m pip install 'click>=8.1.7' packaging urllib3
           python -m pip install https://github.com/ichard26/diff-shades/archive/stable.zip
 
       - name: Calculate run configuration & metadata
@@ -64,7 +64,7 @@ jobs:
       - name: Install diff-shades and support dependencies
         run: |
           python -m pip install https://github.com/ichard26/diff-shades/archive/stable.zip
-          python -m pip install 'click==8.1.3' packaging urllib3
+          python -m pip install 'click>=8.1.7' packaging urllib3
 
           # After checking out old revisions, this might not exist so we'll use a copy.
           cat scripts/diff_shades_gha_helper.py > helper.py
           git config user.name "diff-shades-gha"
diff --git a/.github/workflows/doc.yml b/.github/workflows/doc.yml
index 4c592d73919..f34e5041091 100644
--- a/.github/workflows/doc.yml
+++ b/.github/workflows/doc.yml
@@ -26,13 +26,15 @@ jobs:
       - name: Set up latest Python
         uses: actions/setup-python@v5
         with:
-          python-version: "*"
+          python-version: "3.13"
+          allow-prereleases: true
 
       - name: Install dependencies
         run: |
           python -m pip install uv
-          python -m uv pip install --system -e ".[d]"
-          python -m uv pip install --system -r "docs/requirements.txt"
+          python -m uv venv
+          python -m uv pip install -e ".[d]"
+          python -m uv pip install -r "docs/requirements.txt"
 
       - name: Build documentation
         run: sphinx-build -a -b html -W --keep-going docs/ docs/_build
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index ee858236fcf..43d7a2453b7 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -36,7 +36,7 @@ jobs:
           latest_non_release)" >> $GITHUB_ENV
 
       - name: Build and push
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           platforms: linux/amd64,linux/arm64
@@ -47,7 +47,7 @@ jobs:
         if:
           ${{ github.event_name == 'release' && github.event.action == 'published' &&
           !github.event.release.prerelease }}
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           platforms: linux/amd64,linux/arm64
@@ -58,7 +58,7 @@ jobs:
         if:
           ${{ github.event_name == 'release' && github.event.action == 'published' &&
           github.event.release.prerelease }}
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           platforms: linux/amd64,linux/arm64
diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml
index 42a399fd0aa..48f101c206f 100644
--- a/.github/workflows/fuzz.yml
+++ b/.github/workflows/fuzz.yml
@@ -22,7 +22,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
+        python-version: ["3.9", "3.10", "3.11", "3.12.4", "3.13"]
 
     steps:
       - uses: actions/checkout@v4
@@ -31,6 +31,7 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
+          allow-prereleases: true
 
       - name: Install dependencies
         run: |
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index f75734400ce..2d14092481a 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -26,7 +26,8 @@ jobs:
       - name: Set up latest Python
         uses: actions/setup-python@v5
         with:
-          python-version: "*"
+          python-version: "3.13"
+          allow-prereleases: true
 
       - name: Install dependencies
         run: |
diff --git a/.github/workflows/pypi_upload.yml b/.github/workflows/pypi_upload.yml
index 197617e6153..a7cde47b229 100644
--- a/.github/workflows/pypi_upload.yml
+++ b/.github/workflows/pypi_upload.yml
@@ -23,7 +23,8 @@ jobs:
       - name: Set up latest Python
         uses: actions/setup-python@v5
         with:
-          python-version: "*"
+          python-version: "3.13"
+          allow-prereleases: true
 
       - name: Install latest pip, build, twine
         run: |
@@ -48,7 +49,7 @@ jobs:
       - uses: actions/checkout@v4
       - name: Install cibuildwheel and pypyp
         run: |
-          pipx install cibuildwheel==2.15.0
+          pipx install cibuildwheel==2.20.0
           pipx install pypyp==1
       - name: generate matrix
         if: github.event_name != 'pull_request'
@@ -73,7 +74,7 @@ jobs:
             | pyp 'json.dumps({"only": x, "os": "ubuntu-latest"})'
           } | pyp 'json.dumps(list(map(json.loads, lines)))' > /tmp/matrix
         env:
-          CIBW_BUILD: "cp38-* cp312-*"
+          CIBW_BUILD: "cp39-* cp312-*"
           CIBW_ARCHS_LINUX: x86_64
       - id: set-matrix
         run: echo "include=$(cat /tmp/matrix)" | tee -a $GITHUB_OUTPUT
@@ -89,7 +90,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
 
-      - uses: pypa/cibuildwheel@v2.18.1
+      - uses: pypa/cibuildwheel@v2.20.0
        with:
          only: ${{ matrix.only }}
diff --git a/.github/workflows/release_tests.yml b/.github/workflows/release_tests.yml
index 192ba004f81..6d0af004aae 100644
--- a/.github/workflows/release_tests.yml
+++ b/.github/workflows/release_tests.yml
@@ -25,7 +25,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        python-version: ["3.12"]
+        python-version: ["3.13"]
         os: [macOS-latest, ubuntu-latest, windows-latest]
 
     steps:
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index cb6eb3f1307..004256e563d 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -31,7 +31,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "pypy-3.9"]
+        python-version: ["3.9", "3.10", "3.11", "3.12.4", "3.13", "pypy-3.9"]
         os: [ubuntu-latest, macOS-latest, windows-latest]
 
     steps:
@@ -41,6 +41,7 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
+          allow-prereleases: true
 
       - name: Install tox
         run: |
@@ -62,7 +63,7 @@ jobs:
         if:
           github.repository == 'psf/black' && matrix.os == 'ubuntu-latest' &&
           !startsWith(matrix.python-version, 'pypy')
-        uses: AndreMiras/coveralls-python-action@65c1672f0b8a201702d86c81b79187df74072505
+        uses: AndreMiras/coveralls-python-action@ac868b9540fad490f7ca82b8ca00480fd751ed19
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
           parallel: true
@@ -77,7 +78,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Send finished signal to Coveralls
-        uses: AndreMiras/coveralls-python-action@65c1672f0b8a201702d86c81b79187df74072505
+        uses: AndreMiras/coveralls-python-action@ac868b9540fad490f7ca82b8ca00480fd751ed19
        with:
          parallel-finished: true
          debug: true
@@ -98,7 +99,7 @@ jobs:
       - name: Set up latest Python
         uses: actions/setup-python@v5
         with:
-          python-version: "*"
+          python-version: "3.12.4"
 
       - name: Install black with uvloop
         run: |
diff --git a/.github/workflows/upload_binary.yml b/.github/workflows/upload_binary.yml
index 06e55cfe93a..1bde446442a 100644
--- a/.github/workflows/upload_binary.yml
+++ b/.github/workflows/upload_binary.yml
@@ -34,7 +34,7 @@ jobs:
       - name: Set up latest Python
         uses: actions/setup-python@v5
         with:
-          python-version: "*"
+          python-version: "3.12.4"
 
       - name: Install Black and PyInstaller
         run: |
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 81dc7337ff6..9dcf9382346 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -29,7 +29,7 @@ repos:
       - id: isort
 
   - repo: https://github.com/pycqa/flake8
-    rev: 7.0.0
+    rev: 7.1.0
    hooks:
      - id: flake8
        additional_dependencies:
@@ -39,7 +39,7 @@ repos:
        exclude: ^src/blib2to3/
 
  - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.9.0
+    rev: v1.11.2
    hooks:
      - id: mypy
        exclude: ^(docs/conf.py|scripts/generate_schema.py)$
@@ -69,7 +69,7 @@ repos:
        exclude: \.github/workflows/diff_shades\.yml
 
  - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.5.0
+    rev: v4.6.0
    hooks:
      - id: end-of-file-fixer
      - id: trailing-whitespace
diff --git a/CHANGES.md b/CHANGES.md
index ec8e88a1358..73f7d8bfda2 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -6,18 +6,35 @@
 
+- Black is now officially tested with Python 3.13. Note that Black does not yet provide
+  mypyc-compiled wheels for Python 3.13, so performance may be slower than on other
+  versions of Python. We will provide 3.13 mypyc-compiled wheels in a future release.
+  (#4436)
+- Black will issue an error when used with Python 3.12.5, due to an upstream memory
+  safety issue in Python 3.12.5 that can cause Black's AST safety checks to fail. Please
+  use Python 3.12.6 or Python 3.12.4 instead. (#4447)
+- Black no longer supports running with Python 3.8 (#4452)
+
 ### Stable style
 
-- Fix crash when `# fmt: off` is used before a closing parenthesis or bracket. (#4363)
+- Fix crashes involving comments in parenthesised return types or `X | Y` style unions.
+  (#4453)
+- Fix skipping Jupyter cells with unknown `%%` magic (#4462)
 
 ### Preview style
 
+- Fix type annotation spacing between * and more complex type variable tuple (i.e. `def
+  fn(*args: *tuple[*Ts, T]) -> None: pass`) (#4440)
 - Remove parenthesis around sole list items (#4312)
 
+### Caching
+
+- Fix bug where the cache was shared between runs with and without `--unstable` (#4466)
+
 ### Configuration
@@ -26,18 +43,13 @@
 
-- Packaging metadata updated: docs are explictly linked, the issue tracker is now also
-  linked. This improves the PyPI listing for Black. (#4345)
+- Upgrade version of mypyc used to 1.11.2 (#4450)
+- `blackd` now requires a newer version of aiohttp. (#4451)
 
 ### Parser
 
-- Fix regression where Black failed to parse a multiline f-string containing another
-  multiline string (#4339)
-
-- Fix bug with Black incorrectly parsing empty lines with a backslash (#4343)
-
 ### Performance
@@ -46,9 +58,12 @@
 
+- Added Python target version information on parse error (#4378)
+- Add information about Black version to internal error messages (#4457)
+
 ### _Blackd_
 
-- Fix blackd (and all extras installs) for docker container (#4357)
+
 ### Integrations
@@ -59,6 +74,36 @@
 
+## 24.8.0
+
+### Stable style
+
+- Fix crash when `# fmt: off` is used before a closing parenthesis or bracket. (#4363)
+
+### Packaging
+
+- Packaging metadata updated: docs are explictly linked, the issue tracker is now also
+  linked. This improves the PyPI listing for Black. (#4345)
+
+### Parser
+
+- Fix regression where Black failed to parse a multiline f-string containing another
+  multiline string (#4339)
+- Fix regression where Black failed to parse an escaped single quote inside an f-string
+  (#4401)
+- Fix bug with Black incorrectly parsing empty lines with a backslash (#4343)
+- Fix bugs with Black's tokenizer not handling `\{` inside f-strings very well (#4422)
+- Fix incorrect line numbers in the tokenizer for certain tokens within f-strings
+  (#4423)
+
+### Performance
+
+- Improve performance when a large directory is listed in `.gitignore` (#4415)
+
+### _Blackd_
+
+- Fix blackd (and all extras installs) for docker container (#4357)
+
 ## 24.4.2
 
 This is a bugfix release to fix two regressions in the new f-string parser introduced in
diff --git a/docs/contributing/release_process.md b/docs/contributing/release_process.md
index c66ffae8ace..2c904fb95c4 100644
--- a/docs/contributing/release_process.md
+++ b/docs/contributing/release_process.md
@@ -29,8 +29,8 @@ frequently than monthly nets rapidly diminishing returns.
 **You must have `write` permissions for the _Black_ repository to cut a release.**
 
 The 10,000 foot view of the release process is that you prepare a release PR and then
-publish a [GitHub Release]. This triggers [release automation](#release-workflows) that
-builds all release artifacts and publishes them to the various platforms we publish to.
+publish a [GitHub Release]. This triggers [release automation](#release-workflows) that builds
+all release artifacts and publishes them to the various platforms we publish to.
 
 We now have a `scripts/release.py` script to help with cutting the release PRs.
 
@@ -96,9 +96,8 @@ In the end, use your best judgement and ask other maintainers for their thoughts
 
 ## Release workflows
 
-All of _Black_'s release automation uses [GitHub Actions]. All workflows are therefore
-configured using YAML files in the `.github/workflows` directory of the _Black_
-repository.
+All of _Black_'s release automation uses [GitHub Actions]. All workflows are therefore configured
+using YAML files in the `.github/workflows` directory of the _Black_ repository.
 
 They are triggered by the publication of a [GitHub Release].
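> Review note on the `pep646_typed_star_arg_type_var_tuple` changelog entry above: the fix is easiest to see on a concrete input. A minimal sketch (requires Python 3.11+ for PEP 646 syntax; `Ts`, `T`, and `fn` are illustrative names, not code from this PR):
>
> ```python
> from typing import TypeVar, TypeVarTuple
>
> T = TypeVar("T")
> Ts = TypeVarTuple("Ts")
>
>
> # Target output: Black keeps the unpack star tight against the annotation,
> # rather than formatting it as `*args: * tuple[*Ts, T]`.
> def fn(*args: *tuple[*Ts, T]) -> None:
>     pass
> ```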
diff --git a/docs/contributing/the_basics.md b/docs/contributing/the_basics.md
index bc1680eecfd..344bd09fba0 100644
--- a/docs/contributing/the_basics.md
+++ b/docs/contributing/the_basics.md
@@ -16,7 +16,7 @@ $ source .venv/bin/activate # activation for linux and mac
 $ .venv\Scripts\activate # activation for windows
 
 (.venv)$ pip install -r test_requirements.txt
-(.venv)$ pip install -e .[d]
+(.venv)$ pip install -e ".[d]"
 (.venv)$ pre-commit install
 ```
diff --git a/docs/integrations/github_actions.md b/docs/integrations/github_actions.md
index 4ba03b5faac..c527253b562 100644
--- a/docs/integrations/github_actions.md
+++ b/docs/integrations/github_actions.md
@@ -24,7 +24,7 @@ jobs:
   lint:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: psf/black@stable
 ```
diff --git a/docs/integrations/source_version_control.md b/docs/integrations/source_version_control.md
index bbbd257c9d2..f66b5e00a77 100644
--- a/docs/integrations/source_version_control.md
+++ b/docs/integrations/source_version_control.md
@@ -8,7 +8,7 @@ Use [pre-commit](https://pre-commit.com/). Once you
 repos:
   # Using this mirror lets us use mypyc-compiled black, which is about 2x faster
   - repo: https://github.com/psf/black-pre-commit-mirror
-    rev: 24.4.2
+    rev: 24.8.0
     hooks:
       - id: black
         # It is recommended to specify the latest version of Python
@@ -35,7 +35,7 @@ include Jupyter Notebooks. To use this hook, simply replace the hook's `id: blac
 repos:
   # Using this mirror lets us use mypyc-compiled black, which is about 2x faster
   - repo: https://github.com/psf/black-pre-commit-mirror
-    rev: 24.4.2
+    rev: 24.8.0
     hooks:
       - id: black-jupyter
         # It is recommended to specify the latest version of Python
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 5c49e0f4af5..d5c88e84668 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,9 +1,9 @@
 # Used by ReadTheDocs; pinned requirements for stability.
 
 myst-parser==3.0.1
-Sphinx==7.3.7
+Sphinx==7.4.7
 # Older versions break Sphinx even though they're declared to be supported.
 docutils==0.20.1
 sphinxcontrib-programoutput==0.17
 sphinx_copybutton==0.5.2
-furo==2024.5.6
+furo==2024.8.6
diff --git a/docs/the_black_code_style/future_style.md b/docs/the_black_code_style/future_style.md
index 6677b606e5f..328858875eb 100644
--- a/docs/the_black_code_style/future_style.md
+++ b/docs/the_black_code_style/future_style.md
@@ -36,6 +36,8 @@ Currently, the following features are included in the preview style:
   `case` blocks.
 - `parens_for_long_if_clauses_in_case_block`: Adds parentheses to `if` clauses in
   `case` blocks when the line is too long
+- `pep646_typed_star_arg_type_var_tuple`: fix type annotation spacing between * and more
+  complex type variable tuple (i.e. `def fn(*args: *tuple[*Ts, T]) -> None: pass`)
 
 (labels/unstable-features)=
diff --git a/docs/usage_and_configuration/black_docker_image.md b/docs/usage_and_configuration/black_docker_image.md
index c97c25af328..72969b7b68a 100644
--- a/docs/usage_and_configuration/black_docker_image.md
+++ b/docs/usage_and_configuration/black_docker_image.md
@@ -8,16 +8,16 @@ _Black_ images with the following tags are available:
 
 - release numbers, e.g. `21.5b2`, `21.6b0`, `21.7b0` etc.\
   ℹ Recommended for users who want to use a particular version of _Black_.
 - `latest_release` - tag created when a new version of _Black_ is released.\
-  ℹ Recommended for users who want to use released versions of _Black_.
-  It maps to [the latest release](https://github.com/psf/black/releases/latest)
-  of _Black_.
+  ℹ Recommended for users who want to use released versions of _Black_. It maps to
+  [the latest release](https://github.com/psf/black/releases/latest) of _Black_.
 - `latest_prerelease` - tag created when a new alpha (prerelease) version of _Black_ is
   released.\
-  ℹ Recommended for users who want to preview or test alpha versions of _Black_. Note that
-  the most recent release may be newer than any prerelease, because no prereleases are created
-  before most releases.
+  ℹ Recommended for users who want to preview or test alpha versions of _Black_. Note
+  that the most recent release may be newer than any prerelease, because no prereleases
+  are created before most releases.
 - `latest` - tag used for the newest image of _Black_.\
-  ℹ Recommended for users who always want to use the latest version of _Black_, even before
-  it is released.
+  ℹ Recommended for users who always want to use the latest version of _Black_, even
+  before it is released.
 
 There is one more tag used for _Black_ Docker images - `latest_non_release`. It is
 created for all unreleased
diff --git a/docs/usage_and_configuration/the_basics.md b/docs/usage_and_configuration/the_basics.md
index b468a04c499..7b5da98879f 100644
--- a/docs/usage_and_configuration/the_basics.md
+++ b/docs/usage_and_configuration/the_basics.md
@@ -269,8 +269,8 @@ configuration file for consistent results across environments.
 
 ```console
 $ black --version
-black, 24.4.2 (compiled: yes)
-$ black --required-version 24.4.2 -c "format = 'this'"
+black, 24.8.0 (compiled: yes)
+$ black --required-version 24.8.0 -c "format = 'this'"
 format = "this"
 $ black --required-version 31.5b2 -c "still = 'beta?!'"
 Oh no! 💥 💔 💥 The required version does not match the running version!
@@ -366,7 +366,7 @@ You can check the version of _Black_ you have installed using the `--version` fl
 
 ```console
 $ black --version
-black, 24.4.2
+black, 24.8.0
 ```
 
 #### `--config`
diff --git a/pyproject.toml b/pyproject.toml
index 38c4a3ac500..c75d6fec266 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -7,7 +7,7 @@
 
 [tool.black]
 line-length = 88
-target-version = ['py38']
+target-version = ['py39']
 include = '\.pyi?$'
 extend-exclude = '''
 /(
@@ -34,7 +34,7 @@ build-backend = "hatchling.build"
 
 name = "black"
 description = "The uncompromising code formatter."
 license = { text = "MIT" }
-requires-python = ">=3.8"
+requires-python = ">=3.9"
 authors = [
   { name = "Łukasz Langa", email = "lukasz@langa.pl" },
 ]
@@ -55,11 +55,11 @@ classifiers = [
   "Operating System :: OS Independent",
   "Programming Language :: Python",
   "Programming Language :: Python :: 3 :: Only",
-  "Programming Language :: Python :: 3.8",
   "Programming Language :: Python :: 3.9",
   "Programming Language :: Python :: 3.10",
   "Programming Language :: Python :: 3.11",
   "Programming Language :: Python :: 3.12",
+  "Programming Language :: Python :: 3.13",
   "Topic :: Software Development :: Libraries :: Python Modules",
   "Topic :: Software Development :: Quality Assurance",
 ]
@@ -77,10 +77,7 @@ dynamic = ["readme", "version"]
 
 [project.optional-dependencies]
 colorama = ["colorama>=0.4.3"]
 uvloop = ["uvloop>=0.15.2"]
-d = [
-  "aiohttp>=3.7.4; sys_platform != 'win32' or implementation_name != 'pypy'",
-  "aiohttp>=3.7.4, !=3.9.0; sys_platform == 'win32' and implementation_name == 'pypy'",
-]
+d = ["aiohttp>=3.10"]
 jupyter = [
   "ipython>=7.8.0",
   "tokenize-rt>=3.2.0",
]
@@ -128,8 +125,8 @@ macos-max-compat = true
 enable-by-default = false
 dependencies = [
   "hatch-mypyc>=0.16.0",
-  "mypy==1.7.1",
-  "click==8.1.3",  # avoid https://github.com/pallets/click/issues/2558
+  "mypy==1.11.2",
+  "click>=8.1.7",
 ]
 require-runtime-dependencies = true
 exclude = [
@@ -152,12 +149,14 @@ options = { debug_level = "0" }
 
 [tool.cibuildwheel]
 build-verbosity = 1
+
 # So these are the environments we target:
-# - Python: CPython 3.8+ only
+# - Python: CPython 3.9+ only
 # - Architecture (64-bit only): amd64 / x86_64, universal2, and arm64
 # - OS: Linux (no musl), Windows, and macOS
 build = "cp3*"
 skip = ["*-manylinux_i686", "*-musllinux_*", "*-win32", "pp*"]
+
 # This is the bare minimum needed to run the test suite. Pulling in the full
 # test_requirements.txt would download a bunch of other packages not necessary
 # here and would slow down the testing step a fair bit.
 test-skip = ["*-macosx_arm64", "*-macosx_universal2:arm64"]
 HATCH_BUILD_HOOKS_ENABLE = "1"
 MYPYC_OPT_LEVEL = "3"
 MYPYC_DEBUG_LEVEL = "0"
-# CPython 3.11 wheels aren't available for aiohttp and building a Cython extension
-# from source also doesn't work.
-AIOHTTP_NO_EXTENSIONS = "1"
 
 [tool.cibuildwheel.linux]
+manylinux-x86_64-image = "manylinux_2_28"
 before-build = [
     "yum install -y clang gcc",
 ]
@@ -185,7 +182,6 @@ before-build = [
 HATCH_BUILD_HOOKS_ENABLE = "1"
 MYPYC_OPT_LEVEL = "3"
 MYPYC_DEBUG_LEVEL = "0"
-AIOHTTP_NO_EXTENSIONS = "1"
 
 # Black needs Clang to compile successfully on Linux.
 CC = "clang"
@@ -193,8 +189,10 @@ CC = "clang"
 
 [tool.cibuildwheel.macos]
 build-frontend = { name = "build", args = ["--no-isolation"] }
 # Unfortunately, hatch doesn't respect MACOSX_DEPLOYMENT_TARGET
+# Note we don't have a good test for this sed horror, so if you futz with it
+# make sure to test manually
 before-build = [
-    "python -m pip install 'hatchling==1.20.0' hatch-vcs hatch-fancy-pypi-readme 'hatch-mypyc>=0.16.0' 'mypy==1.7.1' 'click==8.1.3'",
+    "python -m pip install 'hatchling==1.20.0' hatch-vcs hatch-fancy-pypi-readme 'hatch-mypyc>=0.16.0' 'mypy==1.11.2' 'click>=8.1.7'",
     """sed -i '' -e "600,700s/'10_16'/os.environ['MACOSX_DEPLOYMENT_TARGET'].replace('.', '_')/" $(python -c 'import hatchling.builders.wheel as h; print(h.__file__)')
     """,
 ]
@@ -217,23 +215,7 @@ markers = [
   "incompatible_with_mypyc: run when testing mypyc compiled black"
 ]
 xfail_strict = true
-filterwarnings = [
-    "error",
-    # this is mitigated by a try/catch in https://github.com/psf/black/pull/2974/
-    # this ignore can be removed when support for aiohttp 3.7 is dropped.
-    '''ignore:Decorator `@unittest_run_loop` is no longer needed in aiohttp 3\.8\+:DeprecationWarning''',
-    # this is mitigated by a try/catch in https://github.com/psf/black/pull/3198/
-    # this ignore can be removed when support for aiohttp 3.x is dropped.
-    '''ignore:Middleware decorator is deprecated since 4\.0 and its behaviour is default, you can simply remove this decorator:DeprecationWarning''',
-    # aiohttp is using deprecated cgi modules - Safe to remove when fixed:
-    # https://github.com/aio-libs/aiohttp/issues/6905
-    '''ignore:'cgi' is deprecated and slated for removal in Python 3.13:DeprecationWarning''',
-    # Work around https://github.com/pytest-dev/pytest/issues/10977 for Python 3.12
-    '''ignore:(Attribute s|Attribute n|ast.Str|ast.Bytes|ast.NameConstant|ast.Num) is deprecated and will be removed in Python 3.14:DeprecationWarning''',
-    # Will be fixed with aiohttp 3.9.0
-    # https://github.com/aio-libs/aiohttp/pull/7302
-    "ignore:datetime.*utcfromtimestamp\\(\\) is deprecated and scheduled for removal:DeprecationWarning",
-]
+filterwarnings = ["error"]
 [tool.coverage.report]
 omit = [
   "src/blib2to3/*",
@@ -249,7 +231,7 @@ branch = true
 # Specify the target platform details in config, so your developers are
 # free to run mypy on Windows, Linux, or macOS and get consistent
 # results.
-python_version = "3.8"
+python_version = "3.9"
 mypy_path = "src"
 strict = true
 # Unreachable blocks have been an issue when compiling mypyc, let's try to avoid 'em in the first place.
diff --git a/scripts/generate_schema.py b/scripts/generate_schema.py
index 35765750091..dcdfc74de62 100755
--- a/scripts/generate_schema.py
+++ b/scripts/generate_schema.py
@@ -53,7 +53,7 @@ def main(schemastore: bool, outfile: IO[str]) -> None:
     schema: dict[str, Any] = {
         "$schema": "http://json-schema.org/draft-07/schema#",
         "$id": (
-            "https://github.com/psf/black/blob/main/black/resources/black.schema.json"
+            "https://github.com/psf/black/blob/main/src/black/resources/black.schema.json"
         ),
         "$comment": "tool.black table in pyproject.toml",
         "type": "object",
diff --git a/src/black/__init__.py b/src/black/__init__.py
index 942f3160751..a94f7fc29a0 100644
--- a/src/black/__init__.py
+++ b/src/black/__init__.py
@@ -14,17 +14,13 @@
 from typing import (
     Any,
     Collection,
-    Dict,
     Generator,
     Iterator,
-    List,
     MutableMapping,
     Optional,
     Pattern,
     Sequence,
-    Set,
     Sized,
-    Tuple,
     Union,
 )
 
@@ -57,12 +53,12 @@
 )
 from black.handle_ipynb_magics import (
     PYTHON_CELL_MAGICS,
-    TRANSFORMED_MAGICS,
     jupyter_dependencies_are_installed,
     mask_cell,
     put_trailing_semicolon_back,
     remove_trailing_semicolon,
     unmask_cell,
+    validate_cell,
 )
 from black.linegen import LN, LineGenerator, transform_line
 from black.lines import EmptyLineTracker, LinesBlock
@@ -176,7 +172,7 @@ def read_pyproject_toml(
             "line-ranges", "Cannot use line-ranges in the pyproject.toml file."
         )
 
-    default_map: Dict[str, Any] = {}
+    default_map: dict[str, Any] = {}
     if ctx.default_map:
         default_map.update(ctx.default_map)
     default_map.update(config)
@@ -186,9 +182,9 @@ def read_pyproject_toml(
 
 
 def spellcheck_pyproject_toml_keys(
-    ctx: click.Context, config_keys: List[str], config_file_path: str
+    ctx: click.Context, config_keys: list[str], config_file_path: str
 ) -> None:
-    invalid_keys: List[str] = []
+    invalid_keys: list[str] = []
     available_config_options = {param.name for param in ctx.command.params}
     for key in config_keys:
         if key not in available_config_options:
@@ -202,8 +198,8 @@ def spellcheck_pyproject_toml_keys(
 
 
 def target_version_option_callback(
-    c: click.Context, p: Union[click.Option, click.Parameter], v: Tuple[str, ...]
-) -> List[TargetVersion]:
+    c: click.Context, p: Union[click.Option, click.Parameter], v: tuple[str, ...]
+) -> list[TargetVersion]:
     """Compute the target versions from a --target-version flag.
 
     This is its own function because mypy couldn't infer the type correctly
@@ -213,8 +209,8 @@ def target_version_option_callback(
 
 
 def enable_unstable_feature_callback(
-    c: click.Context, p: Union[click.Option, click.Parameter], v: Tuple[str, ...]
-) -> List[Preview]:
+    c: click.Context, p: Union[click.Option, click.Parameter], v: tuple[str, ...]
+) -> list[Preview]:
     """Compute the features from an --enable-unstable-feature flag."""
     return [Preview[val] for val in v]
 
@@ -519,7 +515,7 @@ def main(  # noqa: C901
     ctx: click.Context,
     code: Optional[str],
     line_length: int,
-    target_version: List[TargetVersion],
+    target_version: list[TargetVersion],
     check: bool,
     diff: bool,
     line_ranges: Sequence[str],
@@ -533,7 +529,7 @@ def main(  # noqa: C901
     skip_magic_trailing_comma: bool,
     preview: bool,
     unstable: bool,
-    enable_unstable_feature: List[Preview],
+    enable_unstable_feature: list[Preview],
     quiet: bool,
     verbose: bool,
     required_version: Optional[str],
@@ -543,12 +539,21 @@ def main(  # noqa: C901
     force_exclude: Optional[Pattern[str]],
     stdin_filename: Optional[str],
     workers: Optional[int],
-    src: Tuple[str, ...],
+    src: tuple[str, ...],
     config: Optional[str],
 ) -> None:
     """The uncompromising code formatter."""
     ctx.ensure_object(dict)
 
+    assert sys.version_info >= (3, 9), "Black requires Python 3.9+"
+    if sys.version_info[:3] == (3, 12, 5):
+        out(
+            "Python 3.12.5 has a memory safety issue that can cause Black's "
+            "AST safety checks to fail. "
+            "Please upgrade to Python 3.12.6 or downgrade to Python 3.12.4"
+        )
+        ctx.exit(1)
+
     if src and code is not None:
         out(
             main.get_usage(ctx)
@@ -634,7 +639,7 @@ def main(  # noqa: C901
         enabled_features=set(enable_unstable_feature),
     )
 
-    lines: List[Tuple[int, int]] = []
+    lines: list[tuple[int, int]] = []
     if line_ranges:
         if ipynb:
             err("Cannot use --line-ranges with ipynb files.")
@@ -724,7 +729,7 @@ def main(  # noqa: C901
 def get_sources(
     *,
     root: Path,
-    src: Tuple[str, ...],
+    src: tuple[str, ...],
     quiet: bool,
     verbose: bool,
     include: Pattern[str],
@@ -733,14 +738,14 @@ def get_sources(
     force_exclude: Optional[Pattern[str]],
     report: "Report",
     stdin_filename: Optional[str],
-) -> Set[Path]:
+) -> set[Path]:
     """Compute the set of files to be formatted."""
-    sources: Set[Path] = set()
+    sources: set[Path] = set()
 
     assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
     using_default_exclude = exclude is None
     exclude = re_compile_maybe_verbose(DEFAULT_EXCLUDES) if exclude is None else exclude
-    gitignore: Optional[Dict[Path, PathSpec]] = None
+    gitignore: Optional[dict[Path, PathSpec]] = None
     root_gitignore = get_gitignore(root)
 
     for s in src:
@@ -832,7 +837,7 @@ def reformat_code(
     mode: Mode,
     report: Report,
     *,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> None:
     """
     Reformat and print out `content` without spawning child processes.
@@ -865,7 +870,7 @@ def reformat_one(
     mode: Mode,
     report: "Report",
     *,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> None:
     """Reformat a single file under `src` without spawning child processes.
 
@@ -921,7 +926,7 @@ def format_file_in_place(
     write_back: WriteBack = WriteBack.NO,
     lock: Any = None,  # multiprocessing.Manager().Lock() is some crazy proxy
     *,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> bool:
     """Format file under `src` path. Return True if changed.
 
@@ -988,7 +993,7 @@ def format_stdin_to_stdout(
     content: Optional[str] = None,
     write_back: WriteBack = WriteBack.NO,
     mode: Mode,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> bool:
     """Format file on stdin. Return True if changed.
@@ -1039,7 +1044,7 @@ def check_stability_and_equivalence(
     dst_contents: str,
     *,
     mode: Mode,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> None:
     """Perform stability and equivalence checks.
 
@@ -1056,7 +1061,7 @@ def format_file_contents(
     *,
     fast: bool,
     mode: Mode,
-    lines: Collection[Tuple[int, int]] = (),
+    lines: Collection[tuple[int, int]] = (),
 ) -> FileContent:
     """Reformat contents of a file and return new contents.
 
@@ -1079,32 +1084,6 @@ def format_file_contents(
     return dst_contents
 
 
-def validate_cell(src: str, mode: Mode) -> None:
-    """Check that cell does not already contain TransformerManager transformations,
-    or non-Python cell magics, which might cause tokenizer_rt to break because of
-    indentations.
-
-    If a cell contains ``!ls``, then it'll be transformed to
-    ``get_ipython().system('ls')``. However, if the cell originally contained
-    ``get_ipython().system('ls')``, then it would get transformed in the same way:
-
-        >>> TransformerManager().transform_cell("get_ipython().system('ls')")
-        "get_ipython().system('ls')\n"
-        >>> TransformerManager().transform_cell("!ls")
-        "get_ipython().system('ls')\n"
-
-    Due to the impossibility of safely roundtripping in such situations, cells
-    containing transformed magics will be ignored.
-    """
-    if any(transformed_magic in src for transformed_magic in TRANSFORMED_MAGICS):
-        raise NothingChanged
-    if (
-        src[:2] == "%%"
-        and src.split()[0][2:] not in PYTHON_CELL_MAGICS | mode.python_cell_magics
-    ):
-        raise NothingChanged
-
-
 def format_cell(src: str, *, fast: bool, mode: Mode) -> str:
     """Format code in given cell of Jupyter notebook.
 
@@ -1187,7 +1166,7 @@ def format_ipynb_string(src_contents: str, *, fast: bool, mode: Mode) -> FileCon
 
 
 def format_str(
-    src_contents: str, *, mode: Mode, lines: Collection[Tuple[int, int]] = ()
+    src_contents: str, *, mode: Mode, lines: Collection[tuple[int, int]] = ()
 ) -> str:
     """Reformat a string and return new contents.
 
@@ -1234,10 +1213,10 @@ def f(
 
 
 def _format_str_once(
-    src_contents: str, *, mode: Mode, lines: Collection[Tuple[int, int]] = ()
+    src_contents: str, *, mode: Mode, lines: Collection[tuple[int, int]] = ()
 ) -> str:
     src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions)
-    dst_blocks: List[LinesBlock] = []
+    dst_blocks: list[LinesBlock] = []
     if mode.target_versions:
         versions = mode.target_versions
     else:
@@ -1287,7 +1266,7 @@ def _format_str_once(
     return "".join(dst_contents)
 
 
-def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]:
+def decode_bytes(src: bytes) -> tuple[FileContent, Encoding, NewLine]:
     """Return a tuple of (decoded_contents, encoding, newline).
 
     `newline` is either CRLF or LF but `decoded_contents` is decoded with
@@ -1305,8 +1284,8 @@ def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]:
 
 
 def get_features_used(  # noqa: C901
-    node: Node, *, future_imports: Optional[Set[str]] = None
-) -> Set[Feature]:
+    node: Node, *, future_imports: Optional[set[str]] = None
+) -> set[Feature]:
     """Return a set of (relatively) new Python features used in this file.
 
     Currently looking for:
@@ -1324,7 +1303,7 @@ def get_features_used(  # noqa: C901
     - except* clause;
     - variadic generics;
     """
-    features: Set[Feature] = set()
+    features: set[Feature] = set()
     if future_imports:
         features |= {
             FUTURE_FLAG_TO_FEATURE[future_import]
@@ -1462,8 +1441,8 @@ def _contains_asexpr(node: Union[Node, Leaf]) -> bool:
 
 
 def detect_target_versions(
-    node: Node, *, future_imports: Optional[Set[str]] = None
-) -> Set[TargetVersion]:
+    node: Node, *, future_imports: Optional[set[str]] = None
+) -> set[TargetVersion]:
     """Detect the version to target based on the nodes used."""
     features = get_features_used(node, future_imports=future_imports)
     return {
@@ -1471,11 +1450,11 @@ def detect_target_versions(
     }
 
 
-def get_future_imports(node: Node) -> Set[str]:
+def get_future_imports(node: Node) -> set[str]:
     """Return a set of __future__ imports in the file."""
-    imports: Set[str] = set()
+    imports: set[str] = set()
 
-    def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]:
+    def get_imports_from_children(children: list[LN]) -> Generator[str, None, None]:
         for child in children:
             if isinstance(child, Leaf):
                 if child.type == token.NAME:
@@ -1521,6 +1500,13 @@ def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]:
     return imports
 
 
+def _black_info() -> str:
+    return (
+        f"Black {__version__} on "
+        f"Python ({platform.python_implementation()}) {platform.python_version()}"
+    )
+
+
 def assert_equivalent(src: str, dst: str) -> None:
     """Raise AssertionError if `src` and `dst` aren't equivalent."""
     try:
@@ -1538,7 +1524,7 @@ def assert_equivalent(src: str, dst: str) -> None:
     except Exception as exc:
         log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst)
         raise ASTSafetyError(
-            f"INTERNAL ERROR: Black produced invalid code: {exc}. "
+            f"INTERNAL ERROR: {_black_info()} produced invalid code: {exc}. "
             "Please report a bug on https://github.com/psf/black/issues. "
             f"This invalid output might be helpful: {log}"
         ) from None
@@ -1548,14 +1534,14 @@ def assert_equivalent(src: str, dst: str) -> None:
     if src_ast_str != dst_ast_str:
         log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst"))
         raise ASTSafetyError(
-            "INTERNAL ERROR: Black produced code that is not equivalent to the"
-            " source. Please report a bug on "
-            f"https://github.com/psf/black/issues. This diff might be helpful: {log}"
+            f"INTERNAL ERROR: {_black_info()} produced code that is not equivalent to"
+            " the source. Please report a bug on https://github.com/psf/black/issues."
+            f" This diff might be helpful: {log}"
         ) from None
 
 
 def assert_stable(
-    src: str, dst: str, mode: Mode, *, lines: Collection[Tuple[int, int]] = ()
+    src: str, dst: str, mode: Mode, *, lines: Collection[tuple[int, int]] = ()
 ) -> None:
     """Raise AssertionError if `dst` reformats differently the second time."""
     if lines:
@@ -1576,9 +1562,9 @@ def assert_stable(
             diff(dst, newdst, "first pass", "second pass"),
         )
         raise AssertionError(
-            "INTERNAL ERROR: Black produced different code on the second pass of the"
-            " formatter. Please report a bug on https://github.com/psf/black/issues."
-            f" This diff might be helpful: {log}"
+            f"INTERNAL ERROR: {_black_info()} produced different code on the second"
+            " pass of the formatter. Please report a bug on"
+            f" https://github.com/psf/black/issues. This diff might be helpful: {log}"
        ) from None
diff --git a/src/black/_width_table.py b/src/black/_width_table.py
index f3304e48ed0..5f6ff9febc3 100644
--- a/src/black/_width_table.py
+++ b/src/black/_width_table.py
@@ -1,9 +1,9 @@
 # Generated by make_width_table.py
 # wcwidth 0.2.6
 # Unicode 15.0.0
-from typing import Final, List, Tuple
+from typing import Final
 
-WIDTH_TABLE: Final[List[Tuple[int, int, int]]] = [
+WIDTH_TABLE: Final[list[tuple[int, int, int]]] = [
     (0, 0, 0),
     (1, 31, -1),
     (127, 159, -1),
diff --git a/src/black/brackets.py b/src/black/brackets.py
index 37e6b2590eb..4a994a9d5c7 100644
--- a/src/black/brackets.py
+++ b/src/black/brackets.py
@@ -1,7 +1,7 @@
 """Builds on top of nodes.py to track brackets."""
 
 from dataclasses import dataclass, field
-from typing import Dict, Final, Iterable, List, Optional, Sequence, Set, Tuple, Union
+from typing import Final, Iterable, Optional, Sequence, Union
 
 from black.nodes import (
     BRACKET,
@@ -60,12 +60,12 @@ class BracketTracker:
     """Keeps track of brackets on a line."""
 
     depth: int = 0
-    bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = field(default_factory=dict)
-    delimiters: Dict[LeafID, Priority] = field(default_factory=dict)
+    bracket_match: dict[tuple[Depth, NodeType], Leaf] = field(default_factory=dict)
+    delimiters: dict[LeafID, Priority] = field(default_factory=dict)
     previous: Optional[Leaf] = None
-    _for_loop_depths: List[int] = field(default_factory=list)
-    _lambda_argument_depths: List[int] = field(default_factory=list)
-    invisible: List[Leaf] = field(default_factory=list)
+    _for_loop_depths: list[int] = field(default_factory=list)
+    _lambda_argument_depths: list[int] = field(default_factory=list)
+    invisible: list[Leaf] = field(default_factory=list)
 
     def mark(self, leaf: Leaf) -> None:
         """Mark `leaf` with bracket-related metadata. Keep track of delimiters.
@@ -353,7 +353,7 @@ def max_delimiter_priority_in_atom(node: LN) -> Priority:
         return 0
 
 
-def get_leaves_inside_matching_brackets(leaves: Sequence[Leaf]) -> Set[LeafID]:
+def get_leaves_inside_matching_brackets(leaves: Sequence[Leaf]) -> set[LeafID]:
     """Return leaves that are inside matching brackets.
 
     The input `leaves` can have non-matching brackets at the head or tail parts.
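> Review note on the typing churn above and in the files that follow: dropping Python 3.8 enables PEP 585, under which the builtin collections are subscriptable at runtime, so the `typing.List`/`Dict`/`Set`/`Tuple` aliases can be removed wholesale. A self-contained sketch of the resulting style (illustrative names, not code from this PR):
>
> ```python
> from pathlib import Path
>
>
> def split_sources(sources: list[Path]) -> tuple[set[Path], set[Path]]:
>     """Split paths into Python files and everything else, using PEP 585 generics."""
>     python_files: set[Path] = {p for p in sources if p.suffix == ".py"}
>     other_files: set[Path] = set(sources) - python_files
>     return python_files, other_files
> ```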
diff --git a/src/black/cache.py b/src/black/cache.py
index 35bddb573d2..8811a79d79c 100644
--- a/src/black/cache.py
+++ b/src/black/cache.py
@@ -7,7 +7,7 @@
 import tempfile
 from dataclasses import dataclass, field
 from pathlib import Path
-from typing import Dict, Iterable, NamedTuple, Set, Tuple
+from typing import Iterable, NamedTuple
 
 from platformdirs import user_cache_dir
 
@@ -55,7 +55,7 @@ def get_cache_file(mode: Mode) -> Path:
 class Cache:
     mode: Mode
     cache_file: Path
-    file_data: Dict[str, FileData] = field(default_factory=dict)
+    file_data: dict[str, FileData] = field(default_factory=dict)
 
     @classmethod
     def read(cls, mode: Mode) -> Self:
@@ -76,7 +76,7 @@ def read(cls, mode: Mode) -> Self:
 
         with cache_file.open("rb") as fobj:
             try:
-                data: Dict[str, Tuple[float, int, str]] = pickle.load(fobj)
+                data: dict[str, tuple[float, int, str]] = pickle.load(fobj)
                 file_data = {k: FileData(*v) for k, v in data.items()}
             except (pickle.UnpicklingError, ValueError, IndexError):
                 return cls(mode, cache_file)
@@ -114,14 +114,14 @@ def is_changed(self, source: Path) -> bool:
             return True
         return False
 
-    def filtered_cached(self, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
+    def filtered_cached(self, sources: Iterable[Path]) -> tuple[set[Path], set[Path]]:
         """Split an iterable of paths in `sources` into two sets.
 
         The first contains paths of files that modified on disk or are not in the
         cache. The other contains paths to non-modified files.
         """
-        changed: Set[Path] = set()
-        done: Set[Path] = set()
+        changed: set[Path] = set()
+        done: set[Path] = set()
         for src in sources:
             if self.is_changed(src):
                 changed.add(src)
@@ -139,9 +139,8 @@ def write(self, sources: Iterable[Path]) -> None:
             with tempfile.NamedTemporaryFile(
                 dir=str(self.cache_file.parent), delete=False
             ) as f:
-                # We store raw tuples in the cache because pickling NamedTuples
-                # doesn't work with mypyc on Python 3.8, and because it's faster.
-                data: Dict[str, Tuple[float, int, str]] = {
+                # We store raw tuples in the cache because it's faster.
+                data: dict[str, tuple[float, int, str]] = {
                     k: (*v,) for k, v in self.file_data.items()
                 }
                 pickle.dump(data, f, protocol=4)
diff --git a/src/black/comments.py b/src/black/comments.py
index a835f58a900..cd37c440290 100644
--- a/src/black/comments.py
+++ b/src/black/comments.py
@@ -1,7 +1,7 @@
 import re
 from dataclasses import dataclass
 from functools import lru_cache
-from typing import Collection, Final, Iterator, List, Optional, Tuple, Union
+from typing import Collection, Final, Iterator, Optional, Union
 
 from black.mode import Mode, Preview
 from black.nodes import (
@@ -77,9 +77,9 @@ def generate_comments(leaf: LN) -> Iterator[Leaf]:
 
 
 @lru_cache(maxsize=4096)
-def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]:
+def list_comments(prefix: str, *, is_endmarker: bool) -> list[ProtoComment]:
     """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
-    result: List[ProtoComment] = []
+    result: list[ProtoComment] = []
     if not prefix or "#" not in prefix:
         return result
 
@@ -166,7 +166,7 @@ def make_comment(content: str) -> str:
 
 
 def normalize_fmt_off(
-    node: Node, mode: Mode, lines: Collection[Tuple[int, int]]
+    node: Node, mode: Mode, lines: Collection[tuple[int, int]]
 ) -> None:
     """Convert content between `# fmt: off`/`# fmt: on` into standalone comments."""
     try_again = True
@@ -175,7 +175,7 @@ def normalize_fmt_off(
 
 
 def convert_one_fmt_off_pair(
-    node: Node, mode: Mode, lines: Collection[Tuple[int, int]]
+    node: Node, mode: Mode, lines: Collection[tuple[int, int]]
 ) -> bool:
     """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.
 
@@ -336,7 +336,7 @@ def _generate_ignored_nodes_from_fmt_skip(
     # statements. The ignored nodes should be previous siblings of the
     # parent suite node.
     leaf.prefix = ""
-    ignored_nodes: List[LN] = []
+    ignored_nodes: list[LN] = []
     parent_sibling = parent.prev_sibling
     while parent_sibling is not None and parent_sibling.type != syms.suite:
         ignored_nodes.insert(0, parent_sibling)
@@ -376,7 +376,7 @@ def children_contains_fmt_on(container: LN) -> bool:
     return False
 
 
-def contains_pragma_comment(comment_list: List[Leaf]) -> bool:
+def contains_pragma_comment(comment_list: list[Leaf]) -> bool:
     """
     Returns:
         True iff one of the comments in @comment_list is a pragma used by one
diff --git a/src/black/concurrency.py b/src/black/concurrency.py
index ff0a8f5fd32..8079100f8f7 100644
--- a/src/black/concurrency.py
+++ b/src/black/concurrency.py
@@ -13,7 +13,7 @@
 from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor
 from multiprocessing import Manager
 from pathlib import Path
-from typing import Any, Iterable, Optional, Set
+from typing import Any, Iterable, Optional
 
 from mypy_extensions import mypyc_attr
 
@@ -69,7 +69,7 @@ def shutdown(loop: asyncio.AbstractEventLoop) -> None:
 # not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26
 @mypyc_attr(patchable=True)
 def reformat_many(
-    sources: Set[Path],
+    sources: set[Path],
     fast: bool,
     write_back: WriteBack,
     mode: Mode,
@@ -119,7 +119,7 @@ def reformat_many(
 
 
 async def schedule_formatting(
-    sources: Set[Path],
+    sources: set[Path],
     fast: bool,
     write_back: WriteBack,
     mode: Mode,
diff --git a/src/black/debug.py b/src/black/debug.py
index cebc48765ba..34a9f32e5cb 100644
--- a/src/black/debug.py
+++ b/src/black/debug.py
@@ -1,5 +1,5 @@
 from dataclasses import dataclass, field
-from typing import Any, Iterator, List, TypeVar, Union
+from typing import Any, Iterator, TypeVar, Union
 
 from black.nodes import Visitor
 from black.output import out
@@ -14,7 +14,7 @@
 @dataclass
 class DebugVisitor(Visitor[T]):
     tree_depth: int = 0
-    list_output: List[str] = field(default_factory=list)
+    list_output: list[str] = field(default_factory=list)
     print_output: bool = True
 
     def out(self, message: str, *args: Any, **kwargs: Any) -> None:
diff --git a/src/black/files.py b/src/black/files.py
index c0cadbfd890..82da47919c7 100644
--- a/src/black/files.py
+++ b/src/black/files.py
@@ -6,14 +6,11 @@
 from typing import (
     TYPE_CHECKING,
     Any,
-    Dict,
     Iterable,
     Iterator,
-    List,
     Optional,
     Pattern,
     Sequence,
-    Tuple,
     Union,
 )
 
@@ -43,7 +40,7 @@
 
 
 @lru_cache
-def _load_toml(path: Union[Path, str]) -> Dict[str, Any]:
+def _load_toml(path: Union[Path, str]) -> dict[str, Any]:
     with open(path, "rb") as f:
         return tomllib.load(f)
 
@@ -56,9 +53,12 @@ def _cached_resolve(path: Path) -> Path:
 @lru_cache
 def find_project_root(
     srcs: Sequence[str], stdin_filename: Optional[str] = None
-) -> Tuple[Path, str]:
+) -> tuple[Path, str]:
     """Return a directory containing .git, .hg, or pyproject.toml.
 
+    pyproject.toml files are only considered if they contain a [tool.black]
+    section and are ignored otherwise.
+
     That directory will be a common parent of all files and directories
     passed in `srcs`.
 
@@ -103,7 +103,7 @@ def find_project_root(
 
 
 def find_pyproject_toml(
-    path_search_start: Tuple[str, ...], stdin_filename: Optional[str] = None
+    path_search_start: tuple[str, ...], stdin_filename: Optional[str] = None
 ) -> Optional[str]:
     """Find the absolute filepath to a pyproject.toml if it exists"""
     path_project_root, _ = find_project_root(path_search_start, stdin_filename)
@@ -125,13 +125,13 @@ def find_pyproject_toml(
 
 
 @mypyc_attr(patchable=True)
-def parse_pyproject_toml(path_config: str) -> Dict[str, Any]:
+def parse_pyproject_toml(path_config: str) -> dict[str, Any]:
     """Parse a pyproject toml file, pulling out relevant parts for Black.
 
     If parsing fails, will raise a tomllib.TOMLDecodeError.
     """
     pyproject_toml = _load_toml(path_config)
-    config: Dict[str, Any] = pyproject_toml.get("tool", {}).get("black", {})
+    config: dict[str, Any] = pyproject_toml.get("tool", {}).get("black", {})
     config = {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
 
     if "target_version" not in config:
@@ -143,8 +143,8 @@ def parse_pyproject_toml(path_config: str) -> Dict[str, Any]:
 
 
 def infer_target_version(
-    pyproject_toml: Dict[str, Any],
-) -> Optional[List[TargetVersion]]:
+    pyproject_toml: dict[str, Any],
+) -> Optional[list[TargetVersion]]:
     """Infer Black's target version from the project metadata in pyproject.toml.
 
     Supports the PyPA standard format (PEP 621):
@@ -167,7 +167,7 @@ def infer_target_version(
     return None
 
 
-def parse_req_python_version(requires_python: str) -> Optional[List[TargetVersion]]:
+def parse_req_python_version(requires_python: str) -> Optional[list[TargetVersion]]:
     """Parse a version string (i.e. ``"3.7"``) to a list of TargetVersion.
 
     If parsing fails, will raise a packaging.version.InvalidVersion error.
@@ -182,7 +182,7 @@ def parse_req_python_version(requires_python: str) -> Optional[List[TargetVersio
     return None
 
 
-def parse_req_python_specifier(requires_python: str) -> Optional[List[TargetVersion]]:
+def parse_req_python_specifier(requires_python: str) -> Optional[list[TargetVersion]]:
     """Parse a specifier string (i.e. ``">=3.7,<3.10"``) to a list of TargetVersion.
 
     If parsing fails, will raise a packaging.specifiers.InvalidSpecifier error.
@@ -193,7 +193,7 @@ def parse_req_python_specifier(requires_python: str) -> Optional[List[TargetVers
         return None
 
     target_version_map = {f"3.{v.value}": v for v in TargetVersion}
-    compatible_versions: List[str] = list(specifier_set.filter(target_version_map))
+    compatible_versions: list[str] = list(specifier_set.filter(target_version_map))
     if compatible_versions:
         return [target_version_map[v] for v in compatible_versions]
     return None
@@ -248,7 +248,7 @@ def find_user_pyproject_toml() -> Path:
 def get_gitignore(root: Path) -> PathSpec:
     """Return a PathSpec matching gitignore content if present."""
     gitignore = root / ".gitignore"
-    lines: List[str] = []
+    lines: list[str] = []
     if gitignore.is_file():
         with gitignore.open(encoding="utf-8") as gf:
             lines = gf.readlines()
@@ -269,8 +269,6 @@ def resolves_outside_root_or_cannot_stat(
     root directory. Also returns True if we failed to resolve the path.
     """
     try:
-        if sys.version_info < (3, 8, 6):
-            path = path.absolute()  # https://bugs.python.org/issue33660
         resolved_path = _cached_resolve(path)
     except OSError as e:
         if report:
@@ -301,7 +299,7 @@ def best_effort_relative_path(path: Path, root: Path) -> Path:
 def _path_is_ignored(
     root_relative_path: str,
     root: Path,
-    gitignore_dict: Dict[Path, PathSpec],
+    gitignore_dict: dict[Path, PathSpec],
 ) -> bool:
     path = root / root_relative_path
     # Note that this logic is sensitive to the ordering of gitignore_dict. Callers must
@@ -309,6 +307,8 @@ def _path_is_ignored(
     for gitignore_path, pattern in gitignore_dict.items():
         try:
             relative_path = path.relative_to(gitignore_path).as_posix()
+            if path.is_dir():
+                relative_path = relative_path + "/"
         except ValueError:
             break
         if pattern.match_file(relative_path):
diff --git a/src/black/handle_ipynb_magics.py b/src/black/handle_ipynb_magics.py
index 5b2847cb0c4..792d22595aa 100644
--- a/src/black/handle_ipynb_magics.py
+++ b/src/black/handle_ipynb_magics.py
@@ -3,17 +3,19 @@
 import ast
 import collections
 import dataclasses
+import re
 import secrets
 import sys
 from functools import lru_cache
 from importlib.util import find_spec
-from typing import Dict, List, Optional, Tuple
+from typing import Optional
 
 if sys.version_info >= (3, 10):
     from typing import TypeGuard
 else:
     from typing_extensions import TypeGuard
 
+from black.mode import Mode
 from black.output import out
 from black.report import NothingChanged
 
@@ -64,7 +66,35 @@ def jupyter_dependencies_are_installed(*, warn: bool) -> bool:
     return installed
 
 
-def remove_trailing_semicolon(src: str) -> Tuple[str, bool]:
+def validate_cell(src: str, mode: Mode) -> None:
+    """Check that cell does not already contain TransformerManager transformations,
+    or non-Python cell magics, which might cause tokenizer_rt to break because of
+    indentations.
+
+    If a cell contains ``!ls``, then it'll be transformed to
+    ``get_ipython().system('ls')``. However, if the cell originally contained
+    ``get_ipython().system('ls')``, then it would get transformed in the same way:
+
+        >>> TransformerManager().transform_cell("get_ipython().system('ls')")
+        "get_ipython().system('ls')\n"
+        >>> TransformerManager().transform_cell("!ls")
+        "get_ipython().system('ls')\n"
+
+    Due to the impossibility of safely roundtripping in such situations, cells
+    containing transformed magics will be ignored.
+    """
+    if any(transformed_magic in src for transformed_magic in TRANSFORMED_MAGICS):
+        raise NothingChanged
+
+    line = _get_code_start(src)
+    if line.startswith("%%") and (
+        line.split(maxsplit=1)[0][2:]
+        not in PYTHON_CELL_MAGICS | mode.python_cell_magics
+    ):
+        raise NothingChanged
+
+
+def remove_trailing_semicolon(src: str) -> tuple[str, bool]:
     """Remove trailing semicolon from Jupyter notebook cell.
 
     For example,
@@ -120,7 +150,7 @@ def put_trailing_semicolon_back(src: str, has_trailing_semicolon: bool) -> str:
     return str(tokens_to_src(tokens))
 
 
-def mask_cell(src: str) -> Tuple[str, List[Replacement]]:
+def mask_cell(src: str) -> tuple[str, list[Replacement]]:
     """Mask IPython magics so content becomes parseable Python code.
 
     For example,
@@ -135,7 +165,7 @@ def mask_cell(src: str) -> Tuple[str, List[Replacement]]:
 
     The replacements are returned, along with the transformed code.
     """
-    replacements: List[Replacement] = []
+    replacements: list[Replacement] = []
     try:
         ast.parse(src)
     except SyntaxError:
@@ -186,7 +216,7 @@ def get_token(src: str, magic: str) -> str:
     return f'"{token}"'
 
 
-def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]:
+def replace_cell_magics(src: str) -> tuple[str, list[Replacement]]:
     """Replace cell magic with token.
 
     Note that 'src' will already have been processed by IPython's
@@ -203,7 +233,7 @@ def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]:
 
     The replacement, along with the transformed code, is returned.
     """
-    replacements: List[Replacement] = []
+    replacements: list[Replacement] = []
 
     tree = ast.parse(src)
 
@@ -217,7 +247,7 @@ def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]:
     return f"{mask}\n{cell_magic_finder.cell_magic.body}", replacements
 
 
-def replace_magics(src: str) -> Tuple[str, List[Replacement]]:
+def replace_magics(src: str) -> tuple[str, list[Replacement]]:
     """Replace magics within body of cell.
 
     Note that 'src' will already have been processed by IPython's
@@ -258,7 +288,7 @@ def replace_magics(src: str) -> Tuple[str, List[Replacement]]:
 
     return "\n".join(new_srcs), replacements
 
 
-def unmask_cell(src: str, replacements: List[Replacement]) -> str:
+def unmask_cell(src: str, replacements: list[Replacement]) -> str:
     """Remove replacements from cell.
 
     For example
@@ -276,6 +306,21 @@ def unmask_cell(src: str, replacements: List[Replacement]) -> str:
     return src
 
 
+def _get_code_start(src: str) -> str:
+    """Provides the first line where the code starts.
+
+    Iterates over lines of code until it finds the first line that doesn't
+    contain only empty spaces and comments. It removes any empty spaces at the
+    start of the line and returns it. If such line doesn't exist, it returns an
+    empty string.
+    """
+    for match in re.finditer(".+", src):
+        line = match.group(0).lstrip()
+        if line and not line.startswith("#"):
+            return line
+    return ""
+
+
 def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]:
     """Check if attribute is IPython magic.
 
@@ -291,11 +336,11 @@ def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]:
     )
 
 
-def _get_str_args(args: List[ast.expr]) -> List[str]:
+def _get_str_args(args: list[ast.expr]) -> list[str]:
     str_args = []
     for arg in args:
-        assert isinstance(arg, ast.Str)
-        str_args.append(arg.s)
+        assert isinstance(arg, ast.Constant) and isinstance(arg.value, str)
+        str_args.append(arg.value)
     return str_args
 
 
@@ -375,7 +420,7 @@ class MagicFinder(ast.NodeVisitor):
     """
 
     def __init__(self) -> None:
-        self.magics: Dict[int, List[OffsetAndMagic]] = collections.defaultdict(list)
+        self.magics: dict[int, list[OffsetAndMagic]] = collections.defaultdict(list)
 
     def visit_Assign(self, node: ast.Assign) -> None:
         """Look for system assign magics.
diff --git a/src/black/linegen.py b/src/black/linegen.py
index 8e2d2f1c654..2bbad008b38 100644
--- a/src/black/linegen.py
+++ b/src/black/linegen.py
@@ -7,7 +7,7 @@
 from dataclasses import replace
 from enum import Enum, auto
 from functools import partial, wraps
-from typing import Collection, Iterator, List, Optional, Set, Union, cast
+from typing import Collection, Iterator, Optional, Union, cast
 
 from black.brackets import (
     COMMA_PRIORITY,
@@ -197,7 +197,7 @@ def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
         yield from self.line(-1)
 
     def visit_stmt(
-        self, node: Node, keywords: Set[str], parens: Set[str]
+        self, node: Node, keywords: set[str], parens: set[str]
     ) -> Iterator[Line]:
         """Visit a statement.
 
@@ -523,6 +523,15 @@ def visit_fstring(self, node: Node) -> Iterator[Line]:
         # currently we don't want to format and split f-strings at all.
         string_leaf = fstring_to_string(node)
         node.replace(string_leaf)
+        if "\\" in string_leaf.value and any(
+            "\\" in str(child)
+            for child in node.children
+            if child.type == syms.fstring_replacement_field
+        ):
+            # string normalization doesn't account for nested quotes,
+            # causing breakages. skip normalization when nested quotes exist
+            yield from self.visit_default(string_leaf)
+            return
         yield from self.visit_STRING(string_leaf)
 
         # TODO: Uncomment Implementation to format f-string children
@@ -563,7 +572,7 @@ def __post_init__(self) -> None:
         self.current_line = Line(mode=self.mode)
 
         v = self.visit_stmt
-        Ø: Set[str] = set()
+        Ø: set[str] = set()
         self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
         self.visit_if_stmt = partial(
             v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
@@ -630,7 +639,7 @@ def transform_line(
     string_split = StringSplitter(ll, sn)
     string_paren_wrap = StringParenWrapper(ll, sn)
 
-    transformers: List[Transformer]
+    transformers: list[Transformer]
     if (
         not line.contains_uncollapsable_type_comments()
         and not line.should_split_rhs
@@ -730,7 +739,7 @@ def should_split_funcdef_with_rhs(line: Line, mode: Mode) -> bool:
     """If a funcdef has a magic trailing comma in the return type, then we should first
     split the line with rhs to respect the comma.
     """
-    return_type_leaves: List[Leaf] = []
+    return_type_leaves: list[Leaf] = []
     in_return_type = False
 
     for leaf in line.leaves:
@@ -772,9 +781,9 @@ def left_hand_split(
     Prefer RHS otherwise. This is why this function is not symmetrical with
     :func:`right_hand_split` which also handles optional parentheses.
     """
-    tail_leaves: List[Leaf] = []
-    body_leaves: List[Leaf] = []
-    head_leaves: List[Leaf] = []
+    tail_leaves: list[Leaf] = []
+    body_leaves: list[Leaf] = []
+    head_leaves: list[Leaf] = []
     current_leaves = head_leaves
     matching_bracket: Optional[Leaf] = None
     for leaf in line.leaves:
@@ -840,9 +849,9 @@ def _first_right_hand_split(
     _maybe_split_omitting_optional_parens to get an opinion whether to prefer
     splitting on the right side of an assignment statement.
""" - tail_leaves: List[Leaf] = [] - body_leaves: List[Leaf] = [] - head_leaves: List[Leaf] = [] + tail_leaves: list[Leaf] = [] + body_leaves: list[Leaf] = [] + head_leaves: list[Leaf] = [] current_leaves = tail_leaves opening_bracket: Optional[Leaf] = None closing_bracket: Optional[Leaf] = None @@ -873,8 +882,8 @@ def _first_right_hand_split( and tail_leaves[0].opening_bracket is head_leaves[-1] ): inner_body_leaves = list(body_leaves) - hugged_opening_leaves: List[Leaf] = [] - hugged_closing_leaves: List[Leaf] = [] + hugged_opening_leaves: list[Leaf] = [] + hugged_closing_leaves: list[Leaf] = [] is_unpacking = body_leaves[0].type in [token.STAR, token.DOUBLESTAR] unpacking_offset: int = 1 if is_unpacking else 0 while ( @@ -1083,8 +1092,49 @@ def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None ) +def _ensure_trailing_comma( + leaves: list[Leaf], original: Line, opening_bracket: Leaf +) -> bool: + if not leaves: + return False + # Ensure a trailing comma for imports + if original.is_import: + return True + # ...and standalone function arguments + if not original.is_def: + return False + if opening_bracket.value != "(": + return False + # Don't add commas if we already have any commas + if any( + leaf.type == token.COMMA + and ( + Preview.typed_params_trailing_comma not in original.mode + or not is_part_of_annotation(leaf) + ) + for leaf in leaves + ): + return False + + # Find a leaf with a parent (comments don't have parents) + leaf_with_parent = next((leaf for leaf in leaves if leaf.parent), None) + if leaf_with_parent is None: + return True + # Don't add commas inside parenthesized return annotations + if get_annotation_type(leaf_with_parent) == "return": + return False + # Don't add commas inside PEP 604 unions + if ( + leaf_with_parent.parent + and leaf_with_parent.parent.next_sibling + and leaf_with_parent.parent.next_sibling.type == token.VBAR + ): + return False + return True + + def bracket_split_build_line( - leaves: List[Leaf], + leaves: list[Leaf], original: Line, opening_bracket: Leaf, *, @@ -1103,42 +1153,17 @@ def bracket_split_build_line( if component is _BracketSplitComponent.body: result.inside_brackets = True result.depth += 1 - if leaves: - no_commas = ( - # Ensure a trailing comma for imports and standalone function arguments - original.is_def - # Don't add one after any comments or within type annotations - and opening_bracket.value == "(" - # Don't add one if there's already one there - and not any( - leaf.type == token.COMMA - and ( - Preview.typed_params_trailing_comma not in original.mode - or not is_part_of_annotation(leaf) - ) - for leaf in leaves - ) - # Don't add one inside parenthesized return annotations - and get_annotation_type(leaves[0]) != "return" - # Don't add one inside PEP 604 unions - and not ( - leaves[0].parent - and leaves[0].parent.next_sibling - and leaves[0].parent.next_sibling.type == token.VBAR - ) - ) - - if original.is_import or no_commas: - for i in range(len(leaves) - 1, -1, -1): - if leaves[i].type == STANDALONE_COMMENT: - continue + if _ensure_trailing_comma(leaves, original, opening_bracket): + for i in range(len(leaves) - 1, -1, -1): + if leaves[i].type == STANDALONE_COMMENT: + continue - if leaves[i].type != token.COMMA: - new_comma = Leaf(token.COMMA, ",") - leaves.insert(i + 1, new_comma) - break + if leaves[i].type != token.COMMA: + new_comma = Leaf(token.COMMA, ",") + leaves.insert(i + 1, new_comma) + break - leaves_to_track: Set[LeafID] = set() + leaves_to_track: set[LeafID] = set() if component is 
_BracketSplitComponent.head: leaves_to_track = get_leaves_inside_matching_brackets(leaves) # Populate the line @@ -1330,7 +1355,7 @@ def append_to_line(leaf: Leaf) -> Iterator[Line]: def normalize_invisible_parens( # noqa: C901 - node: Node, parens_after: Set[str], *, mode: Mode, features: Collection[Feature] + node: Node, parens_after: set[str], *, mode: Mode, features: Collection[Feature] ) -> None: """Make existing optional parentheses invisible or create new ones. @@ -1680,7 +1705,7 @@ def should_split_line(line: Line, opening_bracket: Leaf) -> bool: ) -def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]: +def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[set[LeafID]]: """Generate sets of closing bracket IDs that should be omitted in a RHS. Brackets can be omitted if the entire trailer up to and including @@ -1691,14 +1716,14 @@ def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[Leaf the one that needs to explode are omitted. """ - omit: Set[LeafID] = set() + omit: set[LeafID] = set() if not line.magic_trailing_comma: yield omit length = 4 * line.depth opening_bracket: Optional[Leaf] = None closing_bracket: Optional[Leaf] = None - inner_brackets: Set[LeafID] = set() + inner_brackets: set[LeafID] = set() for index, leaf, leaf_length in line.enumerate_with_length(is_reversed=True): length += leaf_length if length > line_length: @@ -1763,10 +1788,10 @@ def run_transformer( features: Collection[Feature], *, line_str: str = "", -) -> List[Line]: +) -> list[Line]: if not line_str: line_str = line_to_string(line) - result: List[Line] = [] + result: list[Line] = [] for transformed_line in transform(line, features, mode): if str(transformed_line).strip("\n") == line_str: raise CannotTransform("Line transformer returned an unchanged result") diff --git a/src/black/lines.py b/src/black/lines.py index b9b3add9e2a..a8c6ef66f68 100644 --- a/src/black/lines.py +++ b/src/black/lines.py @@ -1,18 +1,7 @@ import itertools import math from dataclasses import dataclass, field -from typing import ( - Callable, - Dict, - Iterator, - List, - Optional, - Sequence, - Tuple, - TypeVar, - Union, - cast, -) +from typing import Callable, Iterator, Optional, Sequence, TypeVar, Union, cast from black.brackets import COMMA_PRIORITY, DOT_PRIORITY, BracketTracker from black.mode import Mode, Preview @@ -52,9 +41,9 @@ class Line: mode: Mode = field(repr=False) depth: int = 0 - leaves: List[Leaf] = field(default_factory=list) + leaves: list[Leaf] = field(default_factory=list) # keys ordered like `leaves` - comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict) + comments: dict[LeafID, list[Leaf]] = field(default_factory=dict) bracket_tracker: BracketTracker = field(default_factory=BracketTracker) inside_brackets: bool = False should_split_rhs: bool = False @@ -426,7 +415,7 @@ def append_comment(self, comment: Leaf) -> bool: self.comments.setdefault(id(last_leaf), []).append(comment) return True - def comments_after(self, leaf: Leaf) -> List[Leaf]: + def comments_after(self, leaf: Leaf) -> list[Leaf]: """Generate comments that should appear directly after `leaf`.""" return self.comments.get(id(leaf), []) @@ -459,13 +448,13 @@ def is_complex_subscript(self, leaf: Leaf) -> bool: def enumerate_with_length( self, is_reversed: bool = False - ) -> Iterator[Tuple[Index, Leaf, int]]: + ) -> Iterator[tuple[Index, Leaf, int]]: """Return an enumeration of leaves with their length. Stops prematurely on multiline strings and standalone comments. 
""" op = cast( - Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]], + Callable[[Sequence[Leaf]], Iterator[tuple[Index, Leaf]]], enumerate_reversed if is_reversed else enumerate, ) for index, leaf in op(self.leaves): @@ -531,11 +520,11 @@ class LinesBlock: previous_block: Optional["LinesBlock"] original_line: Line before: int = 0 - content_lines: List[str] = field(default_factory=list) + content_lines: list[str] = field(default_factory=list) after: int = 0 form_feed: bool = False - def all_lines(self) -> List[str]: + def all_lines(self) -> list[str]: empty_line = str(Line(mode=self.mode)) prefix = make_simple_prefix(self.before, self.form_feed, empty_line) return [prefix] + self.content_lines + [empty_line * self.after] @@ -554,7 +543,7 @@ class EmptyLineTracker: mode: Mode previous_line: Optional[Line] = None previous_block: Optional[LinesBlock] = None - previous_defs: List[Line] = field(default_factory=list) + previous_defs: list[Line] = field(default_factory=list) semantic_leading_comment: Optional[LinesBlock] = None def maybe_empty_lines(self, current_line: Line) -> LinesBlock: @@ -607,7 +596,7 @@ def maybe_empty_lines(self, current_line: Line) -> LinesBlock: self.previous_block = block return block - def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: # noqa: C901 + def _maybe_empty_lines(self, current_line: Line) -> tuple[int, int]: # noqa: C901 max_allowed = 1 if current_line.depth == 0: max_allowed = 1 if self.mode.is_pyi else 2 @@ -693,7 +682,7 @@ def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: # noqa: C9 def _maybe_empty_lines_for_class_or_def( # noqa: C901 self, current_line: Line, before: int, user_had_newline: bool - ) -> Tuple[int, int]: + ) -> tuple[int, int]: assert self.previous_line is not None if self.previous_line.is_decorator: @@ -772,7 +761,7 @@ def _maybe_empty_lines_for_class_or_def( # noqa: C901 return newlines, 0 -def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]: +def enumerate_reversed(sequence: Sequence[T]) -> Iterator[tuple[Index, T]]: """Like `reversed(enumerate(sequence))` if that were possible.""" index = len(sequence) - 1 for element in reversed(sequence): @@ -781,7 +770,7 @@ def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]: def append_leaves( - new_line: Line, old_line: Line, leaves: List[Leaf], preformatted: bool = False + new_line: Line, old_line: Line, leaves: list[Leaf], preformatted: bool = False ) -> None: """ Append leaves (taken from @old_line) to @new_line, making sure to fix the @@ -838,10 +827,10 @@ def is_line_short_enough( # noqa: C901 # Depth (which is based on the existing bracket_depth concept) # is needed to determine nesting level of the MLS. # Includes special case for trailing commas. 
- commas: List[int] = [] # tracks number of commas per depth level + commas: list[int] = [] # tracks number of commas per depth level multiline_string: Optional[Leaf] = None # store the leaves that contain parts of the MLS - multiline_string_contexts: List[LN] = [] + multiline_string_contexts: list[LN] = [] max_level_to_update: Union[int, float] = math.inf # track the depth of the MLS for i, leaf in enumerate(line.leaves): @@ -865,7 +854,7 @@ def is_line_short_enough( # noqa: C901 if leaf.bracket_depth <= max_level_to_update and leaf.type == token.COMMA: # Inside brackets, ignore trailing comma # directly after MLS/MLS-containing expression - ignore_ctxs: List[Optional[LN]] = [None] + ignore_ctxs: list[Optional[LN]] = [None] ignore_ctxs += multiline_string_contexts if (line.inside_brackets or leaf.bracket_depth > 0) and ( i != len(line.leaves) - 1 or leaf.prev_sibling not in ignore_ctxs diff --git a/src/black/mode.py b/src/black/mode.py index 6af64db7c31..109c7997f23 100644 --- a/src/black/mode.py +++ b/src/black/mode.py @@ -8,7 +8,7 @@ from enum import Enum, auto from hashlib import sha256 from operator import attrgetter -from typing import Dict, Final, Set +from typing import Final from black.const import DEFAULT_LINE_LENGTH @@ -26,6 +26,10 @@ class TargetVersion(Enum): PY312 = 12 PY313 = 13 + def pretty(self) -> str: + assert self.name[:2] == "PY" + return f"Python {self.name[2]}.{self.name[3:]}" + class Feature(Enum): F_STRINGS = 2 @@ -60,7 +64,7 @@ class Feature(Enum): } -VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = { +VERSION_TO_FEATURES: dict[TargetVersion, set[Feature]] = { TargetVersion.PY33: {Feature.ASYNC_IDENTIFIERS}, TargetVersion.PY34: {Feature.ASYNC_IDENTIFIERS}, TargetVersion.PY35: {Feature.TRAILING_COMMA_IN_CALL, Feature.ASYNC_IDENTIFIERS}, @@ -185,7 +189,7 @@ class Feature(Enum): } -def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool: +def supports_feature(target_versions: set[TargetVersion], feature: Feature) -> bool: return all(feature in VERSION_TO_FEATURES[version] for version in target_versions) @@ -209,9 +213,10 @@ class Preview(Enum): # NOTE: remove_lone_list_item_parens requires # hug_parens_with_braces_and_square_brackets to remove parens in some cases remove_lone_list_item_parens = auto() + pep646_typed_star_arg_type_var_tuple = auto() -UNSTABLE_FEATURES: Set[Preview] = { +UNSTABLE_FEATURES: set[Preview] = { # Many issues, see summary in https://github.com/psf/black/issues/4042 Preview.string_processing, # See issues #3452 and #4158 @@ -234,17 +239,17 @@ class Deprecated(UserWarning): @dataclass class Mode: - target_versions: Set[TargetVersion] = field(default_factory=set) + target_versions: set[TargetVersion] = field(default_factory=set) line_length: int = DEFAULT_LINE_LENGTH string_normalization: bool = True is_pyi: bool = False is_ipynb: bool = False skip_source_first_line: bool = False magic_trailing_comma: bool = True - python_cell_magics: Set[str] = field(default_factory=set) + python_cell_magics: set[str] = field(default_factory=set) preview: bool = False unstable: bool = False - enabled_features: Set[Preview] = field(default_factory=set) + enabled_features: set[Preview] = field(default_factory=set) def __contains__(self, feature: Preview) -> bool: """ @@ -290,6 +295,7 @@ def get_cache_key(self) -> str: str(int(self.skip_source_first_line)), str(int(self.magic_trailing_comma)), str(int(self.preview)), + str(int(self.unstable)), features_and_magics, ] return ".".join(parts) diff --git a/src/black/nodes.py 
b/src/black/nodes.py index 9579b715ad2..470dc248488 100644 --- a/src/black/nodes.py +++ b/src/black/nodes.py @@ -3,18 +3,7 @@ """ import sys -from typing import ( - Final, - Generic, - Iterator, - List, - Literal, - Optional, - Set, - Tuple, - TypeVar, - Union, -) +from typing import Final, Generic, Iterator, Literal, Optional, TypeVar, Union if sys.version_info >= (3, 10): from typing import TypeGuard @@ -254,9 +243,15 @@ def whitespace(leaf: Leaf, *, complex_subscript: bool, mode: Mode) -> str: # no elif ( prevp.type == token.STAR and parent_type(prevp) == syms.star_expr - and parent_type(prevp.parent) == syms.subscriptlist + and ( + parent_type(prevp.parent) == syms.subscriptlist + or ( + Preview.pep646_typed_star_arg_type_var_tuple in mode + and parent_type(prevp.parent) == syms.tname_star + ) + ) ): - # No space between typevar tuples. + # No space between typevar tuples or unpacking them. return NO elif prevp.type in VARARGS_SPECIALS: @@ -456,7 +451,7 @@ def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]: return None -def prev_siblings_are(node: Optional[LN], tokens: List[Optional[NodeType]]) -> bool: +def prev_siblings_are(node: Optional[LN], tokens: list[Optional[NodeType]]) -> bool: """Return if the `node` and its previous siblings match types against the provided list of tokens; the provided `node`has its type matched against the last element in the list. `None` can be used as the first element to declare that the start of the @@ -628,8 +623,8 @@ def is_tuple_containing_walrus(node: LN) -> bool: def is_one_sequence_between( opening: Leaf, closing: Leaf, - leaves: List[Leaf], - brackets: Tuple[int, int] = (token.LPAR, token.RPAR), + leaves: list[Leaf], + brackets: tuple[int, int] = (token.LPAR, token.RPAR), ) -> bool: """Return True if content between `opening` and `closing` is a one-sequence.""" if (opening.type, closing.type) != brackets: @@ -739,7 +734,7 @@ def is_yield(node: LN) -> bool: return False -def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool: +def is_vararg(leaf: Leaf, within: set[NodeType]) -> bool: """Return True if `leaf` is a star or double star in a vararg or kwarg. If `within` includes VARARGS_PARENTS, this applies to function signatures. @@ -1006,6 +1001,7 @@ def get_annotation_type(leaf: Leaf) -> Literal["return", "param", None]: def is_part_of_annotation(leaf: Leaf) -> bool: """Returns whether this leaf is part of a type annotation.""" + assert leaf.parent is not None return get_annotation_type(leaf) is not None diff --git a/src/black/output.py b/src/black/output.py index 7c7dd0fe14e..0dbd74e5e22 100644 --- a/src/black/output.py +++ b/src/black/output.py @@ -6,7 +6,7 @@ import json import re import tempfile -from typing import Any, List, Optional +from typing import Any, Optional from click import echo, style from mypy_extensions import mypyc_attr @@ -59,7 +59,7 @@ def ipynb_diff(a: str, b: str, a_name: str, b_name: str) -> str: _line_pattern = re.compile(r"(.*?(?:\r\n|\n|\r|$))") -def _splitlines_no_ff(source: str) -> List[str]: +def _splitlines_no_ff(source: str) -> list[str]: """Split a string into lines ignoring form feed and other chars. This mimics how the Python parser splits source code. 
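As an aside, the new TargetVersion.pretty() helper added in the src/black/mode.py hunk above is what turns an enum member into a human-readable version string. A minimal sketch of its behaviour, assuming this patch is applied (hypothetical usage, not code from the patch):

    from black.mode import TargetVersion

    # pretty() simply slices the "PY<major><minor>" member name:
    # "PY39" -> "Python " + "3" + "." + "9"
    assert TargetVersion.PY39.pretty() == "Python 3.9"
    # "PY313" -> "Python " + "3" + "." + "13"
    assert TargetVersion.PY313.pretty() == "Python 3.13"
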
diff --git a/src/black/parsing.py b/src/black/parsing.py index e8664b5ee23..e139963183a 100644 --- a/src/black/parsing.py +++ b/src/black/parsing.py @@ -5,7 +5,7 @@ import ast import sys import warnings -from typing import Iterable, Iterator, List, Set, Tuple +from typing import Collection, Iterator from black.mode import VERSION_TO_FEATURES, Feature, TargetVersion, supports_feature from black.nodes import syms @@ -21,7 +21,7 @@ class InvalidInput(ValueError): """Raised when input source code fails all parse attempts.""" -def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]: +def get_grammars(target_versions: set[TargetVersion]) -> list[Grammar]: if not target_versions: # No target_version specified, so try all grammars. return [ @@ -52,12 +52,20 @@ def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]: return grammars -def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node: +def lib2to3_parse( + src_txt: str, target_versions: Collection[TargetVersion] = () +) -> Node: """Given a string with source, return the lib2to3 Node.""" if not src_txt.endswith("\n"): src_txt += "\n" grammars = get_grammars(set(target_versions)) + if target_versions: + max_tv = max(target_versions, key=lambda tv: tv.value) + tv_str = f" for target version {max_tv.pretty()}" + else: + tv_str = "" + errors = {} for grammar in grammars: drv = driver.Driver(grammar) @@ -73,14 +81,14 @@ def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) - except IndexError: faulty_line = "" errors[grammar.version] = InvalidInput( - f"Cannot parse: {lineno}:{column}: {faulty_line}" + f"Cannot parse{tv_str}: {lineno}:{column}: {faulty_line}" ) except TokenError as te: # In edge cases these are raised; and typically don't have a "faulty_line". lineno, column = te.args[1] errors[grammar.version] = InvalidInput( - f"Cannot parse: {lineno}:{column}: {te.args[0]}" + f"Cannot parse{tv_str}: {lineno}:{column}: {te.args[0]}" ) else: @@ -115,7 +123,7 @@ class ASTSafetyError(Exception): def _parse_single_version( - src: str, version: Tuple[int, int], *, type_comments: bool + src: str, version: tuple[int, int], *, type_comments: bool ) -> ast.AST: filename = "" with warnings.catch_warnings(): @@ -151,7 +159,7 @@ def parse_ast(src: str) -> ast.AST: def _normalize(lineend: str, value: str) -> str: # To normalize, we strip any leading and trailing space from # each line... 
- stripped: List[str] = [i.strip() for i in value.splitlines()] + stripped: list[str] = [i.strip() for i in value.splitlines()] normalized = lineend.join(stripped) # ...and remove any blank lines at the beginning and end of # the whole string @@ -164,14 +172,14 @@ def stringify_ast(node: ast.AST) -> Iterator[str]: def _stringify_ast_with_new_parent( - node: ast.AST, parent_stack: List[ast.AST], new_parent: ast.AST + node: ast.AST, parent_stack: list[ast.AST], new_parent: ast.AST ) -> Iterator[str]: parent_stack.append(new_parent) yield from _stringify_ast(node, parent_stack) parent_stack.pop() -def _stringify_ast(node: ast.AST, parent_stack: List[ast.AST]) -> Iterator[str]: +def _stringify_ast(node: ast.AST, parent_stack: list[ast.AST]) -> Iterator[str]: if ( isinstance(node, ast.Constant) and isinstance(node.value, str) diff --git a/src/black/ranges.py b/src/black/ranges.py index 1ecaf7b0aed..f8b09a67a01 100644 --- a/src/black/ranges.py +++ b/src/black/ranges.py @@ -2,7 +2,7 @@ import difflib from dataclasses import dataclass -from typing import Collection, Iterator, List, Sequence, Set, Tuple, Union +from typing import Collection, Iterator, Sequence, Union from black.nodes import ( LN, @@ -18,8 +18,8 @@ from blib2to3.pgen2.token import ASYNC, NEWLINE -def parse_line_ranges(line_ranges: Sequence[str]) -> List[Tuple[int, int]]: - lines: List[Tuple[int, int]] = [] +def parse_line_ranges(line_ranges: Sequence[str]) -> list[tuple[int, int]]: + lines: list[tuple[int, int]] = [] for lines_str in line_ranges: parts = lines_str.split("-") if len(parts) != 2: @@ -40,14 +40,14 @@ def parse_line_ranges(line_ranges: Sequence[str]) -> List[Tuple[int, int]]: return lines -def is_valid_line_range(lines: Tuple[int, int]) -> bool: +def is_valid_line_range(lines: tuple[int, int]) -> bool: """Returns whether the line range is valid.""" return not lines or lines[0] <= lines[1] def sanitized_lines( - lines: Collection[Tuple[int, int]], src_contents: str -) -> Collection[Tuple[int, int]]: + lines: Collection[tuple[int, int]], src_contents: str +) -> Collection[tuple[int, int]]: """Returns the valid line ranges for the given source. This removes ranges that are entirely outside the valid lines. @@ -74,10 +74,10 @@ def sanitized_lines( def adjusted_lines( - lines: Collection[Tuple[int, int]], + lines: Collection[tuple[int, int]], original_source: str, modified_source: str, -) -> List[Tuple[int, int]]: +) -> list[tuple[int, int]]: """Returns the adjusted line ranges based on edits from the original code. This computes the new line ranges by diffing original_source and @@ -153,7 +153,7 @@ def adjusted_lines( return new_lines -def convert_unchanged_lines(src_node: Node, lines: Collection[Tuple[int, int]]) -> None: +def convert_unchanged_lines(src_node: Node, lines: Collection[tuple[int, int]]) -> None: """Converts unchanged lines to STANDALONE_COMMENT. The idea is similar to how `# fmt: on/off` is implemented. It also converts the @@ -177,7 +177,7 @@ def convert_unchanged_lines(src_node: Node, lines: Collection[Tuple[int, int]]) more formatting to pass (1). However, it's hard to get it correct when incorrect indentations are used. So we defer this to future optimizations. """ - lines_set: Set[int] = set() + lines_set: set[int] = set() for start, end in lines: lines_set.update(range(start, end + 1)) visitor = _TopLevelStatementsVisitor(lines_set) @@ -205,7 +205,7 @@ class _TopLevelStatementsVisitor(Visitor[None]): classes/functions/statements. 
""" - def __init__(self, lines_set: Set[int]): + def __init__(self, lines_set: set[int]): self._lines_set = lines_set def visit_simple_stmt(self, node: Node) -> Iterator[None]: @@ -249,7 +249,7 @@ def visit_suite(self, node: Node) -> Iterator[None]: _convert_node_to_standalone_comment(semantic_parent) -def _convert_unchanged_line_by_line(node: Node, lines_set: Set[int]) -> None: +def _convert_unchanged_line_by_line(node: Node, lines_set: set[int]) -> None: """Converts unchanged to STANDALONE_COMMENT line by line.""" for leaf in node.leaves(): if leaf.type != NEWLINE: @@ -261,7 +261,7 @@ def _convert_unchanged_line_by_line(node: Node, lines_set: Set[int]) -> None: # match_stmt: "match" subject_expr ':' NEWLINE INDENT case_block+ DEDENT # Here we need to check `subject_expr`. The `case_block+` will be # checked by their own NEWLINEs. - nodes_to_ignore: List[LN] = [] + nodes_to_ignore: list[LN] = [] prev_sibling = leaf.prev_sibling while prev_sibling: nodes_to_ignore.insert(0, prev_sibling) @@ -382,7 +382,7 @@ def _leaf_line_end(leaf: Leaf) -> int: return leaf.lineno + str(leaf).count("\n") -def _get_line_range(node_or_nodes: Union[LN, List[LN]]) -> Set[int]: +def _get_line_range(node_or_nodes: Union[LN, list[LN]]) -> set[int]: """Returns the line range of this node or list of nodes.""" if isinstance(node_or_nodes, list): nodes = node_or_nodes @@ -463,7 +463,7 @@ def _calculate_lines_mappings( modified_source.splitlines(keepends=True), ) matching_blocks = matcher.get_matching_blocks() - lines_mappings: List[_LinesMapping] = [] + lines_mappings: list[_LinesMapping] = [] # matching_blocks is a sequence of "same block of code ranges", see # https://docs.python.org/3/library/difflib.html#difflib.SequenceMatcher.get_matching_blocks # Each block corresponds to a _LinesMapping with is_changed_block=False, diff --git a/src/black/resources/black.schema.json b/src/black/resources/black.schema.json index 0ff0f1019dd..521468da2a5 100644 --- a/src/black/resources/black.schema.json +++ b/src/black/resources/black.schema.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "$id": "https://github.com/psf/black/blob/main/black/resources/black.schema.json", + "$id": "https://github.com/psf/black/blob/main/src/black/resources/black.schema.json", "$comment": "tool.black table in pyproject.toml", "type": "object", "additionalProperties": false, @@ -91,7 +91,8 @@ "docstring_check_for_newline", "remove_redundant_guard_parens", "parens_for_long_if_clauses_in_case_block", - "remove_lone_list_item_parens" + "remove_lone_list_item_parens", + "pep646_typed_star_arg_type_var_tuple" ] }, "description": "Enable specific features included in the `--unstable` style. Requires `--preview`. No compatibility guarantees are provided on the behavior or existence of any unstable features." 
diff --git a/src/black/schema.py b/src/black/schema.py index 78e9564cdbc..f534dbb028d 100644 --- a/src/black/schema.py +++ b/src/black/schema.py @@ -1,6 +1,5 @@ import importlib.resources import json -import sys from typing import Any @@ -11,10 +10,6 @@ def get_schema(tool_name: str = "black") -> Any: pkg = "black.resources" fname = "black.schema.json" - if sys.version_info < (3, 9): - with importlib.resources.open_text(pkg, fname, encoding="utf-8") as f: - return json.load(f) - - schema = importlib.resources.files(pkg).joinpath(fname) # type: ignore[unreachable] + schema = importlib.resources.files(pkg).joinpath(fname) with schema.open(encoding="utf-8") as f: return json.load(f) diff --git a/src/black/strings.py b/src/black/strings.py index 69a8c8002e9..0973907bd3c 100644 --- a/src/black/strings.py +++ b/src/black/strings.py @@ -5,7 +5,7 @@ import re import sys from functools import lru_cache -from typing import Final, List, Match, Pattern, Tuple +from typing import Final, Match, Pattern from black._width_table import WIDTH_TABLE from blib2to3.pytree import Leaf @@ -43,7 +43,7 @@ def has_triple_quotes(string: str) -> bool: return raw_string[:3] in {'"""', "'''"} -def lines_with_leading_tabs_expanded(s: str) -> List[str]: +def lines_with_leading_tabs_expanded(s: str) -> list[str]: """ Splits string into lines and expands only leading tabs (following the normal Python rules) @@ -242,9 +242,9 @@ def normalize_string_quotes(s: str) -> str: def normalize_fstring_quotes( quote: str, - middles: List[Leaf], + middles: list[Leaf], is_raw_fstring: bool, -) -> Tuple[List[Leaf], str]: +) -> tuple[list[Leaf], str]: """Prefer double quotes but only if it doesn't cause more escaping. Adds or removes backslashes as appropriate. diff --git a/src/black/trans.py b/src/black/trans.py index 29a978c6b71..b44e3cdf0e7 100644 --- a/src/black/trans.py +++ b/src/black/trans.py @@ -11,16 +11,12 @@ Callable, ClassVar, Collection, - Dict, Final, Iterable, Iterator, - List, Literal, Optional, Sequence, - Set, - Tuple, TypeVar, Union, ) @@ -68,7 +64,7 @@ class CannotTransform(Exception): ParserState = int StringID = int TResult = Result[T, CannotTransform] # (T)ransform Result -TMatchResult = TResult[List[Index]] +TMatchResult = TResult[list[Index]] SPLIT_SAFE_CHARS = frozenset(["\u3001", "\u3002", "\uff0c"]) # East Asian stops @@ -179,7 +175,7 @@ def original_is_simple_lookup_func( return True -def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: Set[int]) -> bool: +def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: set[int]) -> bool: """ Handling the determination of is_simple_lookup for the lines prior to the doublestar token. This is required because of the need to isolate the chained expression @@ -202,7 +198,7 @@ def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: Set[int]) def handle_is_simple_lookup_forward( - line: Line, index: int, disallowed: Set[int] + line: Line, index: int, disallowed: set[int] ) -> bool: """ Handling decision is_simple_lookup for the lines behind the doublestar token. @@ -227,7 +223,7 @@ def handle_is_simple_lookup_forward( return True -def is_expression_chained(chained_leaves: List[Leaf]) -> bool: +def is_expression_chained(chained_leaves: list[Leaf]) -> bool: """ Function to determine if the variable is a chained call. 
(e.g., foo.lookup, foo().lookup, (foo.lookup())) will be recognized as chained call) @@ -298,7 +294,7 @@ def do_match(self, line: Line) -> TMatchResult: @abstractmethod def do_transform( - self, line: Line, string_indices: List[int] + self, line: Line, string_indices: list[int] ) -> Iterator[TResult[Line]]: """ Yields: @@ -388,8 +384,8 @@ class CustomSplitMapMixin: the resultant substrings go over the configured max line length. """ - _Key: ClassVar = Tuple[StringID, str] - _CUSTOM_SPLIT_MAP: ClassVar[Dict[_Key, Tuple[CustomSplit, ...]]] = defaultdict( + _Key: ClassVar = tuple[StringID, str] + _CUSTOM_SPLIT_MAP: ClassVar[dict[_Key, tuple[CustomSplit, ...]]] = defaultdict( tuple ) @@ -413,7 +409,7 @@ def add_custom_splits( key = self._get_key(string) self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits) - def pop_custom_splits(self, string: str) -> List[CustomSplit]: + def pop_custom_splits(self, string: str) -> list[CustomSplit]: """Custom Split Map Getter Method Returns: @@ -488,7 +484,7 @@ def do_match(self, line: Line) -> TMatchResult: break i += 1 - if not is_part_of_annotation(leaf) and not contains_comment: + if not contains_comment and not is_part_of_annotation(leaf): string_indices.append(idx) # Advance to the next non-STRING leaf. @@ -512,7 +508,7 @@ def do_match(self, line: Line) -> TMatchResult: return TErr("This line has no strings that need merging.") def do_transform( - self, line: Line, string_indices: List[int] + self, line: Line, string_indices: list[int] ) -> Iterator[TResult[Line]]: new_line = line @@ -543,7 +539,7 @@ def do_transform( @staticmethod def _remove_backslash_line_continuation_chars( - line: Line, string_indices: List[int] + line: Line, string_indices: list[int] ) -> TResult[Line]: """ Merge strings that were split across multiple lines using @@ -584,7 +580,7 @@ def _remove_backslash_line_continuation_chars( return Ok(new_line) def _merge_string_group( - self, line: Line, string_indices: List[int] + self, line: Line, string_indices: list[int] ) -> TResult[Line]: """ Merges string groups (i.e. set of adjacent strings). @@ -603,7 +599,7 @@ def _merge_string_group( is_valid_index = is_valid_index_factory(LL) # A dict of {string_idx: tuple[num_of_strings, string_leaf]}. - merged_string_idx_dict: Dict[int, Tuple[int, Leaf]] = {} + merged_string_idx_dict: dict[int, tuple[int, Leaf]] = {} for string_idx in string_indices: vresult = self._validate_msg(line, string_idx) if isinstance(vresult, Err): @@ -639,8 +635,8 @@ def _merge_string_group( return Ok(new_line) def _merge_one_string_group( - self, LL: List[Leaf], string_idx: int, is_valid_index: Callable[[int], bool] - ) -> Tuple[int, Leaf]: + self, LL: list[Leaf], string_idx: int, is_valid_index: Callable[[int], bool] + ) -> tuple[int, Leaf]: """ Merges one string group where the first string in the group is `LL[string_idx]`. 
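A note on the mechanical change running through all of these hunks: with the Python 3.8 fallback gone (see the src/black/schema.py hunk above), PEP 585 builtin generics are subscriptable at runtime everywhere Black runs, not only inside annotations. That is what lets runtime values such as the CustomSplitMapMixin._Key ClassVar switch from typing.Tuple to plain tuple. A self-contained sketch of the pattern, with illustrative names rather than code copied from the patch:

    from collections import defaultdict

    # On Python >= 3.9, builtin generics work as ordinary runtime values,
    # e.g. a type alias stored on a class and reused as an annotation.
    _Key = tuple[int, str]
    _CUSTOM_SPLIT_MAP: dict[_Key, tuple[str, ...]] = defaultdict(tuple)

    key: _Key = (id("some string"), "some string")
    _CUSTOM_SPLIT_MAP[key] += ("first piece",)
    assert _CUSTOM_SPLIT_MAP[key] == ("first piece",)
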
@@ -1004,11 +1000,11 @@ def do_match(self, line: Line) -> TMatchResult: return TErr("This line has no strings wrapped in parens.") def do_transform( - self, line: Line, string_indices: List[int] + self, line: Line, string_indices: list[int] ) -> Iterator[TResult[Line]]: LL = line.leaves - string_and_rpar_indices: List[int] = [] + string_and_rpar_indices: list[int] = [] for string_idx in string_indices: string_parser = StringParser() rpar_idx = string_parser.parse(LL, string_idx) @@ -1031,7 +1027,7 @@ def do_transform( ) def _transform_to_new_line( - self, line: Line, string_and_rpar_indices: List[int] + self, line: Line, string_and_rpar_indices: list[int] ) -> Line: LL = line.leaves @@ -1284,7 +1280,7 @@ def _get_max_string_length(self, line: Line, string_idx: int) -> int: return max_string_length @staticmethod - def _prefer_paren_wrap_match(LL: List[Leaf]) -> Optional[int]: + def _prefer_paren_wrap_match(LL: list[Leaf]) -> Optional[int]: """ Returns: string_idx such that @LL[string_idx] is equal to our target (i.e. @@ -1329,14 +1325,14 @@ def _prefer_paren_wrap_match(LL: List[Leaf]) -> Optional[int]: return None -def iter_fexpr_spans(s: str) -> Iterator[Tuple[int, int]]: +def iter_fexpr_spans(s: str) -> Iterator[tuple[int, int]]: """ Yields spans corresponding to expressions in a given f-string. Spans are half-open ranges (left inclusive, right exclusive). Assumes the input string is a valid f-string, but will not crash if the input string is invalid. """ - stack: List[int] = [] # our curly paren stack + stack: list[int] = [] # our curly paren stack i = 0 while i < len(s): if s[i] == "{": @@ -1499,7 +1495,7 @@ def do_splitter_match(self, line: Line) -> TMatchResult: return Ok([string_idx]) def do_transform( - self, line: Line, string_indices: List[int] + self, line: Line, string_indices: list[int] ) -> Iterator[TResult[Line]]: LL = line.leaves assert len(string_indices) == 1, ( @@ -1601,7 +1597,7 @@ def more_splits_should_be_made() -> bool: else: return str_width(rest_value) > max_last_string_column() - string_line_results: List[Ok[Line]] = [] + string_line_results: list[Ok[Line]] = [] while more_splits_should_be_made(): if use_custom_breakpoints: # Custom User Split (manual) @@ -1730,7 +1726,7 @@ def more_splits_should_be_made() -> bool: last_line.comments = line.comments.copy() yield Ok(last_line) - def _iter_nameescape_slices(self, string: str) -> Iterator[Tuple[Index, Index]]: + def _iter_nameescape_slices(self, string: str) -> Iterator[tuple[Index, Index]]: """ Yields: All ranges of @string which, if @string were to be split there, @@ -1761,7 +1757,7 @@ def _iter_nameescape_slices(self, string: str) -> Iterator[Tuple[Index, Index]]: raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!") yield begin, end - def _iter_fexpr_slices(self, string: str) -> Iterator[Tuple[Index, Index]]: + def _iter_fexpr_slices(self, string: str) -> Iterator[tuple[Index, Index]]: """ Yields: All ranges of @string which, if @string were to be split there, @@ -1772,8 +1768,8 @@ def _iter_fexpr_slices(self, string: str) -> Iterator[Tuple[Index, Index]]: return yield from iter_fexpr_spans(string) - def _get_illegal_split_indices(self, string: str) -> Set[Index]: - illegal_indices: Set[Index] = set() + def _get_illegal_split_indices(self, string: str) -> set[Index]: + illegal_indices: set[Index] = set() iterators = [ self._iter_fexpr_slices(string), self._iter_nameescape_slices(string), @@ -1899,7 +1895,7 @@ def _normalize_f_string(self, string: str, prefix: str) -> str: else: return string - def 
_get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> List[Leaf]: + def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> list[Leaf]: LL = list(leaves) string_op_leaves = [] @@ -2008,7 +2004,7 @@ def do_splitter_match(self, line: Line) -> TMatchResult: return TErr("This line does not contain any non-atomic strings.") @staticmethod - def _return_match(LL: List[Leaf]) -> Optional[int]: + def _return_match(LL: list[Leaf]) -> Optional[int]: """ Returns: string_idx such that @LL[string_idx] is equal to our target (i.e. @@ -2033,7 +2029,7 @@ def _return_match(LL: List[Leaf]) -> Optional[int]: return None @staticmethod - def _else_match(LL: List[Leaf]) -> Optional[int]: + def _else_match(LL: list[Leaf]) -> Optional[int]: """ Returns: string_idx such that @LL[string_idx] is equal to our target (i.e. @@ -2060,7 +2056,7 @@ def _else_match(LL: List[Leaf]) -> Optional[int]: return None @staticmethod - def _assert_match(LL: List[Leaf]) -> Optional[int]: + def _assert_match(LL: list[Leaf]) -> Optional[int]: """ Returns: string_idx such that @LL[string_idx] is equal to our target (i.e. @@ -2095,7 +2091,7 @@ def _assert_match(LL: List[Leaf]) -> Optional[int]: return None @staticmethod - def _assign_match(LL: List[Leaf]) -> Optional[int]: + def _assign_match(LL: list[Leaf]) -> Optional[int]: """ Returns: string_idx such that @LL[string_idx] is equal to our target (i.e. @@ -2142,7 +2138,7 @@ def _assign_match(LL: List[Leaf]) -> Optional[int]: return None @staticmethod - def _dict_or_lambda_match(LL: List[Leaf]) -> Optional[int]: + def _dict_or_lambda_match(LL: list[Leaf]) -> Optional[int]: """ Returns: string_idx such that @LL[string_idx] is equal to our target (i.e. @@ -2181,7 +2177,7 @@ def _dict_or_lambda_match(LL: List[Leaf]) -> Optional[int]: return None def do_transform( - self, line: Line, string_indices: List[int] + self, line: Line, string_indices: list[int] ) -> Iterator[TResult[Line]]: LL = line.leaves assert len(string_indices) == 1, ( @@ -2347,7 +2343,7 @@ class StringParser: DONE: Final = 8 # Lookup Table for Next State - _goto: Final[Dict[Tuple[ParserState, NodeType], ParserState]] = { + _goto: Final[dict[tuple[ParserState, NodeType], ParserState]] = { # A string trailer may start with '.' OR '%'. 
(START, token.DOT): DOT, (START, token.PERCENT): PERCENT, @@ -2376,7 +2372,7 @@ def __init__(self) -> None: self._state = self.START self._unmatched_lpars = 0 - def parse(self, leaves: List[Leaf], string_idx: int) -> int: + def parse(self, leaves: list[Leaf], string_idx: int) -> int: """ Pre-conditions: * @leaves[@string_idx].type == token.STRING diff --git a/src/blackd/__init__.py b/src/blackd/__init__.py index 7041671f596..d51b9cec284 100644 --- a/src/blackd/__init__.py +++ b/src/blackd/__init__.py @@ -4,7 +4,6 @@ from datetime import datetime, timezone from functools import partial from multiprocessing import freeze_support -from typing import Set, Tuple try: from aiohttp import web @@ -191,7 +190,7 @@ def parse_mode(headers: MultiMapping[str]) -> black.Mode: preview = bool(headers.get(PREVIEW, False)) unstable = bool(headers.get(UNSTABLE, False)) - enable_features: Set[black.Preview] = set() + enable_features: set[black.Preview] = set() enable_unstable_features = headers.get(ENABLE_UNSTABLE_FEATURE, "").split(",") for piece in enable_unstable_features: piece = piece.strip() @@ -216,7 +215,7 @@ def parse_mode(headers: MultiMapping[str]) -> black.Mode: ) -def parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]: +def parse_python_variant_header(value: str) -> tuple[bool, set[black.TargetVersion]]: if value == "pyi": return True, set() else: diff --git a/src/blackd/middlewares.py b/src/blackd/middlewares.py index 370e0ae222e..75ec9267bd0 100644 --- a/src/blackd/middlewares.py +++ b/src/blackd/middlewares.py @@ -1,21 +1,11 @@ -from typing import TYPE_CHECKING, Any, Awaitable, Callable, Iterable, TypeVar +from typing import Awaitable, Callable, Iterable +from aiohttp.typedefs import Middleware +from aiohttp.web_middlewares import middleware from aiohttp.web_request import Request from aiohttp.web_response import StreamResponse -if TYPE_CHECKING: - F = TypeVar("F", bound=Callable[..., Any]) - middleware: Callable[[F], F] -else: - try: - from aiohttp.web_middlewares import middleware - except ImportError: - # @middleware is deprecated and its behaviour is the default since aiohttp 4.0 - # so if it doesn't exist anymore, define a no-op for forward compatibility. 
- middleware = lambda x: x # noqa: E731 - Handler = Callable[[Request], Awaitable[StreamResponse]] -Middleware = Callable[[Request, Handler], Awaitable[StreamResponse]] def cors(allow_headers: Iterable[str]) -> Middleware: diff --git a/src/blib2to3/pgen2/driver.py b/src/blib2to3/pgen2/driver.py index 71a147cbcd8..df52ac93ca6 100644 --- a/src/blib2to3/pgen2/driver.py +++ b/src/blib2to3/pgen2/driver.py @@ -24,7 +24,7 @@ from contextlib import contextmanager from dataclasses import dataclass, field from logging import Logger -from typing import IO, Any, Iterable, Iterator, List, Optional, Tuple, Union, cast +from typing import IO, Any, Iterable, Iterator, Optional, Union, cast from blib2to3.pgen2.grammar import Grammar from blib2to3.pgen2.tokenize import GoodTokenInfo @@ -40,7 +40,7 @@ class ReleaseRange: start: int end: Optional[int] = None - tokens: List[Any] = field(default_factory=list) + tokens: list[Any] = field(default_factory=list) def lock(self) -> None: total_eaten = len(self.tokens) @@ -51,7 +51,7 @@ class TokenProxy: def __init__(self, generator: Any) -> None: self._tokens = generator self._counter = 0 - self._release_ranges: List[ReleaseRange] = [] + self._release_ranges: list[ReleaseRange] = [] @contextmanager def release(self) -> Iterator["TokenProxy"]: @@ -121,7 +121,7 @@ def parse_tokens(self, tokens: Iterable[GoodTokenInfo], debug: bool = False) -> lineno = 1 column = 0 - indent_columns: List[int] = [] + indent_columns: list[int] = [] type = value = start = end = line_text = None prefix = "" @@ -202,8 +202,8 @@ def parse_string(self, text: str, debug: bool = False) -> NL: ) return self.parse_tokens(tokens, debug) - def _partially_consume_prefix(self, prefix: str, column: int) -> Tuple[str, str]: - lines: List[str] = [] + def _partially_consume_prefix(self, prefix: str, column: int) -> tuple[str, str]: + lines: list[str] = [] current_line = "" current_column = 0 wait_for_nl = False diff --git a/src/blib2to3/pgen2/grammar.py b/src/blib2to3/pgen2/grammar.py index 804db1ad985..9cf24037754 100644 --- a/src/blib2to3/pgen2/grammar.py +++ b/src/blib2to3/pgen2/grammar.py @@ -16,15 +16,15 @@ import os import pickle import tempfile -from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union +from typing import Any, Optional, TypeVar, Union # Local imports from . 
import token _P = TypeVar("_P", bound="Grammar") -Label = Tuple[int, Optional[str]] -DFA = List[List[Tuple[int, int]]] -DFAS = Tuple[DFA, Dict[int, int]] +Label = tuple[int, Optional[str]] +DFA = list[list[tuple[int, int]]] +DFAS = tuple[DFA, dict[int, int]] Path = Union[str, "os.PathLike[str]"] @@ -83,16 +83,16 @@ class Grammar: """ def __init__(self) -> None: - self.symbol2number: Dict[str, int] = {} - self.number2symbol: Dict[int, str] = {} - self.states: List[DFA] = [] - self.dfas: Dict[int, DFAS] = {} - self.labels: List[Label] = [(0, "EMPTY")] - self.keywords: Dict[str, int] = {} - self.soft_keywords: Dict[str, int] = {} - self.tokens: Dict[int, int] = {} - self.symbol2label: Dict[str, int] = {} - self.version: Tuple[int, int] = (0, 0) + self.symbol2number: dict[str, int] = {} + self.number2symbol: dict[int, str] = {} + self.states: list[DFA] = [] + self.dfas: dict[int, DFAS] = {} + self.labels: list[Label] = [(0, "EMPTY")] + self.keywords: dict[str, int] = {} + self.soft_keywords: dict[str, int] = {} + self.tokens: dict[int, int] = {} + self.symbol2label: dict[str, int] = {} + self.version: tuple[int, int] = (0, 0) self.start = 256 # Python 3.7+ parses async as a keyword, not an identifier self.async_keywords = False @@ -114,7 +114,7 @@ def dump(self, filename: Path) -> None: pickle.dump(d, f, pickle.HIGHEST_PROTOCOL) os.replace(f.name, filename) - def _update(self, attrs: Dict[str, Any]) -> None: + def _update(self, attrs: dict[str, Any]) -> None: for k, v in attrs.items(): setattr(self, k, v) diff --git a/src/blib2to3/pgen2/literals.py b/src/blib2to3/pgen2/literals.py index 53c0b8ac2bb..3b219c42f93 100644 --- a/src/blib2to3/pgen2/literals.py +++ b/src/blib2to3/pgen2/literals.py @@ -4,9 +4,9 @@ """Safely evaluate Python string literals without using eval().""" import re -from typing import Dict, Match +from typing import Match -simple_escapes: Dict[str, str] = { +simple_escapes: dict[str, str] = { "a": "\a", "b": "\b", "f": "\f", diff --git a/src/blib2to3/pgen2/parse.py b/src/blib2to3/pgen2/parse.py index ad1d795b51a..2ac89c97094 100644 --- a/src/blib2to3/pgen2/parse.py +++ b/src/blib2to3/pgen2/parse.py @@ -10,19 +10,7 @@ """ from contextlib import contextmanager -from typing import ( - TYPE_CHECKING, - Any, - Callable, - Dict, - Iterator, - List, - Optional, - Set, - Tuple, - Union, - cast, -) +from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, Union, cast from blib2to3.pgen2.grammar import Grammar from blib2to3.pytree import NL, Context, Leaf, Node, RawNode, convert @@ -34,10 +22,10 @@ from blib2to3.pgen2.driver import TokenProxy -Results = Dict[str, NL] +Results = dict[str, NL] Convert = Callable[[Grammar, RawNode], Union[Node, Leaf]] -DFA = List[List[Tuple[int, int]]] -DFAS = Tuple[DFA, Dict[int, int]] +DFA = list[list[tuple[int, int]]] +DFAS = tuple[DFA, dict[int, int]] def lam_sub(grammar: Grammar, node: RawNode) -> NL: @@ -50,24 +38,24 @@ def lam_sub(grammar: Grammar, node: RawNode) -> NL: def stack_copy( - stack: List[Tuple[DFAS, int, RawNode]], -) -> List[Tuple[DFAS, int, RawNode]]: + stack: list[tuple[DFAS, int, RawNode]], +) -> list[tuple[DFAS, int, RawNode]]: """Nodeless stack copy.""" return [(dfa, label, DUMMY_NODE) for dfa, label, _ in stack] class Recorder: - def __init__(self, parser: "Parser", ilabels: List[int], context: Context) -> None: + def __init__(self, parser: "Parser", ilabels: list[int], context: Context) -> None: self.parser = parser self._ilabels = ilabels self.context = context # not really matter - self._dead_ilabels: Set[int] = 
set() + self._dead_ilabels: set[int] = set() self._start_point = self.parser.stack self._points = {ilabel: stack_copy(self._start_point) for ilabel in ilabels} @property - def ilabels(self) -> Set[int]: + def ilabels(self) -> set[int]: return self._dead_ilabels.symmetric_difference(self._ilabels) @contextmanager @@ -233,9 +221,9 @@ def setup(self, proxy: "TokenProxy", start: Optional[int] = None) -> None: # where children is a list of nodes or None, and context may be None. newnode: RawNode = (start, None, None, []) stackentry = (self.grammar.dfas[start], 0, newnode) - self.stack: List[Tuple[DFAS, int, RawNode]] = [stackentry] + self.stack: list[tuple[DFAS, int, RawNode]] = [stackentry] self.rootnode: Optional[NL] = None - self.used_names: Set[str] = set() + self.used_names: set[str] = set() self.proxy = proxy self.last_token = None @@ -333,7 +321,7 @@ def _addtoken(self, ilabel: int, type: int, value: str, context: Context) -> boo # No success finding a transition raise ParseError("bad input", type, value, context) - def classify(self, type: int, value: str, context: Context) -> List[int]: + def classify(self, type: int, value: str, context: Context) -> list[int]: """Turn a token into a label. (Internal) Depending on whether the value is a soft-keyword or not, diff --git a/src/blib2to3/pgen2/pgen.py b/src/blib2to3/pgen2/pgen.py index 3ece9bb41ed..2be7b877909 100644 --- a/src/blib2to3/pgen2/pgen.py +++ b/src/blib2to3/pgen2/pgen.py @@ -2,18 +2,7 @@ # Licensed to PSF under a Contributor Agreement. import os -from typing import ( - IO, - Any, - Dict, - Iterator, - List, - NoReturn, - Optional, - Sequence, - Tuple, - Union, -) +from typing import IO, Any, Iterator, NoReturn, Optional, Sequence, Union from blib2to3.pgen2 import grammar, token, tokenize from blib2to3.pgen2.tokenize import GoodTokenInfo @@ -29,7 +18,7 @@ class ParserGenerator: filename: Path stream: IO[str] generator: Iterator[GoodTokenInfo] - first: Dict[str, Optional[Dict[str, int]]] + first: dict[str, Optional[dict[str, int]]] def __init__(self, filename: Path, stream: Optional[IO[str]] = None) -> None: close_stream = None @@ -71,7 +60,7 @@ def make_grammar(self) -> PgenGrammar: c.start = c.symbol2number[self.startsymbol] return c - def make_first(self, c: PgenGrammar, name: str) -> Dict[int, int]: + def make_first(self, c: PgenGrammar, name: str) -> dict[int, int]: rawfirst = self.first[name] assert rawfirst is not None first = {} @@ -144,7 +133,7 @@ def calcfirst(self, name: str) -> None: dfa = self.dfas[name] self.first[name] = None # dummy to detect left recursion state = dfa[0] - totalset: Dict[str, int] = {} + totalset: dict[str, int] = {} overlapcheck = {} for label in state.arcs: if label in self.dfas: @@ -161,7 +150,7 @@ def calcfirst(self, name: str) -> None: else: totalset[label] = 1 overlapcheck[label] = {label: 1} - inverse: Dict[str, str] = {} + inverse: dict[str, str] = {} for label, itsfirst in overlapcheck.items(): for symbol in itsfirst: if symbol in inverse: @@ -172,7 +161,7 @@ def calcfirst(self, name: str) -> None: inverse[symbol] = label self.first[name] = totalset - def parse(self) -> Tuple[Dict[str, List["DFAState"]], str]: + def parse(self) -> tuple[dict[str, list["DFAState"]], str]: dfas = {} startsymbol: Optional[str] = None # MSTART: (NEWLINE | RULE)* ENDMARKER @@ -197,7 +186,7 @@ def parse(self) -> Tuple[Dict[str, List["DFAState"]], str]: assert startsymbol is not None return dfas, startsymbol - def make_dfa(self, start: "NFAState", finish: "NFAState") -> List["DFAState"]: + def make_dfa(self, 
start: "NFAState", finish: "NFAState") -> list["DFAState"]: # To turn an NFA into a DFA, we define the states of the DFA # to correspond to *sets* of states of the NFA. Then do some # state reduction. Let's represent sets as dicts with 1 for @@ -205,12 +194,12 @@ def make_dfa(self, start: "NFAState", finish: "NFAState") -> List["DFAState"]: assert isinstance(start, NFAState) assert isinstance(finish, NFAState) - def closure(state: NFAState) -> Dict[NFAState, int]: - base: Dict[NFAState, int] = {} + def closure(state: NFAState) -> dict[NFAState, int]: + base: dict[NFAState, int] = {} addclosure(state, base) return base - def addclosure(state: NFAState, base: Dict[NFAState, int]) -> None: + def addclosure(state: NFAState, base: dict[NFAState, int]) -> None: assert isinstance(state, NFAState) if state in base: return @@ -221,7 +210,7 @@ def addclosure(state: NFAState, base: Dict[NFAState, int]) -> None: states = [DFAState(closure(start), finish)] for state in states: # NB states grows while we're iterating - arcs: Dict[str, Dict[NFAState, int]] = {} + arcs: dict[str, dict[NFAState, int]] = {} for nfastate in state.nfaset: for label, next in nfastate.arcs: if label is not None: @@ -259,7 +248,7 @@ def dump_dfa(self, name: str, dfa: Sequence["DFAState"]) -> None: for label, next in sorted(state.arcs.items()): print(" %s -> %d" % (label, dfa.index(next))) - def simplify_dfa(self, dfa: List["DFAState"]) -> None: + def simplify_dfa(self, dfa: list["DFAState"]) -> None: # This is not theoretically optimal, but works well enough. # Algorithm: repeatedly look for two states that have the same # set of arcs (same labels pointing to the same nodes) and @@ -280,7 +269,7 @@ def simplify_dfa(self, dfa: List["DFAState"]) -> None: changes = True break - def parse_rhs(self) -> Tuple["NFAState", "NFAState"]: + def parse_rhs(self) -> tuple["NFAState", "NFAState"]: # RHS: ALT ('|' ALT)* a, z = self.parse_alt() if self.value != "|": @@ -297,7 +286,7 @@ def parse_rhs(self) -> Tuple["NFAState", "NFAState"]: z.addarc(zz) return aa, zz - def parse_alt(self) -> Tuple["NFAState", "NFAState"]: + def parse_alt(self) -> tuple["NFAState", "NFAState"]: # ALT: ITEM+ a, b = self.parse_item() while self.value in ("(", "[") or self.type in (token.NAME, token.STRING): @@ -306,7 +295,7 @@ def parse_alt(self) -> Tuple["NFAState", "NFAState"]: b = d return a, b - def parse_item(self) -> Tuple["NFAState", "NFAState"]: + def parse_item(self) -> tuple["NFAState", "NFAState"]: # ITEM: '[' RHS ']' | ATOM ['+' | '*'] if self.value == "[": self.gettoken() @@ -326,7 +315,7 @@ def parse_item(self) -> Tuple["NFAState", "NFAState"]: else: return a, a - def parse_atom(self) -> Tuple["NFAState", "NFAState"]: + def parse_atom(self) -> tuple["NFAState", "NFAState"]: # ATOM: '(' RHS ')' | NAME | STRING if self.value == "(": self.gettoken() @@ -371,7 +360,7 @@ def raise_error(self, msg: str, *args: Any) -> NoReturn: class NFAState: - arcs: List[Tuple[Optional[str], "NFAState"]] + arcs: list[tuple[Optional[str], "NFAState"]] def __init__(self) -> None: self.arcs = [] # list of (label, NFAState) pairs @@ -383,11 +372,11 @@ def addarc(self, next: "NFAState", label: Optional[str] = None) -> None: class DFAState: - nfaset: Dict[NFAState, Any] + nfaset: dict[NFAState, Any] isfinal: bool - arcs: Dict[str, "DFAState"] + arcs: dict[str, "DFAState"] - def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None: + def __init__(self, nfaset: dict[NFAState, Any], final: NFAState) -> None: assert isinstance(nfaset, dict) assert 
isinstance(next(iter(nfaset)), NFAState)
         assert isinstance(final, NFAState)
diff --git a/src/blib2to3/pgen2/token.py b/src/blib2to3/pgen2/token.py
index 3068c3157fc..10c7c63bfe9 100644
--- a/src/blib2to3/pgen2/token.py
+++ b/src/blib2to3/pgen2/token.py
@@ -1,6 +1,6 @@
 """Token constants (from "token.h")."""

-from typing import Dict, Final
+from typing import Final

 # Taken from Python (r53757) and modified to include some tokens
 # originally monkeypatched in by pgen2.tokenize
@@ -74,7 +74,7 @@
 NT_OFFSET: Final = 256
 # --end constants--

-tok_name: Final[Dict[int, str]] = {}
+tok_name: Final[dict[int, str]] = {}
 for _name, _value in list(globals().items()):
     if type(_value) is int:
         tok_name[_value] = _name
diff --git a/src/blib2to3/pgen2/tokenize.py b/src/blib2to3/pgen2/tokenize.py
index 28972a9bd78..f7d0215c4b5 100644
--- a/src/blib2to3/pgen2/tokenize.py
+++ b/src/blib2to3/pgen2/tokenize.py
@@ -29,18 +29,7 @@
 import builtins
 import sys
-from typing import (
-    Callable,
-    Final,
-    Iterable,
-    Iterator,
-    List,
-    Optional,
-    Pattern,
-    Set,
-    Tuple,
-    Union,
-)
+from typing import Callable, Final, Iterable, Iterator, Optional, Pattern, Union

 from blib2to3.pgen2.grammar import Grammar
 from blib2to3.pgen2.token import (
@@ -93,7 +82,7 @@ def maybe(*choices: str) -> str:
     return group(*choices) + "?"


-def _combinations(*l: str) -> Set[str]:
+def _combinations(*l: str) -> set[str]:
     return {x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()}


@@ -136,12 +125,12 @@ def _combinations(*l: str) -> Set[str]:
 )

 # beginning of a single quoted f-string. must not end with `{{` or `\N{`
-SingleLbrace = r"(?:\\N{|\\.|{{|[^'\\{])*(?<!\\N){(?!{)"
@@ ... @@ def _combinations(*l: str) -> Set[str]:
 _string_middle_double = r'(?:[^\n"\\]|\\.)*'

 # FSTRING_MIDDLE and LBRACE, must not end with a `{{` or `\N{`
-_fstring_middle_single = r"(?:\\N{|\\[^{]|{{|[^\n'{\\])*(?<!\\N){(?!{)"
@@ ... @@ def tokenize_loop(readline: Callable[[], str], tokeneater: TokenEater) -> None:
         tokeneater(*token_info)

-GoodTokenInfo = Tuple[int, str, Coord, Coord, str]
-TokenInfo = Union[Tuple[int, str], GoodTokenInfo]
+GoodTokenInfo = tuple[int, str, Coord, Coord, str]
+TokenInfo = Union[tuple[int, str], GoodTokenInfo]


 class Untokenizer:
-    tokens: List[str]
+    tokens: list[str]
     prev_row: int
     prev_col: int

@@ -324,7 +313,7 @@ def untokenize(self, iterable: Iterable[TokenInfo]) -> str:
             self.prev_col = 0
         return "".join(self.tokens)

-    def compat(self, token: Tuple[int, str], iterable: Iterable[TokenInfo]) -> None:
+    def compat(self, token: tuple[int, str], iterable: Iterable[TokenInfo]) -> None:
         startline = False
         indents = []
         toks_append = self.tokens.append
@@ -370,7 +359,7 @@ def _get_normal_name(orig_enc: str) -> str:
     return orig_enc


-def detect_encoding(readline: Callable[[], bytes]) -> Tuple[str, List[bytes]]:
+def detect_encoding(readline: Callable[[], bytes]) -> tuple[str, list[bytes]]:
     """
     The detect_encoding() function is used to detect the encoding that should
     be used to decode a Python source file.
It requires one argument, readline, @@ -471,7 +460,7 @@ def is_fstring_start(token: str) -> bool: return builtins.any(token.startswith(prefix) for prefix in fstring_prefix) -def _split_fstring_start_and_middle(token: str) -> Tuple[str, str]: +def _split_fstring_start_and_middle(token: str) -> tuple[str, str]: for prefix in fstring_prefix: _, prefix, rest = token.partition(prefix) if prefix != "": @@ -525,7 +514,7 @@ class FStringState: """ def __init__(self) -> None: - self.stack: List[int] = [STATE_NOT_FSTRING] + self.stack: list[int] = [STATE_NOT_FSTRING] def is_in_fstring_expression(self) -> bool: return self.stack[-1] not in (STATE_MIDDLE, STATE_NOT_FSTRING) @@ -581,7 +570,7 @@ def generate_tokens( logical line; continuation lines are included. """ lnum = parenlev = continued = 0 - parenlev_stack: List[int] = [] + parenlev_stack: list[int] = [] fstring_state = FStringState() formatspec = "" numchars: Final[str] = "0123456789" @@ -598,9 +587,9 @@ def generate_tokens( async_def_indent = 0 async_def_nl = False - strstart: Tuple[int, int] - endprog_stack: List[Pattern[str]] = [] - formatspec_start: Tuple[int, int] + strstart: tuple[int, int] + endprog_stack: list[Pattern[str]] = [] + formatspec_start: tuple[int, int] while 1: # loop over lines in stream try: @@ -638,7 +627,7 @@ def generate_tokens( else: if is_fstring_start(token): fstring_start, token = _split_fstring_start_and_middle(token) - fstring_start_epos = (lnum, spos[1] + len(fstring_start)) + fstring_start_epos = (spos[0], spos[1] + len(fstring_start)) yield ( FSTRING_START, fstring_start, diff --git a/src/blib2to3/pytree.py b/src/blib2to3/pytree.py index 4c55d7ac77d..d2d135e7d1d 100644 --- a/src/blib2to3/pytree.py +++ b/src/blib2to3/pytree.py @@ -12,18 +12,7 @@ # mypy: allow-untyped-defs, allow-incomplete-defs -from typing import ( - Any, - Dict, - Iterable, - Iterator, - List, - Optional, - Set, - Tuple, - TypeVar, - Union, -) +from typing import Any, Iterable, Iterator, Optional, TypeVar, Union from blib2to3.pgen2.grammar import Grammar @@ -34,7 +23,7 @@ HUGE: int = 0x7FFFFFFF # maximum repeat count, default max -_type_reprs: Dict[int, Union[str, int]] = {} +_type_reprs: dict[int, Union[str, int]] = {} def type_repr(type_num: int) -> Union[str, int]: @@ -57,8 +46,8 @@ def type_repr(type_num: int) -> Union[str, int]: _P = TypeVar("_P", bound="Base") NL = Union["Node", "Leaf"] -Context = Tuple[str, Tuple[int, int]] -RawNode = Tuple[int, Optional[str], Optional[Context], Optional[List[NL]]] +Context = tuple[str, tuple[int, int]] +RawNode = tuple[int, Optional[str], Optional[Context], Optional[list[NL]]] class Base: @@ -74,7 +63,7 @@ class Base: # Default values for instance variables type: int # int: token number (< 256) or symbol number (>= 256) parent: Optional["Node"] = None # Parent node pointer, or None - children: List[NL] # List of subnodes + children: list[NL] # List of subnodes was_changed: bool = False was_checked: bool = False @@ -135,7 +124,7 @@ def pre_order(self) -> Iterator[NL]: """ raise NotImplementedError - def replace(self, new: Union[NL, List[NL]]) -> None: + def replace(self, new: Union[NL, list[NL]]) -> None: """Replace this node with a new one in the parent.""" assert self.parent is not None, str(self) assert new is not None @@ -242,16 +231,16 @@ def get_suffix(self) -> str: class Node(Base): """Concrete implementation for interior nodes.""" - fixers_applied: Optional[List[Any]] - used_names: Optional[Set[str]] + fixers_applied: Optional[list[Any]] + used_names: Optional[set[str]] def __init__( self, type: 
diff --git a/src/blib2to3/pytree.py b/src/blib2to3/pytree.py
index 4c55d7ac77d..d2d135e7d1d 100644
--- a/src/blib2to3/pytree.py
+++ b/src/blib2to3/pytree.py
@@ -12,18 +12,7 @@
 
 # mypy: allow-untyped-defs, allow-incomplete-defs
 
-from typing import (
-    Any,
-    Dict,
-    Iterable,
-    Iterator,
-    List,
-    Optional,
-    Set,
-    Tuple,
-    TypeVar,
-    Union,
-)
+from typing import Any, Iterable, Iterator, Optional, TypeVar, Union
 
 from blib2to3.pgen2.grammar import Grammar
 
@@ -34,7 +23,7 @@
 
 HUGE: int = 0x7FFFFFFF  # maximum repeat count, default max
 
-_type_reprs: Dict[int, Union[str, int]] = {}
+_type_reprs: dict[int, Union[str, int]] = {}
 
 
 def type_repr(type_num: int) -> Union[str, int]:
@@ -57,8 +46,8 @@ def type_repr(type_num: int) -> Union[str, int]:
 _P = TypeVar("_P", bound="Base")
 
 NL = Union["Node", "Leaf"]
-Context = Tuple[str, Tuple[int, int]]
-RawNode = Tuple[int, Optional[str], Optional[Context], Optional[List[NL]]]
+Context = tuple[str, tuple[int, int]]
+RawNode = tuple[int, Optional[str], Optional[Context], Optional[list[NL]]]
 
 
 class Base:
@@ -74,7 +63,7 @@ class Base:
     # Default values for instance variables
     type: int  # int: token number (< 256) or symbol number (>= 256)
     parent: Optional["Node"] = None  # Parent node pointer, or None
-    children: List[NL]  # List of subnodes
+    children: list[NL]  # List of subnodes
     was_changed: bool = False
     was_checked: bool = False
 
@@ -135,7 +124,7 @@ def pre_order(self) -> Iterator[NL]:
         """
         raise NotImplementedError
 
-    def replace(self, new: Union[NL, List[NL]]) -> None:
+    def replace(self, new: Union[NL, list[NL]]) -> None:
         """Replace this node with a new one in the parent."""
         assert self.parent is not None, str(self)
         assert new is not None
@@ -242,16 +231,16 @@ def get_suffix(self) -> str:
 class Node(Base):
     """Concrete implementation for interior nodes."""
 
-    fixers_applied: Optional[List[Any]]
-    used_names: Optional[Set[str]]
+    fixers_applied: Optional[list[Any]]
+    used_names: Optional[set[str]]
 
     def __init__(
         self,
         type: int,
-        children: List[NL],
+        children: list[NL],
         context: Optional[Any] = None,
         prefix: Optional[str] = None,
-        fixers_applied: Optional[List[Any]] = None,
+        fixers_applied: Optional[list[Any]] = None,
     ) -> None:
         """
         Initializer.
@@ -363,12 +352,12 @@ def append_child(self, child: NL) -> None:
         self.invalidate_sibling_maps()
 
     def invalidate_sibling_maps(self) -> None:
-        self.prev_sibling_map: Optional[Dict[int, Optional[NL]]] = None
-        self.next_sibling_map: Optional[Dict[int, Optional[NL]]] = None
+        self.prev_sibling_map: Optional[dict[int, Optional[NL]]] = None
+        self.next_sibling_map: Optional[dict[int, Optional[NL]]] = None
 
     def update_sibling_maps(self) -> None:
-        _prev: Dict[int, Optional[NL]] = {}
-        _next: Dict[int, Optional[NL]] = {}
+        _prev: dict[int, Optional[NL]] = {}
+        _next: dict[int, Optional[NL]] = {}
         self.prev_sibling_map = _prev
         self.next_sibling_map = _next
         previous: Optional[NL] = None
@@ -384,11 +373,11 @@ class Leaf(Base):
 
     # Default values for instance variables
     value: str
-    fixers_applied: List[Any]
+    fixers_applied: list[Any]
     bracket_depth: int  # Changed later in brackets.py
     opening_bracket: Optional["Leaf"] = None
-    used_names: Optional[Set[str]]
+    used_names: Optional[set[str]]
     _prefix = ""  # Whitespace and comments preceding this token in the input
     lineno: int = 0  # Line where this token starts in the input
     column: int = 0  # Column where this token starts in the input
@@ -403,7 +392,7 @@ def __init__(
         value: str,
         context: Optional[Context] = None,
         prefix: Optional[str] = None,
-        fixers_applied: List[Any] = [],
+        fixers_applied: list[Any] = [],
         opening_bracket: Optional["Leaf"] = None,
         fmt_pass_converted_first_leaf: Optional["Leaf"] = None,
     ) -> None:
@@ -421,7 +410,7 @@ def __init__(
         self.value = value
         if prefix is not None:
             self._prefix = prefix
-        self.fixers_applied: Optional[List[Any]] = fixers_applied[:]
+        self.fixers_applied: Optional[list[Any]] = fixers_applied[:]
         self.children = []
         self.opening_bracket = opening_bracket
         self.fmt_pass_converted_first_leaf = fmt_pass_converted_first_leaf
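Worth noting: the hunk above keeps the mutable default `fixers_applied: list[Any] = []` in `Leaf.__init__`; it stays safe only because the constructor immediately copies it with `fixers_applied[:]`. A small sketch of the pitfall that copy avoids (toy classes, not Black's code):

    # Default argument objects are created once, at function definition time,
    # so a bare mutable default is shared by every call that relies on it.
    class Bad:
        def __init__(self, items: list = []) -> None:
            self.items = items  # every instance aliases the one shared list

    class Good:
        def __init__(self, items: list = []) -> None:
            self.items = items[:]  # each instance gets its own copy

    a, b = Bad(), Bad()
    a.items.append(1)
    assert b.items == [1]  # state leaked across instances

    c, d = Good(), Good()
    c.items.append(1)
    assert d.items == []  # the slice copy isolates instances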
@@ -503,7 +492,7 @@ def convert(gr: Grammar, raw_node: RawNode) -> NL:
     return Leaf(type, value or "", context=context)
 
 
-_Results = Dict[str, NL]
+_Results = dict[str, NL]
 
 
 class BasePattern:
@@ -576,7 +565,7 @@ def match(self, node: NL, results: Optional[_Results] = None) -> bool:
                 results[self.name] = node
         return True
 
-    def match_seq(self, nodes: List[NL], results: Optional[_Results] = None) -> bool:
+    def match_seq(self, nodes: list[NL], results: Optional[_Results] = None) -> bool:
         """
         Does this pattern exactly match a sequence of nodes?
 
@@ -586,7 +575,7 @@ def match_seq(self, nodes: List[NL], results: Optional[_Results] = None) -> bool
             return False
         return self.match(nodes[0], results)
 
-    def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]:
+    def generate_matches(self, nodes: list[NL]) -> Iterator[tuple[int, _Results]]:
         """
         Generator yielding all matches for this pattern.
 
@@ -816,7 +805,7 @@ def match_seq(self, nodes, results=None) -> bool:
                 return True
         return False
 
-    def generate_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
+    def generate_matches(self, nodes) -> Iterator[tuple[int, _Results]]:
         """
         Generator yielding matches for a sequence of nodes.
 
@@ -861,7 +850,7 @@ def generate_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
             if hasattr(sys, "getrefcount"):
                 sys.stderr = save_stderr
 
-    def _iterative_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
+    def _iterative_matches(self, nodes) -> Iterator[tuple[int, _Results]]:
         """Helper to iteratively yield the matches."""
         nodelen = len(nodes)
         if 0 >= self.min:
@@ -890,7 +879,7 @@ def _iterative_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:
                         new_results.append((c0 + c1, r))
                 results = new_results
 
-    def _bare_name_matches(self, nodes) -> Tuple[int, _Results]:
+    def _bare_name_matches(self, nodes) -> tuple[int, _Results]:
         """Special optimized matcher for bare_name."""
         count = 0
         r = {}  # type: _Results
@@ -907,7 +896,7 @@ def _bare_name_matches(self, nodes) -> Tuple[int, _Results]:
             r[self.name] = nodes[:count]
         return count, r
 
-    def _recursive_matches(self, nodes, count) -> Iterator[Tuple[int, _Results]]:
+    def _recursive_matches(self, nodes, count) -> Iterator[tuple[int, _Results]]:
         """Helper to recursively yield the matches."""
         assert self.content is not None
         if count >= self.min:
@@ -944,7 +933,7 @@ def match_seq(self, nodes, results=None) -> bool:
             # We only match an empty sequence of nodes in its entirety
             return len(nodes) == 0
 
-    def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]:
+    def generate_matches(self, nodes: list[NL]) -> Iterator[tuple[int, _Results]]:
         if self.content is None:
             # Return a match if there is an empty sequence
             if len(nodes) == 0:
@@ -957,8 +946,8 @@ def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]:
 
 
 def generate_matches(
-    patterns: List[BasePattern], nodes: List[NL]
-) -> Iterator[Tuple[int, _Results]]:
+    patterns: list[BasePattern], nodes: list[NL]
+) -> Iterator[tuple[int, _Results]]:
     """
     Generator yielding matches for a sequence of patterns and nodes.
 
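The sweep through pytree.py above is the PEP 585 migration: with Python 3.8 support dropped, the typing aliases List/Dict/Tuple/Set become the subscriptable builtins (available since 3.9), and only names with no builtin equivalent, such as Optional and Union, still come from typing. A quick illustration (the aliases are from the diff; the toy function is mine):

    # With Python 3.9+ (PEP 585), builtin containers are directly
    # subscriptable in annotations, so typing.List/Dict/Tuple are redundant.
    from typing import Optional, Union  # still needed pre-3.10 for these

    Context = tuple[str, tuple[int, int]]           # was Tuple[str, Tuple[int, int]]
    _type_reprs: dict[int, Union[str, int]] = {}    # was Dict[int, Union[str, int]]

    def row_lengths(rows: list[list[int]]) -> dict[int, int]:  # was List/Dict
        return {i: len(row) for i, row in enumerate(rows)}

    assert row_lengths([[1, 2], [3]]) == {0: 2, 1: 1}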
diff --git a/tests/data/cases/funcdef_return_type_trailing_comma.py b/tests/data/cases/funcdef_return_type_trailing_comma.py
index 9b9b9c673de..14fd763d9d1 100644
--- a/tests/data/cases/funcdef_return_type_trailing_comma.py
+++ b/tests/data/cases/funcdef_return_type_trailing_comma.py
@@ -142,6 +142,7 @@ def SimplePyFn(
     Buffer[UInt8, 2],
     Buffer[UInt8, 2],
 ]: ...
+
 # output
 # normal, short, function definition
 def foo(a, b) -> tuple[int, float]: ...
diff --git a/tests/data/cases/function_trailing_comma.py b/tests/data/cases/function_trailing_comma.py
index 92f46e27516..63cf3999c2e 100644
--- a/tests/data/cases/function_trailing_comma.py
+++ b/tests/data/cases/function_trailing_comma.py
@@ -60,6 +60,64 @@ def func() -> ((also_super_long_type_annotation_that_may_cause_an_AST_related_cr
         argument1, (one, two,), argument4, argument5, argument6
     )
 
+def foo() -> (
+    # comment inside parenthesised return type
+    int
+):
+    ...
+
+def foo() -> (
+    # comment inside parenthesised return type
+    # more
+    int
+    # another
+):
+    ...
+
+def foo() -> (
+    # comment inside parenthesised new union return type
+    int | str | bytes
+):
+    ...
+
+def foo() -> (
+    # comment inside plain tuple
+):
+    pass
+
+def foo(arg: (# comment with non-return annotation
+    int
+    # comment with non-return annotation
+)):
+    pass
+
+def foo(arg: (# comment with non-return annotation
+    int | range | memoryview
+    # comment with non-return annotation
+)):
+    pass
+
+def foo(arg: (# only before
+    int
+)):
+    pass
+
+def foo(arg: (
+    int
+    # only after
+)):
+    pass
+
+variable: ( # annotation
+    because
+    # why not
+)
+
+variable: (
+    because
+    # why not
+)
+
 # output
 
 def f(
@@ -176,3 +234,75 @@ def func() -> (
     argument5,
     argument6,
 )
+
+
+def foo() -> (
+    # comment inside parenthesised return type
+    int
+): ...
+
+
+def foo() -> (
+    # comment inside parenthesised return type
+    # more
+    int
+    # another
+): ...
+
+
+def foo() -> (
+    # comment inside parenthesised new union return type
+    int
+    | str
+    | bytes
+): ...
+
+
+def foo() -> (
+    # comment inside plain tuple
+):
+    pass
+
+
+def foo(
+    arg: (  # comment with non-return annotation
+        int
+        # comment with non-return annotation
+    ),
+):
+    pass
+
+
+def foo(
+    arg: (  # comment with non-return annotation
+        int
+        | range
+        | memoryview
+        # comment with non-return annotation
+    ),
+):
+    pass
+
+
+def foo(arg: int):  # only before
+    pass
+
+
+def foo(
+    arg: (
+        int
+        # only after
+    ),
+):
+    pass
+
+
+variable: (  # annotation
+    because
+    # why not
+)
+
+variable: (
+    because
+    # why not
+)
diff --git a/tests/data/cases/pep_701.py b/tests/data/cases/pep_701.py
index d72d91c6799..9acee951e71 100644
--- a/tests/data/cases/pep_701.py
+++ b/tests/data/cases/pep_701.py
@@ -128,6 +128,14 @@ f"""{'''
 '''}"""
 
+f"{'\''}"
+f"{f'\''}"
+
+f'{1}\{{'
+f'{2} foo \{{[\}}'
+f'\{3}'
+rf"\{"a"}"
+
 # output
 
 x = f"foo"
@@ -258,3 +266,11 @@ f"""{'''
 '''}"""
+
+f"{'\''}"
+f"{f'\''}"
+
+f"{1}\{{"
+f"{2} foo \{{[\}}"
+f"\{3}"
+rf"\{"a"}"
diff --git a/tests/data/cases/preview_pep646_typed_star_arg_type_var_tuple.py b/tests/data/cases/preview_pep646_typed_star_arg_type_var_tuple.py
new file mode 100644
index 00000000000..fb79e9983b1
--- /dev/null
+++ b/tests/data/cases/preview_pep646_typed_star_arg_type_var_tuple.py
@@ -0,0 +1,8 @@
+# flags: --minimum-version=3.11 --preview
+
+
+def fn(*args: *tuple[*A, B]) -> None:
+    pass
+
+
+fn.__annotations__
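For context on the new PEP 646 case: `*tuple[*A, B]` unpacks a variadic tuple type in the annotation of `*args`. A minimal sketch of that syntax (Python 3.11+; the type variables A and B here are illustrative, not from the test fixture):

    # PEP 646 variadic generics: any number of leading arguments typed by
    # the TypeVarTuple A, followed by a final argument typed by B.
    from typing import TypeVar, TypeVarTuple

    A = TypeVarTuple("A")
    B = TypeVar("B")

    def fn(*args: *tuple[*A, B]) -> None:
        pass

    fn(1, "two", 3.0)  # A binds to (int, str), B to float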
diff --git a/tests/data/ignore_directory_gitignore_tests/.gitignore b/tests/data/ignore_directory_gitignore_tests/.gitignore
new file mode 100644
index 00000000000..4573ac5b3ac
--- /dev/null
+++ b/tests/data/ignore_directory_gitignore_tests/.gitignore
@@ -0,0 +1,3 @@
+large_ignored_dir/
+large_ignored_dir_two
+abc.py
diff --git a/tests/data/ignore_directory_gitignore_tests/abc.py b/tests/data/ignore_directory_gitignore_tests/abc.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/data/ignore_directory_gitignore_tests/large_ignored_dir_two/a.py b/tests/data/ignore_directory_gitignore_tests/large_ignored_dir_two/a.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/data/ignore_directory_gitignore_tests/large_ignored_dir_two/inner/b.py b/tests/data/ignore_directory_gitignore_tests/large_ignored_dir_two/inner/b.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/data/ignore_directory_gitignore_tests/large_ignored_dir_two/inner2/c.py b/tests/data/ignore_directory_gitignore_tests/large_ignored_dir_two/inner2/c.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/data/ignore_directory_gitignore_tests/large_ignored_dir_two/inner3/d.py b/tests/data/ignore_directory_gitignore_tests/large_ignored_dir_two/inner3/d.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/data/ignore_directory_gitignore_tests/z.py b/tests/data/ignore_directory_gitignore_tests/z.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/test_black.py b/tests/test_black.py
index 85141ae9124..c448c602713 100644
--- a/tests/test_black.py
+++ b/tests/test_black.py
@@ -12,7 +12,7 @@ import types
 from concurrent.futures import ThreadPoolExecutor
 from contextlib import contextmanager, redirect_stderr
-from dataclasses import replace
+from dataclasses import fields, replace
 from io import BytesIO
 from pathlib import Path, WindowsPath
 from platform import system
@@ -906,6 +906,9 @@ def test_get_features_used(self) -> None:
         self.check_features_used("a[*b]", {Feature.VARIADIC_GENERICS})
         self.check_features_used("a[x, *y(), z] = t", {Feature.VARIADIC_GENERICS})
         self.check_features_used("def fn(*args: *T): pass", {Feature.VARIADIC_GENERICS})
+        self.check_features_used(
+            "def fn(*args: *tuple[*T]): pass", {Feature.VARIADIC_GENERICS}
+        )
 
         self.check_features_used("with a: pass", set())
         self.check_features_used("with a, b: pass", set())
@@ -2154,8 +2157,9 @@ def test_cache_single_file_already_cached(self) -> None:
     @event_loop()
     def test_cache_multiple_files(self) -> None:
         mode = DEFAULT_MODE
-        with cache_dir() as workspace, patch(
-            "concurrent.futures.ProcessPoolExecutor", new=ThreadPoolExecutor
+        with (
+            cache_dir() as workspace,
+            patch("concurrent.futures.ProcessPoolExecutor", new=ThreadPoolExecutor),
         ):
             one = (workspace / "one.py").resolve()
             one.write_text("print('hello')", encoding="utf-8")
@@ -2177,9 +2181,10 @@ def test_no_cache_when_writeback_diff(self, color: bool) -> None:
         with cache_dir() as workspace:
             src = (workspace / "test.py").resolve()
             src.write_text("print('hello')", encoding="utf-8")
-            with patch.object(black.Cache, "read") as read_cache, patch.object(
-                black.Cache, "write"
-            ) as write_cache:
+            with (
+                patch.object(black.Cache, "read") as read_cache,
+                patch.object(black.Cache, "write") as write_cache,
+            ):
                 cmd = [str(src), "--diff"]
                 if color:
                     cmd.append("--color")
@@ -2308,8 +2313,9 @@ def test_write_cache_creates_directory_if_needed(self) -> None:
     @event_loop()
     def test_failed_formatting_does_not_get_cached(self) -> None:
         mode = DEFAULT_MODE
-        with cache_dir() as workspace, patch(
-            "concurrent.futures.ProcessPoolExecutor", new=ThreadPoolExecutor
+        with (
+            cache_dir() as workspace,
+            patch("concurrent.futures.ProcessPoolExecutor", new=ThreadPoolExecutor),
         ):
             failing = (workspace / "failing.py").resolve()
             failing.write_text("not actually python", encoding="utf-8")
@@ -2341,6 +2347,36 @@ def test_read_cache_line_lengths(self) -> None:
             two = black.Cache.read(short_mode)
             assert two.is_changed(path)
 
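The `with` rewrites above rely on parenthesized context managers, legal since Python 3.10, which read much better than splitting inside a call's argument list. A small sketch of the construct (contextlib.nullcontext stands in for the test helpers):

    # Grouping several context managers in one parenthesized with-statement
    # (Python 3.10+), each with its own `as` binding and a trailing comma.
    from contextlib import nullcontext

    with (
        nullcontext("first") as a,
        nullcontext("second") as b,
    ):
        assert (a, b) == ("first", "second")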
+
+    def test_cache_key(self) -> None:
+        # Test that all members of the mode enum affect the cache key.
+        for field in fields(Mode):
+            values: List[Any]
+            if field.name == "target_versions":
+                values = [
+                    {TargetVersion.PY312},
+                    {TargetVersion.PY313},
+                ]
+            elif field.name == "python_cell_magics":
+                values = [{"magic1"}, {"magic2"}]
+            elif field.name == "enabled_features":
+                # If you are looking to remove one of these features, just
+                # replace it with any other feature.
+                values = [
+                    {Preview.docstring_check_for_newline},
+                    {Preview.hex_codes_in_unicode_sequences},
+                ]
+            elif field.type is bool:
+                values = [True, False]
+            elif field.type is int:
+                values = [1, 2]
+            else:
+                raise AssertionError(
+                    f"Unhandled field type: {field.type} for field {field.name}"
+                )
+            modes = [replace(DEFAULT_MODE, **{field.name: value}) for value in values]
+            keys = [mode.get_cache_key() for mode in modes]
+            assert len(set(keys)) == len(modes)
+
 
 def assert_collected_sources(
     src: Sequence[Union[str, Path]],
@@ -2537,6 +2573,12 @@ def test_gitignore_that_ignores_subfolders(self) -> None:
         expected = [target / "b.py"]
         assert_collected_sources([target], expected, root=root)
 
+    def test_gitignore_that_ignores_directory(self) -> None:
+        # A .gitignore in the root that ignores entire directories
+        root = Path(DATA_DIR, "ignore_directory_gitignore_tests")
+        expected = [root / "z.py"]
+        assert_collected_sources([root], expected, root=root)
+
     def test_empty_include(self) -> None:
         path = DATA_DIR / "include_exclude_tests"
         src = [path]
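The `test_cache_key` addition uses a pattern worth naming: iterate over a dataclass's `fields()` and build one `replace()` variant per field, so the test fails loudly when a newly added field is forgotten. A self-contained sketch of the same idea on a toy config (not Black's Mode):

    # Build one variant per dataclass field; each must differ from the
    # default, proving every field participates in equality (or, in the
    # real test, in the cache key).
    from dataclasses import dataclass, fields, replace

    @dataclass(frozen=True)
    class Config:
        line_length: int = 88
        is_pyi: bool = False

    default = Config()
    variants = []
    for field in fields(Config):
        # Pick a value different from the field's default.
        value = (not getattr(default, field.name)) if field.type is bool else 120
        variants.append(replace(default, **{field.name: value}))

    assert all(variant != default for variant in variants)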
diff --git a/tests/test_blackd.py b/tests/test_blackd.py
index 59703036dc0..bef5eaec4f9 100644
--- a/tests/test_blackd.py
+++ b/tests/test_blackd.py
@@ -1,5 +1,5 @@
+import gc
 import re
-from typing import TYPE_CHECKING, Any, Callable, TypeVar
 from unittest.mock import patch
 
 import pytest
@@ -15,23 +15,14 @@
 except ImportError as e:
     raise RuntimeError("Please install Black with the 'd' extra") from e
 
-if TYPE_CHECKING:
-    F = TypeVar("F", bound=Callable[..., Any])
-
-    unittest_run_loop: Callable[[F], F] = lambda x: x
-else:
-    try:
-        from aiohttp.test_utils import unittest_run_loop
-    except ImportError:
-        # unittest_run_loop is unnecessary and a no-op since aiohttp 3.8, and
-        # aiohttp 4 removed it. To maintain compatibility we can make our own
-        # no-op decorator.
-        def unittest_run_loop(func, *args, **kwargs):
-            return func
-
 
 @pytest.mark.blackd
 class BlackDTestCase(AioHTTPTestCase):
+    def tearDown(self) -> None:
+        # Work around https://github.com/python/cpython/issues/124706
+        gc.collect()
+        super().tearDown()
+
     def test_blackd_main(self) -> None:
         with patch("blackd.web.run_app"):
             result = CliRunner().invoke(blackd.main, [])
@@ -42,20 +33,17 @@ def test_blackd_main(self) -> None:
     async def get_application(self) -> web.Application:
         return blackd.make_app()
 
-    @unittest_run_loop
     async def test_blackd_request_needs_formatting(self) -> None:
         response = await self.client.post("/", data=b"print('hello world')")
         self.assertEqual(response.status, 200)
         self.assertEqual(response.charset, "utf8")
         self.assertEqual(await response.read(), b'print("hello world")\n')
 
-    @unittest_run_loop
     async def test_blackd_request_no_change(self) -> None:
         response = await self.client.post("/", data=b'print("hello world")\n')
         self.assertEqual(response.status, 204)
         self.assertEqual(await response.read(), b"")
 
-    @unittest_run_loop
     async def test_blackd_request_syntax_error(self) -> None:
         response = await self.client.post("/", data=b"what even ( is")
         self.assertEqual(response.status, 400)
@@ -65,21 +53,18 @@ async def test_blackd_request_syntax_error(self) -> None:
             msg=f"Expected error to start with 'Cannot parse', got {repr(content)}",
         )
 
-    @unittest_run_loop
     async def test_blackd_unsupported_version(self) -> None:
         response = await self.client.post(
             "/", data=b"what", headers={blackd.PROTOCOL_VERSION_HEADER: "2"}
         )
         self.assertEqual(response.status, 501)
 
-    @unittest_run_loop
     async def test_blackd_supported_version(self) -> None:
         response = await self.client.post(
             "/", data=b"what", headers={blackd.PROTOCOL_VERSION_HEADER: "1"}
         )
         self.assertEqual(response.status, 200)
 
-    @unittest_run_loop
     async def test_blackd_invalid_python_variant(self) -> None:
         async def check(header_value: str, expected_status: int = 400) -> None:
             response = await self.client.post(
@@ -102,7 +87,6 @@ async def check(header_value: str, expected_status: int = 400) -> None:
         await check("pypy3.0")
         await check("jython3.4")
 
-    @unittest_run_loop
     async def test_blackd_pyi(self) -> None:
         source, expected = read_data("cases", "stub.py")
         response = await self.client.post(
@@ -111,7 +95,6 @@ async def test_blackd_pyi(self) -> None:
         self.assertEqual(response.status, 200)
         self.assertEqual(await response.text(), expected)
 
-    @unittest_run_loop
     async def test_blackd_diff(self) -> None:
         diff_header = re.compile(
             r"(In|Out)\t\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d\d\d\d\+\d\d:\d\d"
@@ -129,7 +112,6 @@ async def test_blackd_diff(self) -> None:
         actual = diff_header.sub(DETERMINISTIC_HEADER, actual)
         self.assertEqual(actual, expected)
 
-    @unittest_run_loop
     async def test_blackd_python_variant(self) -> None:
         code = (
             "def f(\n"
@@ -161,14 +143,12 @@ async def check(header_value: str, expected_status: int) -> None:
         await check("py34,py36", 204)
         await check("34", 204)
 
-    @unittest_run_loop
     async def test_blackd_line_length(self) -> None:
         response = await self.client.post(
             "/", data=b'print("hello")\n', headers={blackd.LINE_LENGTH_HEADER: "7"}
         )
         self.assertEqual(response.status, 200)
 
-    @unittest_run_loop
     async def test_blackd_invalid_line_length(self) -> None:
         response = await self.client.post(
             "/",
@@ -177,7 +157,6 @@ async def test_blackd_invalid_line_length(self) -> None:
         response = await self.client.post(
             "/",
             data=b'print("hello")\n',
             headers={blackd.LINE_LENGTH_HEADER: "NaN"},
         )
         self.assertEqual(response.status, 400)
 
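The decorator removals here work because `AioHTTPTestCase` has run `async def test_*` methods natively since aiohttp 3.8, making `unittest_run_loop` a no-op. A minimal sketch of a decorator-free aiohttp test case (the echo app is illustrative, not blackd's):

    from aiohttp import web
    from aiohttp.test_utils import AioHTTPTestCase

    class EchoTestCase(AioHTTPTestCase):
        async def get_application(self) -> web.Application:
            async def echo(request: web.Request) -> web.Response:
                return web.Response(text=await request.text())

            app = web.Application()
            app.router.add_post("/", echo)
            return app

        # No @unittest_run_loop needed: async tests run on the case's loop.
        async def test_echo(self) -> None:
            response = await self.client.post("/", data="hello")
            self.assertEqual(await response.text(), "hello")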
-    @unittest_run_loop
     async def test_blackd_skip_first_source_line(self) -> None:
         invalid_first_line = b"Header will be skipped\r\ni = [1,2,3]\nj = [1,2,3]\n"
         expected_result = b"Header will be skipped\r\ni = [1, 2, 3]\nj = [1, 2, 3]\n"
@@ -191,19 +170,16 @@ async def test_blackd_skip_first_source_line(self) -> None:
         self.assertEqual(response.status, 200)
         self.assertEqual(await response.read(), expected_result)
 
-    @unittest_run_loop
     async def test_blackd_preview(self) -> None:
         response = await self.client.post(
             "/", data=b'print("hello")\n', headers={blackd.PREVIEW: "true"}
         )
         self.assertEqual(response.status, 204)
 
-    @unittest_run_loop
     async def test_blackd_response_black_version_header(self) -> None:
         response = await self.client.post("/")
         self.assertIsNotNone(response.headers.get(blackd.BLACK_VERSION_HEADER))
 
-    @unittest_run_loop
     async def test_cors_preflight(self) -> None:
         response = await self.client.options(
             "/",
@@ -218,13 +194,11 @@ async def test_cors_preflight(self) -> None:
         self.assertIsNotNone(response.headers.get("Access-Control-Allow-Headers"))
         self.assertIsNotNone(response.headers.get("Access-Control-Allow-Methods"))
 
-    @unittest_run_loop
     async def test_cors_headers_present(self) -> None:
         response = await self.client.post("/", headers={"Origin": "*"})
         self.assertIsNotNone(response.headers.get("Access-Control-Allow-Origin"))
         self.assertIsNotNone(response.headers.get("Access-Control-Expose-Headers"))
 
-    @unittest_run_loop
     async def test_preserves_line_endings(self) -> None:
         for data in (b"c\r\nc\r\n", b"l\nl\n"):
             # test preserved newlines when reformatted
@@ -234,14 +208,12 @@ async def test_preserves_line_endings(self) -> None:
             response = await self.client.post("/", data=data)
             self.assertEqual(response.status, 204)
 
-    @unittest_run_loop
     async def test_normalizes_line_endings(self) -> None:
         for data, expected in ((b"c\r\nc\n", "c\r\nc\r\n"), (b"l\nl\r\n", "l\nl\n")):
             response = await self.client.post("/", data=data)
             self.assertEqual(await response.text(), expected)
             self.assertEqual(response.status, 200)
 
-    @unittest_run_loop
     async def test_single_character(self) -> None:
         response = await self.client.post("/", data="1")
         self.assertEqual(await response.text(), "1\n")
diff --git a/tests/test_format.py b/tests/test_format.py
index 9162c585c08..ade7761a029 100644
--- a/tests/test_format.py
+++ b/tests/test_format.py
@@ -87,4 +87,6 @@ def test_patma_invalid() -> None:
     with pytest.raises(black.parsing.InvalidInput) as exc_info:
         assert_format(source, expected, mode, minimum_version=(3, 10))
 
-    exc_info.match("Cannot parse: 10:11")
+    exc_info.match(
+        "Cannot parse for target version Python 3.10: 10:11: case a := b:"
+    )
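The test_format.py change reflects a richer parse error: it now names the target version the grammar failed against and echoes the offending line. A hedged sketch of triggering it (my own invalid snippet, not the test's fixture, and the exact wording may vary):

    # `case a := b:` is not a valid match pattern, so parsing fails.
    import black

    bad = "match x:\n    case a := b:\n        pass\n"
    try:
        black.format_str(bad, mode=black.Mode())
    except black.parsing.InvalidInput as exc:
        # e.g. "Cannot parse for target version Python 3.10: 2:11: case a := b:"
        print(exc)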
diff --git a/tests/test_ipynb.py b/tests/test_ipynb.py
index 59897190304..bdc2f27fcdb 100644
--- a/tests/test_ipynb.py
+++ b/tests/test_ipynb.py
@@ -208,6 +208,22 @@ def test_cell_magic_with_custom_python_magic(
     assert result == expected_output
 
 
+@pytest.mark.parametrize(
+    "src",
+    (
+        " %%custom_magic \nx=2",
+        "\n\n%%custom_magic\nx=2",
+        "# comment\n%%custom_magic\nx=2",
+        "\n \n # comment with %%time\n\t\n %%custom_magic # comment \nx=2",
+    ),
+)
+def test_cell_magic_with_custom_python_magic_after_spaces_and_comments_noop(
+    src: str,
+) -> None:
+    with pytest.raises(NothingChanged):
+        format_cell(src, fast=True, mode=JUPYTER_MODE)
+
+
 def test_cell_magic_nested() -> None:
     src = "%%time\n%%time\n2+2"
     result = format_cell(src, fast=True, mode=JUPYTER_MODE)
diff --git a/tests/test_tokenize.py b/tests/test_tokenize.py
new file mode 100644
index 00000000000..3798a9b6a92
--- /dev/null
+++ b/tests/test_tokenize.py
@@ -0,0 +1,120 @@
+"""Tests for the blib2to3 tokenizer."""
+
+import io
+import sys
+import textwrap
+from dataclasses import dataclass
+from typing import List
+
+import black
+from blib2to3.pgen2 import token, tokenize
+
+
+@dataclass
+class Token:
+    type: str
+    string: str
+    start: tokenize.Coord
+    end: tokenize.Coord
+
+
+def get_tokens(text: str) -> List[Token]:
+    """Return the tokens produced by the tokenizer."""
+    readline = io.StringIO(text).readline
+    tokens: List[Token] = []
+
+    def tokeneater(
+        type: int, string: str, start: tokenize.Coord, end: tokenize.Coord, line: str
+    ) -> None:
+        tokens.append(Token(token.tok_name[type], string, start, end))
+
+    tokenize.tokenize(readline, tokeneater)
+    return tokens
+
+
+def assert_tokenizes(text: str, tokens: List[Token]) -> None:
+    """Assert that the tokenizer produces the expected tokens."""
+    actual_tokens = get_tokens(text)
+    assert actual_tokens == tokens
+
+
+def test_simple() -> None:
+    assert_tokenizes(
+        "1",
+        [Token("NUMBER", "1", (1, 0), (1, 1)), Token("ENDMARKER", "", (2, 0), (2, 0))],
+    )
+    assert_tokenizes(
+        "'a'",
+        [
+            Token("STRING", "'a'", (1, 0), (1, 3)),
+            Token("ENDMARKER", "", (2, 0), (2, 0)),
+        ],
+    )
+    assert_tokenizes(
+        "a",
+        [Token("NAME", "a", (1, 0), (1, 1)), Token("ENDMARKER", "", (2, 0), (2, 0))],
+    )
+
+
+def test_fstring() -> None:
+    assert_tokenizes(
+        'f"x"',
+        [
+            Token("FSTRING_START", 'f"', (1, 0), (1, 2)),
+            Token("FSTRING_MIDDLE", "x", (1, 2), (1, 3)),
+            Token("FSTRING_END", '"', (1, 3), (1, 4)),
+            Token("ENDMARKER", "", (2, 0), (2, 0)),
+        ],
+    )
+    assert_tokenizes(
+        'f"{x}"',
+        [
+            Token("FSTRING_START", 'f"', (1, 0), (1, 2)),
+            Token("FSTRING_MIDDLE", "", (1, 2), (1, 2)),
+            Token("LBRACE", "{", (1, 2), (1, 3)),
+            Token("NAME", "x", (1, 3), (1, 4)),
+            Token("RBRACE", "}", (1, 4), (1, 5)),
+            Token("FSTRING_MIDDLE", "", (1, 5), (1, 5)),
+            Token("FSTRING_END", '"', (1, 5), (1, 6)),
+            Token("ENDMARKER", "", (2, 0), (2, 0)),
+        ],
+    )
+    assert_tokenizes(
+        'f"{x:y}"\n',
+        [
+            Token(type="FSTRING_START", string='f"', start=(1, 0), end=(1, 2)),
+            Token(type="FSTRING_MIDDLE", string="", start=(1, 2), end=(1, 2)),
+            Token(type="LBRACE", string="{", start=(1, 2), end=(1, 3)),
+            Token(type="NAME", string="x", start=(1, 3), end=(1, 4)),
+            Token(type="OP", string=":", start=(1, 4), end=(1, 5)),
+            Token(type="FSTRING_MIDDLE", string="y", start=(1, 5), end=(1, 6)),
+            Token(type="RBRACE", string="}", start=(1, 6), end=(1, 7)),
+            Token(type="FSTRING_MIDDLE", string="", start=(1, 7), end=(1, 7)),
+            Token(type="FSTRING_END", string='"', start=(1, 7), end=(1, 8)),
+            Token(type="NEWLINE", string="\n", start=(1, 8), end=(1, 9)),
+            Token(type="ENDMARKER", string="", start=(2, 0), end=(2, 0)),
+        ],
+    )
+    assert_tokenizes(
+        'f"x\\\n{a}"\n',
+        [
+            Token(type="FSTRING_START", string='f"', start=(1, 0), end=(1, 2)),
+            Token(type="FSTRING_MIDDLE", string="x\\\n", start=(1, 2), end=(2, 0)),
+            Token(type="LBRACE", string="{", start=(2, 0), end=(2, 1)),
+            Token(type="NAME", string="a", start=(2, 1), end=(2, 2)),
+            Token(type="RBRACE", string="}", start=(2, 2), end=(2, 3)),
+            Token(type="FSTRING_MIDDLE", string="", start=(2, 3), end=(2, 3)),
+            Token(type="FSTRING_END", string='"', start=(2, 3), end=(2, 4)),
+            Token(type="NEWLINE", string="\n", start=(2, 4), end=(2, 5)),
+            Token(type="ENDMARKER", string="", start=(3, 0), end=(3, 0)),
+        ],
+    )
+
+
+# Run "echo some code | python tests/test_tokenize.py" to generate test cases.
+if __name__ == "__main__":
+    code = sys.stdin.read()
+    tokens = get_tokens(code)
+    text = f"assert_tokenizes({code!r}, {tokens!r})"
+    text = black.format_str(text, mode=black.Mode())
+    print(textwrap.indent(text, "    "))
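Usage sketch for the helper above: `get_tokens` can also be driven directly, and per `test_simple` tokenizing "1" yields a NUMBER token followed by ENDMARKER. This assumes the repository root is on sys.path so the module imports cleanly:

    from tests.test_tokenize import get_tokens

    for tok in get_tokens("1"):
        print(tok.type, repr(tok.string), tok.start, tok.end)
    # NUMBER '1' (1, 0) (1, 1)
    # ENDMARKER '' (2, 0) (2, 0)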