From c9ef4065ff6324315a0c5c1ffef4b1cc83d34598 Mon Sep 17 00:00:00 2001 From: Doyle Rowland Date: Mon, 15 May 2023 22:07:04 -0400 Subject: [PATCH 1/2] feat: use tomllib for Python 3.11+ --- .pre-commit-config.yaml | 1 - pyproject.toml | 11 +++-- src/docformatter/__init__.py | 12 ++--- src/docformatter/configuration.py | 77 +++++++++++++++---------------- src/docformatter/format.py | 63 ++++++++----------------- src/docformatter/strings.py | 31 +++++++++---- src/docformatter/syntax.py | 22 +++------ 7 files changed, 96 insertions(+), 121 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7fb6809..ba6b688 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: - - id: check-docstring-first - id: check-merge-conflict - id: check-toml - id: check-yaml diff --git a/pyproject.toml b/pyproject.toml index 1e03932..28899c2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,7 +34,7 @@ include = ["LICENSE"] [tool.poetry.dependencies] python = "^3.7" charset_normalizer = "^3.0.0" -tomli = {version = "^2.0.0", optional = true} +tomli = {version = "^2.0.0", python = "<3.11", optional = true} untokenize = "^0.1.1" [tool.poetry.dev-dependencies] @@ -52,6 +52,7 @@ pylint = [ ] pytest = "^7.1.0" pytest-cov = "^4.0.0" +ruff = "^0.0.267" rstcheck = "^6.1.0" tox = "<4.0.0" Sphinx = [ @@ -134,7 +135,7 @@ show_missing = true output = 'coverage.xml' [tool.black] -line-length = 79 +line-length = 88 target-version = ['py37', 'py38', 'py39', 'py310', 'py311'] include = '\.pyi?$' exclude = ''' @@ -164,7 +165,7 @@ include_trailing_comma = true force_grid_wrap = 0 use_parentheses = true ensure_newline_before_comments = true -line_length = 79 +line_length = 88 [tool.tox] legacy_tox_ini = """ @@ -242,7 +243,7 @@ deps = charset_normalizer pycodestyle pydocstyle - pylint + ruff rstcheck toml untokenize @@ -252,6 +253,6 @@ commands = docformatter --black --recursive {toxinidir}/src/docformatter pycodestyle --exclude=.git,.tox,*.pyc,*.pyo,build,dist,*.egg-info,config,docs,locale,tests,tools --ignore=C326,C330,E121,E123,E126,E133,E203,E242,E265,E402,W503,W504 --format=pylint --max-line-length=88 {toxinidir}/src/docformatter pydocstyle {toxinidir}/src/docformatter - pylint --rcfile={toxinidir}/pyproject.toml {toxinidir}/src/docformatter + ruff check --select "PL" --select "F" {toxinidir}/src/docformatter rstcheck --report-level=1 {toxinidir}/README.rst """ diff --git a/src/docformatter/__init__.py b/src/docformatter/__init__.py index 876b013..6788346 100644 --- a/src/docformatter/__init__.py +++ b/src/docformatter/__init__.py @@ -27,11 +27,11 @@ # docformatter Local Imports from .__pkginfo__ import __version__ -from .strings import * -from .syntax import * -from .util import * +from .strings import * # noqa F403 +from .syntax import * # noqa F403 +from .util import * # noqa F403 # Have isort skip these they require the functions above. 
-from .configuration import Configurater # isort: skip -from .encode import Encoder # isort: skip -from .format import Formatter, FormatResult # isort: skip +from .configuration import Configurater # isort: skip # noqa F401 +from .encode import Encoder # isort: skip # noqa F401 +from .format import Formatter, FormatResult # isort: skip # noqa F401 diff --git a/src/docformatter/configuration.py b/src/docformatter/configuration.py index ea26e29..939102d 100644 --- a/src/docformatter/configuration.py +++ b/src/docformatter/configuration.py @@ -23,19 +23,28 @@ # SOFTWARE. """This module provides docformatter's Configurater class.""" + # Standard Library Imports import argparse +import contextlib import os +import sys from configparser import ConfigParser from typing import Dict, List, Union -try: - # Third Party Imports - import tomli +TOMLLIB_INSTALLED = False +TOMLI_INSTALLED = False +with contextlib.suppress(ImportError): + if sys.version_info >= (3, 11): + # Standard Library Imports + import tomllib + + TOMLLIB_INSTALLED = True + else: + # Third Party Imports + import tomli - TOMLI_INSTALLED = True -except ImportError: - TOMLI_INSTALLED = False + TOMLI_INSTALLED = True # docformatter Package Imports from docformatter import __pkginfo__ @@ -44,7 +53,7 @@ class Configurater: """Read and store all the docformatter configuration information.""" - parser = None + parser: argparse.ArgumentParser = argparse.ArgumentParser() """Parser object.""" flargs_dct: Dict[str, Union[bool, float, int, str]] = {} @@ -57,7 +66,7 @@ class Configurater: ] """List of supported configuration files.""" - args: argparse.Namespace = None + args: argparse.Namespace = argparse.Namespace() def __init__(self, args: List[Union[bool, int, str]]) -> None: """Initialize a Configurater class instance. 
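
A note on the tomllib/tomli gating added in the hunk above: Python 3.11+ ships ``tomllib`` in the standard library, so the third-party ``tomli`` backport is only imported (and only needed) on older interpreters, and the corresponding flag is left ``False`` if the import fails. The snippet below is a minimal, self-contained sketch of that pattern for reference only; the helper name ``load_docformatter_table`` is illustrative and not part of docformatter's API, and the real module additionally tracks the ``TOMLLIB_INSTALLED``/``TOMLI_INSTALLED`` flags shown above:

    import sys

    if sys.version_info >= (3, 11):
        import tomllib  # standard library on Python 3.11 and newer
    else:
        try:
            import tomli as tomllib  # third-party backport for Python < 3.11
        except ImportError:  # no TOML parser available
            tomllib = None


    def load_docformatter_table(path: str = "pyproject.toml") -> dict:
        """Return the [tool.docformatter] table from *path*, or {} if unavailable."""
        if tomllib is None:
            return {}
        with open(path, "rb") as stream:  # tomllib and tomli both require binary mode
            return tomllib.load(stream).get("tool", {}).get("docformatter", {})

Keeping the backport optional is also what allows the ``tomli`` dependency to carry the ``python = "<3.11"`` environment marker added to pyproject.toml earlier in this patch.
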
@@ -75,9 +84,7 @@ def __init__(self, args: List[Union[bool, int, str]]) -> None: ) try: - self.config_file = self.args_lst[ - self.args_lst.index("--config") + 1 - ] + self.config_file = self.args_lst[self.args_lst.index("--config") + 1] except ValueError: for _configuration_file in self.configuration_file_lst: if os.path.isfile(_configuration_file): @@ -116,8 +123,7 @@ def do_parse_arguments(self) -> None: "-r", "--recursive", action="store_true", - default=self.flargs_dct.get("recursive", "false").lower() - == "true", + default=self.flargs_dct.get("recursive", "false").lower() == "true", help="drill down directories recursively", ) self.parser.add_argument( @@ -163,13 +169,11 @@ def do_parse_arguments(self) -> None: "--style", default=self.flargs_dct.get("style", "sphinx"), help="name of the docstring style to use when formatting " - "parameter lists (default: sphinx)", + "parameter lists (default: sphinx)", ) self.parser.add_argument( "--wrap-summaries", - default=int( - self.flargs_dct.get("wrap-summaries", _default_wrap_summaries) - ), + default=int(self.flargs_dct.get("wrap-summaries", _default_wrap_summaries)), type=int, metavar="length", help="wrap long summary lines at this length; " @@ -179,9 +183,7 @@ def do_parse_arguments(self) -> None: self.parser.add_argument( "--wrap-descriptions", default=int( - self.flargs_dct.get( - "wrap-descriptions", _default_wrap_descriptions - ) + self.flargs_dct.get("wrap-descriptions", _default_wrap_descriptions) ), type=int, metavar="length", @@ -192,8 +194,7 @@ def do_parse_arguments(self) -> None: self.parser.add_argument( "--force-wrap", action="store_true", - default=self.flargs_dct.get("force-wrap", "false").lower() - == "true", + default=self.flargs_dct.get("force-wrap", "false").lower() == "true", help="force descriptions to be wrapped even if it may " "result in a mess (default: False)", ) @@ -228,15 +229,12 @@ def do_parse_arguments(self) -> None: "pre-summary-space", _default_pre_summary_space ).lower() == "true", - help="add a space after the opening triple quotes " - "(default: False)", + help="add a space after the opening triple quotes " "(default: False)", ) self.parser.add_argument( "--make-summary-multi-line", action="store_true", - default=self.flargs_dct.get( - "make-summary-multi-line", "false" - ).lower() + default=self.flargs_dct.get("make-summary-multi-line", "false").lower() == "true", help="add a newline before and after the summary of a one-line " "docstring (default: False)", @@ -244,9 +242,7 @@ def do_parse_arguments(self) -> None: self.parser.add_argument( "--close-quotes-on-newline", action="store_true", - default=self.flargs_dct.get( - "close-quotes-on-newline", "false" - ).lower() + default=self.flargs_dct.get("close-quotes-on-newline", "false").lower() == "true", help="place closing triple quotes on a new-line when a " "one-line docstring wraps to two or more lines " @@ -275,8 +271,7 @@ def do_parse_arguments(self) -> None: self.parser.add_argument( "--non-strict", action="store_true", - default=self.flargs_dct.get("non-strict", "false").lower() - == "true", + default=self.flargs_dct.get("non-strict", "false").lower() == "true", help="don't strictly follow reST syntax to identify lists (see " "issue #67) (default: False)", ) @@ -309,9 +304,7 @@ def do_parse_arguments(self) -> None: if self.args.length_range: if self.args.length_range[0] <= 0: - self.parser.error( - "--docstring-length must be positive numbers" - ) + self.parser.error("--docstring-length must be positive numbers") if self.args.length_range[0] > 
self.args.length_range[1]: self.parser.error( "First value of --docstring-length should be less " @@ -328,7 +321,11 @@ def _do_read_configuration_file(self) -> None: fullpath, ext = os.path.splitext(self.config_file) filename = os.path.basename(fullpath) - if ext == ".toml" and TOMLI_INSTALLED and filename == "pyproject": + if ( + ext == ".toml" + and (TOMLI_INSTALLED or TOMLLIB_INSTALLED) + and filename == "pyproject" + ): self._do_read_toml_configuration() if (ext == ".cfg" and filename == "setup") or ( @@ -339,13 +336,15 @@ def _do_read_configuration_file(self) -> None: def _do_read_toml_configuration(self) -> None: """Load configuration information from a *.toml file.""" with open(self.config_file, "rb") as f: - config = tomli.load(f) + if TOMLI_INSTALLED: + config = tomli.load(f) + elif TOMLLIB_INSTALLED: + config = tomllib.load(f) result = config.get("tool", {}).get("docformatter", None) if result is not None: self.flargs_dct = { - k: v if isinstance(v, list) else str(v) - for k, v in result.items() + k: v if isinstance(v, list) else str(v) for k, v in result.items() } def _do_read_parser_configuration(self) -> None: diff --git a/src/docformatter/format.py b/src/docformatter/format.py index 8881298..6731bb9 100644 --- a/src/docformatter/format.py +++ b/src/docformatter/format.py @@ -65,7 +65,7 @@ def _do_remove_blank_lines_after_definitions( definition removed. """ for _idx, _token in enumerate(modified_tokens): - if _token[0] == 3: + if _token[0] == 3: # noqa PLR2004 j = 1 # Remove newline between variable definition and docstring @@ -75,9 +75,7 @@ def _do_remove_blank_lines_after_definitions( # * The import section. while ( modified_tokens[_idx - j][4] == "\n" - and not ( - modified_tokens[_idx - j - 1][4].strip().endswith('"""') - ) + and not (modified_tokens[_idx - j - 1][4].strip().endswith('"""')) and not modified_tokens[_idx - j - 1][4].startswith("#!/") and "import" not in modified_tokens[_idx - j - 1][4] ): @@ -120,12 +118,8 @@ def _do_remove_blank_lines_after_docstring(modified_tokens): _num_blank_lines += 1 with contextlib.suppress(IndexError): - _is_definition = ( - _token[4].lstrip().startswith(("class ", "def ", "@")) - ) - _is_docstring = ( - modified_tokens[_idx - 2][4].strip().endswith('"""') - ) + _is_definition = _token[4].lstrip().startswith(("class ", "def ", "@")) + _is_docstring = modified_tokens[_idx - 2][4].strip().endswith('"""') _after_definition = ( modified_tokens[_idx - _num_blank_lines - 4][4] .lstrip() @@ -134,9 +128,7 @@ def _do_remove_blank_lines_after_docstring(modified_tokens): _after_docstring = modified_tokens[_idx - 5][4].strip().endswith( '"""' ) or modified_tokens[_idx - 5][4].strip().startswith('"""') - _comment_follows = re.search( - r"\"\"\" *#", modified_tokens[_idx - 4][4] - ) + _comment_follows = re.search(r"\"\"\" *#", modified_tokens[_idx - 4][4]) if ( _token[0] == 1 @@ -337,9 +329,7 @@ def _do_format_code(self, source): The text from the source file. 
""" try: - _original_newline = self.encodor.do_find_newline( - source.splitlines(True) - ) + _original_newline = self.encodor.do_find_newline(source.splitlines(True)) _code = self._format_code(source) return _strings.normalize_line_endings( @@ -371,9 +361,7 @@ def _format_code( assert self.args.line_range[0] > 0 and self.args.line_range[1] > 0 if self.args.length_range is not None: - assert ( - self.args.length_range[0] > 0 and self.args.length_range[1] > 0 - ) + assert self.args.length_range[0] > 0 and self.args.length_range[1] > 0 modified_tokens = [] @@ -389,6 +377,7 @@ def _format_code( end, line, ) in tokenize.generate_tokens(sio.readline): + _token_string = token_string if ( token_type == tokenize.STRING and token_string.startswith(self.QUOTE_TYPES) @@ -397,15 +386,13 @@ def _format_code( or previous_token_type == tokenize.NEWLINE or only_comments_so_far ) - and _util.is_in_range( - self.args.line_range, start[0], end[0] - ) + and _util.is_in_range(self.args.line_range, start[0], end[0]) and _util.has_correct_length( self.args.length_range, start[0], end[0] ) ): indentation = " " * (len(line) - len(line.lstrip())) - token_string = self._do_format_docstring( + _token_string = self._do_format_docstring( indentation, token_string, ) @@ -418,22 +405,16 @@ def _format_code( only_comments_so_far = False previous_token_type = token_type - modified_tokens.append( - (token_type, token_string, start, end, line) - ) + modified_tokens.append((token_type, _token_string, start, end, line)) - modified_tokens = _do_remove_blank_lines_after_definitions( - modified_tokens - ) - modified_tokens = _do_remove_blank_lines_after_docstring( - modified_tokens - ) + modified_tokens = _do_remove_blank_lines_after_definitions(modified_tokens) + modified_tokens = _do_remove_blank_lines_after_docstring(modified_tokens) return untokenize.untokenize(modified_tokens) except tokenize.TokenError: return source - def _do_format_docstring( + def _do_format_docstring( # noqa PLR0911 self, indentation: str, docstring: str, @@ -479,10 +460,7 @@ def _do_format_docstring( summary, description = _strings.split_summary_and_description(contents) # Leave docstrings with underlined summaries alone. - if ( - _syntax.remove_section_header(description).strip() - != description.strip() - ): + if _syntax.remove_section_header(description).strip() != description.strip(): return docstring if not self.args.force_wrap and ( @@ -595,13 +573,9 @@ def _do_format_multiline_docstring( # Compensate for triple quotes by temporarily prepending 3 spaces. # This temporary prepending is undone below. 
initial_indent = ( - indentation - if self.args.pre_summary_newline - else 3 * " " + indentation - ) - pre_summary = ( - "\n" + indentation if self.args.pre_summary_newline else "" + indentation if self.args.pre_summary_newline else 3 * " " + indentation ) + pre_summary = "\n" + indentation if self.args.pre_summary_newline else "" summary = _syntax.wrap_summary( _strings.normalize_summary(summary, self.args.non_cap), wrap_length=self.args.wrap_summaries, @@ -659,6 +633,5 @@ def _do_strip_docstring(self, docstring: str) -> Tuple[str, str]: ].strip(), quote.replace("'", '"') raise ValueError( - "docformatter only handles triple-quoted (single or double) " - "strings" + "docformatter only handles triple-quoted (single or double) " "strings" ) diff --git a/src/docformatter/strings.py b/src/docformatter/strings.py index bb7344d..b86b38b 100644 --- a/src/docformatter/strings.py +++ b/src/docformatter/strings.py @@ -27,10 +27,10 @@ # Standard Library Imports import contextlib import re -from typing import List +from typing import List, Match, Optional, Union -def find_shortest_indentation(lines): +def find_shortest_indentation(lines: List[str]) -> str: """Determine the shortest indentation in a list of lines. Parameters @@ -58,17 +58,17 @@ def find_shortest_indentation(lines): return indentation or "" -def is_probably_beginning_of_sentence(line): +def is_probably_beginning_of_sentence(line: str) -> Union[Match[str], None, bool]: """Determine if the line begins a sentence. Parameters ---------- - line: + line : str The line to be tested. Returns ------- - is_beginning: bool + is_beginning : bool True if this token is the beginning of a sentence. """ # Check heuristically for a parameter list. @@ -83,10 +83,22 @@ def is_probably_beginning_of_sentence(line): return is_beginning_of_sentence and not is_pydoc_ref -def normalize_line(line, newline): +def normalize_line(line: str, newline: str) -> str: """Return line with fixed ending, if ending was present in line. Otherwise, does nothing. + + Parameters + ---------- + line : str + The line to normalize. + newline : str + The newline character to use for line endings. + + Returns + ------- + normalized_line : str + The supplied line with line endings replaced by the newline. """ stripped = line.rstrip("\n\r") return stripped + newline if stripped != line else line @@ -100,7 +112,7 @@ def normalize_line_endings(lines, newline): return "".join([normalize_line(line, newline) for line in lines]) -def normalize_summary(summary: str, noncap: List[str] = None) -> str: +def normalize_summary(summary: str, noncap: Optional[List[str]] = None) -> str: """Return normalized docstring summary. 
A normalized docstring summary will have the first word capitalized and @@ -161,7 +173,7 @@ def split_first_sentence(text): while rest: split = re.split(r"(\s)", rest, maxsplit=1) word = split[0] - if len(split) == 3: + if len(split) == 3: # noqa PLR2004 delimiter = split[1] rest = split[2] else: @@ -211,8 +223,7 @@ def split_summary_and_description(contents): if split[0].strip() and split[1].strip(): return ( split[0].strip(), - find_shortest_indentation(split[1].splitlines()[1:]) - + split[1].strip(), + find_shortest_indentation(split[1].splitlines()[1:]) + split[1].strip(), ) return contents, "" diff --git a/src/docformatter/syntax.py b/src/docformatter/syntax.py index dc28209..e2896fa 100644 --- a/src/docformatter/syntax.py +++ b/src/docformatter/syntax.py @@ -297,9 +297,7 @@ def do_skip_link(text: str, index: Tuple[int, int]) -> bool: _do_skip = re.search(URL_SKIP_REGEX, text[index[0] : index[1]]) is not None with contextlib.suppress(IndexError): - _do_skip = _do_skip or ( - text[index[0]] == "<" and text[index[1]] != ">" - ) + _do_skip = _do_skip or (text[index[0]] == "<" and text[index[1]] != ">") return _do_skip @@ -371,9 +369,7 @@ def do_split_description( # Finally, add everything after the last field list directive. with contextlib.suppress(IndexError): _text = ( - text[_text_idx + 1 :] - if text[_text_idx] == "\n" - else text[_text_idx:] + text[_text_idx + 1 :] if text[_text_idx] == "\n" else text[_text_idx:] ).splitlines() for _idx, _line in enumerate(_text): if _line not in ["", "\n", f"{indentation}"]: @@ -442,7 +438,8 @@ def do_wrap_parameter_lists( # noqa: PLR0913 lines.extend( textwrap.wrap( textwrap.dedent( - f"{text[_parameter[0]:_parameter[1]]} {_parameter_description}" + f"{text[_parameter[0]:_parameter[1]]} " + f"{_parameter_description.replace(2*indentation, '')}" ), width=wrap_length, initial_indent=indentation, @@ -507,9 +504,7 @@ def do_wrap_urls( _lines.pop(-1) # Add the URL. - _lines.append( - f"{do_clean_url(text[_url[0] : _url[1]], indentation)}" - ) + _lines.append(f"{do_clean_url(text[_url[0] : _url[1]], indentation)}") text_idx = _url[1] @@ -545,8 +540,7 @@ def is_some_sort_of_list( # Very large number of lines but short columns probably means a list of # items. 
if ( - len(split_lines) - / max([len(line.strip()) for line in split_lines] + [1]) + len(split_lines) / max([len(line.strip()) for line in split_lines] + [1]) > HEURISTIC_MIN_LIST_ASPECT_RATIO ) and not strict: return True @@ -680,9 +674,7 @@ def strip_leading_blank_lines(text): """Return text with leading blank lines removed.""" split = text.splitlines() - found = next( - (index for index, line in enumerate(split) if line.strip()), 0 - ) + found = next((index for index, line in enumerate(split) if line.strip()), 0) return "\n".join(split[found:]) From f2b15c94026c75e66e714d88af02f3ffdbb75c94 Mon Sep 17 00:00:00 2001 From: Doyle Rowland Date: Mon, 15 May 2023 22:15:11 -0400 Subject: [PATCH 2/2] doc: update documentation for installing with TOML support --- README.rst | 3 ++- docs/source/installation.rst | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/README.rst b/README.rst index c20b2fa..4c1f12a 100644 --- a/README.rst +++ b/README.rst @@ -77,7 +77,8 @@ From pip:: $ pip install --upgrade docformatter -Or, if you want to use pyproject.toml to configure docformatter:: +Or, if you want to use pyproject.toml to configure docformatter and you're using +Python < 3.11:: $ pip install --upgrade docformatter[tomli] diff --git a/docs/source/installation.rst b/docs/source/installation.rst index 5134793..158a4cd 100644 --- a/docs/source/installation.rst +++ b/docs/source/installation.rst @@ -19,6 +19,9 @@ to install with TOML support: $ pip install --upgrade docformatter[tomli] +This is only necessary if you are using Python < 3.11. Beginning with Python 3.11, +docformatter will utilize ``tomllib`` from the standard library. + Install from GitHub -------------------
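
A closing note on the configuration side of this change, for readers of the updated installation docs: whichever backend is available, the ``[tool.docformatter]`` table parses the same way. The sketch below is illustrative only; the option names (``recursive``, ``wrap-summaries``, ``wrap-descriptions``) come from the flags handled in ``configuration.py`` above, the values are made up, and ``EXAMPLE_PYPROJECT`` is not a real file:

    import sys

    if sys.version_info >= (3, 11):
        import tomllib
    else:
        # Backport installed via: pip install --upgrade docformatter[tomli]
        import tomli as tomllib

    EXAMPLE_PYPROJECT = """
    [tool.docformatter]
    recursive = true
    wrap-summaries = 82
    wrap-descriptions = 81
    """

    table = tomllib.loads(EXAMPLE_PYPROJECT)["tool"]["docformatter"]
    print(table)  # {'recursive': True, 'wrap-summaries': 82, 'wrap-descriptions': 81}

Note that docformatter stringifies scalar values when it stores this table (the ``flargs_dct`` comprehension in ``configuration.py``), so boolean options are later compared against the strings ``"true"``/``"false"`` rather than used as Python booleans.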