From 257c50712bc61a8732eda72d07d14f68e8d7fa06 Mon Sep 17 00:00:00 2001 From: Ryan Northey Date: Mon, 12 Apr 2021 13:03:41 +0100 Subject: [PATCH 01/13] python: Integrate linting tools Signed-off-by: Ryan Northey --- .gitignore | 1 + .yapfignore | 7 + bazel/repositories_extra.bzl | 7 - ci/do_ci.sh | 8 +- ci/format_pre.sh | 12 +- tools/base/BUILD | 2 + tools/base/checker.py | 361 +++++++++ tools/base/tests/test_checker.py | 786 +++++++++++++++++++ tools/base/tests/test_utils.py | 74 ++ tools/base/utils.py | 42 +- tools/code_format/BUILD | 11 +- tools/code_format/python_check.py | 124 +++ tools/code_format/python_flake8.py | 19 - tools/code_format/tests/test_python_check.py | 377 +++++++++ tools/testing/plugin.py | 1 - 15 files changed, 1785 insertions(+), 47 deletions(-) create mode 100644 .yapfignore create mode 100644 tools/base/checker.py create mode 100644 tools/base/tests/test_checker.py create mode 100755 tools/code_format/python_check.py delete mode 100644 tools/code_format/python_flake8.py create mode 100644 tools/code_format/tests/test_python_check.py diff --git a/.gitignore b/.gitignore index ae1f29656b59..94d0778d9287 100644 --- a/.gitignore +++ b/.gitignore @@ -37,3 +37,4 @@ cmake-build-debug /linux bazel.output.txt *~ +.coverage diff --git a/.yapfignore b/.yapfignore new file mode 100644 index 000000000000..0409026f278a --- /dev/null +++ b/.yapfignore @@ -0,0 +1,7 @@ +*generated* +*venv* +*protos* +*~ +*_pb2.py +*tests* +*#* diff --git a/bazel/repositories_extra.bzl b/bazel/repositories_extra.bzl index 9b4274271d3a..bda3919952dd 100644 --- a/bazel/repositories_extra.bzl +++ b/bazel/repositories_extra.bzl @@ -6,13 +6,6 @@ load("@proxy_wasm_cpp_host//bazel/cargo:crates.bzl", "proxy_wasm_cpp_host_fetch_ def _python_deps(): py_repositories() - # REMOVE!!! 
- pip_install( - name = "sometools_pip3", - requirements = "@envoy//tools/sometools:requirements.txt", - extra_pip_args = ["--require-hashes"], - ) - pip_install( name = "config_validation_pip3", requirements = "@envoy//tools/config_validation:requirements.txt", diff --git a/ci/do_ci.sh b/ci/do_ci.sh index c9421a0c90ae..f906a04bbd7b 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -7,9 +7,9 @@ set -e build_setup_args="" if [[ "$1" == "format_pre" || "$1" == "fix_format" || "$1" == "check_format" || "$1" == "check_repositories" || \ - "$1" == "check_spelling" || "$1" == "fix_spelling" || "$1" == "bazel.clang_tidy" || \ - "$1" == "check_spelling_pedantic" || "$1" == "fix_spelling_pedantic" ]]; then - build_setup_args="-nofetch" + "$1" == "check_spelling" || "$1" == "fix_spelling" || "$1" == "bazel.clang_tidy" || "$1" == "tooling" || \ + "$1" == "check_spelling_pedantic" || "$1" == "fix_spelling_pedantic" ]]; then + build_setup_args="-nofetch" fi # TODO(phlax): Clarify and/or integrate SRCDIR and ENVOY_SRCDIR @@ -470,8 +470,10 @@ elif [[ "$CI_TARGET" == "tooling" ]]; then echo "Run pytest tooling tests..." 
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/testing:pytest_python_pytest -- --cov-collect /tmp/.coverage-envoy bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/testing:pytest_python_coverage -- --cov-collect /tmp/.coverage-envoy + bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/base:pytest_checker -- --cov-collect /tmp/.coverage-envoy bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/base:pytest_runner -- --cov-collect /tmp/.coverage-envoy bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/base:pytest_utils -- --cov-collect /tmp/.coverage-envoy + bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/code_format:pytest_python_check -- --cov-collect /tmp/.coverage-envoy bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/testing:python_coverage -- --fail-under=95 /tmp/.coverage-envoy /source/generated/tooling exit 0 elif [[ "$CI_TARGET" == "verify_examples" ]]; then diff --git a/ci/format_pre.sh b/ci/format_pre.sh index 86ae89d484e2..92df44517097 100755 --- a/ci/format_pre.sh +++ b/ci/format_pre.sh @@ -43,18 +43,8 @@ CURRENT=shellcheck CURRENT=configs bazel run "${BAZEL_BUILD_OPTIONS[@]}" //configs:example_configs_validation -# TODO(phlax): update to use bazel and python_flake8/python_check -# this will simplify this code to a single line CURRENT=python -"${ENVOY_SRCDIR}"/tools/code_format/format_python_tools.sh check || { - "${ENVOY_SRCDIR}"/tools/code_format/format_python_tools.sh fix - git diff HEAD | tee "${DIFF_OUTPUT}" - raise () { - # this throws an error without exiting - return 1 - } - raise -} +bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/code_format:python_check -- --diff-file="$DIFF_OUTPUT" --fix "$(pwd)" if [[ "${#FAILED[@]}" -ne "0" ]]; then echo "TESTS FAILED:" >&2 diff --git a/tools/base/BUILD b/tools/base/BUILD index c5656e230a90..3dff32bc6aa0 100644 --- a/tools/base/BUILD +++ b/tools/base/BUILD @@ -5,6 +5,8 @@ licenses(["notice"]) # Apache 2 envoy_package() +envoy_py_library("tools.base.checker") + envoy_py_library("tools.base.runner") envoy_py_library("tools.base.utils") 
import argparse
import logging
import os
import subprocess
import sys
from functools import cached_property

# Mapping of CLI log-level names to stdlib logging levels (order matters only
# for display; lookups go through dict(LOG_LEVELS)).
LOG_LEVELS = (("info", logging.INFO), ("debug", logging.DEBUG), ("warn", logging.WARN),
              ("error", logging.ERROR))


class BazelRunError(Exception):
    """Raised when a forked ``bazel`` command exits non-zero."""
    pass


class LogFilter(logging.Filter):
    """Filter that only passes DEBUG/INFO records.

    Attached to the stdout handler so warnings/errors go exclusively to the
    stderr handler and are not duplicated on stdout.
    """

    def filter(self, rec):
        return rec.levelno in (logging.DEBUG, logging.INFO)


class Checker(object):
    """Runs check methods prefixed with `check_` and named in `self.checks`

    check methods should return the count of errors and log warnings and errors
    """
    checks = ()

    def __init__(self, *args):
        # Raw CLI args; parsing is deferred until `self.args` is first read.
        self._args = args
        # Per-check accumulators: check name -> list of messages.
        self.success = {}
        self.errors = {}
        self.warnings = {}

    @cached_property
    def args(self) -> argparse.Namespace:
        """Parsed args"""
        return self.parser.parse_args(self._args)

    @property
    def diff(self) -> bool:
        """Flag to determine whether the checker should print diffs to the console"""
        return self.args.diff

    @property
    def error_count(self) -> int:
        """Count of all errors found"""
        return sum(len(e) for e in self.errors.values())

    @property
    def failed(self) -> dict:
        """Dictionary of error counts per check"""
        return dict((k, (len(v))) for k, v in self.errors.items())

    @property
    def fix(self) -> bool:
        """Flag to determine whether the checker should attempt to fix found problems"""
        return self.args.fix

    @property
    def has_failed(self) -> bool:
        """Shows whether there are any failures"""
        # FIX: was `return self.failed or self.warned`, which returned a dict
        # despite the `-> bool` annotation; coerce so callers get a real bool.
        # TODO: add logic for warn/error handling (`--warning` arg).
        return bool(self.failed or self.warned)

    @cached_property
    def log(self) -> logging.Logger:
        """Instantiated logger

        DEBUG/INFO go to stdout (via `LogFilter`), WARN and above to stderr.
        """
        logger = logging.getLogger(self.name)
        logger.setLevel(self.log_level)
        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.setLevel(logging.DEBUG)
        stdout_handler.addFilter(LogFilter())
        stderr_handler = logging.StreamHandler(sys.stderr)
        stderr_handler.setLevel(logging.WARN)
        logger.addHandler(stdout_handler)
        logger.addHandler(stderr_handler)
        return logger

    @cached_property
    def log_level(self) -> int:
        """Log level parsed from args"""
        return dict(LOG_LEVELS)[self.args.log_level]

    @property
    def name(self) -> str:
        """Name of the checker"""
        # FIX: annotation was `-> bool`; this returns the class name string.
        return self.__class__.__name__

    @cached_property
    def parser(self) -> argparse.ArgumentParser:
        """Argparse parser"""
        parser = argparse.ArgumentParser()
        self.add_arguments(parser)
        return parser

    @cached_property
    def path(self) -> str:
        """The "path" - usually Envoy src dir. This is used for finding configs for the tooling and should be a dir"""
        try:
            path = self.args.path or self.args.paths[0]
        except IndexError:
            # NOTE: `parser.error` raises SystemExit itself; the `raise` is
            # kept for clarity/consistency with the sibling branch below.
            raise self.parser.error(
                "Missing path: `path` must be set either as an arg or with --path")
        if not os.path.isdir(path):
            raise self.parser.error(
                "Incorrect path: `path` must be a directory, set either as first arg or with --path"
            )
        return path

    @property
    def paths(self) -> list:
        """List of paths to apply checks to"""
        return self.args.paths or [self.path]

    @property
    def recurse(self) -> str:
        """Whether to apply checks recursively (where appropriate)"""
        # FIX: annotation was `-> bool`; argparse restricts this to the
        # strings "yes"/"no" (see `--recurse` in `add_arguments`).
        return self.args.recurse

    @property
    def show_summary(self) -> bool:
        """Show a summary at the end or not"""
        return bool(self.args.summary or self.error_count or self.warning_count)

    @property
    def status(self) -> dict:
        """Dictionary showing current success/warnings/errors"""
        return dict(
            success=self.success_count,
            errors=self.error_count,
            warnings=self.warning_count,
            failed=self.failed,
            warned=self.warned,
            succeeded=self.succeeded)

    @property
    def succeeded(self) -> dict:
        """Dictionary of success counts grouped by check type"""
        return dict((k, (len(v))) for k, v in self.success.items())

    @property
    def success_count(self) -> int:
        """Current count of successful checks"""
        return sum(len(e) for e in self.success.values())

    @cached_property
    def summary(self) -> "CheckerSummary":
        """Instance of the checker's summary class"""
        # FIX: annotation was `-> bool`; this returns a `summary_class` instance.
        return self.summary_class(self)

    @property
    def summary_class(self):
        """Checker's summary class"""
        return CheckerSummary

    @property
    def warned(self) -> dict:
        """Dictionary of warning counts grouped by check type"""
        return dict((k, (len(v))) for k, v in self.warnings.items())

    @property
    def warning_count(self) -> int:
        """Current count of warned checks"""
        return sum(len(e) for e in self.warnings.values())

    def add_arguments(self, parser: argparse.ArgumentParser) -> None:
        """Add arguments to the arg parser"""
        parser.add_argument(
            "--fix", action="store_true", default=False, help="Attempt to fix in place")
        parser.add_argument(
            "--diff",
            action="store_true",
            default=False,
            help="Display a diff in the console where available")
        parser.add_argument(
            "--recurse",
            "-r",
            choices=["yes", "no"],
            default="yes",
            help="Recurse path or paths directories")
        parser.add_argument(
            "--warning",
            "-w",
            choices=["warn", "error"],
            default="warn",
            help="Handle warnings as warnings or errors")
        parser.add_argument(
            "--summary", action="store_true", default=False, help="Show a summary of check runs")
        parser.add_argument(
            "--summary-errors",
            type=int,
            default=5,
            help="Number of errors to show in the summary, -1 shows all")
        parser.add_argument(
            "--summary-warnings",
            type=int,
            default=5,
            help="Number of warnings to show in the summary, -1 shows all")
        parser.add_argument(
            "--check",
            "-c",
            choices=self.checks,
            nargs="*",
            help="Specify which checks to run, can be specified for multiple checks")
        # One `--config-<check>` option per declared check.
        for check in self.checks:
            parser.add_argument(
                f"--config-{check}", default="", help=f"Custom configuration for the {check} check")
        parser.add_argument(
            "--path",
            "-p",
            default=None,
            help=
            "Path to the test root (usually Envoy source dir). If not specified the first path of paths is used"
        )
        parser.add_argument(
            "--log-level",
            "-l",
            choices=["info", "warn", "debug", "error"],
            default="info",
            help="Log level to display")
        parser.add_argument(
            "paths",
            nargs="*",
            help=
            "Paths to check. At least one path must be specified, or the `path` argument should be provided"
        )

    def error(self, name: str, errors: list, log: bool = True) -> int:
        """Record (and log) errors for a check type

        Returns 1 so check methods can return/accumulate an error count.
        """
        # FIX: annotation was `-> None` but the method returns 1.
        self.errors[name] = self.errors.get(name, [])
        self.errors[name].extend(errors)
        if log:
            self.log.error("\n".join(errors))
        return 1

    def get_checks(self) -> list:
        """Get list of checks for this checker class filtered according to user args"""
        return (
            self.checks if not self.args.check else
            [check for check in self.args.check if check in self.checks])

    def on_check_run(self, check: str) -> None:
        """Callback hook called after each check run"""
        pass

    def on_checks_begin(self) -> None:
        """Callback hook called before all checks"""
        pass

    def on_checks_complete(self) -> int:
        """Callback hook called after all checks have run, and returning the final outcome of a checks_run"""
        if self.show_summary:
            self.summary.print_summary()
        return 1 if self.has_failed else 0

    def run_checks(self) -> int:
        """Run all configured checks and return the final outcome"""
        checks = self.get_checks()
        self.on_checks_begin()
        for check in checks:
            self.log.info(f"[CHECKS:{self.name}] {check}")
            getattr(self, f"check_{check}")()
            self.on_check_run(check)
        return self.on_checks_complete()

    def succeed(self, name: str, success: list, log: bool = True) -> None:
        """Record (and log) success for a check type"""

        self.success[name] = self.success.get(name, [])
        self.success[name].extend(success)
        if log:
            self.log.info("\n".join(success))

    def warn(self, name: str, warnings: list, log: bool = True) -> None:
        """Record (and log) warnings for a check type"""

        self.warnings[name] = self.warnings.get(name, [])
        self.warnings[name].extend(warnings)
        if log:
            self.log.warning("\n".join(warnings))


class ForkingChecker(Checker):

    def fork(self, *args, capture_output: bool = True, **kwargs) -> subprocess.CompletedProcess:
        """Fork a subprocess, using self.path as the cwd by default"""
        kwargs["cwd"] = kwargs.get("cwd", self.path)
        return subprocess.run(*args, capture_output=capture_output, **kwargs)


class BazelChecker(ForkingChecker):

    def bazel_query(self, query: str) -> list:
        """Run a bazel query and return stdout as list of lines

        Raises BazelRunError if the query exits non-zero.
        """
        # NOTE(review): the single quotes are passed literally to bazel here
        # (argv list, no shell) — verify bazel tolerates a quoted query
        # expression, otherwise drop the f-string quoting.
        resp = self.fork(["bazel", "query", f"'{query}'"])
        if resp.returncode:
            raise BazelRunError(f"Bazel query failed: {resp}")
        return resp.stdout.decode("utf-8").split("\n")

    def bazel_run(
            self,
            target: str,
            *args,
            capture_output: bool = False,
            cwd: str = "",
            raises: bool = True) -> subprocess.CompletedProcess:
        """Run a bazel target and return the subprocess response

        Extra `args` are passed to the target after a `--` separator.
        Raises BazelRunError on non-zero exit unless `raises` is False.
        """
        args = (("--",) + args) if args else args
        bazel_args = ("bazel", "run", target) + args
        resp = self.fork(bazel_args, capture_output=capture_output, cwd=cwd or self.path)
        if resp.returncode and raises:
            raise BazelRunError(f"Bazel run failed: {resp}")
        return resp


class CheckerSummary(object):
    """Renders an end-of-run summary of a `Checker`'s warnings/errors/status."""

    def __init__(self, checker: Checker):
        self.checker = checker

    @property
    def max_errors(self) -> int:
        """Maximum errors to display in summary"""
        return self.checker.args.summary_errors

    @property
    def max_warnings(self) -> int:
        """Maximum warnings to display in summary"""
        return self.checker.args.summary_warnings

    def print_failed(self, problem_type):
        """Log a truncated section per check for `problem_type` ("warnings"/"errors")"""
        _out = []
        _max = getattr(self, f"max_{problem_type}")
        for check, problems in getattr(self.checker, problem_type).items():
            _msg = f"[{problem_type.upper()}:{self.checker.name}] {check}"
            # -1 (or any negative max) means "show everything".
            _max = (min(len(problems), _max) if _max >= 0 else len(problems))
            msg = (
                f"{_msg}: (showing first {_max} of {len(problems)})" if
                (len(problems) > _max and _max > 0) else (f"{_msg}:" if _max != 0 else _msg))
            _out.extend(self._section(msg, problems[:_max]))
        if _out:
            self.checker.log.error("\n".join(_out))

    def print_status(self) -> None:
        """Print summary status to stderr"""
        self.checker.log.warning(
            "\n".join(self._section(f"[SUMMARY:{self.checker.name}] {self.checker.status}")))

    def print_summary(self) -> None:
        """Write summary to stderr"""
        self.print_failed("warnings")
        self.print_failed("errors")
        self.print_status()

    def _section(self, message: str, lines: list = None) -> list:
        """Build a summary section as a list of output lines"""
        section = ["", "-" * 80, "", f"{message}"]
        if lines:
            section += lines
        return section
checker._args == ("path1", "path2", "path3") + assert checker.summary_class == CheckerSummary + + +def test_checker_args(): + checker = Checker("path1", "path2", "path3") + parser_mock = patch( + "tools.base.checker.Checker.parser", + new_callable=PropertyMock) + + with parser_mock as m_parser: + assert checker.args == m_parser.return_value.parse_args.return_value + + assert ( + list(m_parser.return_value.parse_args.call_args) + == [(('path1', 'path2', 'path3'),), {}]) + assert "args" in checker.__dict__ + + +def test_checker_diff(): + checker = Checker("path1", "path2", "path3") + args_mock = patch( + "tools.base.checker.Checker.args", + new_callable=PropertyMock) + + with args_mock as m_args: + assert checker.diff == m_args.return_value.diff + assert "diff" not in checker.__dict__ + + +def test_checker_error_count(): + checker = Checker("path1", "path2", "path3") + checker.errors = dict(foo=["err"] * 3, bar=["err"] * 5, baz=["err"] * 7) + assert checker.error_count == 15 + assert "error_count" not in checker.__dict__ + + +def test_checker_failed(): + checker = Checker("path1", "path2", "path3") + checker.errors = dict(foo=["err"] * 3, bar=["err"] * 5, baz=["err"] * 7) + assert checker.failed == {'foo': 3, 'bar': 5, 'baz': 7} + assert "failed" not in checker.__dict__ + + +def test_checker_fix(): + checker = Checker("path1", "path2", "path3") + args_mock = patch( + "tools.base.checker.Checker.args", + new_callable=PropertyMock) + + with args_mock as m_args: + assert checker.fix == m_args.return_value.fix + assert "fix" not in checker.__dict__ + + +@pytest.mark.parametrize("failed", [True, False]) +@pytest.mark.parametrize("warned", [True, False]) +def test_checker_has_failed(patches, failed, warned): + checker = Checker("path1", "path2", "path3") + patched = patches( + ("Checker.failed", dict(new_callable=PropertyMock)), + ("Checker.warned", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + with patched as (m_failed, m_warned): + 
m_failed.return_value = failed + m_warned.return_value = warned + result = checker.has_failed + + if failed or warned: + assert result == True + else: + assert result == False + assert "has_failed" not in checker.__dict__ + + +def test_checker_log(patches): + checker = Checker("path1", "path2", "path3") + patched = patches( + "logging.getLogger", + "logging.StreamHandler", + "LogFilter", + ("Checker.log_level", dict(new_callable=PropertyMock)), + ("Checker.name", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + with patched as (m_logger, m_stream, m_filter, m_level, m_name): + _loggers = (MagicMock(), MagicMock()) + m_stream.side_effect = _loggers + assert checker.log == m_logger.return_value + assert ( + list(m_logger.return_value.setLevel.call_args) + == [(m_level.return_value,), {}]) + assert ( + list(list(c) for c in m_stream.call_args_list) + == [[(sys.stdout,), {}], + [(sys.stderr,), {}]]) + assert ( + list(_loggers[0].setLevel.call_args) + == [(logging.DEBUG,), {}]) + assert ( + list(_loggers[0].addFilter.call_args) + == [(m_filter.return_value,), {}]) + assert ( + list(_loggers[1].setLevel.call_args) + == [(logging.WARN,), {}]) + assert ( + list(list(c) for c in m_logger.return_value.addHandler.call_args_list) + == [[(_loggers[0],), {}], [(_loggers[1],), {}]]) + assert "log" in checker.__dict__ + + +def test_checker_log_level(patches): + checker = Checker("path1", "path2", "path3") + patched = patches( + "dict", + ("Checker.args", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + with patched as (m_dict, m_args): + assert checker.log_level == m_dict.return_value.__getitem__.return_value + + assert ( + list(m_dict.call_args) + == [(LOG_LEVELS, ), {}]) + assert ( + list(m_dict.return_value.__getitem__.call_args) + == [(m_args.return_value.log_level,), {}]) + assert "log_level" in checker.__dict__ + + +def test_checker_name(): + checker = DummyChecker() + assert checker.name == checker.__class__.__name__ + assert "name" not 
in checker.__dict__ + + +def test_checker_parser(patches): + checker = Checker("path1", "path2", "path3") + patched = patches( + "argparse.ArgumentParser", + "Checker.add_arguments", + prefix="tools.base.checker") + with patched as (m_parser, m_add_args): + assert checker.parser == m_parser.return_value + assert ( + list(m_parser.call_args) + == [(), {}]) + assert ( + list(m_add_args.call_args) + == [(m_parser.return_value,), {}]) + assert "parser" in checker.__dict__ + + +@pytest.mark.parametrize("path", [None, "PATH"]) +@pytest.mark.parametrize("paths", [[], ["PATH0"]]) +@pytest.mark.parametrize("isdir", [True, False]) +def test_checker_path(patches, path, paths, isdir): + class DummyError(Exception): + pass + checker = Checker("path1", "path2", "path3") + patched = patches( + ("Checker.args", dict(new_callable=PropertyMock)), + ("Checker.parser", dict(new_callable=PropertyMock)), + "os.path.isdir", + prefix="tools.base.checker") + + with patched as (m_args, m_parser, m_isdir): + m_parser.return_value.error = DummyError + m_args.return_value.path = path + m_args.return_value.paths = paths + m_isdir.return_value = isdir + if not path and not paths: + with pytest.raises(DummyError) as e: + checker.path + assert ( + e.value.args + == ('Missing path: `path` must be set either as an arg or with --path',)) + elif not isdir: + with pytest.raises(DummyError) as e: + checker.path + assert ( + e.value.args + == ('Incorrect path: `path` must be a directory, set either as first arg or with --path',)) + else: + assert checker.path == path or paths[0] + assert "path" in checker.__dict__ + if path or paths: + assert ( + list(m_isdir.call_args) + == [(path or paths[0],), {}]) + + +@pytest.mark.parametrize("paths", [[], ["path1", "path2"]]) +def test_checker_paths(patches, paths): + checker = Checker("path1", "path2", "path3") + patched = patches( + ("Checker.args", dict(new_callable=PropertyMock)), + ("Checker.path", dict(new_callable=PropertyMock)), + 
prefix="tools.base.checker") + + with patched as (m_args, m_path): + m_args.return_value.paths = paths + result = checker.paths + + if paths: + assert result == paths + else: + assert result == [m_path.return_value] + assert "paths" not in checker.__dict__ + + +def test_checker_recurse(): + checker = Checker("path1", "path2", "path3") + args_mock = patch( + "tools.base.checker.Checker.args", + new_callable=PropertyMock) + + with args_mock as m_args: + assert checker.recurse == m_args.return_value.recurse + assert "recurse" not in checker.__dict__ + + +@pytest.mark.parametrize("summary", [True, False]) +@pytest.mark.parametrize("error_count", [0, 1]) +@pytest.mark.parametrize("warning_count", [0, 1]) +def test_checker_show_summary(patches, summary, error_count, warning_count): + checker = Checker("path1", "path2", "path3") + patched = patches( + ("Checker.args", dict(new_callable=PropertyMock)), + ("Checker.error_count", dict(new_callable=PropertyMock)), + ("Checker.warning_count", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + with patched as (m_args, m_errors, m_warnings): + m_args.return_value.summary = summary + m_errors.return_value = error_count + m_warnings.return_value = warning_count + result = checker.show_summary + + if summary or error_count or warning_count: + assert result == True + else: + assert result == False + assert "show_summary" not in checker.__dict__ + + +def test_checker_status(patches): + checker = Checker("path1", "path2", "path3") + patched = patches( + ("Checker.success_count", dict(new_callable=PropertyMock)), + ("Checker.error_count", dict(new_callable=PropertyMock)), + ("Checker.warning_count", dict(new_callable=PropertyMock)), + ("Checker.failed", dict(new_callable=PropertyMock)), + ("Checker.warned", dict(new_callable=PropertyMock)), + ("Checker.succeeded", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + with patched as args: + (m_success_count, m_error_count, m_warning_count, + m_failed, 
m_warned, m_succeeded) = args + assert ( + checker.status + == dict( + success=m_success_count.return_value, + errors=m_error_count.return_value, + warnings=m_warning_count.return_value, + failed=m_failed.return_value, + warned=m_warned.return_value, + succeeded=m_succeeded.return_value)) + assert "status" not in checker.__dict__ + + +def test_checker_succeeded(): + checker = Checker("path1", "path2", "path3") + checker.success = dict( + foo=["check"] * 3, + bar=["check"] * 5, + baz=["check"] * 7) + assert ( + checker.succeeded + == dict(foo=3, bar=5, baz=7)) + assert "succeeded" not in checker.__dict__ + + +def test_checker_success_count(): + checker = Checker("path1", "path2", "path3") + checker.success = dict(foo=["err"] * 3, bar=["err"] * 5, baz=["err"] * 7) + assert checker.success_count == 15 + assert "success_count" not in checker.__dict__ + + +def test_checker_summary(): + checker = Checker("path1", "path2", "path3") + summary_mock = patch( + "tools.base.checker.Checker.summary_class", + new_callable=PropertyMock) + + with summary_mock as m_summary: + assert checker.summary == m_summary.return_value.return_value + + assert ( + list(m_summary.return_value.call_args) + == [(checker,), {}]) + assert "summary" in checker.__dict__ + + +def test_checker_warned(): + checker = Checker("path1", "path2", "path3") + checker.warnings = dict( + foo=["check"] * 3, + bar=["check"] * 5, + baz=["check"] * 7) + assert ( + checker.warned + == dict(foo=3, bar=5, baz=7)) + assert "warned" not in checker.__dict__ + + +def test_checker_warning_count(): + checker = Checker("path1", "path2", "path3") + checker.warnings = dict(foo=["warn"] * 3, bar=["warn"] * 5, baz=["warn"] * 7) + assert checker.warning_count == 15 + assert "warning_count" not in checker.__dict__ + + +def test_checker_add_arguments(): + checker = DummyCheckerWithChecks("path1", "path2", "path3") + parser = MagicMock() + checker.add_arguments(parser) + assert ( + list(list(c) for c in 
parser.add_argument.call_args_list) + == [[('--fix',), + {'action': 'store_true', + 'default': False, + 'help': 'Attempt to fix in place'}], + [('--diff',), + {'action': 'store_true', + 'default': False, + 'help': 'Display a diff in the console where available'}], + [('--recurse', '-r'), + {'choices': ['yes', 'no'], + 'default': 'yes', + 'help': 'Recurse path or paths directories'}], + [('--warning', '-w'), + {'choices': ['warn', 'error'], + 'default': 'warn', + 'help': 'Handle warnings as warnings or errors'}], + [('--summary',), + {'action': 'store_true', + 'default': False, + 'help': 'Show a summary of check runs'}], + [('--summary-errors',), + {'type': int, + 'default': 5, + 'help': 'Number of errors to show in the summary, -1 shows all'}], + [('--summary-warnings',), + {'type': int, + 'default': 5, + 'help': 'Number of warnings to show in the summary, -1 shows all'}], + [('--check', '-c'), + {'choices': ("check1", "check2"), + 'nargs': '*', + 'help': 'Specify which checks to run, can be specified for multiple checks'}], + [('--config-check1',), + {'default': '', + 'help': 'Custom configuration for the check1 check'}], + [('--config-check2',), + {'default': '', + 'help': 'Custom configuration for the check2 check'}], + [('--path', '-p'), + {'default': None, + 'help': 'Path to the test root (usually Envoy source dir). If not specified the first path of paths is used'}], + [('--log-level', '-l'), + {'choices': ['info', 'warn', 'debug', 'error'], + 'default': 'info', 'help': 'Log level to display'}], + [('paths',), + {'nargs': '*', + 'help': 'Paths to check. 
At least one path must be specified, or the `path` argument should be provided'}]]) + + +TEST_ERRORS = ( + {}, + dict(myerror=[]), + dict(myerror=["a", "b", "c"]), + dict(othererror=["other1", "other2", "other3"]), + dict(othererror=["other1", "other2", "other3"], myerror=["a", "b", "c"])) + + +@pytest.mark.parametrize("log", [True, False]) +@pytest.mark.parametrize("errors", TEST_ERRORS) +def test_checker_error(patches, log, errors): + checker = Checker("path1", "path2", "path3") + log_mock = patch( + "tools.base.checker.Checker.log", + new_callable=PropertyMock) + checker.errors = errors.copy() + + with log_mock as m_log: + assert checker.error("mycheck", ["err1", "err2", "err3"], log) == 1 + + assert checker.errors["mycheck"] == errors.get("mycheck", []) + ["err1", "err2", "err3"] + for k, v in errors.items(): + if k != "mycheck": + assert checker.errors[k] == v + if log: + assert ( + list(m_log.return_value.error.call_args) + == [('err1\nerr2\nerr3',), {}]) + else: + assert not m_log.return_value.error.called + + +TEST_CHECKS = ( + None, + (), + ("check1", ), + ("check1", "check2", "check3"), + ("check3", "check4", "check5"), + ("check4", "check5")) + + +@pytest.mark.parametrize("checks", TEST_CHECKS) +def test_checker_get_checks(checks): + checker = Checker("path1", "path2", "path3") + checker.checks = ("check1", "check2", "check3") + args_mock = patch( + "tools.base.checker.Checker.args", + new_callable=PropertyMock) + + with args_mock as m_args: + m_args.return_value.check = checks + if checks: + assert ( + checker.get_checks() + == [check for check in checker.checks if check in checks or []]) + else: + assert checker.get_checks() == checker.checks + + +def test_checker_on_check_run(): + checker = Checker("path1", "path2", "path3") + assert not checker.on_check_run("checkname") + + +def test_checker_on_checks_begin(): + checker = Checker("path1", "path2", "path3") + assert checker.on_checks_begin() is None + + +@pytest.mark.parametrize("failed", [True, 
False]) +@pytest.mark.parametrize("show_summary", [True, False]) +def test_checker_on_checks_complete(patches, failed, show_summary): + checker = Checker("path1", "path2", "path3") + patched = patches( + ("Checker.has_failed", dict(new_callable=PropertyMock)), + ("Checker.show_summary", dict(new_callable=PropertyMock)), + ("Checker.summary", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + with patched as (m_failed, m_show_summary, m_summary): + m_failed.return_value = failed + m_show_summary.return_value = show_summary + assert checker.on_checks_complete() is (1 if failed else 0) + + if show_summary: + assert ( + list(m_summary.return_value.print_summary.call_args) + == [(), {}]) + else: + assert not m_summary.return_value.print_summary.called + + +def test_checker_run_checks(patches): + checker = DummyCheckerWithChecks("path1", "path2", "path3") + patched = patches( + "Checker.get_checks", + "Checker.on_checks_begin", + "Checker.on_checks_complete", + ("Checker.log", dict(new_callable=PropertyMock)), + ("Checker.name", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + with patched as (m_get, m_begin, m_complete, m_log, m_name): + m_get.return_value = ("check1", "check2") + assert checker.run_checks() == m_complete.return_value + + assert ( + list(m_get.call_args) + == [(), {}]) + assert ( + list(m_begin.call_args) + == [(), {}]) + assert ( + list(m_complete.call_args) + == [(), {}]) + assert ( + list(list(c) for c in m_log.return_value.info.call_args_list) + == [[(f"[CHECKS:{m_name.return_value}] check1",), {}], + [(f"[CHECKS:{m_name.return_value}] check2",), {}]]) + assert ( + list(checker.check1.call_args) + == [(), {}]) + assert ( + list(checker.check2.call_args) + == [(), {}]) + + +TEST_WARNS = ( + {}, + dict(mywarn=[]), + dict(mywarn=["a", "b", "c"]), + dict(otherwarn=["other1", "other2", "other3"]), + dict(otherwarn=["other1", "other2", "other3"], mywarn=["a", "b", "c"])) + + +@pytest.mark.parametrize("log", [True, 
False]) +@pytest.mark.parametrize("warns", TEST_WARNS) +def test_checker_warn(patches, log, warns): + checker = Checker("path1", "path2", "path3") + log_mock = patch( + "tools.base.checker.Checker.log", + new_callable=PropertyMock) + checker.warnings = warns.copy() + + with log_mock as m_log: + checker.warn("mycheck", ["warn1", "warn2", "warn3"], log) + + assert checker.warnings["mycheck"] == warns.get("mycheck", []) + ["warn1", "warn2", "warn3"] + for k, v in warns.items(): + if k != "mycheck": + assert checker.warnings[k] == v + if log: + assert ( + list(m_log.return_value.warning.call_args) + == [('warn1\nwarn2\nwarn3',), {}]) + else: + assert not m_log.return_value.warn.called + + +TEST_SUCCESS = ( + {}, + dict(mysuccess=[]), + dict(mysuccess=["a", "b", "c"]), + dict(othersuccess=["other1", "other2", "other3"]), + dict(othersuccess=["other1", "other2", "other3"], mysuccess=["a", "b", "c"])) + + +@pytest.mark.parametrize("log", [True, False]) +@pytest.mark.parametrize("success", TEST_SUCCESS) +def test_checker_succeed(patches, log, success): + checker = Checker("path1", "path2", "path3") + log_mock = patch( + "tools.base.checker.Checker.log", + new_callable=PropertyMock) + checker.success = success.copy() + + with log_mock as m_log: + checker.succeed("mycheck", ["success1", "success2", "success3"], log) + + assert checker.success["mycheck"] == success.get("mycheck", []) + ["success1", "success2", "success3"] + for k, v in success.items(): + if k != "mycheck": + assert checker.success[k] == v + if log: + assert ( + list(m_log.return_value.info.call_args) + == [('success1\nsuccess2\nsuccess3',), {}]) + else: + assert not m_log.return_value.info.called + + +# ForkingChecker tests + +@pytest.mark.parametrize("args", [(), ("a", "b")]) +@pytest.mark.parametrize("cwd", [None, "NONE", "PATH"]) +@pytest.mark.parametrize("capture_output", ["NONE", True, False]) +def test_forkingchecker_fork(patches, args, cwd, capture_output): + checker = ForkingChecker("path1", "path2", 
"path3") + patched = patches( + "subprocess.run", + ("Checker.path", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + with patched as (m_run, m_path): + kwargs = {} + if cwd != "NONE": + kwargs["cwd"] = cwd + if capture_output != "NONE": + kwargs["capture_output"] = capture_output + assert checker.fork(*args, **kwargs) == m_run.return_value + + expected = {'capture_output': True, 'cwd': cwd} + if capture_output is False: + expected["capture_output"] = False + if cwd == "NONE": + expected["cwd"] = m_path.return_value + assert ( + list(m_run.call_args) + == [args, expected]) + + +# CheckerSummary tests + +def test_checker_summary_constructor(): + checker = DummyChecker() + summary = CheckerSummary(checker) + assert summary.checker == checker + + +@pytest.mark.parametrize("max_errors", [-1, 0, 1, 23]) +def test_checker_summary_max_errors(max_errors): + checker = DummyChecker() + summary = CheckerSummary(checker) + checker.args.summary_errors = max_errors + assert summary.max_errors == max_errors + + +@pytest.mark.parametrize("max_warnings", [-1, 0, 1, 23]) +def test_checker_summary_max_warnings(max_warnings): + checker = DummyChecker() + summary = CheckerSummary(checker) + checker.args.summary_warnings = max_warnings + assert summary.max_warnings == max_warnings + + +def test_checker_summary_print_summary(patches): + checker = DummyChecker() + summary = CheckerSummary(checker) + patched = patches( + "CheckerSummary.print_failed", + "CheckerSummary.print_status", + prefix="tools.base.checker") + + with patched as (m_failed, m_status): + summary.print_summary() + assert ( + list(list(c) for c in m_failed.call_args_list) + == [[('warnings',), {}], [('errors',), {}]]) + assert m_status.called + + +TEST_SECTIONS = ( + ("MSG1", ["a", "b", "c"]), + ("MSG2", []), + ("MSG3", None)) + + +@pytest.mark.parametrize("section", TEST_SECTIONS) +def test_checker_summary_section(section): + checker = DummyChecker() + summary = CheckerSummary(checker) + message, 
lines = section + expected = [ + "", + "-" * 80, + "", + f"{message}"] + if lines: + expected += lines + assert summary._section(message, lines) == expected + + +def test_checker_summary_print_status(patches): + checker = DummyChecker() + summary = CheckerSummary(checker) + patched = patches( + "CheckerSummary._section", + prefix="tools.base.checker") + + summary.checker = MagicMock() + with patched as (m_section, ): + m_section.return_value = ["A", "B", "C"] + summary.print_status() + assert ( + list(m_section.call_args) + == [(f"[SUMMARY:{summary.checker.name}] {summary.checker.status}",), {}]) + assert ( + list(summary.checker.log.warning.call_args) + == [('A\nB\nC',), {}]) + + +@pytest.mark.parametrize("problem_type", ("errors", "warnings")) +@pytest.mark.parametrize("max_display", (-1, 0, 1, 23)) +@pytest.mark.parametrize("problems", ({}, dict(foo=["problem1"]), dict(foo=["problem1", "problem2"], bar=["problem3", "problem4"]))) +def test_checker_summary_print_failed(patches, problem_type, max_display, problems): + checker = DummyChecker() + summary = CheckerSummary(checker) + patched = patches( + "CheckerSummary._section", + (f"CheckerSummary.max_{problem_type}", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + with patched as (m_section, m_max): + summary.checker = MagicMock() + setattr(summary.checker, f"{problem_type}", problems) + m_max.return_value = max_display + m_section.return_value = ["A", "B", "C"] + summary.print_failed(problem_type) + + if not problems: + assert not summary.checker.log.error.called + assert not m_section.called + return + assert ( + list(summary.checker.log.error.call_args) + == [("\n".join(['A\nB\nC'] * len(problems)),), {}]) + if max_display == 0: + expected = [ + [(f"[{problem_type.upper()}:{summary.checker.name}] {prob}", []), {}] + for prob in problems] + else: + def _problems(prob): + return ( + problems[prob][:max_display] + if max_display > 0 + else problems[prob]) + def _extra(prob): + return ( + f": 
(showing first {max_display} of {len(problems)})" + if len(problems[prob]) > max_display and max_display >= 0 + else (":" + if max_display != 0 + else "")) + expected = [ + [(f"[{problem_type.upper()}:{summary.checker.name}] {prob}{_extra(prob)}", _problems(prob)), {}] + for prob in problems] + assert ( + list(list(c) for c in m_section.call_args_list) + == expected) + + +# LogFilter tests +@pytest.mark.parametrize("level", [logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR, None, "giraffe"]) +def test_checker_log_filter(level): + logfilter = LogFilter() + + class DummyRecord(object): + levelno = level + + if level in [logging.DEBUG, logging.INFO]: + assert logfilter.filter(DummyRecord()) + else: + assert not logfilter.filter(DummyRecord()) diff --git a/tools/base/tests/test_utils.py b/tools/base/tests/test_utils.py index 78f83a5ae999..4d48a9ac5d41 100644 --- a/tools/base/tests/test_utils.py +++ b/tools/base/tests/test_utils.py @@ -1,4 +1,8 @@ import importlib +import sys +from contextlib import contextmanager + +import pytest from tools.base import utils @@ -8,6 +12,76 @@ importlib.reload(utils) +def test_util_buffered_stdout(): + stdout = [] + + with utils.buffered(stdout=stdout): + print("test1") + print("test2") + sys.stdout.write("test3\n") + sys.stderr.write("error0\n") + + assert stdout == ["test1", "test2", "test3"] + + +def test_util_buffered_stderr(): + stderr = [] + + with utils.buffered(stderr=stderr): + print("test1") + print("test2") + sys.stdout.write("test3\n") + sys.stderr.write("error0\n") + sys.stderr.write("error1\n") + + assert stderr == ["error0", "error1"] + + +def test_util_buffered_stdout_stderr(): + stdout = [] + stderr = [] + + with utils.buffered(stdout=stdout, stderr=stderr): + print("test1") + print("test2") + sys.stdout.write("test3\n") + sys.stderr.write("error0\n") + sys.stderr.write("error1\n") + + assert stdout == ["test1", "test2", "test3"] + assert stderr == ["error0", "error1"] + + +def 
test_util_buffered_no_stdout_stderr(): + + with pytest.raises(utils.BufferUtilError): + with utils.buffered(): + pass + + +def test_util_nested(): + + fun1_args = [] + fun2_args = [] + + @contextmanager + def fun1(arg): + fun1_args.append(arg) + yield "FUN1" + + @contextmanager + def fun2(arg): + fun2_args.append(arg) + yield "FUN2" + + with utils.nested(fun1("A"), fun2("B")) as (fun1_yield, fun2_yield): + assert fun1_yield == "FUN1" + assert fun2_yield == "FUN2" + + assert fun1_args == ["A"] + assert fun2_args == ["B"] + + def test_util_coverage_with_data_file(patches): patched = patches( "ConfigParser", diff --git a/tools/base/utils.py b/tools/base/utils.py index 203910c37f41..3ce0294218ec 100644 --- a/tools/base/utils.py +++ b/tools/base/utils.py @@ -2,10 +2,11 @@ # Provides shared utils used by other python modules # +import io import os import tempfile from configparser import ConfigParser -from contextlib import contextmanager +from contextlib import ExitStack, contextmanager, redirect_stderr, redirect_stdout from typing import Iterator @@ -26,3 +27,42 @@ def coverage_with_data_file(data_file: str) -> Iterator[str]: with open(tmprc, "w") as f: parser.write(f) yield tmprc + + +class BufferUtilError(Exception): + pass + + +@contextmanager +def nested(*contexts): + with ExitStack() as stack: + yield [stack.enter_context(context) for context in contexts] + + +@contextmanager +def buffered(stdout: list = None, stderr: list = None, mangle=None) -> None: + """Captures stdout and stderr and feeds lines to supplied lists""" + + mangle = mangle or (lambda lines: lines) + + if stdout is None and stderr is None: + raise BufferUtilError("You must specify stdout and/or stderr") + + contexts = [] + + if stdout is not None: + _stdout = io.StringIO() + contexts.append(redirect_stdout(_stdout)) + if stderr is not None: + _stderr = io.StringIO() + contexts.append(redirect_stderr(_stderr)) + + with nested(*contexts): + yield + + if stdout is not None: + _stdout.seek(0) + 
stdout.extend(mangle(_stdout.read().strip().split("\n"))) + if stderr is not None: + _stderr.seek(0) + stderr.extend(mangle(_stderr.read().strip().split("\n"))) diff --git a/tools/code_format/BUILD b/tools/code_format/BUILD index 78297275b0fe..95784d0ef412 100644 --- a/tools/code_format/BUILD +++ b/tools/code_format/BUILD @@ -1,6 +1,6 @@ -load("@rules_python//python:defs.bzl", "py_binary") load("@pylint_pip3//:requirements.bzl", "requirement") load("//bazel:envoy_build_system.bzl", "envoy_package") +load("//tools/base:envoy_python.bzl", "envoy_py_binary") licenses(["notice"]) # Apache 2 @@ -12,12 +12,13 @@ exports_files([ "envoy_build_fixer.py", ]) -py_binary( - name = "python_flake8", - srcs = ["python_flake8.py"], - visibility = ["//visibility:public"], +envoy_py_binary( + name = "tools.code_format.python_check", deps = [ + "//tools/base:checker", + "//tools/base:utils", requirement("flake8"), requirement("pep8-naming"), + requirement("yapf"), ], ) diff --git a/tools/code_format/python_check.py b/tools/code_format/python_check.py new file mode 100755 index 000000000000..40d0cfd7faa7 --- /dev/null +++ b/tools/code_format/python_check.py @@ -0,0 +1,124 @@ +#!/usr/bin/env python3 + +# usage +# +# with bazel: +# +# bazel run //tools/code_format:python_check -- -h +# +# alternatively, if you have the necessary python deps available +# +# ./tools/code_format/python_check.py -h +# +# python requires: flake8, yapf +# + +import os +import sys +from functools import cached_property + +from flake8.main.application import Application as Flake8Application + +import yapf + +from tools.base import checker, utils + +FLAKE8_CONFIG = '.flake8' +YAPF_CONFIG = '.style.yapf' + +# TODO(phlax): add checks for: +# - isort + + +class PythonChecker(checker.ForkingChecker): + checks = ("flake8", "yapf") + + @property + def diff_file_path(self) -> str: + return self.args.diff_file + + @cached_property + def flake8_app(self) -> Flake8Application: + flake8_app = Flake8Application() + 
flake8_app.initialize(self.flake8_args) + return flake8_app + + @property + def flake8_args(self) -> list: + return ["--config", self.flake8_config_path, self.path] + + @property + def flake8_config_path(self) -> str: + return os.path.join(self.path, FLAKE8_CONFIG) + + @property + def yapf_config_path(self) -> str: + return os.path.join(self.path, YAPF_CONFIG) + + @property + def yapf_files(self): + return yapf.file_resources.GetCommandLineFiles( + self.args.paths, + recursive=self.args.recurse, + exclude=yapf.file_resources.GetExcludePatternsForDir(self.path)) + + def add_arguments(self, parser) -> None: + super().add_arguments(parser) + parser.add_argument( + "--diff-file", default=None, help="Specify the path to a diff file with fixes") + + def check_flake8(self) -> None: + """Run flake8 on files and/or repo""" + errors = [] + with utils.buffered(stdout=errors, mangle=self._strip_lines): + self.flake8_app.run_checks() + self.flake8_app.report() + if errors: + self.error("flake8", errors) + + def check_yapf(self) -> None: + """Run flake8 on files and/or repo""" + for python_file in self.yapf_files: + self.yapf_run(python_file) + + def on_check_run(self, check: str) -> None: + if check not in self.failed and check not in self.warned: + self.succeed(check, [f"[CHECKS:{self.name}] {check}: success"]) + + def on_checks_complete(self) -> int: + if self.diff_file_path and self.has_failed: + result = self.fork(["git", "diff", "HEAD"]) + with open(self.diff_file_path, "wb") as f: + f.write(result.stdout) + return super().on_checks_complete() + + def yapf_format(self, python_file: str) -> tuple: + return yapf.yapf_api.FormatFile( + python_file, + style_config=self.yapf_config_path, + in_place=self.fix, + print_diff=not self.fix) + + def yapf_run(self, python_file: str) -> None: + reformatted_source, encoding, changed = self.yapf_format(python_file) + if not changed: + return self.succeed("yapf", [f"{python_file}: success"]) + if self.fix: + return self.warn("yapf", 
[f"{python_file}: reformatted"]) + if reformatted_source: + return self.warn("yapf", [reformatted_source]) + self.error("yapf", [python_file]) + + def _strip_line(self, line) -> str: + return line[len(self.path) + 1:] if line.startswith(f"{self.path}/") else line + + def _strip_lines(self, lines) -> list: + return [self._strip_line(line) for line in lines if line] + + +def main(*args: list) -> None: + return PythonChecker(*args).run_checks() + + +if __name__ == "__main__": + sys.exit(main(*sys.argv[1:])) diff --git a/tools/code_format/python_flake8.py b/tools/code_format/python_flake8.py deleted file mode 100644 index 49ba8ced694d..000000000000 --- a/tools/code_format/python_flake8.py +++ /dev/null @@ -1,19 +0,0 @@ -import subprocess -import sys - -# explicitly use python3 linter -FLAKE8_COMMAND = ("python3", "-m", "flake8", ".") - - -def main(): - resp = subprocess.run(FLAKE8_COMMAND, capture_output=True, cwd=sys.argv[1]) - if resp.returncode: - # stdout and stderr are dumped to ensure we capture all errors - raise SystemExit( - "ERROR: flake8 linting failed: \n" - f"{resp.stdout.decode('utf-8')}\n" - f"{resp.stderr.decode('utf-8')}") - - -if __name__ == "__main__": - main() diff --git a/tools/code_format/tests/test_python_check.py b/tools/code_format/tests/test_python_check.py new file mode 100644 index 000000000000..7ade3552ff54 --- /dev/null +++ b/tools/code_format/tests/test_python_check.py @@ -0,0 +1,377 @@ +from contextlib import contextmanager +from unittest.mock import patch, MagicMock, PropertyMock + +import pytest + +from tools.code_format import python_check + + +def test_python_checker_constructor(): + checker = python_check.PythonChecker("path1", "path2", "path3") + assert checker.checks == ("flake8", "yapf") + assert checker.args.paths == ['path1', 'path2', 'path3'] + + +def test_python_diff_path(): + checker = python_check.PythonChecker("path1", "path2", "path3") + args_mock = patch("tools.code_format.python_check.PythonChecker.args", 
new_callable=PropertyMock) + + with args_mock as m_args: + assert checker.diff_file_path == m_args.return_value.diff_file + + +def test_python_flake8_app(patches): + checker = python_check.PythonChecker("path1", "path2", "path3") + patched = patches( + ("PythonChecker.flake8_args", dict(new_callable=PropertyMock)), + "Flake8Application", + prefix="tools.code_format.python_check") + + with patched as (m_flake8_args, m_flake8_app): + assert checker.flake8_app == m_flake8_app.return_value + + assert ( + list(m_flake8_app.call_args) + == [(), {}]) + assert ( + list(m_flake8_app.return_value.initialize.call_args) + == [(m_flake8_args.return_value,), {}]) + + +def test_python_flake8_args(patches): + checker = python_check.PythonChecker("path1", "path2", "path3") + patched = patches( + ("PythonChecker.flake8_config_path", dict(new_callable=PropertyMock)), + ("PythonChecker.path", dict(new_callable=PropertyMock)), + prefix="tools.code_format.python_check") + + with patched as (m_flake8_config, m_path): + assert ( + checker.flake8_args + == ['--config', + m_flake8_config.return_value, m_path.return_value]) + + +def test_python_flake8_config_path(patches): + checker = python_check.PythonChecker("path1", "path2", "path3") + patched = patches( + ("PythonChecker.path", dict(new_callable=PropertyMock)), + "os.path.join", + prefix="tools.code_format.python_check") + + with patched as (m_path, m_join): + assert checker.flake8_config_path == m_join.return_value + + assert ( + list(m_join.call_args) + == [(m_path.return_value, python_check.FLAKE8_CONFIG), {}]) + + +def test_python_yapf_config_path(patches): + checker = python_check.PythonChecker("path1", "path2", "path3") + patched = patches( + ("PythonChecker.path", dict(new_callable=PropertyMock)), + "os.path.join", + prefix="tools.code_format.python_check") + + with patched as (m_path, m_join): + assert checker.yapf_config_path == m_join.return_value + + assert ( + list(m_join.call_args) + == [(m_path.return_value, 
python_check.YAPF_CONFIG), {}]) + + +def test_python_yapf_files(patches): + checker = python_check.PythonChecker("path1", "path2", "path3") + + patched = patches( + ("PythonChecker.args", dict(new_callable=PropertyMock)), + ("PythonChecker.path", dict(new_callable=PropertyMock)), + "yapf.file_resources.GetCommandLineFiles", + "yapf.file_resources.GetExcludePatternsForDir", + prefix="tools.code_format.python_check") + + with patched as (m_args, m_path, m_yapf_files, m_yapf_exclude): + assert checker.yapf_files == m_yapf_files.return_value + + assert ( + list(m_yapf_files.call_args) + == [(m_args.return_value.paths,), + {'recursive': m_args.return_value.recurse, + 'exclude': m_yapf_exclude.return_value}]) + assert ( + list(m_yapf_exclude.call_args) + == [(m_path.return_value,), {}]) + + +def test_python_add_arguments(patches): + checker = python_check.PythonChecker("path1", "path2", "path3") + add_mock = patch("tools.code_format.python_check.checker.ForkingChecker.add_arguments") + m_parser = MagicMock() + + with add_mock as m_add: + checker.add_arguments(m_parser) + + assert ( + list(m_add.call_args) + == [(m_parser,), {}]) + assert ( + list(list(c) for c in m_parser.add_argument.call_args_list) + == [[('--diff-file',), + {'default': None, 'help': 'Specify the path to a diff file with fixes'}]]) + + +@pytest.mark.parametrize("errors", [[], ["err1", "err2"]]) +def test_python_check_flake8(patches, errors): + checker = python_check.PythonChecker("path1", "path2", "path3") + + patched = patches( + "utils.buffered", + "PythonChecker.error", + "PythonChecker._strip_lines", + ("PythonChecker.flake8_app", dict(new_callable=PropertyMock)), + prefix="tools.code_format.python_check") + + @contextmanager + def mock_buffered(stdout=None, mangle=None): + yield + stdout.extend(errors) + + with patched as (m_buffered, m_error, m_mangle, m_flake8_app): + m_buffered.side_effect = mock_buffered + checker.check_flake8() + + assert ( + list(m_buffered.call_args) + == [(), {'stdout': 
errors, 'mangle': m_mangle}]) + assert ( + list(m_flake8_app.return_value.run_checks.call_args) + == [(), {}]) + assert ( + list(m_flake8_app.return_value.report.call_args) + == [(), {}]) + + if errors: + assert ( + list(m_error.call_args) + == [('flake8', ['err1', 'err2']), {}]) + else: + assert not m_error.called + + +def test_python_check_yapf(patches): + checker = python_check.PythonChecker("path1", "path2", "path3") + patched = patches( + "PythonChecker.yapf_run", + ("PythonChecker.yapf_files", dict(new_callable=PropertyMock)), + prefix="tools.code_format.python_check") + + with patched as (m_yapf_run, m_yapf_files): + m_yapf_files.return_value = ["file1", "file2", "file3"] + checker.check_yapf() + + assert ( + list(list(c) for c in m_yapf_files.call_args_list) + == [[(), {}]]) + assert ( + list(list(c) for c in m_yapf_run.call_args_list) + == [[('file1',), {}], [('file2',), {}], [('file3',), {}]]) + + +TEST_CHECK_RESULTS = ( + ("check1", [], []), + ("check1", ["check2", "check3"], ["check4", "check5"]), + ("check1", ["check1", "check3"], ["check4", "check5"]), + ("check1", ["check2", "check3"], ["check1", "check5"]), + ("check1", ["check1", "check3"], ["check1", "check5"])) + + +@pytest.mark.parametrize("results", TEST_CHECK_RESULTS) +def test_python_on_check_run(patches, results): + checker = python_check.PythonChecker("path1", "path2", "path3") + checkname, errors, warnings = results + patched = patches( + "PythonChecker.succeed", + ("PythonChecker.name", dict(new_callable=PropertyMock)), + ("PythonChecker.failed", dict(new_callable=PropertyMock)), + ("PythonChecker.warned", dict(new_callable=PropertyMock)), + prefix="tools.code_format.python_check") + + with patched as (m_succeed, m_name, m_failed, m_warned): + m_failed.return_value = errors + m_warned.return_value = warnings + checker.on_check_run(checkname) + + if checkname in warnings or checkname in errors: + assert not m_succeed.called + else: + assert ( + list(m_succeed.call_args) + == [(checkname, 
[f"[CHECKS:{m_name.return_value}] {checkname}: success"]), {}]) + + +TEST_CHECKS_COMPLETE = ( + ("DIFF1", False), + ("DIFF1", True), + ("", False), + ("", True)) + + +@pytest.mark.parametrize("results", TEST_CHECKS_COMPLETE) +def test_python_on_checks_complete(patches, results): + checker = python_check.PythonChecker("path1", "path2", "path3") + diff_path, failed = results + patched = patches( + "open", + "PythonChecker.fork", + "checker.ForkingChecker.on_checks_complete", + ("PythonChecker.diff_file_path", dict(new_callable=PropertyMock)), + ("PythonChecker.has_failed", dict(new_callable=PropertyMock)), + prefix="tools.code_format.python_check") + + with patched as (m_open, m_fork, m_super, m_diff, m_failed): + m_diff.return_value = diff_path + m_failed.return_value = failed + assert checker.on_checks_complete() == m_super.return_value + + if diff_path and failed: + assert ( + list(m_fork.call_args) + == [(['git', 'diff', 'HEAD'],), {}]) + assert ( + list(m_open.call_args) + == [(diff_path, 'wb'), {}]) + assert ( + list(m_open.return_value.__enter__.return_value.write.call_args) + == [(m_fork.return_value.stdout,), {}]) + else: + assert not m_fork.called + assert not m_open.called + + assert ( + list(m_super.call_args) + == [(), {}]) + + +@pytest.mark.parametrize("fix", [True, False]) +def test_python_yapf_format(patches, fix): + checker = python_check.PythonChecker("path1", "path2", "path3") + patched = patches( + "yapf.yapf_api.FormatFile", + ("PythonChecker.yapf_config_path", dict(new_callable=PropertyMock)), + ("PythonChecker.fix", dict(new_callable=PropertyMock)), + prefix="tools.code_format.python_check") + + with patched as (m_format, m_config, m_fix): + m_fix.return_value = fix + assert checker.yapf_format("FILENAME") == m_format.return_value + + assert ( + list(m_format.call_args) + == [('FILENAME',), + {'style_config': m_config.return_value, + 'in_place': fix, + 'print_diff': not fix}]) + assert ( + list(list(c) for c in m_fix.call_args_list) + == [[(), 
{}], [(), {}]]) + + +TEST_FORMAT_RESULTS = ( + ("", "", True), + ("", "", False), + ("REFORMAT", "", True), + ("REFORMAT", "", False)) + + +@pytest.mark.parametrize("format_results", TEST_FORMAT_RESULTS) +@pytest.mark.parametrize("fix", [True, False]) +def test_python_yapf_run(patches, fix, format_results): + checker = python_check.PythonChecker("path1", "path2", "path3") + reformat, encoding, changed = format_results + patched = patches( + "PythonChecker.yapf_format", + "PythonChecker.succeed", + "PythonChecker.warn", + "PythonChecker.error", + ("PythonChecker.fix", dict(new_callable=PropertyMock)), + prefix="tools.code_format.python_check") + + with patched as (m_format, m_succeed, m_warn, m_error, m_fix): + m_fix.return_value = fix + m_format.return_value = format_results + checker.yapf_run("FILENAME") + + if not changed: + assert ( + list(m_succeed.call_args) + == [('yapf', ['FILENAME: success']), {}]) + assert not m_warn.called + assert not m_error.called + assert not m_fix.called + return + assert not m_succeed.called + if fix: + assert not m_error.called + assert len(m_warn.call_args_list) == 1 + assert ( + list(m_warn.call_args) + == [('yapf', [f'FILENAME: reformatted']), {}]) + return + if reformat: + assert not m_error.called + assert len(m_warn.call_args_list) == 1 + assert ( + list(m_warn.call_args) + == [('yapf', [reformat]), {}]) + return + assert not m_warn.called + assert ( + list(m_error.call_args) + == [('yapf', ['FILENAME']), {}]) + + +def test_python_strip_lines(): + checker = python_check.PythonChecker("path1", "path2", "path3") + strip_mock = patch("tools.code_format.python_check.PythonChecker._strip_line") + lines = ["", "foo", "", "bar", "", "", "baz", "", ""] + + with strip_mock as m_strip: + assert ( + checker._strip_lines(lines) + == [m_strip.return_value] * 3) + + assert ( + list(list(c) for c in m_strip.call_args_list) + == [[('foo',), {}], [('bar',), {}], [('baz',), {}]]) + + +@pytest.mark.parametrize("line", ["REMOVE/foo", "REMOVE", 
"bar", "other", "REMOVE/baz", "baz"]) +def test_python_strip_line(line): + checker = python_check.PythonChecker("path1", "path2", "path3") + path_mock = patch( + "tools.code_format.python_check.PythonChecker.path", + new_callable=PropertyMock) + + with path_mock as m_path: + m_path.return_value = "REMOVE" + assert ( + checker._strip_line(line) + == line[7:] if line.startswith(f"REMOVE/") else line) + + +def test_python_checker_main(): + class_mock = patch("tools.code_format.python_check.PythonChecker") + + with class_mock as m_class: + assert ( + python_check.main("arg0", "arg1", "arg2") + == m_class.return_value.run_checks.return_value) + + assert ( + list(m_class.call_args) + == [('arg0', 'arg1', 'arg2'), {}]) + assert ( + list(m_class.return_value.run_checks.call_args) + == [(), {}]) diff --git a/tools/testing/plugin.py b/tools/testing/plugin.py index d654d23de584..af9f487fb8f1 100644 --- a/tools/testing/plugin.py +++ b/tools/testing/plugin.py @@ -2,7 +2,6 @@ # This is pytest plugin providing fixtures for tests. 
# -import importlib from contextlib import contextmanager, ExitStack from typing import ContextManager, Iterator from unittest.mock import patch From 2c637b01914351ddb9c8eac88639e5709c738ef1 Mon Sep 17 00:00:00 2001 From: Ryan Northey Date: Tue, 13 Apr 2021 12:16:35 +0100 Subject: [PATCH 02/13] tools/ Signed-off-by: Ryan Northey --- tools/base/BUILD | 9 +- tools/base/checker.py | 88 +---------- tools/base/runner.py | 56 +++++++ tools/base/tests/test_checker.py | 157 ++----------------- tools/base/tests/test_runner.py | 153 +++++++++++++++++- tools/code_format/tests/test_python_check.py | 4 +- 6 files changed, 236 insertions(+), 231 deletions(-) diff --git a/tools/base/BUILD b/tools/base/BUILD index 3dff32bc6aa0..d38233b15eea 100644 --- a/tools/base/BUILD +++ b/tools/base/BUILD @@ -5,8 +5,13 @@ licenses(["notice"]) # Apache 2 envoy_package() -envoy_py_library("tools.base.checker") - envoy_py_library("tools.base.runner") envoy_py_library("tools.base.utils") + +envoy_py_library( + "tools.base.checker", + deps = [ + ":runner", + ], +) diff --git a/tools/base/checker.py b/tools/base/checker.py index 03f78f4e6eba..c4a290ee2aaf 100644 --- a/tools/base/checker.py +++ b/tools/base/checker.py @@ -1,25 +1,11 @@ import argparse -import logging import os -import subprocess -import sys from functools import cached_property -LOG_LEVELS = (("info", logging.INFO), ("debug", logging.DEBUG), ("warn", logging.WARN), - ("error", logging.ERROR)) +from tools.base import runner -class BazelRunError(Exception): - pass - - -class LogFilter(logging.Filter): - - def filter(self, rec): - return rec.levelno in (logging.DEBUG, logging.INFO) - - -class Checker(object): +class Checker(runner.Runner): """Runs check methods prefixed with `check_` and named in `self.checks` check methods should return the count of errors and log warnings and errors @@ -27,16 +13,11 @@ class Checker(object): checks = () def __init__(self, *args): - self._args = args + super().__init__(*args) self.success = {} 
self.errors = {} self.warnings = {} - @cached_property - def args(self) -> argparse.Namespace: - """Parsed args""" - return self.parser.parse_args(self._args) - @property def diff(self) -> bool: """Flag to determine whether the checker should print diffs to the console""" @@ -63,37 +44,6 @@ def has_failed(self) -> bool: # add logic for warn/error return self.failed or self.warned - @cached_property - def log(self) -> logging.Logger: - """Instantiated logger""" - logger = logging.getLogger(self.name) - logger.setLevel(self.log_level) - stdout_handler = logging.StreamHandler(sys.stdout) - stdout_handler.setLevel(logging.DEBUG) - stdout_handler.addFilter(LogFilter()) - stderr_handler = logging.StreamHandler(sys.stderr) - stderr_handler.setLevel(logging.WARN) - logger.addHandler(stdout_handler) - logger.addHandler(stderr_handler) - return logger - - @cached_property - def log_level(self) -> int: - """Log level parsed from args""" - return dict(LOG_LEVELS)[self.args.log_level] - - @property - def name(self) -> bool: - """Name of the checker""" - return self.__class__.__name__ - - @cached_property - def parser(self) -> argparse.ArgumentParser: - """Argparse parser""" - parser = argparse.ArgumentParser() - self.add_arguments(parser) - return parser - @cached_property def path(self) -> str: """The "path" - usually Envoy src dir. 
This is used for finding configs for the tooling and should be a dir""" @@ -283,35 +233,9 @@ def warn(self, name: str, warnings: list, log: bool = True) -> None: class ForkingChecker(Checker): - def fork(self, *args, capture_output: bool = True, **kwargs) -> subprocess.CompletedProcess: - """Fork a subprocess, using self.path as the cwd by default""" - kwargs["cwd"] = kwargs.get("cwd", self.path) - return subprocess.run(*args, capture_output=capture_output, **kwargs) - - -class BazelChecker(ForkingChecker): - - def bazel_query(self, query: str) -> list: - """Run a bazel query and return stdout as list of lines""" - resp = self.fork(["bazel", "query", f"'{query}'"]) - if resp.returncode: - raise BazelRunError(f"Bazel query failed: {resp}") - return resp.stdout.decode("utf-8").split("\n") - - def bazel_run( - self, - target: str, - *args, - capture_output: bool = False, - cwd: str = "", - raises: bool = True) -> subprocess.CompletedProcess: - """Run a bazel target and return the subprocess response""" - args = (("--",) + args) if args else args - bazel_args = ("bazel", "run", target) + args - resp = self.fork(bazel_args, capture_output=capture_output, cwd=cwd or self.path) - if resp.returncode and raises: - raise BazelRunError(f"Bazel run failed: {resp}") - return resp + @cached_property + def fork(self): + return runner.ForkingAdapter(self) class CheckerSummary(object): diff --git a/tools/base/runner.py b/tools/base/runner.py index 1b5115703fe9..be6c14986dff 100644 --- a/tools/base/runner.py +++ b/tools/base/runner.py @@ -3,9 +3,23 @@ # import argparse +import logging +import os +import subprocess +import sys from functools import cached_property +LOG_LEVELS = (("info", logging.INFO), ("debug", logging.DEBUG), ("warn", logging.WARN), + ("error", logging.ERROR)) + + +class LogFilter(logging.Filter): + + def filter(self, rec): + return rec.levelno in (logging.DEBUG, logging.INFO) + + class Runner(object): def __init__(self, *args): @@ -21,6 +35,30 @@ def 
extra_args(self) -> list: """Unparsed args""" return self.parser.parse_known_args(self._args)[1] + @cached_property + def log(self) -> logging.Logger: + """Instantiated logger""" + logger = logging.getLogger(self.name) + logger.setLevel(self.log_level) + stdout_handler = logging.StreamHandler(sys.stdout) + stdout_handler.setLevel(logging.DEBUG) + stdout_handler.addFilter(LogFilter()) + stderr_handler = logging.StreamHandler(sys.stderr) + stderr_handler.setLevel(logging.WARN) + logger.addHandler(stdout_handler) + logger.addHandler(stderr_handler) + return logger + + @cached_property + def log_level(self) -> int: + """Log level parsed from args""" + return dict(LOG_LEVELS)[self.args.log_level] + + @property + def name(self) -> bool: + """Name of the runner""" + return self.__class__.__name__ + @cached_property def parser(self) -> argparse.ArgumentParser: """Argparse parser""" @@ -28,6 +66,24 @@ def parser(self) -> argparse.ArgumentParser: self.add_arguments(parser) return parser + @cached_property + def path(self) -> str: + return os.getcwd() + def add_arguments(self, parser: argparse.ArgumentParser) -> None: """Override this method to add custom arguments to the arg parser""" pass + + +class ForkingAdapter(object): + + def __init__(self, context: Runner): + self.context = context + + def __call__(self, *args, **kwargs) -> subprocess.CompletedProcess: + return self.fork(*args, **kwargs) + + def fork(self, *args, capture_output: bool = True, **kwargs) -> subprocess.CompletedProcess: + """Fork a subprocess, using self.path as the cwd by default""" + kwargs["cwd"] = kwargs.get("cwd", self.context.path) + return subprocess.run(*args, capture_output=capture_output, **kwargs) diff --git a/tools/base/tests/test_checker.py b/tools/base/tests/test_checker.py index fd6160f55e6d..cb5c53be1953 100644 --- a/tools/base/tests/test_checker.py +++ b/tools/base/tests/test_checker.py @@ -1,13 +1,8 @@ -import logging -import sys from unittest.mock import MagicMock, patch, PropertyMock 
import pytest -from tools.base import pytest_checker -from tools.base.checker import ( - Checker, CheckerSummary, ForkingChecker, - LogFilter, LOG_LEVELS) +from tools.base.checker import Checker, CheckerSummary, ForkingChecker class DummyChecker(Checker): @@ -30,34 +25,16 @@ def check_check2(self): self.check2() -def test_pytest_checker(): - with patch("tools.base.pytest_checker.python_pytest") as m_pytest: - pytest_checker.main("arg1", "arg2", "arg3") - - assert ( - list(m_pytest.main.call_args) - == [('arg1', 'arg2', 'arg3', '--cov', 'tools.base'), {}]) - - def test_checker_constructor(): - checker = Checker("path1", "path2", "path3") - assert checker._args == ("path1", "path2", "path3") - assert checker.summary_class == CheckerSummary - + super_mock = patch("tools.base.checker.runner.Runner.__init__") -def test_checker_args(): - checker = Checker("path1", "path2", "path3") - parser_mock = patch( - "tools.base.checker.Checker.parser", - new_callable=PropertyMock) - - with parser_mock as m_parser: - assert checker.args == m_parser.return_value.parse_args.return_value + with super_mock as m_super: + checker = Checker("path1", "path2", "path3") assert ( - list(m_parser.return_value.parse_args.call_args) - == [(('path1', 'path2', 'path3'),), {}]) - assert "args" in checker.__dict__ + list(m_super.call_args) + == [('path1', 'path2', 'path3'), {}]) + assert checker.summary_class == CheckerSummary def test_checker_diff(): @@ -117,83 +94,6 @@ def test_checker_has_failed(patches, failed, warned): assert "has_failed" not in checker.__dict__ -def test_checker_log(patches): - checker = Checker("path1", "path2", "path3") - patched = patches( - "logging.getLogger", - "logging.StreamHandler", - "LogFilter", - ("Checker.log_level", dict(new_callable=PropertyMock)), - ("Checker.name", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_logger, m_stream, m_filter, m_level, m_name): - _loggers = (MagicMock(), MagicMock()) - m_stream.side_effect = 
_loggers - assert checker.log == m_logger.return_value - assert ( - list(m_logger.return_value.setLevel.call_args) - == [(m_level.return_value,), {}]) - assert ( - list(list(c) for c in m_stream.call_args_list) - == [[(sys.stdout,), {}], - [(sys.stderr,), {}]]) - assert ( - list(_loggers[0].setLevel.call_args) - == [(logging.DEBUG,), {}]) - assert ( - list(_loggers[0].addFilter.call_args) - == [(m_filter.return_value,), {}]) - assert ( - list(_loggers[1].setLevel.call_args) - == [(logging.WARN,), {}]) - assert ( - list(list(c) for c in m_logger.return_value.addHandler.call_args_list) - == [[(_loggers[0],), {}], [(_loggers[1],), {}]]) - assert "log" in checker.__dict__ - - -def test_checker_log_level(patches): - checker = Checker("path1", "path2", "path3") - patched = patches( - "dict", - ("Checker.args", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - with patched as (m_dict, m_args): - assert checker.log_level == m_dict.return_value.__getitem__.return_value - - assert ( - list(m_dict.call_args) - == [(LOG_LEVELS, ), {}]) - assert ( - list(m_dict.return_value.__getitem__.call_args) - == [(m_args.return_value.log_level,), {}]) - assert "log_level" in checker.__dict__ - - -def test_checker_name(): - checker = DummyChecker() - assert checker.name == checker.__class__.__name__ - assert "name" not in checker.__dict__ - - -def test_checker_parser(patches): - checker = Checker("path1", "path2", "path3") - patched = patches( - "argparse.ArgumentParser", - "Checker.add_arguments", - prefix="tools.base.checker") - with patched as (m_parser, m_add_args): - assert checker.parser == m_parser.return_value - assert ( - list(m_parser.call_args) - == [(), {}]) - assert ( - list(m_add_args.call_args) - == [(m_parser.return_value,), {}]) - assert "parser" in checker.__dict__ - - @pytest.mark.parametrize("path", [None, "PATH"]) @pytest.mark.parametrize("paths", [[], ["PATH0"]]) @pytest.mark.parametrize("isdir", [True, False]) @@ -614,32 +514,15 @@ def 
test_checker_succeed(patches, log, success): # ForkingChecker tests -@pytest.mark.parametrize("args", [(), ("a", "b")]) -@pytest.mark.parametrize("cwd", [None, "NONE", "PATH"]) -@pytest.mark.parametrize("capture_output", ["NONE", True, False]) -def test_forkingchecker_fork(patches, args, cwd, capture_output): +def test_forkingchecker_fork(): checker = ForkingChecker("path1", "path2", "path3") - patched = patches( - "subprocess.run", - ("Checker.path", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") + forking_mock = patch("tools.base.checker.runner.ForkingAdapter") - with patched as (m_run, m_path): - kwargs = {} - if cwd != "NONE": - kwargs["cwd"] = cwd - if capture_output != "NONE": - kwargs["capture_output"] = capture_output - assert checker.fork(*args, **kwargs) == m_run.return_value - - expected = {'capture_output': True, 'cwd': cwd} - if capture_output is False: - expected["capture_output"] = False - if cwd == "NONE": - expected["cwd"] = m_path.return_value + with forking_mock as m_fork: + assert checker.fork == m_fork.return_value assert ( - list(m_run.call_args) - == [args, expected]) + list(m_fork.call_args) + == [(checker,), {}]) # CheckerSummary tests @@ -770,17 +653,3 @@ def _extra(prob): assert ( list(list(c) for c in m_section.call_args_list) == expected) - - -# LogFilter tests -@pytest.mark.parametrize("level", [logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR, None, "giraffe"]) -def test_checker_log_filter(level): - logfilter = LogFilter() - - class DummyRecord(object): - levelno = level - - if level in [logging.DEBUG, logging.INFO]: - assert logfilter.filter(DummyRecord()) - else: - assert not logfilter.filter(DummyRecord()) diff --git a/tools/base/tests/test_runner.py b/tools/base/tests/test_runner.py index 970fd14a3afa..497a68b32db5 100644 --- a/tools/base/tests/test_runner.py +++ b/tools/base/tests/test_runner.py @@ -1,5 +1,9 @@ import importlib -from unittest.mock import patch, PropertyMock +import logging +import sys 
+from unittest.mock import MagicMock, patch, PropertyMock + +import pytest from tools.base import runner @@ -9,6 +13,12 @@ importlib.reload(runner) +class DummyRunner(runner.Runner): + + def __init__(self): + self.args = PropertyMock() + + def test_runner_constructor(): run = runner.Runner("path1", "path2", "path3") assert run._args == ("path1", "path2", "path3") @@ -50,6 +60,66 @@ def test_runner_extra_args(): assert "extra_args" in run.__dict__ +def test_runner_log(patches): + run = runner.Runner("path1", "path2", "path3") + patched = patches( + "logging.getLogger", + "logging.StreamHandler", + "LogFilter", + ("Runner.log_level", dict(new_callable=PropertyMock)), + ("Runner.name", dict(new_callable=PropertyMock)), + prefix="tools.base.runner") + + with patched as (m_logger, m_stream, m_filter, m_level, m_name): + _loggers = (MagicMock(), MagicMock()) + m_stream.side_effect = _loggers + assert run.log == m_logger.return_value + assert ( + list(m_logger.return_value.setLevel.call_args) + == [(m_level.return_value,), {}]) + assert ( + list(list(c) for c in m_stream.call_args_list) + == [[(sys.stdout,), {}], + [(sys.stderr,), {}]]) + assert ( + list(_loggers[0].setLevel.call_args) + == [(logging.DEBUG,), {}]) + assert ( + list(_loggers[0].addFilter.call_args) + == [(m_filter.return_value,), {}]) + assert ( + list(_loggers[1].setLevel.call_args) + == [(logging.WARN,), {}]) + assert ( + list(list(c) for c in m_logger.return_value.addHandler.call_args_list) + == [[(_loggers[0],), {}], [(_loggers[1],), {}]]) + assert "log" in run.__dict__ + + +def test_runner_log_level(patches): + run = runner.Runner("path1", "path2", "path3") + patched = patches( + "dict", + ("Runner.args", dict(new_callable=PropertyMock)), + prefix="tools.base.runner") + with patched as (m_dict, m_args): + assert run.log_level == m_dict.return_value.__getitem__.return_value + + assert ( + list(m_dict.call_args) + == [(runner.LOG_LEVELS, ), {}]) + assert ( + 
list(m_dict.return_value.__getitem__.call_args) + == [(m_args.return_value.log_level,), {}]) + assert "log_level" in run.__dict__ + + +def test_runner_name(): + run = DummyRunner() + assert run.name == run.__class__.__name__ + assert "name" not in run.__dict__ + + def test_runner_parser(patches): run = runner.Runner("path1", "path2", "path3") patched = patches( @@ -68,6 +138,87 @@ def test_runner_parser(patches): assert "parser" in run.__dict__ +def test_checker_path(): + run = runner.Runner("path1", "path2", "path3") + cwd_mock = patch("tools.base.runner.os.getcwd") + + with cwd_mock as m_cwd: + assert run.path == m_cwd.return_value + + assert ( + list(m_cwd.call_args) + == [(), {}]) + + def test_runner_add_arguments(patches): run = runner.Runner("path1", "path2", "path3") assert run.add_arguments("PARSER") is None + + +# LogFilter tests +@pytest.mark.parametrize("level", [logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR, None, "giraffe"]) +def test_runner_log_filter(level): + logfilter = runner.LogFilter() + + class DummyRecord(object): + levelno = level + + if level in [logging.DEBUG, logging.INFO]: + assert logfilter.filter(DummyRecord()) + else: + assert not logfilter.filter(DummyRecord()) + + +# ForkingAdapter tests + +def test_forkingadapter_constructor(): + _runner = DummyRunner() + adapter = runner.ForkingAdapter(_runner) + assert adapter.context == _runner + + +def test_forkingadapter_call(): + _runner = DummyRunner() + adapter = runner.ForkingAdapter(_runner) + fork_mock = patch("tools.base.runner.ForkingAdapter.fork") + + with fork_mock as m_fork: + assert ( + adapter( + "arg1", "arg2", "arg3", + kwa1="foo", + kwa2="bar", + kwa3="baz") + == m_fork.return_value) + assert ( + list(m_fork.call_args) + == [('arg1', 'arg2', 'arg3'), + {'kwa1': 'foo', 'kwa2': 'bar', 'kwa3': 'baz'}]) + + +@pytest.mark.parametrize("args", [(), ("a", "b")]) +@pytest.mark.parametrize("cwd", [None, "NONE", "PATH"]) +@pytest.mark.parametrize("capture_output", ["NONE", True, 
False]) +def test_forkingadapter_fork(patches, args, cwd, capture_output): + adapter = runner.ForkingAdapter(DummyRunner()) + patched = patches( + "subprocess.run", + ("Runner.path", dict(new_callable=PropertyMock)), + prefix="tools.base.runner") + + with patched as (m_run, m_path): + kwargs = {} + if cwd != "NONE": + kwargs["cwd"] = cwd + if capture_output != "NONE": + kwargs["capture_output"] = capture_output + assert adapter.fork(*args, **kwargs) == m_run.return_value + + expected = {'capture_output': True, 'cwd': cwd} + if capture_output is False: + expected["capture_output"] = False + if cwd == "NONE": + expected["cwd"] = m_path.return_value + assert ( + list(m_run.call_args) + == [args, expected]) diff --git a/tools/code_format/tests/test_python_check.py b/tools/code_format/tests/test_python_check.py index 7ade3552ff54..f9f0c2d90a11 100644 --- a/tools/code_format/tests/test_python_check.py +++ b/tools/code_format/tests/test_python_check.py @@ -224,8 +224,8 @@ def test_python_on_checks_complete(patches, results): diff_path, failed = results patched = patches( "open", - "PythonChecker.fork", - "checker.ForkingChecker.on_checks_complete", + "checker.ForkingChecker.fork", + "checker.Checker.on_checks_complete", ("PythonChecker.diff_file_path", dict(new_callable=PropertyMock)), ("PythonChecker.has_failed", dict(new_callable=PropertyMock)), prefix="tools.code_format.python_check") From ebbeba363420724f07f6be408431d7679a69dd9f Mon Sep 17 00:00:00 2001 From: Ryan Northey Date: Tue, 13 Apr 2021 14:03:42 +0100 Subject: [PATCH 03/13] tools/ Signed-off-by: Ryan Northey --- tools/base/runner.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/base/runner.py b/tools/base/runner.py index be6c14986dff..33ee810d1740 100644 --- a/tools/base/runner.py +++ b/tools/base/runner.py @@ -9,7 +9,6 @@ import sys from functools import cached_property - LOG_LEVELS = (("info", logging.INFO), ("debug", logging.DEBUG), ("warn", logging.WARN), ("error", logging.ERROR)) From 
95bc0a8f14e3728df5bf1e0791db2e4d786f373d Mon Sep 17 00:00:00 2001 From: Ryan Northey Date: Tue, 13 Apr 2021 14:04:04 +0100 Subject: [PATCH 04/13] tools/ Signed-off-by: Ryan Northey --- tools/base/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/base/runner.py b/tools/base/runner.py index 33ee810d1740..285718643e06 100644 --- a/tools/base/runner.py +++ b/tools/base/runner.py @@ -83,6 +83,6 @@ def __call__(self, *args, **kwargs) -> subprocess.CompletedProcess: return self.fork(*args, **kwargs) def fork(self, *args, capture_output: bool = True, **kwargs) -> subprocess.CompletedProcess: - """Fork a subprocess, using self.path as the cwd by default""" + """Fork a subprocess, using self.context.path as the cwd by default""" kwargs["cwd"] = kwargs.get("cwd", self.context.path) return subprocess.run(*args, capture_output=capture_output, **kwargs) From c42cec322ec20ea9020c573bd2b6d33900b98cb5 Mon Sep 17 00:00:00 2001 From: Ryan Northey Date: Tue, 13 Apr 2021 14:07:02 +0100 Subject: [PATCH 05/13] tools/ Signed-off-by: Ryan Northey --- tools/base/tests/test_checker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/base/tests/test_checker.py b/tools/base/tests/test_checker.py index cb5c53be1953..a128f182644d 100644 --- a/tools/base/tests/test_checker.py +++ b/tools/base/tests/test_checker.py @@ -523,6 +523,7 @@ def test_forkingchecker_fork(): assert ( list(m_fork.call_args) == [(checker,), {}]) + assert "fork" in checker.__dict__ # CheckerSummary tests From c00011dd47487f2e082a7b377ec3e5a220d55a7b Mon Sep 17 00:00:00 2001 From: Ryan Northey Date: Wed, 14 Apr 2021 15:17:59 +0100 Subject: [PATCH 06/13] readme Signed-off-by: Ryan Northey --- tools/README.md | 91 ++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 82 insertions(+), 9 deletions(-) diff --git a/tools/README.md b/tools/README.md index 22a3ed20a6bb..7afd311a4bf2 100644 --- a/tools/README.md +++ b/tools/README.md @@ -10,7 +10,7 @@ We will assume that `sometools` 
does not yet exist and will also need a `require and `bazel` rule to configure the dependencies. In most cases of adding a tool, it is likely you will not need to create a new set of dependencies, and -you can skip to the ("Add Python requirements")[#add-python-requirements] section. +you can skip to the ["Add Python requirements"](#add-python-requirements) section. We will also assume that you have `python3` and `pip` installed and working in your local environment. @@ -149,8 +149,6 @@ add a test runner to ensure the file is tested. ```starlark envoy_py_binary( name = "tools.sometools.mytool", - srcs = ["mytool.py"], - visibility = ["//visibility:public"], deps = [ requirement("requests"), requirement("pyyaml"), @@ -229,7 +227,7 @@ def test_mytool_main(): This example use the mock library to patch all of the method calls, and then tests that they have been called with the expected values. -You can run the test as follows: +You can run the test using the (automatically generated) `//tools/sometools:pytest_mytool` target as follows: ```console $ bazel run //tools/sometools:pytest_mytool @@ -314,15 +312,13 @@ This will drop you into the Python debugger (`pdb`) at the breakpoint. A base class for writing tools that need to parse command line arguments has been provided. -To make use of it in this example we will need to add the runner as a dependency to the `tools.base.mytool` target. +To make use of it in this example we will need to add the runner as a dependency to the `tools.sometools.mytool` target. 
-Edit `tools/sometools/BUILD` and change the `tools.base.mytool` target to the following: +Edit `tools/sometools/BUILD` and change the `tools.sometools.mytool` target to the following: ```starlark envoy_py_binary( - name = "tools.base.mytool", - srcs = ["mytool.py"], - visibility = ["//visibility:public"], + name = "tools.sometools.mytool", deps = [ "//tools/base:runner", requirement("requests"), @@ -389,3 +385,80 @@ or directly with `python`: $ ./tools/sometools/mytool.py -h ... ``` + +### Using the `tools.base.checker.Checker` class + +A base class for writing checkers (for example, linting tools) has also been provided. + +Any classes subclassing `tools.base.checker.Checker` should provide a tuple of `__class__.checks`. + +For each named check in `checks` the `Checker` will expect a method of the same name with the prefix `check_`. + +For example, setting `checks` to the tuple `("check1", "check2")` the `Checker` will run the methods `check_check1` and `check_check2` in order. + +Let's look at an example. + +First, we need to add the bazel target. + +Edit `tools/sometools/BUILD` and add a `tools.sometools.mychecker` target with a dependency on the base `Checker`. + +```starlark +envoy_py_binary( + name = "tools.sometools.mychecker", + deps = [ + "//tools/base:checker", + ], +) +``` + +Next add the `MyChecker` class to `tools/sometools/mychecker.py` as follows: + +```python +#!/usr/bin/env python3 + +import sys + +from tools.base.checker import Checker + + +class MyChecker(Checker): + checks = ("check1", "check2") + + def check_check1(self) -> None: + # checking code for check1 + try: + do_something() + except NotSuchABadError: + self.warn("check1", ["Doing something didn't work out quite as expected 8/"]) + except ATerribleError: + self.error("check1", ["Oh noes, something went badly wrong! 
8("]) + else: + self.success("check1", ["All good 8)"]) + + def check_check2(self) -> None: + # checking code for check2 + try: + do_something_else() + except NotSuchABadError: + self.warn("check2", ["Doing something else didn't work out quite as expected 8/"]) + except ATerribleError: + self.error("check2", ["Oh noes, something else went badly wrong! 8("]) + else: + self.success("check2", ["All good 8)"]) + + +def main(*args) -> int: + return MyChecker(*args).run() + + +if __name__ == "__main__": + sys.exit(main(*sys.argv[1:])) +``` + +Just like with the `Runner` class described [above](#using-the-toolsbaserunnerrunner-class) you can +user both with and without `bazel`. To use without, you will need make it executable, and the end +user will need to have any dependencies locally installed. + +Notice in the check methods the results of the check are logged to one of `self.error`, `self.warn`, +or `self.success`. Each takes a `list` of outcomes. The results will be summarized to the user at the +end of all checks. From 7b13cd7c6f8c147d18cb5c043e237593e4351913 Mon Sep 17 00:00:00 2001 From: Ryan Northey Date: Wed, 14 Apr 2021 15:24:44 +0100 Subject: [PATCH 07/13] readme Signed-off-by: Ryan Northey --- tools/README.md | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tools/README.md b/tools/README.md index 7afd311a4bf2..081238d36fcd 100644 --- a/tools/README.md +++ b/tools/README.md @@ -433,7 +433,7 @@ class MyChecker(Checker): except ATerribleError: self.error("check1", ["Oh noes, something went badly wrong! 8("]) else: - self.success("check1", ["All good 8)"]) + self.succeed("check1", ["All good 8)"]) def check_check2(self) -> None: # checking code for check2 @@ -444,7 +444,7 @@ class MyChecker(Checker): except ATerribleError: self.error("check2", ["Oh noes, something else went badly wrong! 
8("]) else: - self.success("check2", ["All good 8)"]) + self.succeed("check2", ["All good 8)"]) def main(*args) -> int: @@ -460,5 +460,12 @@ user both with and without `bazel`. To use without, you will need make it execut user will need to have any dependencies locally installed. Notice in the check methods the results of the check are logged to one of `self.error`, `self.warn`, -or `self.success`. Each takes a `list` of outcomes. The results will be summarized to the user at the +or `self.succeed`. Each takes a `list` of outcomes. The results will be summarized to the user at the end of all checks. + +Just like with `Runner` a help menu is automatically created, and you can add custom arguments if +required. + +One key difference with the `Checker` tools and its derivatives is that it expects a `path` either specified +with `--path` or as an argument. This is used as a context (for example the Envoy src directory) for +running the checks. From 4dee42178ffa52c8ee32229496e7b935e5e3cca2 Mon Sep 17 00:00:00 2001 From: Ryan Northey Date: Wed, 14 Apr 2021 15:32:33 +0100 Subject: [PATCH 08/13] readme Signed-off-by: Ryan Northey --- tools/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/README.md b/tools/README.md index 081238d36fcd..f0781ec027d0 100644 --- a/tools/README.md +++ b/tools/README.md @@ -466,6 +466,9 @@ end of all checks. Just like with `Runner` a help menu is automatically created, and you can add custom arguments if required. +Also like `Runner`, any added `Checker` classes are expected to have unit tests, and a `pytest_mychecker` bazel target +is automatically added. With the above example, the test file should be located at `tools/sometools/tests/test_mychecker.py`. + One key difference with the `Checker` tools and its derivatives is that it expects a `path` either specified with `--path` or as an argument. This is used as a context (for example the Envoy src directory) for running the checks. 
From 23fdbebc9e6e0e162284dcdd5fdca075fbcb6830 Mon Sep 17 00:00:00 2001 From: Ryan Northey Date: Wed, 14 Apr 2021 15:42:31 +0100 Subject: [PATCH 09/13] docstring-cleanup Signed-off-by: Ryan Northey --- tools/base/checker.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/base/checker.py b/tools/base/checker.py index c4a290ee2aaf..0c9c93fa69f1 100644 --- a/tools/base/checker.py +++ b/tools/base/checker.py @@ -8,7 +8,8 @@ class Checker(runner.Runner): """Runs check methods prefixed with `check_` and named in `self.checks` - check methods should return the count of errors and log warnings and errors + Check methods should call the `self.warn`, `self.error` or `self.succeed` + depending upon the outcome of the checks. """ checks = () From 4a4a3cd2d62a642d05547753a2504f56987a5b55 Mon Sep 17 00:00:00 2001 From: Ryan Northey Date: Thu, 15 Apr 2021 11:46:02 +0100 Subject: [PATCH 10/13] mypy-fixes Signed-off-by: Ryan Northey --- tools/base/checker.py | 13 +++++++------ tools/base/runner.py | 2 +- tools/base/utils.py | 9 ++++++--- 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/tools/base/checker.py b/tools/base/checker.py index 0c9c93fa69f1..2462ac8e0667 100644 --- a/tools/base/checker.py +++ b/tools/base/checker.py @@ -1,6 +1,7 @@ import argparse import os from functools import cached_property +from typing import Optional, Sequence, Tuple, Type from tools.base import runner @@ -11,7 +12,7 @@ class Checker(runner.Runner): Check methods should call the `self.warn`, `self.error` or `self.succeed` depending upon the outcome of the checks. """ - checks = () + checks: Tuple[str, ...] 
= () def __init__(self, *args): super().__init__(*args) @@ -43,7 +44,7 @@ def fix(self) -> bool: def has_failed(self) -> bool: """Shows whether there are any failures""" # add logic for warn/error - return self.failed or self.warned + return bool(self.failed or self.warned) @cached_property def path(self) -> str: @@ -96,12 +97,12 @@ def success_count(self) -> int: return sum(len(e) for e in self.success.values()) @cached_property - def summary(self) -> bool: + def summary(self) -> CheckerSummary: """Instance of the checker's summary class""" return self.summary_class(self) @property - def summary_class(self): + def summary_class(self) -> Type[CheckerSummary]: """Checker's summary class""" return CheckerSummary @@ -177,7 +178,7 @@ def add_arguments(self, parser: argparse.ArgumentParser) -> None: "Paths to check. At least one path must be specified, or the `path` argument should be provided" ) - def error(self, name: str, errors: list, log: bool = True) -> None: + def error(self, name: str, errors: list, log: bool = True) -> int: """Record (and log) errors for a check type""" self.errors[name] = self.errors.get(name, []) self.errors[name].extend(errors) @@ -185,7 +186,7 @@ def error(self, name: str, errors: list, log: bool = True) -> None: self.log.error("\n".join(errors)) return 1 - def get_checks(self) -> list: + def get_checks(self) -> Sequence[str]: """Get list of checks for this checker class filtered according to user args""" return ( self.checks if not self.args.check else diff --git a/tools/base/runner.py b/tools/base/runner.py index 285718643e06..be1bf1e8bec7 100644 --- a/tools/base/runner.py +++ b/tools/base/runner.py @@ -54,7 +54,7 @@ def log_level(self) -> int: return dict(LOG_LEVELS)[self.args.log_level] @property - def name(self) -> bool: + def name(self) -> str: """Name of the runner""" return self.__class__.__name__ diff --git a/tools/base/utils.py b/tools/base/utils.py index 3ce0294218ec..68c3c57a927c 100644 --- a/tools/base/utils.py +++ 
b/tools/base/utils.py @@ -7,7 +7,7 @@ import tempfile from configparser import ConfigParser from contextlib import ExitStack, contextmanager, redirect_stderr, redirect_stdout -from typing import Iterator +from typing import Callable, Iterator, List, Optional, Union # this is testing specific - consider moving to tools.testing.utils @@ -40,7 +40,10 @@ def nested(*contexts): @contextmanager -def buffered(stdout: list = None, stderr: list = None, mangle=None) -> None: +def buffered( + stdout: list = None, + stderr: list = None, + mangle: Optional[Callable[[list], list]] = None) -> Iterator[None]: """Captures stdout and stderr and feeds lines to supplied lists""" mangle = mangle or (lambda lines: lines) @@ -48,7 +51,7 @@ def buffered(stdout: list = None, stderr: list = None, mangle=None) -> None: if stdout is None and stderr is None: raise BufferUtilError("You must specify stdout and/or stderr") - contexts = [] + contexts: List[Union[redirect_stderr[io.StringIO], redirect_stdout[io.StringIO]]] = [] if stdout is not None: _stdout = io.StringIO() From 43f8961d94d5e0a886ae3f94645cde3ca8bd3d29 Mon Sep 17 00:00:00 2001 From: Ryan Northey Date: Thu, 15 Apr 2021 11:48:27 +0100 Subject: [PATCH 11/13] bool-equality Signed-off-by: Ryan Northey --- tools/base/tests/test_checker.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/base/tests/test_checker.py b/tools/base/tests/test_checker.py index a128f182644d..1a421e5b77f8 100644 --- a/tools/base/tests/test_checker.py +++ b/tools/base/tests/test_checker.py @@ -88,9 +88,9 @@ def test_checker_has_failed(patches, failed, warned): result = checker.has_failed if failed or warned: - assert result == True + assert result is True else: - assert result == False + assert result is False assert "has_failed" not in checker.__dict__ @@ -181,9 +181,9 @@ def test_checker_show_summary(patches, summary, error_count, warning_count): result = checker.show_summary if summary or error_count or warning_count: - assert 
result == True + assert result is True else: - assert result == False + assert result is False assert "show_summary" not in checker.__dict__ From 3d1f3985699324074acd5f7c49be959f64fcfbd9 Mon Sep 17 00:00:00 2001 From: Ryan Northey Date: Thu, 15 Apr 2021 12:05:06 +0100 Subject: [PATCH 12/13] cleanups Signed-off-by: Ryan Northey --- tools/base/checker.py | 15 ++------------- tools/base/tests/test_checker.py | 15 --------------- tools/code_format/python_check.py | 13 ++++++++++++- tools/code_format/tests/test_python_check.py | 17 ++++++++++++++++- 4 files changed, 30 insertions(+), 30 deletions(-) diff --git a/tools/base/checker.py b/tools/base/checker.py index 2462ac8e0667..da5e765ec6e4 100644 --- a/tools/base/checker.py +++ b/tools/base/checker.py @@ -65,11 +65,6 @@ def paths(self) -> list: """List of paths to apply checks to""" return self.args.paths or [self.path] - @property - def recurse(self) -> bool: - """Flag to determine whether to apply checks recursively (where appropriate)""" - return self.args.recurse - @property def show_summary(self) -> bool: """Show a summary at the end or not""" @@ -97,12 +92,12 @@ def success_count(self) -> int: return sum(len(e) for e in self.success.values()) @cached_property - def summary(self) -> CheckerSummary: + def summary(self) -> "CheckerSummary": """Instance of the checker's summary class""" return self.summary_class(self) @property - def summary_class(self) -> Type[CheckerSummary]: + def summary_class(self) -> Type["CheckerSummary"]: """Checker's summary class""" return CheckerSummary @@ -125,12 +120,6 @@ def add_arguments(self, parser: argparse.ArgumentParser) -> None: action="store_true", default=False, help="Display a diff in the console where available") - parser.add_argument( - "--recurse", - "-r", - choices=["yes", "no"], - default="yes", - help="Recurse path or paths directories") parser.add_argument( "--warning", "-w", diff --git a/tools/base/tests/test_checker.py b/tools/base/tests/test_checker.py index 
1a421e5b77f8..6f3d2e63e787 100644 --- a/tools/base/tests/test_checker.py +++ b/tools/base/tests/test_checker.py @@ -152,17 +152,6 @@ def test_checker_paths(patches, paths): assert "paths" not in checker.__dict__ -def test_checker_recurse(): - checker = Checker("path1", "path2", "path3") - args_mock = patch( - "tools.base.checker.Checker.args", - new_callable=PropertyMock) - - with args_mock as m_args: - assert checker.recurse == m_args.return_value.recurse - assert "recurse" not in checker.__dict__ - - @pytest.mark.parametrize("summary", [True, False]) @pytest.mark.parametrize("error_count", [0, 1]) @pytest.mark.parametrize("warning_count", [0, 1]) @@ -280,10 +269,6 @@ def test_checker_add_arguments(): {'action': 'store_true', 'default': False, 'help': 'Display a diff in the console where available'}], - [('--recurse', '-r'), - {'choices': ['yes', 'no'], - 'default': 'yes', - 'help': 'Recurse path or paths directories'}], [('--warning', '-w'), {'choices': ['warn', 'error'], 'default': 'warn', diff --git a/tools/code_format/python_check.py b/tools/code_format/python_check.py index 40d0cfd7faa7..d84ab1a42eeb 100755 --- a/tools/code_format/python_check.py +++ b/tools/code_format/python_check.py @@ -51,6 +51,11 @@ def flake8_args(self) -> list: def flake8_config_path(self) -> str: return os.path.join(self.path, FLAKE8_CONFIG) + @property + def recurse(self) -> bool: + """Flag to determine whether to apply checks recursively""" + return self.args.recurse + @property def yapf_config_path(self) -> str: return os.path.join(self.path, YAPF_CONFIG) @@ -59,11 +64,17 @@ def yapf_config_path(self) -> str: def yapf_files(self): return yapf.file_resources.GetCommandLineFiles( self.args.paths, - recursive=self.args.recurse, + recursive=self.recurse, exclude=yapf.file_resources.GetExcludePatternsForDir(self.path)) def add_arguments(self, parser) -> None: super().add_arguments(parser) + parser.add_argument( + "--recurse", + "-r", + choices=["yes", "no"], + default="yes", + 
help="Recurse path or paths directories") parser.add_argument( "--diff-file", default=None, help="Specify the path to a diff file with fixes") diff --git a/tools/code_format/tests/test_python_check.py b/tools/code_format/tests/test_python_check.py index f9f0c2d90a11..b74b893f7d8f 100644 --- a/tools/code_format/tests/test_python_check.py +++ b/tools/code_format/tests/test_python_check.py @@ -118,7 +118,11 @@ def test_python_add_arguments(patches): == [(m_parser,), {}]) assert ( list(list(c) for c in m_parser.add_argument.call_args_list) - == [[('--diff-file',), + == [[('--recurse', '-r'), + {'choices': ['yes', 'no'], + 'default': 'yes', + 'help': 'Recurse path or paths directories'}], + [('--diff-file',), {'default': None, 'help': 'Specify the path to a diff file with fixes'}]]) @@ -160,6 +164,17 @@ def mock_buffered(stdout=None, mangle=None): assert not m_error.called +def test_python_check_recurse(): + checker = python_check.PythonChecker("path1", "path2", "path3") + args_mock = patch( + "tools.code_format.python_check.PythonChecker.args", + new_callable=PropertyMock) + + with args_mock as m_args: + assert checker.recurse == m_args.return_value.recurse + assert "recurse" not in checker.__dict__ + + def test_python_check_yapf(patches): checker = python_check.PythonChecker("path1", "path2", "path3") patched = patches( From 82292fa89684ba88f987cdb81ff106357d6fdd93 Mon Sep 17 00:00:00 2001 From: Ryan Northey Date: Thu, 15 Apr 2021 12:10:19 +0100 Subject: [PATCH 13/13] cleanups Signed-off-by: Ryan Northey --- tools/README.md | 2 +- tools/base/checker.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/README.md b/tools/README.md index f0781ec027d0..77244a0ced46 100644 --- a/tools/README.md +++ b/tools/README.md @@ -456,7 +456,7 @@ if __name__ == "__main__": ``` Just like with the `Runner` class described [above](#using-the-toolsbaserunnerrunner-class) you can -user both with and without `bazel`. 
To use without, you will need make it executable, and the end +use both with and without `bazel`. To use without, you will need make it executable, and the end user will need to have any dependencies locally installed. Notice in the check methods the results of the check are logged to one of `self.error`, `self.warn`, diff --git a/tools/base/checker.py b/tools/base/checker.py index da5e765ec6e4..4bdef7a5632b 100644 --- a/tools/base/checker.py +++ b/tools/base/checker.py @@ -1,7 +1,7 @@ import argparse import os from functools import cached_property -from typing import Optional, Sequence, Tuple, Type +from typing import Sequence, Tuple, Type from tools.base import runner