diff --git a/.gitignore b/.gitignore index ae1f29656b59..94d0778d9287 100644 --- a/.gitignore +++ b/.gitignore @@ -37,3 +37,4 @@ cmake-build-debug /linux bazel.output.txt *~ +.coverage diff --git a/.yapfignore b/.yapfignore new file mode 100644 index 000000000000..0409026f278a --- /dev/null +++ b/.yapfignore @@ -0,0 +1,7 @@ +*generated* +*venv* +*protos* +*~ +*_pb2.py +*tests* +*#* diff --git a/bazel/repositories_extra.bzl b/bazel/repositories_extra.bzl index 9b4274271d3a..bda3919952dd 100644 --- a/bazel/repositories_extra.bzl +++ b/bazel/repositories_extra.bzl @@ -6,13 +6,6 @@ load("@proxy_wasm_cpp_host//bazel/cargo:crates.bzl", "proxy_wasm_cpp_host_fetch_ def _python_deps(): py_repositories() - # REMOVE!!! - pip_install( - name = "sometools_pip3", - requirements = "@envoy//tools/sometools:requirements.txt", - extra_pip_args = ["--require-hashes"], - ) - pip_install( name = "config_validation_pip3", requirements = "@envoy//tools/config_validation:requirements.txt", diff --git a/ci/do_ci.sh b/ci/do_ci.sh index c9421a0c90ae..f906a04bbd7b 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -7,9 +7,9 @@ set -e build_setup_args="" if [[ "$1" == "format_pre" || "$1" == "fix_format" || "$1" == "check_format" || "$1" == "check_repositories" || \ - "$1" == "check_spelling" || "$1" == "fix_spelling" || "$1" == "bazel.clang_tidy" || \ - "$1" == "check_spelling_pedantic" || "$1" == "fix_spelling_pedantic" ]]; then - build_setup_args="-nofetch" + "$1" == "check_spelling" || "$1" == "fix_spelling" || "$1" == "bazel.clang_tidy" || "$1" == "tooling" || \ + "$1" == "check_spelling_pedantic" || "$1" == "fix_spelling_pedantic" ]]; then + build_setup_args="-nofetch" fi # TODO(phlax): Clarify and/or integrate SRCDIR and ENVOY_SRCDIR @@ -470,8 +470,10 @@ elif [[ "$CI_TARGET" == "tooling" ]]; then echo "Run pytest tooling tests..." 
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/testing:pytest_python_pytest -- --cov-collect /tmp/.coverage-envoy bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/testing:pytest_python_coverage -- --cov-collect /tmp/.coverage-envoy + bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/base:pytest_checker -- --cov-collect /tmp/.coverage-envoy bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/base:pytest_runner -- --cov-collect /tmp/.coverage-envoy bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/base:pytest_utils -- --cov-collect /tmp/.coverage-envoy + bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/code_format:pytest_python_check -- --cov-collect /tmp/.coverage-envoy bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/testing:python_coverage -- --fail-under=95 /tmp/.coverage-envoy /source/generated/tooling exit 0 elif [[ "$CI_TARGET" == "verify_examples" ]]; then diff --git a/ci/format_pre.sh b/ci/format_pre.sh index 86ae89d484e2..92df44517097 100755 --- a/ci/format_pre.sh +++ b/ci/format_pre.sh @@ -43,18 +43,8 @@ CURRENT=shellcheck CURRENT=configs bazel run "${BAZEL_BUILD_OPTIONS[@]}" //configs:example_configs_validation -# TODO(phlax): update to use bazel and python_flake8/python_check -# this will simplify this code to a single line CURRENT=python -"${ENVOY_SRCDIR}"/tools/code_format/format_python_tools.sh check || { - "${ENVOY_SRCDIR}"/tools/code_format/format_python_tools.sh fix - git diff HEAD | tee "${DIFF_OUTPUT}" - raise () { - # this throws an error without exiting - return 1 - } - raise -} +bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/code_format:python_check -- --diff-file="$DIFF_OUTPUT" --fix "$(pwd)" if [[ "${#FAILED[@]}" -ne "0" ]]; then echo "TESTS FAILED:" >&2 diff --git a/tools/base/BUILD b/tools/base/BUILD index c5656e230a90..3dff32bc6aa0 100644 --- a/tools/base/BUILD +++ b/tools/base/BUILD @@ -5,6 +5,8 @@ licenses(["notice"]) # Apache 2 envoy_package() +envoy_py_library("tools.base.checker") + envoy_py_library("tools.base.runner") envoy_py_library("tools.base.utils") diff --git a/tools/base/checker.py b/tools/base/checker.py new file mode 100644 index 000000000000..03f78f4e6eba --- /dev/null +++ b/tools/base/checker.py @@ -0,0 +1,361 @@ +import argparse +import logging +import os +import subprocess +import sys +from functools import cached_property + +LOG_LEVELS = (("info", logging.INFO), ("debug", logging.DEBUG), ("warn", logging.WARN), + ("error", logging.ERROR)) + + +class BazelRunError(Exception): + pass + + +class LogFilter(logging.Filter): + + def filter(self, rec): + return rec.levelno in (logging.DEBUG, logging.INFO) + + +class Checker(object): + """Runs check methods prefixed with `check_` and named in `self.checks` + + check methods should return the count of errors and log warnings and errors + """ + checks = () + + def __init__(self, *args): + self._args = args + self.success = {} + self.errors = {} + self.warnings = {} + + @cached_property + def args(self) -> argparse.Namespace: + """Parsed args""" + return self.parser.parse_args(self._args) + + @property + def diff(self) -> bool: + """Flag to determine whether the checker should print diffs to the console""" + return self.args.diff + + @property + def error_count(self) -> int: + """Count of all errors found""" + return sum(len(e) for e in self.errors.values()) + + @property + def failed(self) -> dict: + """Dictionary of errors per check""" + return dict((k, (len(v))) for k, v in self.errors.items()) + + @property + def fix(self) -> bool: + """Flag to determine whether the checker should attempt to fix found problems""" + return 
self.args.fix + + @property + def has_failed(self) -> bool: + """Shows whether there are any failures""" + # add logic for warn/error + return bool(self.failed or self.warned) + + @cached_property + def log(self) -> logging.Logger: + """Instantiated logger""" + logger = logging.getLogger(self.name) + logger.setLevel(self.log_level) + stdout_handler = logging.StreamHandler(sys.stdout) + stdout_handler.setLevel(logging.DEBUG) + stdout_handler.addFilter(LogFilter()) + stderr_handler = logging.StreamHandler(sys.stderr) + stderr_handler.setLevel(logging.WARN) + logger.addHandler(stdout_handler) + logger.addHandler(stderr_handler) + return logger + + @cached_property + def log_level(self) -> int: + """Log level parsed from args""" + return dict(LOG_LEVELS)[self.args.log_level] + + @property + def name(self) -> str: + """Name of the checker""" + return self.__class__.__name__ + + @cached_property + def parser(self) -> argparse.ArgumentParser: + """Argparse parser""" + parser = argparse.ArgumentParser() + self.add_arguments(parser) + return parser + + @cached_property + def path(self) -> str: + """The primary path - usually the Envoy source dir. Used to find configs for the tooling; must be a directory""" + try: + path = self.args.path or self.args.paths[0] + except IndexError: + raise self.parser.error( + "Missing path: `path` must be set either as an arg or with --path") + if not os.path.isdir(path): + raise self.parser.error( + "Incorrect path: `path` must be a directory, set either as first arg or with --path" + ) + return path + + @property + def paths(self) -> list: + """List of paths to apply checks to""" + return self.args.paths or [self.path] + + @property + def recurse(self) -> bool: + """Flag to determine whether to apply checks recursively (where appropriate)""" + return self.args.recurse + + @property + def show_summary(self) -> bool: + """Show a summary at the end or not""" + return bool(self.args.summary or self.error_count or self.warning_count) + + @property + def status(self) -> dict: + """Dictionary showing current success/warnings/errors""" + return dict( + success=self.success_count, + errors=self.error_count, + warnings=self.warning_count, + failed=self.failed, + warned=self.warned, + succeeded=self.succeeded) + + @property + def succeeded(self) -> dict: + """Dictionary of success counts per check""" + return dict((k, (len(v))) for k, v in self.success.items()) + + @property + def success_count(self) -> int: + """Current count of successful checks""" + return sum(len(e) for e in self.success.values()) + + @cached_property + def summary(self) -> "CheckerSummary": + """Instance of the checker's summary class""" + return self.summary_class(self) + + @property + def summary_class(self): + """Checker's summary class""" + return CheckerSummary + + @property + def warned(self) -> dict: + """Dictionary of warning counts per check""" + return dict((k, (len(v))) for k, v in self.warnings.items()) + + @property + def warning_count(self) -> int: + """Current count of warned checks""" + return sum(len(e) for e in self.warnings.values()) + + def add_arguments(self, parser: argparse.ArgumentParser) -> None: + """Add arguments to the arg parser""" + parser.add_argument( + "--fix", action="store_true", default=False, help="Attempt to fix in place") + parser.add_argument( + "--diff", + action="store_true", + default=False, + help="Display a diff in the console where available") + parser.add_argument( + "--recurse", + "-r", + choices=["yes", "no"], + default="yes", + help="Recurse path
or paths directories") + parser.add_argument( + "--warning", + "-w", + choices=["warn", "error"], + default="warn", + help="Handle warnings as warnings or errors") + parser.add_argument( + "--summary", action="store_true", default=False, help="Show a summary of check runs") + parser.add_argument( + "--summary-errors", + type=int, + default=5, + help="Number of errors to show in the summary, -1 shows all") + parser.add_argument( + "--summary-warnings", + type=int, + default=5, + help="Number of warnings to show in the summary, -1 shows all") + parser.add_argument( + "--check", + "-c", + choices=self.checks, + nargs="*", + help="Specify which checks to run, can be specified for multiple checks") + for check in self.checks: + parser.add_argument( + f"--config-{check}", default="", help=f"Custom configuration for the {check} check") + parser.add_argument( + "--path", + "-p", + default=None, + help= + "Path to the test root (usually Envoy source dir). If not specified the first path of paths is used" + ) + parser.add_argument( + "--log-level", + "-l", + choices=["info", "warn", "debug", "error"], + default="info", + help="Log level to display") + parser.add_argument( + "paths", + nargs="*", + help= + "Paths to check. At least one path must be specified, or the `path` argument should be provided" + ) + + def error(self, name: str, errors: list, log: bool = True) -> None: + """Record (and log) errors for a check type""" + self.errors[name] = self.errors.get(name, []) + self.errors[name].extend(errors) + if log: + self.log.error("\n".join(errors)) + return 1 + + def get_checks(self) -> list: + """Get list of checks for this checker class filtered according to user args""" + return ( + self.checks if not self.args.check else + [check for check in self.args.check if check in self.checks]) + + def on_check_run(self, check: str) -> None: + """Callback hook called after each check run""" + pass + + def on_checks_begin(self) -> None: + """Callback hook called before all checks""" + pass + + def on_checks_complete(self) -> int: + """Callback hook called after all checks have run, and returning the final outcome of a checks_run""" + if self.show_summary: + self.summary.print_summary() + return 1 if self.has_failed else 0 + + def run_checks(self) -> int: + """Run all configured checks and return the sum of their error counts""" + checks = self.get_checks() + self.on_checks_begin() + for check in checks: + self.log.info(f"[CHECKS:{self.name}] {check}") + getattr(self, f"check_{check}")() + self.on_check_run(check) + return self.on_checks_complete() + + def succeed(self, name: str, success: list, log: bool = True) -> None: + """Record (and log) success for a check type""" + + self.success[name] = self.success.get(name, []) + self.success[name].extend(success) + if log: + self.log.info("\n".join(success)) + + def warn(self, name: str, warnings: list, log: bool = True) -> None: + """Record (and log) warnings for a check type""" + + self.warnings[name] = self.warnings.get(name, []) + self.warnings[name].extend(warnings) + if log: + self.log.warning("\n".join(warnings)) + + +class ForkingChecker(Checker): + + def fork(self, *args, capture_output: bool = True, **kwargs) -> subprocess.CompletedProcess: + """Fork a subprocess, using self.path as the cwd by default""" + kwargs["cwd"] = kwargs.get("cwd", self.path) + return subprocess.run(*args, capture_output=capture_output, **kwargs) + + +class BazelChecker(ForkingChecker): + + def bazel_query(self, query: str) -> list: + """Run a bazel query and return stdout as 
list of lines""" + resp = self.fork(["bazel", "query", f"'{query}'"]) + if resp.returncode: + raise BazelRunError(f"Bazel query failed: {resp}") + return resp.stdout.decode("utf-8").split("\n") + + def bazel_run( + self, + target: str, + *args, + capture_output: bool = False, + cwd: str = "", + raises: bool = True) -> subprocess.CompletedProcess: + """Run a bazel target and return the subprocess response""" + args = (("--",) + args) if args else args + bazel_args = ("bazel", "run", target) + args + resp = self.fork(bazel_args, capture_output=capture_output, cwd=cwd or self.path) + if resp.returncode and raises: + raise BazelRunError(f"Bazel run failed: {resp}") + return resp + + +class CheckerSummary(object): + + def __init__(self, checker: Checker): + self.checker = checker + + @property + def max_errors(self) -> int: + """Maximum errors to display in summary""" + return self.checker.args.summary_errors + + @property + def max_warnings(self) -> int: + """Maximum warnings to display in summary""" + return self.checker.args.summary_warnings + + def print_failed(self, problem_type): + _out = [] + _max = getattr(self, f"max_{problem_type}") + for check, problems in getattr(self.checker, problem_type).items(): + _msg = f"[{problem_type.upper()}:{self.checker.name}] {check}" + _max = (min(len(problems), _max) if _max >= 0 else len(problems)) + msg = ( + f"{_msg}: (showing first {_max} of {len(problems)})" if + (len(problems) > _max and _max > 0) else (f"{_msg}:" if _max != 0 else _msg)) + _out.extend(self._section(msg, problems[:_max])) + if _out: + self.checker.log.error("\n".join(_out)) + + def print_status(self) -> None: + """Print summary status to stderr""" + self.checker.log.warning( + "\n".join(self._section(f"[SUMMARY:{self.checker.name}] {self.checker.status}"))) + + def print_summary(self) -> None: + """Write summary to stderr""" + self.print_failed("warnings") + self.print_failed("errors") + self.print_status() + + def _section(self, message: str, lines: list = None) -> list: + """Print a summary section""" + section = ["", "-" * 80, "", f"{message}"] + if lines: + section += lines + return section diff --git a/tools/base/tests/test_checker.py b/tools/base/tests/test_checker.py new file mode 100644 index 000000000000..fd6160f55e6d --- /dev/null +++ b/tools/base/tests/test_checker.py @@ -0,0 +1,786 @@ +import logging +import sys +from unittest.mock import MagicMock, patch, PropertyMock + +import pytest + +from tools.base import pytest_checker +from tools.base.checker import ( + Checker, CheckerSummary, ForkingChecker, + LogFilter, LOG_LEVELS) + + +class DummyChecker(Checker): + + def __init__(self): + self.args = PropertyMock() + + +class DummyCheckerWithChecks(Checker): + checks = ("check1", "check2") + + def __init__(self, *args): + self.check1 = MagicMock() + self.check2 = MagicMock() + + def check_check1(self): + self.check1() + + def check_check2(self): + self.check2() + + +def test_pytest_checker(): + with patch("tools.base.pytest_checker.python_pytest") as m_pytest: + pytest_checker.main("arg1", "arg2", "arg3") + + assert ( + list(m_pytest.main.call_args) + == [('arg1', 'arg2', 'arg3', '--cov', 'tools.base'), {}]) + + +def test_checker_constructor(): + checker = Checker("path1", "path2", "path3") + assert checker._args == ("path1", "path2", "path3") + assert checker.summary_class == CheckerSummary + + +def test_checker_args(): + checker = Checker("path1", "path2", "path3") + parser_mock = patch( + "tools.base.checker.Checker.parser", + new_callable=PropertyMock) + + with 
parser_mock as m_parser: + assert checker.args == m_parser.return_value.parse_args.return_value + + assert ( + list(m_parser.return_value.parse_args.call_args) + == [(('path1', 'path2', 'path3'),), {}]) + assert "args" in checker.__dict__ + + +def test_checker_diff(): + checker = Checker("path1", "path2", "path3") + args_mock = patch( + "tools.base.checker.Checker.args", + new_callable=PropertyMock) + + with args_mock as m_args: + assert checker.diff == m_args.return_value.diff + assert "diff" not in checker.__dict__ + + +def test_checker_error_count(): + checker = Checker("path1", "path2", "path3") + checker.errors = dict(foo=["err"] * 3, bar=["err"] * 5, baz=["err"] * 7) + assert checker.error_count == 15 + assert "error_count" not in checker.__dict__ + + +def test_checker_failed(): + checker = Checker("path1", "path2", "path3") + checker.errors = dict(foo=["err"] * 3, bar=["err"] * 5, baz=["err"] * 7) + assert checker.failed == {'foo': 3, 'bar': 5, 'baz': 7} + assert "failed" not in checker.__dict__ + + +def test_checker_fix(): + checker = Checker("path1", "path2", "path3") + args_mock = patch( + "tools.base.checker.Checker.args", + new_callable=PropertyMock) + + with args_mock as m_args: + assert checker.fix == m_args.return_value.fix + assert "fix" not in checker.__dict__ + + +@pytest.mark.parametrize("failed", [True, False]) +@pytest.mark.parametrize("warned", [True, False]) +def test_checker_has_failed(patches, failed, warned): + checker = Checker("path1", "path2", "path3") + patched = patches( + ("Checker.failed", dict(new_callable=PropertyMock)), + ("Checker.warned", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + with patched as (m_failed, m_warned): + m_failed.return_value = failed + m_warned.return_value = warned + result = checker.has_failed + + if failed or warned: + assert result == True + else: + assert result == False + assert "has_failed" not in checker.__dict__ + + +def test_checker_log(patches): + checker = Checker("path1", "path2", "path3") + patched = patches( + "logging.getLogger", + "logging.StreamHandler", + "LogFilter", + ("Checker.log_level", dict(new_callable=PropertyMock)), + ("Checker.name", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + with patched as (m_logger, m_stream, m_filter, m_level, m_name): + _loggers = (MagicMock(), MagicMock()) + m_stream.side_effect = _loggers + assert checker.log == m_logger.return_value + assert ( + list(m_logger.return_value.setLevel.call_args) + == [(m_level.return_value,), {}]) + assert ( + list(list(c) for c in m_stream.call_args_list) + == [[(sys.stdout,), {}], + [(sys.stderr,), {}]]) + assert ( + list(_loggers[0].setLevel.call_args) + == [(logging.DEBUG,), {}]) + assert ( + list(_loggers[0].addFilter.call_args) + == [(m_filter.return_value,), {}]) + assert ( + list(_loggers[1].setLevel.call_args) + == [(logging.WARN,), {}]) + assert ( + list(list(c) for c in m_logger.return_value.addHandler.call_args_list) + == [[(_loggers[0],), {}], [(_loggers[1],), {}]]) + assert "log" in checker.__dict__ + + +def test_checker_log_level(patches): + checker = Checker("path1", "path2", "path3") + patched = patches( + "dict", + ("Checker.args", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + with patched as (m_dict, m_args): + assert checker.log_level == m_dict.return_value.__getitem__.return_value + + assert ( + list(m_dict.call_args) + == [(LOG_LEVELS, ), {}]) + assert ( + list(m_dict.return_value.__getitem__.call_args) + == [(m_args.return_value.log_level,), {}]) + assert 
"log_level" in checker.__dict__ + + +def test_checker_name(): + checker = DummyChecker() + assert checker.name == checker.__class__.__name__ + assert "name" not in checker.__dict__ + + +def test_checker_parser(patches): + checker = Checker("path1", "path2", "path3") + patched = patches( + "argparse.ArgumentParser", + "Checker.add_arguments", + prefix="tools.base.checker") + with patched as (m_parser, m_add_args): + assert checker.parser == m_parser.return_value + assert ( + list(m_parser.call_args) + == [(), {}]) + assert ( + list(m_add_args.call_args) + == [(m_parser.return_value,), {}]) + assert "parser" in checker.__dict__ + + +@pytest.mark.parametrize("path", [None, "PATH"]) +@pytest.mark.parametrize("paths", [[], ["PATH0"]]) +@pytest.mark.parametrize("isdir", [True, False]) +def test_checker_path(patches, path, paths, isdir): + class DummyError(Exception): + pass + checker = Checker("path1", "path2", "path3") + patched = patches( + ("Checker.args", dict(new_callable=PropertyMock)), + ("Checker.parser", dict(new_callable=PropertyMock)), + "os.path.isdir", + prefix="tools.base.checker") + + with patched as (m_args, m_parser, m_isdir): + m_parser.return_value.error = DummyError + m_args.return_value.path = path + m_args.return_value.paths = paths + m_isdir.return_value = isdir + if not path and not paths: + with pytest.raises(DummyError) as e: + checker.path + assert ( + e.value.args + == ('Missing path: `path` must be set either as an arg or with --path',)) + elif not isdir: + with pytest.raises(DummyError) as e: + checker.path + assert ( + e.value.args + == ('Incorrect path: `path` must be a directory, set either as first arg or with --path',)) + else: + assert checker.path == path or paths[0] + assert "path" in checker.__dict__ + if path or paths: + assert ( + list(m_isdir.call_args) + == [(path or paths[0],), {}]) + + +@pytest.mark.parametrize("paths", [[], ["path1", "path2"]]) +def test_checker_paths(patches, paths): + checker = Checker("path1", "path2", "path3") + patched = patches( + ("Checker.args", dict(new_callable=PropertyMock)), + ("Checker.path", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + with patched as (m_args, m_path): + m_args.return_value.paths = paths + result = checker.paths + + if paths: + assert result == paths + else: + assert result == [m_path.return_value] + assert "paths" not in checker.__dict__ + + +def test_checker_recurse(): + checker = Checker("path1", "path2", "path3") + args_mock = patch( + "tools.base.checker.Checker.args", + new_callable=PropertyMock) + + with args_mock as m_args: + assert checker.recurse == m_args.return_value.recurse + assert "recurse" not in checker.__dict__ + + +@pytest.mark.parametrize("summary", [True, False]) +@pytest.mark.parametrize("error_count", [0, 1]) +@pytest.mark.parametrize("warning_count", [0, 1]) +def test_checker_show_summary(patches, summary, error_count, warning_count): + checker = Checker("path1", "path2", "path3") + patched = patches( + ("Checker.args", dict(new_callable=PropertyMock)), + ("Checker.error_count", dict(new_callable=PropertyMock)), + ("Checker.warning_count", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + with patched as (m_args, m_errors, m_warnings): + m_args.return_value.summary = summary + m_errors.return_value = error_count + m_warnings.return_value = warning_count + result = checker.show_summary + + if summary or error_count or warning_count: + assert result == True + else: + assert result == False + assert "show_summary" not in checker.__dict__ + 
+ +def test_checker_status(patches): + checker = Checker("path1", "path2", "path3") + patched = patches( + ("Checker.success_count", dict(new_callable=PropertyMock)), + ("Checker.error_count", dict(new_callable=PropertyMock)), + ("Checker.warning_count", dict(new_callable=PropertyMock)), + ("Checker.failed", dict(new_callable=PropertyMock)), + ("Checker.warned", dict(new_callable=PropertyMock)), + ("Checker.succeeded", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + with patched as args: + (m_success_count, m_error_count, m_warning_count, + m_failed, m_warned, m_succeeded) = args + assert ( + checker.status + == dict( + success=m_success_count.return_value, + errors=m_error_count.return_value, + warnings=m_warning_count.return_value, + failed=m_failed.return_value, + warned=m_warned.return_value, + succeeded=m_succeeded.return_value)) + assert "status" not in checker.__dict__ + + +def test_checker_succeeded(): + checker = Checker("path1", "path2", "path3") + checker.success = dict( + foo=["check"] * 3, + bar=["check"] * 5, + baz=["check"] * 7) + assert ( + checker.succeeded + == dict(foo=3, bar=5, baz=7)) + assert "succeeded" not in checker.__dict__ + + +def test_checker_success_count(): + checker = Checker("path1", "path2", "path3") + checker.success = dict(foo=["err"] * 3, bar=["err"] * 5, baz=["err"] * 7) + assert checker.success_count == 15 + assert "success_count" not in checker.__dict__ + + +def test_checker_summary(): + checker = Checker("path1", "path2", "path3") + summary_mock = patch( + "tools.base.checker.Checker.summary_class", + new_callable=PropertyMock) + + with summary_mock as m_summary: + assert checker.summary == m_summary.return_value.return_value + + assert ( + list(m_summary.return_value.call_args) + == [(checker,), {}]) + assert "summary" in checker.__dict__ + + +def test_checker_warned(): + checker = Checker("path1", "path2", "path3") + checker.warnings = dict( + foo=["check"] * 3, + bar=["check"] * 5, + baz=["check"] * 7) + assert ( + checker.warned + == dict(foo=3, bar=5, baz=7)) + assert "warned" not in checker.__dict__ + + +def test_checker_warning_count(): + checker = Checker("path1", "path2", "path3") + checker.warnings = dict(foo=["warn"] * 3, bar=["warn"] * 5, baz=["warn"] * 7) + assert checker.warning_count == 15 + assert "warning_count" not in checker.__dict__ + + +def test_checker_add_arguments(): + checker = DummyCheckerWithChecks("path1", "path2", "path3") + parser = MagicMock() + checker.add_arguments(parser) + assert ( + list(list(c) for c in parser.add_argument.call_args_list) + == [[('--fix',), + {'action': 'store_true', + 'default': False, + 'help': 'Attempt to fix in place'}], + [('--diff',), + {'action': 'store_true', + 'default': False, + 'help': 'Display a diff in the console where available'}], + [('--recurse', '-r'), + {'choices': ['yes', 'no'], + 'default': 'yes', + 'help': 'Recurse path or paths directories'}], + [('--warning', '-w'), + {'choices': ['warn', 'error'], + 'default': 'warn', + 'help': 'Handle warnings as warnings or errors'}], + [('--summary',), + {'action': 'store_true', + 'default': False, + 'help': 'Show a summary of check runs'}], + [('--summary-errors',), + {'type': int, + 'default': 5, + 'help': 'Number of errors to show in the summary, -1 shows all'}], + [('--summary-warnings',), + {'type': int, + 'default': 5, + 'help': 'Number of warnings to show in the summary, -1 shows all'}], + [('--check', '-c'), + {'choices': ("check1", "check2"), + 'nargs': '*', + 'help': 'Specify which checks to run, can be 
specified for multiple checks'}], + [('--config-check1',), + {'default': '', + 'help': 'Custom configuration for the check1 check'}], + [('--config-check2',), + {'default': '', + 'help': 'Custom configuration for the check2 check'}], + [('--path', '-p'), + {'default': None, + 'help': 'Path to the test root (usually Envoy source dir). If not specified the first path of paths is used'}], + [('--log-level', '-l'), + {'choices': ['info', 'warn', 'debug', 'error'], + 'default': 'info', 'help': 'Log level to display'}], + [('paths',), + {'nargs': '*', + 'help': 'Paths to check. At least one path must be specified, or the `path` argument should be provided'}]]) + + +TEST_ERRORS = ( + {}, + dict(myerror=[]), + dict(myerror=["a", "b", "c"]), + dict(othererror=["other1", "other2", "other3"]), + dict(othererror=["other1", "other2", "other3"], myerror=["a", "b", "c"])) + + +@pytest.mark.parametrize("log", [True, False]) +@pytest.mark.parametrize("errors", TEST_ERRORS) +def test_checker_error(patches, log, errors): + checker = Checker("path1", "path2", "path3") + log_mock = patch( + "tools.base.checker.Checker.log", + new_callable=PropertyMock) + checker.errors = errors.copy() + + with log_mock as m_log: + assert checker.error("mycheck", ["err1", "err2", "err3"], log) == 1 + + assert checker.errors["mycheck"] == errors.get("mycheck", []) + ["err1", "err2", "err3"] + for k, v in errors.items(): + if k != "mycheck": + assert checker.errors[k] == v + if log: + assert ( + list(m_log.return_value.error.call_args) + == [('err1\nerr2\nerr3',), {}]) + else: + assert not m_log.return_value.error.called + + +TEST_CHECKS = ( + None, + (), + ("check1", ), + ("check1", "check2", "check3"), + ("check3", "check4", "check5"), + ("check4", "check5")) + + +@pytest.mark.parametrize("checks", TEST_CHECKS) +def test_checker_get_checks(checks): + checker = Checker("path1", "path2", "path3") + checker.checks = ("check1", "check2", "check3") + args_mock = patch( + "tools.base.checker.Checker.args", + new_callable=PropertyMock) + + with args_mock as m_args: + m_args.return_value.check = checks + if checks: + assert ( + checker.get_checks() + == [check for check in checker.checks if check in checks or []]) + else: + assert checker.get_checks() == checker.checks + + +def test_checker_on_check_run(): + checker = Checker("path1", "path2", "path3") + assert not checker.on_check_run("checkname") + + +def test_checker_on_checks_begin(): + checker = Checker("path1", "path2", "path3") + assert checker.on_checks_begin() is None + + +@pytest.mark.parametrize("failed", [True, False]) +@pytest.mark.parametrize("show_summary", [True, False]) +def test_checker_on_checks_complete(patches, failed, show_summary): + checker = Checker("path1", "path2", "path3") + patched = patches( + ("Checker.has_failed", dict(new_callable=PropertyMock)), + ("Checker.show_summary", dict(new_callable=PropertyMock)), + ("Checker.summary", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + with patched as (m_failed, m_show_summary, m_summary): + m_failed.return_value = failed + m_show_summary.return_value = show_summary + assert checker.on_checks_complete() is (1 if failed else 0) + + if show_summary: + assert ( + list(m_summary.return_value.print_summary.call_args) + == [(), {}]) + else: + assert not m_summary.return_value.print_summary.called + + +def test_checker_run_checks(patches): + checker = DummyCheckerWithChecks("path1", "path2", "path3") + patched = patches( + "Checker.get_checks", + "Checker.on_checks_begin", + 
"Checker.on_checks_complete", + ("Checker.log", dict(new_callable=PropertyMock)), + ("Checker.name", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + with patched as (m_get, m_begin, m_complete, m_log, m_name): + m_get.return_value = ("check1", "check2") + assert checker.run_checks() == m_complete.return_value + + assert ( + list(m_get.call_args) + == [(), {}]) + assert ( + list(m_begin.call_args) + == [(), {}]) + assert ( + list(m_complete.call_args) + == [(), {}]) + assert ( + list(list(c) for c in m_log.return_value.info.call_args_list) + == [[(f"[CHECKS:{m_name.return_value}] check1",), {}], + [(f"[CHECKS:{m_name.return_value}] check2",), {}]]) + assert ( + list(checker.check1.call_args) + == [(), {}]) + assert ( + list(checker.check2.call_args) + == [(), {}]) + + +TEST_WARNS = ( + {}, + dict(mywarn=[]), + dict(mywarn=["a", "b", "c"]), + dict(otherwarn=["other1", "other2", "other3"]), + dict(otherwarn=["other1", "other2", "other3"], mywarn=["a", "b", "c"])) + + +@pytest.mark.parametrize("log", [True, False]) +@pytest.mark.parametrize("warns", TEST_WARNS) +def test_checker_warn(patches, log, warns): + checker = Checker("path1", "path2", "path3") + log_mock = patch( + "tools.base.checker.Checker.log", + new_callable=PropertyMock) + checker.warnings = warns.copy() + + with log_mock as m_log: + checker.warn("mycheck", ["warn1", "warn2", "warn3"], log) + + assert checker.warnings["mycheck"] == warns.get("mycheck", []) + ["warn1", "warn2", "warn3"] + for k, v in warns.items(): + if k != "mycheck": + assert checker.warnings[k] == v + if log: + assert ( + list(m_log.return_value.warning.call_args) + == [('warn1\nwarn2\nwarn3',), {}]) + else: + assert not m_log.return_value.warn.called + + +TEST_SUCCESS = ( + {}, + dict(mysuccess=[]), + dict(mysuccess=["a", "b", "c"]), + dict(othersuccess=["other1", "other2", "other3"]), + dict(othersuccess=["other1", "other2", "other3"], mysuccess=["a", "b", "c"])) + + +@pytest.mark.parametrize("log", [True, False]) +@pytest.mark.parametrize("success", TEST_SUCCESS) +def test_checker_succeed(patches, log, success): + checker = Checker("path1", "path2", "path3") + log_mock = patch( + "tools.base.checker.Checker.log", + new_callable=PropertyMock) + checker.success = success.copy() + + with log_mock as m_log: + checker.succeed("mycheck", ["success1", "success2", "success3"], log) + + assert checker.success["mycheck"] == success.get("mycheck", []) + ["success1", "success2", "success3"] + for k, v in success.items(): + if k != "mycheck": + assert checker.success[k] == v + if log: + assert ( + list(m_log.return_value.info.call_args) + == [('success1\nsuccess2\nsuccess3',), {}]) + else: + assert not m_log.return_value.info.called + + +# ForkingChecker tests + +@pytest.mark.parametrize("args", [(), ("a", "b")]) +@pytest.mark.parametrize("cwd", [None, "NONE", "PATH"]) +@pytest.mark.parametrize("capture_output", ["NONE", True, False]) +def test_forkingchecker_fork(patches, args, cwd, capture_output): + checker = ForkingChecker("path1", "path2", "path3") + patched = patches( + "subprocess.run", + ("Checker.path", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + with patched as (m_run, m_path): + kwargs = {} + if cwd != "NONE": + kwargs["cwd"] = cwd + if capture_output != "NONE": + kwargs["capture_output"] = capture_output + assert checker.fork(*args, **kwargs) == m_run.return_value + + expected = {'capture_output': True, 'cwd': cwd} + if capture_output is False: + expected["capture_output"] = False + if cwd == "NONE": + 
expected["cwd"] = m_path.return_value + assert ( + list(m_run.call_args) + == [args, expected]) + + +# CheckerSummary tests + +def test_checker_summary_constructor(): + checker = DummyChecker() + summary = CheckerSummary(checker) + assert summary.checker == checker + + +@pytest.mark.parametrize("max_errors", [-1, 0, 1, 23]) +def test_checker_summary_max_errors(max_errors): + checker = DummyChecker() + summary = CheckerSummary(checker) + checker.args.summary_errors = max_errors + assert summary.max_errors == max_errors + + +@pytest.mark.parametrize("max_warnings", [-1, 0, 1, 23]) +def test_checker_summary_max_warnings(max_warnings): + checker = DummyChecker() + summary = CheckerSummary(checker) + checker.args.summary_warnings = max_warnings + assert summary.max_warnings == max_warnings + + +def test_checker_summary_print_summary(patches): + checker = DummyChecker() + summary = CheckerSummary(checker) + patched = patches( + "CheckerSummary.print_failed", + "CheckerSummary.print_status", + prefix="tools.base.checker") + + with patched as (m_failed, m_status): + summary.print_summary() + assert ( + list(list(c) for c in m_failed.call_args_list) + == [[('warnings',), {}], [('errors',), {}]]) + assert m_status.called + + +TEST_SECTIONS = ( + ("MSG1", ["a", "b", "c"]), + ("MSG2", []), + ("MSG3", None)) + + +@pytest.mark.parametrize("section", TEST_SECTIONS) +def test_checker_summary_section(section): + checker = DummyChecker() + summary = CheckerSummary(checker) + message, lines = section + expected = [ + "", + "-" * 80, + "", + f"{message}"] + if lines: + expected += lines + assert summary._section(message, lines) == expected + + +def test_checker_summary_print_status(patches): + checker = DummyChecker() + summary = CheckerSummary(checker) + patched = patches( + "CheckerSummary._section", + prefix="tools.base.checker") + + summary.checker = MagicMock() + with patched as (m_section, ): + m_section.return_value = ["A", "B", "C"] + summary.print_status() + assert ( + list(m_section.call_args) + == [(f"[SUMMARY:{summary.checker.name}] {summary.checker.status}",), {}]) + assert ( + list(summary.checker.log.warning.call_args) + == [('A\nB\nC',), {}]) + + +@pytest.mark.parametrize("problem_type", ("errors", "warnings")) +@pytest.mark.parametrize("max_display", (-1, 0, 1, 23)) +@pytest.mark.parametrize("problems", ({}, dict(foo=["problem1"]), dict(foo=["problem1", "problem2"], bar=["problem3", "problem4"]))) +def test_checker_summary_print_failed(patches, problem_type, max_display, problems): + checker = DummyChecker() + summary = CheckerSummary(checker) + patched = patches( + "CheckerSummary._section", + (f"CheckerSummary.max_{problem_type}", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + with patched as (m_section, m_max): + summary.checker = MagicMock() + setattr(summary.checker, f"{problem_type}", problems) + m_max.return_value = max_display + m_section.return_value = ["A", "B", "C"] + summary.print_failed(problem_type) + + if not problems: + assert not summary.checker.log.error.called + assert not m_section.called + return + assert ( + list(summary.checker.log.error.call_args) + == [("\n".join(['A\nB\nC'] * len(problems)),), {}]) + if max_display == 0: + expected = [ + [(f"[{problem_type.upper()}:{summary.checker.name}] {prob}", []), {}] + for prob in problems] + else: + def _problems(prob): + return ( + problems[prob][:max_display] + if max_display > 0 + else problems[prob]) + def _extra(prob): + return ( + f": (showing first {max_display} of {len(problems)})" + if 
len(problems[prob]) > max_display and max_display >= 0 + else (":" + if max_display != 0 + else "")) + expected = [ + [(f"[{problem_type.upper()}:{summary.checker.name}] {prob}{_extra(prob)}", _problems(prob)), {}] + for prob in problems] + assert ( + list(list(c) for c in m_section.call_args_list) + == expected) + + +# LogFilter tests +@pytest.mark.parametrize("level", [logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR, None, "giraffe"]) +def test_checker_log_filter(level): + logfilter = LogFilter() + + class DummyRecord(object): + levelno = level + + if level in [logging.DEBUG, logging.INFO]: + assert logfilter.filter(DummyRecord()) + else: + assert not logfilter.filter(DummyRecord()) diff --git a/tools/base/tests/test_utils.py b/tools/base/tests/test_utils.py index 78f83a5ae999..4d48a9ac5d41 100644 --- a/tools/base/tests/test_utils.py +++ b/tools/base/tests/test_utils.py @@ -1,4 +1,8 @@ import importlib +import sys +from contextlib import contextmanager + +import pytest from tools.base import utils @@ -8,6 +12,76 @@ importlib.reload(utils) +def test_util_buffered_stdout(): + stdout = [] + + with utils.buffered(stdout=stdout): + print("test1") + print("test2") + sys.stdout.write("test3\n") + sys.stderr.write("error0\n") + + assert stdout == ["test1", "test2", "test3"] + + +def test_util_buffered_stderr(): + stderr = [] + + with utils.buffered(stderr=stderr): + print("test1") + print("test2") + sys.stdout.write("test3\n") + sys.stderr.write("error0\n") + sys.stderr.write("error1\n") + + assert stderr == ["error0", "error1"] + + +def test_util_buffered_stdout_stderr(): + stdout = [] + stderr = [] + + with utils.buffered(stdout=stdout, stderr=stderr): + print("test1") + print("test2") + sys.stdout.write("test3\n") + sys.stderr.write("error0\n") + sys.stderr.write("error1\n") + + assert stdout == ["test1", "test2", "test3"] + assert stderr == ["error0", "error1"] + + +def test_util_buffered_no_stdout_stderr(): + + with pytest.raises(utils.BufferUtilError): + with utils.buffered(): + pass + + +def test_util_nested(): + + fun1_args = [] + fun2_args = [] + + @contextmanager + def fun1(arg): + fun1_args.append(arg) + yield "FUN1" + + @contextmanager + def fun2(arg): + fun2_args.append(arg) + yield "FUN2" + + with utils.nested(fun1("A"), fun2("B")) as (fun1_yield, fun2_yield): + assert fun1_yield == "FUN1" + assert fun2_yield == "FUN2" + + assert fun1_args == ["A"] + assert fun2_args == ["B"] + + def test_util_coverage_with_data_file(patches): patched = patches( "ConfigParser", diff --git a/tools/base/utils.py b/tools/base/utils.py index 203910c37f41..3ce0294218ec 100644 --- a/tools/base/utils.py +++ b/tools/base/utils.py @@ -2,10 +2,11 @@ # Provides shared utils used by other python modules # +import io import os import tempfile from configparser import ConfigParser -from contextlib import contextmanager +from contextlib import ExitStack, contextmanager, redirect_stderr, redirect_stdout from typing import Iterator @@ -26,3 +27,42 @@ def coverage_with_data_file(data_file: str) -> Iterator[str]: with open(tmprc, "w") as f: parser.write(f) yield tmprc + + +class BufferUtilError(Exception): + pass + + +@contextmanager +def nested(*contexts): + with ExitStack() as stack: + yield [stack.enter_context(context) for context in contexts] + + +@contextmanager +def buffered(stdout: list = None, stderr: list = None, mangle=None) -> None: + """Captures stdout and stderr and feeds lines to supplied lists""" + + mangle = mangle or (lambda lines: lines) + + if stdout is None and stderr is None: + raise 
BufferUtilError("You must specify stdout and/or stderr") + + contexts = [] + + if stdout is not None: + _stdout = io.StringIO() + contexts.append(redirect_stdout(_stdout)) + if stderr is not None: + _stderr = io.StringIO() + contexts.append(redirect_stderr(_stderr)) + + with nested(*contexts): + yield + + if stdout is not None: + _stdout.seek(0) + stdout.extend(mangle(_stdout.read().strip().split("\n"))) + if stderr is not None: + _stderr.seek(0) + stderr.extend(mangle(_stderr.read().strip().split("\n"))) diff --git a/tools/code_format/BUILD b/tools/code_format/BUILD index 78297275b0fe..95784d0ef412 100644 --- a/tools/code_format/BUILD +++ b/tools/code_format/BUILD @@ -1,6 +1,6 @@ -load("@rules_python//python:defs.bzl", "py_binary") load("@pylint_pip3//:requirements.bzl", "requirement") load("//bazel:envoy_build_system.bzl", "envoy_package") +load("//tools/base:envoy_python.bzl", "envoy_py_binary") licenses(["notice"]) # Apache 2 @@ -12,12 +12,13 @@ exports_files([ "envoy_build_fixer.py", ]) -py_binary( - name = "python_flake8", - srcs = ["python_flake8.py"], - visibility = ["//visibility:public"], +envoy_py_binary( + name = "tools.code_format.python_check", deps = [ + "//tools/base:checker", + "//tools/base:utils", requirement("flake8"), requirement("pep8-naming"), + requirement("yapf"), ], ) diff --git a/tools/code_format/python_check.py b/tools/code_format/python_check.py new file mode 100755 index 000000000000..40d0cfd7faa7 --- /dev/null +++ b/tools/code_format/python_check.py @@ -0,0 +1,124 @@ +#!/usr/bin/env python3 + +# usage +# +# with bazel: +# +# bazel run //tools/code_format:python_check -- -h +# +# alternatively, if you have the necessary python deps available +# +# ./tools/code_format/python_check.py -h +# +# python requires: flake8, yapf +# + +import os +import sys +from functools import cached_property + +from flake8.main.application import Application as Flake8Application + +import yapf + +from tools.base import checker, utils + +FLAKE8_CONFIG = '.flake8' +YAPF_CONFIG = '.style.yapf' + +# TODO(phlax): add checks for: +# - isort + + +class PythonChecker(checker.ForkingChecker): + checks = ("flake8", "yapf") + + @property + def diff_file_path(self) -> str: + return self.args.diff_file + + @cached_property + def flake8_app(self) -> Flake8Application: + flake8_app = Flake8Application() + flake8_app.initialize(self.flake8_args) + return flake8_app + + @property + def flake8_args(self) -> list: + return ["--config", self.flake8_config_path, self.path] + + @property + def flake8_config_path(self) -> str: + return os.path.join(self.path, FLAKE8_CONFIG) + + @property + def yapf_config_path(self) -> str: + return os.path.join(self.path, YAPF_CONFIG) + + @property + def yapf_files(self): + return yapf.file_resources.GetCommandLineFiles( + self.args.paths, + recursive=self.args.recurse, + exclude=yapf.file_resources.GetExcludePatternsForDir(self.path)) + + def add_arguments(self, parser) -> None: + super().add_arguments(parser) + parser.add_argument( + "--diff-file", default=None, help="Specify the path to a diff file with fixes") + + def check_flake8(self) -> None: + """Run flake8 on files and/or repo""" + errors = [] + with utils.buffered(stdout=errors, mangle=self._strip_lines): + self.flake8_app.run_checks() + self.flake8_app.report() + if errors: + self.error("flake8", errors) + + def check_yapf(self) -> None: + """Run flake8 on files and/or repo""" + for python_file in self.yapf_files: + self.yapf_run(python_file) + + def on_check_run(self, check: str) -> None: + if check not in 
self.failed and check not in self.warned: + self.succeed(check, [f"[CHECKS:{self.name}] {check}: success"]) + + def on_checks_complete(self) -> int: + if self.diff_file_path and self.has_failed: + result = self.fork(["git", "diff", "HEAD"]) + with open(self.diff_file_path, "wb") as f: + f.write(result.stdout) + return super().on_checks_complete() + + def yapf_format(self, python_file: str) -> tuple: + return yapf.yapf_api.FormatFile( + python_file, + style_config=self.yapf_config_path, + in_place=self.fix, + print_diff=not self.fix) + + def yapf_run(self, python_file: str) -> None: + reformatted_source, encoding, changed = self.yapf_format(python_file) + if not changed: + return self.succeed("yapf", [f"{python_file}: success"]) + if self.fix: + return self.warn("yapf", [f"{python_file}: reformatted"]) + if reformatted_source: + return self.warn("yapf", [reformatted_source]) + self.error("yapf", [python_file]) + + def _strip_line(self, line: str) -> str: + return line[len(self.path) + 1:] if line.startswith(f"{self.path}/") else line + + def _strip_lines(self, lines: list) -> list: + return [self._strip_line(line) for line in lines if line] + + +def main(*args: str) -> int: + return PythonChecker(*args).run_checks() + + +if __name__ == "__main__": + sys.exit(main(*sys.argv[1:])) diff --git a/tools/code_format/python_flake8.py b/tools/code_format/python_flake8.py deleted file mode 100644 index 49ba8ced694d..000000000000 --- a/tools/code_format/python_flake8.py +++ /dev/null @@ -1,19 +0,0 @@ -import subprocess -import sys - -# explicitly use python3 linter -FLAKE8_COMMAND = ("python3", "-m", "flake8", ".") - - -def main(): - resp = subprocess.run(FLAKE8_COMMAND, capture_output=True, cwd=sys.argv[1]) - if resp.returncode: - # stdout and stderr are dumped to ensure we capture all errors - raise SystemExit( - "ERROR: flake8 linting failed: \n" - f"{resp.stdout.decode('utf-8')}\n" - f"{resp.stderr.decode('utf-8')}") - - -if __name__ == "__main__": - main() diff --git a/tools/code_format/tests/test_python_check.py b/tools/code_format/tests/test_python_check.py new file mode 100644 index 000000000000..7ade3552ff54 --- /dev/null +++ b/tools/code_format/tests/test_python_check.py @@ -0,0 +1,377 @@ +from contextlib import contextmanager +from unittest.mock import patch, MagicMock, PropertyMock + +import pytest + +from tools.code_format import python_check + + +def test_python_checker_constructor(): + checker = python_check.PythonChecker("path1", "path2", "path3") + assert checker.checks == ("flake8", "yapf") + assert checker.args.paths == ['path1', 'path2', 'path3'] + + +def test_python_diff_path(): + checker = python_check.PythonChecker("path1", "path2", "path3") + args_mock = patch("tools.code_format.python_check.PythonChecker.args", new_callable=PropertyMock) + + with args_mock as m_args: + assert checker.diff_file_path == m_args.return_value.diff_file + + +def test_python_flake8_app(patches): + checker = python_check.PythonChecker("path1", "path2", "path3") + patched = patches( + ("PythonChecker.flake8_args", dict(new_callable=PropertyMock)), + "Flake8Application", + prefix="tools.code_format.python_check") + + with patched as (m_flake8_args, m_flake8_app): + assert checker.flake8_app == m_flake8_app.return_value + + assert ( + list(m_flake8_app.call_args) + == [(), {}]) + assert ( + list(m_flake8_app.return_value.initialize.call_args) + == [(m_flake8_args.return_value,), {}]) + + +def test_python_flake8_args(patches): + checker = python_check.PythonChecker("path1", "path2", "path3") + patched = patches(
("PythonChecker.flake8_config_path", dict(new_callable=PropertyMock)), + ("PythonChecker.path", dict(new_callable=PropertyMock)), + prefix="tools.code_format.python_check") + + with patched as (m_flake8_config, m_path): + assert ( + checker.flake8_args + == ['--config', + m_flake8_config.return_value, m_path.return_value]) + + +def test_python_flake8_config_path(patches): + checker = python_check.PythonChecker("path1", "path2", "path3") + patched = patches( + ("PythonChecker.path", dict(new_callable=PropertyMock)), + "os.path.join", + prefix="tools.code_format.python_check") + + with patched as (m_path, m_join): + assert checker.flake8_config_path == m_join.return_value + + assert ( + list(m_join.call_args) + == [(m_path.return_value, python_check.FLAKE8_CONFIG), {}]) + + +def test_python_yapf_config_path(patches): + checker = python_check.PythonChecker("path1", "path2", "path3") + patched = patches( + ("PythonChecker.path", dict(new_callable=PropertyMock)), + "os.path.join", + prefix="tools.code_format.python_check") + + with patched as (m_path, m_join): + assert checker.yapf_config_path == m_join.return_value + + assert ( + list(m_join.call_args) + == [(m_path.return_value, python_check.YAPF_CONFIG), {}]) + + +def test_python_yapf_files(patches): + checker = python_check.PythonChecker("path1", "path2", "path3") + + patched = patches( + ("PythonChecker.args", dict(new_callable=PropertyMock)), + ("PythonChecker.path", dict(new_callable=PropertyMock)), + "yapf.file_resources.GetCommandLineFiles", + "yapf.file_resources.GetExcludePatternsForDir", + prefix="tools.code_format.python_check") + + with patched as (m_args, m_path, m_yapf_files, m_yapf_exclude): + assert checker.yapf_files == m_yapf_files.return_value + + assert ( + list(m_yapf_files.call_args) + == [(m_args.return_value.paths,), + {'recursive': m_args.return_value.recurse, + 'exclude': m_yapf_exclude.return_value}]) + assert ( + list(m_yapf_exclude.call_args) + == [(m_path.return_value,), {}]) + + +def test_python_add_arguments(patches): + checker = python_check.PythonChecker("path1", "path2", "path3") + add_mock = patch("tools.code_format.python_check.checker.ForkingChecker.add_arguments") + m_parser = MagicMock() + + with add_mock as m_add: + checker.add_arguments(m_parser) + + assert ( + list(m_add.call_args) + == [(m_parser,), {}]) + assert ( + list(list(c) for c in m_parser.add_argument.call_args_list) + == [[('--diff-file',), + {'default': None, 'help': 'Specify the path to a diff file with fixes'}]]) + + +@pytest.mark.parametrize("errors", [[], ["err1", "err2"]]) +def test_python_check_flake8(patches, errors): + checker = python_check.PythonChecker("path1", "path2", "path3") + + patched = patches( + "utils.buffered", + "PythonChecker.error", + "PythonChecker._strip_lines", + ("PythonChecker.flake8_app", dict(new_callable=PropertyMock)), + prefix="tools.code_format.python_check") + + @contextmanager + def mock_buffered(stdout=None, mangle=None): + yield + stdout.extend(errors) + + with patched as (m_buffered, m_error, m_mangle, m_flake8_app): + m_buffered.side_effect = mock_buffered + checker.check_flake8() + + assert ( + list(m_buffered.call_args) + == [(), {'stdout': errors, 'mangle': m_mangle}]) + assert ( + list(m_flake8_app.return_value.run_checks.call_args) + == [(), {}]) + assert ( + list(m_flake8_app.return_value.report.call_args) + == [(), {}]) + + if errors: + assert ( + list(m_error.call_args) + == [('flake8', ['err1', 'err2']), {}]) + else: + assert not m_error.called + + +def test_python_check_yapf(patches): + 
checker = python_check.PythonChecker("path1", "path2", "path3") + patched = patches( + "PythonChecker.yapf_run", + ("PythonChecker.yapf_files", dict(new_callable=PropertyMock)), + prefix="tools.code_format.python_check") + + with patched as (m_yapf_run, m_yapf_files): + m_yapf_files.return_value = ["file1", "file2", "file3"] + checker.check_yapf() + + assert ( + list(list(c) for c in m_yapf_files.call_args_list) + == [[(), {}]]) + assert ( + list(list(c) for c in m_yapf_run.call_args_list) + == [[('file1',), {}], [('file2',), {}], [('file3',), {}]]) + + +TEST_CHECK_RESULTS = ( + ("check1", [], []), + ("check1", ["check2", "check3"], ["check4", "check5"]), + ("check1", ["check1", "check3"], ["check4", "check5"]), + ("check1", ["check2", "check3"], ["check1", "check5"]), + ("check1", ["check1", "check3"], ["check1", "check5"])) + + +@pytest.mark.parametrize("results", TEST_CHECK_RESULTS) +def test_python_on_check_run(patches, results): + checker = python_check.PythonChecker("path1", "path2", "path3") + checkname, errors, warnings = results + patched = patches( + "PythonChecker.succeed", + ("PythonChecker.name", dict(new_callable=PropertyMock)), + ("PythonChecker.failed", dict(new_callable=PropertyMock)), + ("PythonChecker.warned", dict(new_callable=PropertyMock)), + prefix="tools.code_format.python_check") + + with patched as (m_succeed, m_name, m_failed, m_warned): + m_failed.return_value = errors + m_warned.return_value = warnings + checker.on_check_run(checkname) + + if checkname in warnings or checkname in errors: + assert not m_succeed.called + else: + assert ( + list(m_succeed.call_args) + == [(checkname, [f"[CHECKS:{m_name.return_value}] {checkname}: success"]), {}]) + + +TEST_CHECKS_COMPLETE = ( + ("DIFF1", False), + ("DIFF1", True), + ("", False), + ("", True)) + + +@pytest.mark.parametrize("results", TEST_CHECKS_COMPLETE) +def test_python_on_checks_complete(patches, results): + checker = python_check.PythonChecker("path1", "path2", "path3") + diff_path, failed = results + patched = patches( + "open", + "PythonChecker.fork", + "checker.ForkingChecker.on_checks_complete", + ("PythonChecker.diff_file_path", dict(new_callable=PropertyMock)), + ("PythonChecker.has_failed", dict(new_callable=PropertyMock)), + prefix="tools.code_format.python_check") + + with patched as (m_open, m_fork, m_super, m_diff, m_failed): + m_diff.return_value = diff_path + m_failed.return_value = failed + assert checker.on_checks_complete() == m_super.return_value + + if diff_path and failed: + assert ( + list(m_fork.call_args) + == [(['git', 'diff', 'HEAD'],), {}]) + assert ( + list(m_open.call_args) + == [(diff_path, 'wb'), {}]) + assert ( + list(m_open.return_value.__enter__.return_value.write.call_args) + == [(m_fork.return_value.stdout,), {}]) + else: + assert not m_fork.called + assert not m_open.called + + assert ( + list(m_super.call_args) + == [(), {}]) + + +@pytest.mark.parametrize("fix", [True, False]) +def test_python_yapf_format(patches, fix): + checker = python_check.PythonChecker("path1", "path2", "path3") + patched = patches( + "yapf.yapf_api.FormatFile", + ("PythonChecker.yapf_config_path", dict(new_callable=PropertyMock)), + ("PythonChecker.fix", dict(new_callable=PropertyMock)), + prefix="tools.code_format.python_check") + + with patched as (m_format, m_config, m_fix): + m_fix.return_value = fix + assert checker.yapf_format("FILENAME") == m_format.return_value + + assert ( + list(m_format.call_args) + == [('FILENAME',), + {'style_config': m_config.return_value, + 'in_place': fix, + 
'print_diff': not fix}]) + assert ( + list(list(c) for c in m_fix.call_args_list) + == [[(), {}], [(), {}]]) + + +TEST_FORMAT_RESULTS = ( + ("", "", True), + ("", "", False), + ("REFORMAT", "", True), + ("REFORMAT", "", False)) + + +@pytest.mark.parametrize("format_results", TEST_FORMAT_RESULTS) +@pytest.mark.parametrize("fix", [True, False]) +def test_python_yapf_run(patches, fix, format_results): + checker = python_check.PythonChecker("path1", "path2", "path3") + reformat, encoding, changed = format_results + patched = patches( + "PythonChecker.yapf_format", + "PythonChecker.succeed", + "PythonChecker.warn", + "PythonChecker.error", + ("PythonChecker.fix", dict(new_callable=PropertyMock)), + prefix="tools.code_format.python_check") + + with patched as (m_format, m_succeed, m_warn, m_error, m_fix): + m_fix.return_value = fix + m_format.return_value = format_results + checker.yapf_run("FILENAME") + + if not changed: + assert ( + list(m_succeed.call_args) + == [('yapf', ['FILENAME: success']), {}]) + assert not m_warn.called + assert not m_error.called + assert not m_fix.called + return + assert not m_succeed.called + if fix: + assert not m_error.called + assert len(m_warn.call_args_list) == 1 + assert ( + list(m_warn.call_args) + == [('yapf', ['FILENAME: reformatted']), {}]) + return + if reformat: + assert not m_error.called + assert len(m_warn.call_args_list) == 1 + assert ( + list(m_warn.call_args) + == [('yapf', [reformat]), {}]) + return + assert not m_warn.called + assert ( + list(m_error.call_args) + == [('yapf', ['FILENAME']), {}]) + + +def test_python_strip_lines(): + checker = python_check.PythonChecker("path1", "path2", "path3") + strip_mock = patch("tools.code_format.python_check.PythonChecker._strip_line") + lines = ["", "foo", "", "bar", "", "", "baz", "", ""] + + with strip_mock as m_strip: + assert ( + checker._strip_lines(lines) + == [m_strip.return_value] * 3) + + assert ( + list(list(c) for c in m_strip.call_args_list) + == [[('foo',), {}], [('bar',), {}], [('baz',), {}]]) + + +@pytest.mark.parametrize("line", ["REMOVE/foo", "REMOVE", "bar", "other", "REMOVE/baz", "baz"]) +def test_python_strip_line(line): + checker = python_check.PythonChecker("path1", "path2", "path3") + path_mock = patch( + "tools.code_format.python_check.PythonChecker.path", + new_callable=PropertyMock) + + with path_mock as m_path: + m_path.return_value = "REMOVE" + assert ( + checker._strip_line(line) + == (line[7:] if line.startswith("REMOVE/") else line)) + + +def test_python_checker_main(): + class_mock = patch("tools.code_format.python_check.PythonChecker") + + with class_mock as m_class: + assert ( + python_check.main("arg0", "arg1", "arg2") + == m_class.return_value.run_checks.return_value) + + assert ( + list(m_class.call_args) + == [('arg0', 'arg1', 'arg2'), {}]) + assert ( + list(m_class.return_value.run_checks.call_args) + == [(), {}]) diff --git a/tools/testing/plugin.py b/tools/testing/plugin.py index d654d23de584..af9f487fb8f1 100644 --- a/tools/testing/plugin.py +++ b/tools/testing/plugin.py @@ -2,7 +2,6 @@ # This is pytest plugin providing fixtures for tests. # -import importlib from contextlib import contextmanager, ExitStack from typing import ContextManager, Iterator from unittest.mock import patch
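A minimal sketch of how a downstream tool might subclass the new `tools.base.checker.Checker` base introduced above, mirroring the `PythonChecker` pattern in this change; the `TodoChecker` name and its single check are hypothetical, assuming only the API shown in this diff:

import sys

from tools.base import checker


class TodoChecker(checker.Checker):
    # subclasses declare their checks; each name maps to a check_<name> method
    checks = ("todo",)

    def check_todo(self):
        # checks record results via succeed/warn/error rather than returning them;
        # run_checks invokes each check and on_checks_complete derives the exit status
        findings = []  # hypothetical: collect problems from self.paths here
        if findings:
            self.error("todo", findings)
        else:
            self.succeed("todo", ["todo: success"])


def main(*args: str) -> int:
    return TodoChecker(*args).run_checks()


if __name__ == "__main__":
    sys.exit(main(*sys.argv[1:]))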