diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3efeb9cf..7445a9ba 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -66,6 +66,7 @@ repos: flake8-print, flake8-pytest-style, flake8-todo, + flake8-typing-imports, flake8-unused-arguments, pep8-naming, pydocstyle, @@ -94,6 +95,21 @@ repos: hooks: - id: tryceratops exclude: (console\.py|test_mark_expression\.py) +- repo: https://github.com/pre-commit/mirrors-mypy + rev: 'v0.910-1' + hooks: + - id: mypy + args: [ + --no-strict-optional, + --ignore-missing-imports, + ] + additional_dependencies: [ + types-attrs, + types-click, + types-setuptools + ] + pass_filenames: false + language_version: "3.9" - repo: meta hooks: - id: check-hooks-apply diff --git a/docs/rtd_environment.yml b/docs/rtd_environment.yml index 33c4b9bf..d5d09a9b 100644 --- a/docs/rtd_environment.yml +++ b/docs/rtd_environment.yml @@ -14,7 +14,6 @@ dependencies: - nbsphinx - sphinx - sphinx-autoapi - - sphinx-autodoc-typehints - sphinx-click - sphinx-copybutton - sphinx-panels diff --git a/docs/source/changes.rst b/docs/source/changes.rst index 66d68d2f..4dd1ab9e 100644 --- a/docs/source/changes.rst +++ b/docs/source/changes.rst @@ -11,6 +11,7 @@ all releases are available on `PyPI `_ and ------------------ - :gh:`159` removes files for creating a conda package which is handled by conda-forge. +- :gh:`160` adds rudimentary typing to pytask. - :gh:`161` removes a workaround for pyreadline which is also removed in pytest 7. - :gh:`163` allow forward slashes in expressions and marker expressions. - :gh:`164` allows to use backward slashes in expressions and marker expressions. diff --git a/docs/source/conf.py b/docs/source/conf.py index 81048f27..2c944bfa 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -39,7 +39,6 @@ "sphinx.ext.napoleon", "sphinx.ext.viewcode", "sphinx_copybutton", - "sphinx_autodoc_typehints", "sphinx_click", "sphinx_panels", "autoapi.extension", diff --git a/environment.yml b/environment.yml index a1475214..8955e290 100644 --- a/environment.yml +++ b/environment.yml @@ -40,7 +40,6 @@ dependencies: - nbsphinx - sphinx - sphinx-autoapi - - sphinx-autodoc-typehints - sphinx-click - sphinx-copybutton - sphinx-panels diff --git a/pyproject.toml b/pyproject.toml index 82173650..1c8d5e6c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,3 +8,20 @@ write_to = "src/_pytask/_version.py" [tool.tryceratops] ignore = ["TC003"] + + +[tool.mypy] +files = ["src", "tests"] +check_untyped_defs = true +disallow_any_generics = true +disallow_incomplete_defs = true +disallow_untyped_defs = true +no_implicit_optional = true +warn_redundant_casts = true +warn_unused_ignores = true + + +[[tool.mypy.overrides]] +module = "tests.*" +disallow_untyped_defs = false +ignore_errors = true diff --git a/src/_pytask/build.py b/src/_pytask/build.py index fd5cfeb0..80512109 100644 --- a/src/_pytask/build.py +++ b/src/_pytask/build.py @@ -1,5 +1,8 @@ """Implement the build command.""" import sys +from typing import Any +from typing import Dict +from typing import TYPE_CHECKING import click from _pytask.config import hookimpl @@ -13,13 +16,17 @@ from _pytask.session import Session +if TYPE_CHECKING: + from typing import NoReturn + + @hookimpl(tryfirst=True) -def pytask_extend_command_line_interface(cli): +def pytask_extend_command_line_interface(cli: click.Group) -> None: """Extend the command line interface.""" cli.add_command(build) -def main(config_from_cli): +def main(config_from_cli: Dict[str, Any]) -> Session: """Run pytask. 
This is the main command to run pytask which usually receives kwargs from the @@ -100,7 +107,7 @@ def main(config_from_cli): default=None, help="Print errors with tracebacks as soon as the task fails.", ) -def build(**config_from_cli): +def build(**config_from_cli: Any) -> "NoReturn": """Collect and execute tasks and report the results. This is the default command of pytask which searches given paths or the current diff --git a/src/_pytask/capture.py b/src/_pytask/capture.py index 8c06b4db..152ee9d4 100644 --- a/src/_pytask/capture.py +++ b/src/_pytask/capture.py @@ -29,7 +29,9 @@ import os import sys from tempfile import TemporaryFile +from typing import Any from typing import AnyStr +from typing import Dict from typing import Generator from typing import Generic from typing import Iterator @@ -45,9 +47,13 @@ from _pytask.shared import get_first_non_none_value if TYPE_CHECKING: - from typing_extensions import Literal + if sys.version_info >= (3, 8): + from typing import Literal + else: + from typing_extensions import Literal _CaptureMethod = Literal["fd", "sys", "no", "tee-sys"] + _CaptureCallback = Literal["no", "stdout", "stderr", "all"] if TYPE_CHECKING: if sys.version_info >= (3, 8): @@ -63,7 +69,7 @@ def final(f): @hookimpl -def pytask_extend_command_line_interface(cli): +def pytask_extend_command_line_interface(cli: click.Group) -> None: """Add CLI options for capturing output.""" additional_parameters = [ click.Option( @@ -85,7 +91,11 @@ def pytask_extend_command_line_interface(cli): @hookimpl -def pytask_parse_config(config, config_from_cli, config_from_file): +def pytask_parse_config( + config: Dict[str, Any], + config_from_cli: Dict[str, Any], + config_from_file: Dict[str, Any], +) -> None: """Parse configuration. Note that, ``-s`` is a shortcut for ``--capture=no``. @@ -112,7 +122,7 @@ def pytask_parse_config(config, config_from_cli, config_from_file): @hookimpl -def pytask_post_parse(config): +def pytask_post_parse(config: Dict[str, Any]) -> None: """Initialize the CaptureManager.""" if config["capture"] == "fd": _py36_windowsconsoleio_workaround(sys.stdout) @@ -126,7 +136,7 @@ def pytask_post_parse(config): capman.suspend() -def _capture_callback(x): +def _capture_callback(x: "Optional[_CaptureMethod]") -> "Optional[_CaptureMethod]": """Validate the passed options for capturing output.""" if x in [None, "None", "none"]: x = None @@ -134,11 +144,12 @@ def _capture_callback(x): pass else: raise ValueError("'capture' can only be one of ['fd', 'no', 'sys', 'tee-sys'].") - return x -def _show_capture_callback(x): +def _show_capture_callback( + x: "Optional[_CaptureCallback]", +) -> "Optional[_CaptureCallback]": """Validate the passed options for showing captured output.""" if x in [None, "None", "none"]: x = None @@ -148,7 +159,6 @@ def _show_capture_callback(x): raise ValueError( "'show_capture' must be one of ['no', 'stdout', 'stderr', 'all']." ) - return x @@ -200,27 +210,30 @@ def _py36_windowsconsoleio_workaround(stream: TextIO) -> None: return # Bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666). - if not hasattr(stream, "buffer"): # type: ignore[unreachable] + if not hasattr(stream, "buffer"): return buffered = hasattr(stream.buffer, "raw") - raw_stdout = stream.buffer.raw if buffered else stream.buffer + # ``getattr`` hack since ``buffer`` might not have an attribute ``raw``. 
+ raw_stdout = getattr(stream.buffer, "raw", stream.buffer) - if not isinstance(raw_stdout, io._WindowsConsoleIO): # type: ignore[attr-defined] + # ``getattr`` hack since ``_WindowsConsoleIO`` is not defined in stubs. + windowsconsoleio = getattr(io, "_WindowsConsoleIO", None) + if windowsconsoleio is not None and not isinstance(raw_stdout, windowsconsoleio): return - def _reopen_stdio(f, mode): + def _reopen_stdio(f: TextIO, mode: str) -> TextIO: if not buffered and mode[0] == "w": buffering = 0 else: buffering = -1 return io.TextIOWrapper( - open(os.dup(f.fileno()), mode, buffering), # type: ignore[arg-type] + open(os.dup(f.fileno()), mode, buffering), f.encoding, f.errors, f.newlines, - f.line_buffering, + bool(f.line_buffering), ) sys.stdin = _reopen_stdio(sys.stdin, "rb") @@ -270,7 +283,7 @@ class DontReadFromInput: encoding = None - def read(self, *_args): # noqa: U101 + def read(self, *_args: Any) -> None: # noqa: U101 raise OSError( "pytest: reading from stdin while output is captured! Consider using `-s`." ) @@ -279,7 +292,7 @@ def read(self, *_args): # noqa: U101 readlines = read __next__ = read - def __iter__(self): + def __iter__(self) -> "DontReadFromInput": return self def fileno(self) -> int: @@ -292,7 +305,7 @@ def close(self) -> None: pass @property - def buffer(self): + def buffer(self) -> "DontReadFromInput": return self @@ -319,7 +332,13 @@ class SysCaptureBinary: EMPTY_BUFFER = b"" - def __init__(self, fd: int, tmpfile=None, *, tee: bool = False) -> None: + def __init__( # type: ignore + self, + fd: int, + tmpfile=None, + *, + tee: bool = False, + ) -> None: name = patchsysdict[fd] self._old = getattr(sys, name) self.name = name @@ -361,7 +380,7 @@ def start(self) -> None: setattr(sys, self.name, self.tmpfile) self._state = "started" - def snap(self): + def snap(self) -> str: self._assert_state("snap", ("started", "suspended")) self.tmpfile.seek(0) res = self.tmpfile.buffer.read() @@ -390,7 +409,7 @@ def resume(self) -> None: setattr(sys, self.name, self.tmpfile) self._state = "started" - def writeorg(self, data) -> None: + def writeorg(self, data: str) -> None: self._assert_state("writeorg", ("started", "suspended")) self._old.flush() self._old.buffer.write(data) @@ -406,13 +425,13 @@ class SysCapture(SysCaptureBinary): EMPTY_BUFFER = "" # type: ignore[assignment] - def snap(self): + def snap(self) -> str: res = self.tmpfile.getvalue() self.tmpfile.seek(0) self.tmpfile.truncate() return res - def writeorg(self, data): + def writeorg(self, data: str) -> None: self._assert_state("writeorg", ("started", "suspended")) self._old.write(data) self._old.flush() @@ -455,7 +474,7 @@ def __init__(self, targetfd: int) -> None: self.syscapture = SysCapture(targetfd) else: self.tmpfile = EncodedFile( - TemporaryFile(buffering=0), # type: ignore[arg-type] + TemporaryFile(buffering=0), encoding="utf-8", errors="replace", newline="", @@ -491,7 +510,7 @@ def start(self) -> None: self.syscapture.start() self._state = "started" - def snap(self): + def snap(self) -> bytes: self._assert_state("snap", ("started", "suspended")) self.tmpfile.seek(0) res = self.tmpfile.buffer.read() @@ -531,7 +550,7 @@ def resume(self) -> None: os.dup2(self.tmpfile.fileno(), self.targetfd) self._state = "started" - def writeorg(self, data): + def writeorg(self, data: bytes) -> None: """Write to original file descriptor.""" self._assert_state("writeorg", ("started", "suspended")) os.write(self.targetfd_save, data) @@ -547,7 +566,8 @@ class FDCapture(FDCaptureBinary): # Ignore type because it doesn't match the 
type in the superclass (bytes). EMPTY_BUFFER = "" # type: ignore - def snap(self): + # Ignore type because it doesn't match the type in the superclass (bytes). + def snap(self) -> str: # type: ignore self._assert_state("snap", ("started", "suspended")) self.tmpfile.seek(0) res = self.tmpfile.read() @@ -555,7 +575,8 @@ def snap(self): self.tmpfile.truncate() return res - def writeorg(self, data): + # Ignore type because it doesn't match the type in the superclass (bytes). + def writeorg(self, data: str) -> None: # type: ignore """Write to original file descriptor.""" super().writeorg(data.encode("utf-8")) @@ -577,9 +598,7 @@ class CaptureResult(Generic[AnyStr]): """ - # Can't use slots in Python<3.5.3 due to https://bugs.python.org/issue31272 - if sys.version_info >= (3, 5, 3): - __slots__ = ("out", "err") + __slots__ = ("out", "err") def __init__(self, out: AnyStr, err: AnyStr) -> None: self.out: AnyStr = out @@ -604,7 +623,7 @@ def _replace( def count(self, value: AnyStr) -> int: return tuple(self).count(value) - def index(self, value) -> int: + def index(self, value: int) -> int: return tuple(self).index(value) def __eq__(self, other: object) -> bool: @@ -636,7 +655,12 @@ class MultiCapture(Generic[AnyStr]): _state = None _in_suspended = False - def __init__(self, in_, out, err) -> None: + def __init__( + self, + in_: Optional[Union[FDCapture, SysCapture]], + out: Optional[Union[FDCapture, SysCapture]], + err: Optional[Union[FDCapture, SysCapture]], + ) -> None: self.in_ = in_ self.out = out self.err = err @@ -666,9 +690,9 @@ def pop_outerr_to_orig(self) -> Tuple[AnyStr, AnyStr]: """Pop current snapshot out/err capture and flush to orig streams.""" out, err = self.readouterr() if out: - self.out.writeorg(out) + self.out.writeorg(out) # type: ignore if err: - self.err.writeorg(err) + self.err.writeorg(err) # type: ignore return out, err def suspend_capturing(self, in_: bool = False) -> None: @@ -716,7 +740,7 @@ def readouterr(self) -> CaptureResult[AnyStr]: err = self.err.snap() else: err = "" - return CaptureResult(out, err) + return CaptureResult(out, err) # type: ignore def _get_multicapture(method: "_CaptureMethod") -> MultiCapture[str]: @@ -764,7 +788,7 @@ def __repr__(self) -> str: self._method, self._capturing ) - def is_capturing(self) -> Union[str, bool]: + def is_capturing(self) -> bool: return self._method != "no" def start_capturing(self) -> None: @@ -831,7 +855,7 @@ def pytask_execute_task_teardown( yield @hookimpl(hookwrapper=True) - def pytask_collect_log(self): + def pytask_collect_log(self) -> Generator[None, None, None]: """Suspend capturing at the end of the collection. This hook needs to be here as long as the collection has no proper capturing. 
If diff --git a/src/_pytask/clean.py b/src/_pytask/clean.py index df6f6798..0b04e321 100644 --- a/src/_pytask/clean.py +++ b/src/_pytask/clean.py @@ -3,6 +3,17 @@ import shutil import sys from pathlib import Path +from types import TracebackType +from typing import Any +from typing import Dict +from typing import Generator +from typing import Iterable +from typing import List +from typing import Optional +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING import attr import click @@ -12,6 +23,7 @@ from _pytask.enums import ColorCode from _pytask.enums import ExitCode from _pytask.exceptions import CollectionError +from _pytask.nodes import MetaTask from _pytask.path import find_common_ancestor from _pytask.path import relative_to from _pytask.pluginmanager import get_plugin_manager @@ -20,6 +32,10 @@ from _pytask.traceback import render_exc_info +if TYPE_CHECKING: + from typing import NoReturn + + _HELP_TEXT_MODE = ( "Choose 'dry-run' to print the paths of files/directories which would be removed, " "'interactive' for a confirmation prompt for every path, and 'force' to remove all " @@ -28,13 +44,15 @@ @hookimpl(tryfirst=True) -def pytask_extend_command_line_interface(cli: click.Group): +def pytask_extend_command_line_interface(cli: click.Group) -> None: """Extend the command line interface.""" cli.add_command(clean) @hookimpl -def pytask_parse_config(config, config_from_cli): +def pytask_parse_config( + config: Dict[str, Any], config_from_cli: Dict[str, Any] +) -> None: """Parse the configuration.""" config["mode"] = get_first_non_none_value( config_from_cli, key="mode", default="dry-run" @@ -48,7 +66,7 @@ def pytask_parse_config(config, config_from_cli): @hookimpl -def pytask_post_parse(config): +def pytask_post_parse(config: Dict[str, Any]) -> None: """Correct ignore patterns such that caches, etc. will not be ignored.""" if config["command"] == "clean": config["ignore"] = [ @@ -66,7 +84,7 @@ def pytask_post_parse(config): @click.option( "-q", "--quiet", is_flag=True, help="Do not print the names of the removed paths." ) -def clean(**config_from_cli): +def clean(**config_from_cli: Any) -> "NoReturn": """Clean provided paths by removing files unknown to pytask.""" config_from_cli["command"] = "clean" @@ -84,7 +102,10 @@ def clean(**config_from_cli): except Exception: session = Session({}, None) session.exit_code = ExitCode.CONFIGURATION_FAILED - console.print(render_exc_info(*sys.exc_info(), config["show_locals"])) + exc_info: Tuple[ + Type[BaseException], BaseException, Optional[TracebackType] + ] = sys.exc_info() + console.print(render_exc_info(*exc_info, config["show_locals"])) else: try: @@ -136,14 +157,15 @@ def clean(**config_from_cli): console.rule(style=ColorCode.FAILED) except Exception: - console.print(render_exc_info(*sys.exc_info(), config["show_locals"])) + exc_info = sys.exc_info() + console.print(render_exc_info(*exc_info, show_locals=config["show_locals"])) console.rule(style=ColorCode.FAILED) session.exit_code = ExitCode.FAILED sys.exit(session.exit_code) -def _collect_all_paths_known_to_pytask(session): +def _collect_all_paths_known_to_pytask(session: Session) -> Set[Path]: """Collect all paths from the session which are known to pytask. Paths belong to tasks and nodes and configuration values. 
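Editor's note: the explicit tuple annotation on ``sys.exc_info()`` in the clean.py hunk above recurs throughout this patch. Below is a minimal sketch of the pattern outside pytask (``render`` is a made-up stand-in for ``render_exc_info``); it type-checks because the mypy hook configured above passes ``--no-strict-optional``:

```python
import sys
from types import TracebackType
from typing import Optional, Tuple, Type


def render(
    exc_type: Type[BaseException],
    exc: BaseException,
    tb: Optional[TracebackType],
) -> str:
    # Stand-in for render_exc_info: format the active exception.
    return f"{exc_type.__name__}: {exc} (traceback attached: {tb is not None})"


try:
    raise ValueError("boom")
except Exception:
    # sys.exc_info() is typed so that each member may be None; the explicit
    # annotation narrows it for the caller, as in clean.py above, and is
    # accepted because strict optional checking is disabled in the hook.
    exc_info: Tuple[
        Type[BaseException], BaseException, Optional[TracebackType]
    ] = sys.exc_info()
    print(render(*exc_info))
```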
@@ -154,7 +176,7 @@ def _collect_all_paths_known_to_pytask(session): for path in _yield_paths_from_task(task): known_files.add(path) - known_directories = set() + known_directories: Set[Path] = set() for path in known_files: known_directories.update(path.parents) @@ -168,16 +190,18 @@ def _collect_all_paths_known_to_pytask(session): return known_paths -def _yield_paths_from_task(task): +def _yield_paths_from_task(task: MetaTask) -> Generator[Path, None, None]: """Yield all paths attached to a task.""" yield task.path for attribute in ["depends_on", "produces"]: for node in getattr(task, attribute).values(): - if isinstance(node.value, Path): - yield node.value + if hasattr(node, "path") and isinstance(node.path, Path): + yield node.path -def _find_all_unknown_paths(session, known_paths, include_directories): +def _find_all_unknown_paths( + session: Session, known_paths: Set[Path], include_directories: bool +) -> List[Path]: """Find all unknown paths. First, create a tree of :class:`_RecursivePathNode`. Then, create a list of unknown @@ -199,20 +223,6 @@ def _find_all_unknown_paths(session, known_paths, include_directories): return unknown_paths -def _find_all_unkown_paths_per_recursive_node(node, include_directories): - """Return unknown paths per recursive file node. - - If ``--directories`` is given, take a short-cut and return only the path of the - directory and not the path of every single file in it. - - """ - if node.is_unknown and (node.is_file or (node.is_dir and include_directories)): - yield node.path - else: - for n in node.sub_nodes: - yield from _find_all_unkown_paths_per_recursive_node(n, include_directories) - - @attr.s(repr=False) class _RecursivePathNode: """A class for a path to a file or directory which recursively instantiates itself. @@ -228,13 +238,15 @@ class _RecursivePathNode: """ path = attr.ib(type=Path) - sub_nodes = attr.ib(type=list) + sub_nodes = attr.ib(type="List[_RecursivePathNode]") is_dir = attr.ib(type=bool) is_file = attr.ib(type=bool) is_unknown = attr.ib(type=bool) @classmethod - def from_path(cls, path: Path, known_paths: list, session): + def from_path( + cls, path: Path, known_paths: Iterable[Path], session: Session + ) -> "_RecursivePathNode": """Create a node from a path. While instantiating the class, subordinate nodes are spawned for all paths @@ -268,5 +280,21 @@ def from_path(cls, path: Path, known_paths: list, session): return cls(path, sub_nodes, path.is_dir(), path.is_file(), is_unknown) - def __repr__(self): + def __repr__(self) -> str: return f"" + + +def _find_all_unkown_paths_per_recursive_node( + node: _RecursivePathNode, include_directories: bool +) -> Generator[Path, None, None]: + """Return unknown paths per recursive file node. + + If ``--directories`` is given, take a short-cut and return only the path of the + directory and not the path of every single file in it. 
+ + """ + if node.is_unknown and (node.is_file or (node.is_dir and include_directories)): + yield node.path + else: + for n in node.sub_nodes: + yield from _find_all_unkown_paths_per_recursive_node(n, include_directories) diff --git a/src/_pytask/cli.py b/src/_pytask/cli.py index ff6d01f2..d898b6d1 100644 --- a/src/_pytask/cli.py +++ b/src/_pytask/cli.py @@ -1,30 +1,34 @@ """Implements the command line interface.""" import sys +from typing import Any +from typing import Dict import click +import pluggy from _pytask.config import hookimpl from _pytask.pluginmanager import get_plugin_manager from click_default_group import DefaultGroup -from pkg_resources import packaging +from packaging.version import parse as parse_version -_CONTEXT_SETTINGS = {"help_option_names": ["-h", "--help"]} +_CONTEXT_SETTINGS: Dict[str, Any] = {"help_option_names": ["-h", "--help"]} -if packaging.version.parse(click.__version__) < packaging.version.parse("8"): - _VERSION_OPTION_KWARGS = {} + +if parse_version(click.__version__) < parse_version("8"): + _VERSION_OPTION_KWARGS: Dict[str, Any] = {} else: _VERSION_OPTION_KWARGS = {"package_name": "pytask"} -def _extend_command_line_interface(command_line_interface): +def _extend_command_line_interface(cli: click.Group) -> click.Group: """Add parameters from plugins to the commandline interface.""" pm = _prepare_plugin_manager() - pm.hook.pytask_extend_command_line_interface(cli=command_line_interface) - _sort_options_for_each_command_alphabetically(command_line_interface) - return command_line_interface + pm.hook.pytask_extend_command_line_interface(cli=cli) + _sort_options_for_each_command_alphabetically(cli) + return cli -def _prepare_plugin_manager(): +def _prepare_plugin_manager() -> pluggy.PluginManager: """Prepare the plugin manager.""" pm = get_plugin_manager() pm.register(sys.modules[__name__]) @@ -32,7 +36,7 @@ def _prepare_plugin_manager(): return pm -def _sort_options_for_each_command_alphabetically(cli): +def _sort_options_for_each_command_alphabetically(cli: click.Group) -> None: """Sort command line options and arguments for each command alphabetically.""" for command in cli.commands: cli.commands[command].params = sorted( @@ -41,7 +45,7 @@ def _sort_options_for_each_command_alphabetically(cli): @hookimpl -def pytask_add_hooks(pm): +def pytask_add_hooks(pm: pluggy.PluginManager) -> None: """Add hooks.""" from _pytask import build from _pytask import capture @@ -91,7 +95,7 @@ def pytask_add_hooks(pm): default_if_no_args=True, ) @click.version_option(**_VERSION_OPTION_KWARGS) -def cli(): +def cli() -> None: """The command line interface of pytask.""" pass diff --git a/src/_pytask/collect.py b/src/_pytask/collect.py index ace38820..62b33f0a 100644 --- a/src/_pytask/collect.py +++ b/src/_pytask/collect.py @@ -1,12 +1,17 @@ """Implement functionality to collect tasks.""" -import importlib import inspect import os import sys import time +from importlib import util as importlib_util from pathlib import Path +from typing import Any +from typing import Dict from typing import Generator +from typing import Iterable from typing import List +from typing import Optional +from typing import Union from _pytask.config import hookimpl from _pytask.config import IS_FILE_SYSTEM_CASE_SENSITIVE @@ -19,12 +24,13 @@ from _pytask.nodes import PythonFunctionTask from _pytask.path import find_case_sensitive_path from _pytask.report import CollectionReport +from _pytask.session import Session from _pytask.shared import reduce_node_name from _pytask.traceback import render_exc_info 
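Editor's note: the version gate in cli.py above also shows how module-level constants are annotated in this patch: the type is declared on the first assignment only, and mypy carries the declared ``Dict[str, Any]`` over to the other branch. A standalone sketch of that hunk, assuming click and packaging are installed:

```python
from typing import Any, Dict

import click
from packaging.version import parse as parse_version

# Annotate the constant once, on its first assignment; the assignment in the
# else branch reuses the declared Dict[str, Any] type without repeating it.
if parse_version(click.__version__) < parse_version("8"):
    _VERSION_OPTION_KWARGS: Dict[str, Any] = {}
else:
    _VERSION_OPTION_KWARGS = {"package_name": "pytask"}
```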
@hookimpl -def pytask_collect(session): +def pytask_collect(session: Session) -> bool: """Collect tasks.""" session.collection_start = time.time() @@ -43,7 +49,7 @@ def pytask_collect(session): return True -def _collect_from_paths(session): +def _collect_from_paths(session: Session) -> None: """Collect tasks from paths. Go through all paths, check if the path is ignored, and collect the file if not. @@ -59,14 +65,16 @@ def _collect_from_paths(session): @hookimpl -def pytask_ignore_collect(path, config): +def pytask_ignore_collect(path: Path, config: Dict[str, Any]) -> bool: """Ignore a path during the collection.""" is_ignored = any(path.match(pattern) for pattern in config["ignore"]) return is_ignored @hookimpl -def pytask_collect_file_protocol(session, path, reports): +def pytask_collect_file_protocol( + session: Session, path: Path, reports: List[CollectionReport] +) -> List[CollectionReport]: try: reports = session.hook.pytask_collect_file( session=session, path=path, reports=reports @@ -81,16 +89,18 @@ def pytask_collect_file_protocol(session, path, reports): @hookimpl -def pytask_collect_file(session, path, reports): +def pytask_collect_file( + session: Session, path: Path, reports: List[CollectionReport] +) -> Optional[List[CollectionReport]]: """Collect a file.""" if any(path.match(pattern) for pattern in session.config["task_files"]): - spec = importlib.util.spec_from_file_location(path.stem, str(path)) + spec = importlib_util.spec_from_file_location(path.stem, str(path)) if spec is None: raise ImportError(f"Can't find module '{path.stem}' at location {path}.") - mod = importlib.util.module_from_spec(spec) - spec.loader.exec_module(mod) + mod = importlib_util.module_from_spec(spec) + spec.loader.exec_module(mod) # type: ignore collected_reports = [] for name, obj in inspect.getmembers(mod): @@ -109,10 +119,14 @@ def pytask_collect_file(session, path, reports): collected_reports.append(report) return collected_reports + else: + return None @hookimpl -def pytask_collect_task_protocol(session, path, name, obj): +def pytask_collect_task_protocol( + session: Session, path: Path, name: str, obj: Any +) -> Optional[CollectionReport]: """Start protocol for collecting a task.""" try: session.hook.pytask_collect_task_setup( @@ -129,9 +143,14 @@ def pytask_collect_task_protocol(session, path, name, obj): task = PythonFunctionTask(name, create_task_name(path, name), path, None) return CollectionReport.from_exception(exc_info=sys.exc_info(), node=task) + else: + return None + @hookimpl(trylast=True) -def pytask_collect_task(session, path, name, obj): +def pytask_collect_task( + session: Session, path: Path, name: str, obj: Any +) -> Optional[PythonFunctionTask]: """Collect a task which is a function. There is some discussion on how to detect functions in this `thread @@ -143,9 +162,11 @@ def pytask_collect_task(session, path, name, obj): return PythonFunctionTask.from_path_name_function_session( path, name, obj, session ) + else: + return None -_TEMPLATE_ERROR = ( +_TEMPLATE_ERROR: str = ( "The provided path of the dependency/product in the marker is {}, but the path of " "the file on disk is {}. 
Case-sensitive file systems would raise an error.\n\n" "Please, align the names to ensure reproducibility on case-sensitive file systems " @@ -154,7 +175,9 @@ def pytask_collect_task(session, path, name, obj): @hookimpl(trylast=True) -def pytask_collect_node(session, path, node): +def pytask_collect_node( + session: Session, path: Path, node: Union[str, Path] +) -> Optional[FilePathNode]: """Collect a node of a task as a :class:`pytask.nodes.FilePathNode`. Strings are assumed to be paths. This might be a strict assumption, but since this @@ -195,9 +218,13 @@ def pytask_collect_node(session, path, node): raise ValueError(_TEMPLATE_ERROR.format(node, case_sensitive_path)) return FilePathNode.from_path(node) + else: + return None -def _not_ignored_paths(paths: List[Path], session) -> Generator[Path, None, None]: +def _not_ignored_paths( + paths: Iterable[Path], session: Session +) -> Generator[Path, None, None]: """Traverse paths and yield not ignored paths. The paths passed by the user can either point to files or directories. For @@ -227,7 +254,9 @@ def _not_ignored_paths(paths: List[Path], session) -> Generator[Path, None, None @hookimpl -def pytask_collect_log(session, reports, tasks): +def pytask_collect_log( + session: Session, reports: List[CollectionReport], tasks: List[PythonFunctionTask] +) -> None: """Log collection.""" session.collection_end = time.time() diff --git a/src/_pytask/collect_command.py b/src/_pytask/collect_command.py index 11bc2ca9..2bbac484 100644 --- a/src/_pytask/collect_command.py +++ b/src/_pytask/collect_command.py @@ -1,5 +1,11 @@ """This module contains the implementation of ``pytask collect``.""" import sys +from pathlib import Path +from typing import Any +from typing import Dict +from typing import List +from typing import Optional +from typing import TYPE_CHECKING import click from _pytask.config import hookimpl @@ -22,21 +28,28 @@ from rich.tree import Tree +if TYPE_CHECKING: + from typing import NoReturn + from _pytask.nodes import MetaTask + + @hookimpl(tryfirst=True) -def pytask_extend_command_line_interface(cli: click.Group): +def pytask_extend_command_line_interface(cli: click.Group) -> None: """Extend the command line interface.""" cli.add_command(collect) @hookimpl -def pytask_parse_config(config, config_from_cli): +def pytask_parse_config( + config: Dict[str, Any], config_from_cli: Dict[str, Any] +) -> None: """Parse configuration.""" config["nodes"] = config_from_cli.get("nodes", False) @click.command() @click.option("--nodes", is_flag=True, help="Show a task's dependencies and products.") -def collect(**config_from_cli): +def collect(**config_from_cli: Optional[Any]) -> "NoReturn": """Collect tasks from paths.""" config_from_cli["command"] = "collect" @@ -88,7 +101,7 @@ def collect(**config_from_cli): sys.exit(session.exit_code) -def _select_tasks_by_expressions_and_marker(session): +def _select_tasks_by_expressions_and_marker(session: Session) -> "List[MetaTask]": all_tasks = {task.name for task in session.tasks} remaining_by_mark = select_by_mark(session, session.dag) or all_tasks remaining_by_keyword = select_by_keyword(session, session.dag) or all_tasks @@ -97,7 +110,9 @@ def _select_tasks_by_expressions_and_marker(session): return [task for task in session.tasks if task.name in remaining] -def _find_common_ancestor_of_all_nodes(tasks, paths): +def _find_common_ancestor_of_all_nodes( + tasks: "List[MetaTask]", paths: List[Path] +) -> Path: """Find common ancestor from all nodes and passed paths.""" all_paths = [] for task in tasks: @@ 
-112,14 +127,16 @@ def _find_common_ancestor_of_all_nodes(tasks, paths): return common_ancestor -def _organize_tasks(tasks, common_ancestor): +def _organize_tasks( + tasks: "List[MetaTask]", common_ancestor: Path +) -> "Dict[Path, Dict[str, Dict[str, List[Path]]]]": """Organize tasks in a dictionary. The dictionary has file names as keys and then a dictionary with task names and below a dictionary with dependencies and targets. """ - dictionary = {} + dictionary: "Dict[Path, Dict[str, Dict[str, List[Path]]]]" = {} for task in tasks: reduced_task_path = relative_to(task.path, common_ancestor) reduced_task_name = reduce_node_name(task, [common_ancestor]) @@ -145,7 +162,9 @@ def _organize_tasks(tasks, common_ancestor): return dictionary -def _print_collected_tasks(dictionary, show_nodes): +def _print_collected_tasks( + dictionary: "Dict[Path, Dict[str, Dict[str, List[Path]]]]", show_nodes: bool +) -> None: """Print the information on collected tasks. Parameters diff --git a/src/_pytask/compat.py b/src/_pytask/compat.py index 15ba4b6a..efa4f715 100644 --- a/src/_pytask/compat.py +++ b/src/_pytask/compat.py @@ -4,16 +4,17 @@ import sys import types import warnings +from typing import Dict from typing import Optional from packaging.version import parse as parse_version -_MINIMUM_VERSIONS = {} +_MINIMUM_VERSIONS: Dict[str, str] = {} """Dict[str, str]: A mapping from packages to their minimum versions.""" -_IMPORT_TO_PACKAGE_NAME = {} +_IMPORT_TO_PACKAGE_NAME: Dict[str, str] = {} """Dict[str, str]: A mapping from import name to package name (on PyPI) for packages where these two names are different.""" diff --git a/src/_pytask/config.py b/src/_pytask/config.py index 68409777..2de25e38 100644 --- a/src/_pytask/config.py +++ b/src/_pytask/config.py @@ -5,7 +5,10 @@ import tempfile import warnings from pathlib import Path +from typing import Any +from typing import Dict from typing import List +from typing import Tuple import pluggy from _pytask.shared import convert_truthy_or_falsy_to_bool @@ -18,7 +21,7 @@ hookimpl = pluggy.HookimplMarker("pytask") -_IGNORED_FOLDERS = [ +_IGNORED_FOLDERS: List[str] = [ ".git/*", ".hg/*", ".svn/*", @@ -26,7 +29,7 @@ ] -_IGNORED_FILES = [ +_IGNORED_FILES: List[str] = [ ".codecov.yml", ".gitignore", ".pre-commit-config.yaml", @@ -41,10 +44,10 @@ ] -_IGNORED_FILES_AND_FOLDERS = _IGNORED_FILES + _IGNORED_FOLDERS +_IGNORED_FILES_AND_FOLDERS: List[str] = _IGNORED_FILES + _IGNORED_FOLDERS -IGNORED_TEMPORARY_FILES_AND_FOLDERS = [ +IGNORED_TEMPORARY_FILES_AND_FOLDERS: List[str] = [ "*.egg-info/*", ".ipynb_checkpoints/*", ".mypy_cache/*", @@ -68,7 +71,9 @@ def is_file_system_case_sensitive() -> bool: @hookimpl -def pytask_configure(pm, config_from_cli): +def pytask_configure( + pm: pluggy.PluginManager, config_from_cli: Dict[str, Any] +) -> Dict[str, Any]: """Configure pytask.""" config = {"pm": pm} @@ -123,7 +128,11 @@ def pytask_configure(pm, config_from_cli): @hookimpl -def pytask_parse_config(config, config_from_cli, config_from_file): +def pytask_parse_config( + config: Dict[str, Any], + config_from_cli: Dict[str, Any], + config_from_file: Dict[str, Any], +) -> None: """Parse the configuration.""" config["command"] = config_from_cli.get("command", "build") @@ -199,12 +208,12 @@ def pytask_parse_config(config, config_from_cli, config_from_file): @hookimpl -def pytask_post_parse(config): +def pytask_post_parse(config: Dict[str, Any]) -> None: """Sort markers alphabetically.""" config["markers"] = {k: config["markers"][k] for k in sorted(config["markers"])} -def 
_find_project_root_and_ini(paths: List[Path]): +def _find_project_root_and_ini(paths: List[Path]) -> Tuple[Path, Path]: """Find the project root and configuration file from a list of paths.""" try: common_ancestor = Path(os.path.commonpath(paths)) @@ -243,7 +252,7 @@ def _find_project_root_and_ini(paths: List[Path]): return root, config_path -def _read_config(path): +def _read_config(path: Path) -> Dict[str, Any]: """Read the configuration from a file with a [pytask] section.""" config = configparser.ConfigParser() config.read(path) diff --git a/src/_pytask/console.py b/src/_pytask/console.py index f13960e8..83b8e0a7 100644 --- a/src/_pytask/console.py +++ b/src/_pytask/console.py @@ -1,7 +1,7 @@ """This module contains the code to format output on the command line.""" import os import sys -from typing import List +from typing import Iterable from rich.console import Console from rich.tree import Tree @@ -29,7 +29,7 @@ console = Console(color_system=_COLOR_SYSTEM) -def format_strings_as_flat_tree(strings: List[str], title: str, icon: str) -> str: +def format_strings_as_flat_tree(strings: Iterable[str], title: str, icon: str) -> str: """Format list of strings as flat tree.""" tree = Tree(title) for name in strings: diff --git a/src/_pytask/dag.py b/src/_pytask/dag.py index 812ea22f..4a00ca54 100644 --- a/src/_pytask/dag.py +++ b/src/_pytask/dag.py @@ -5,6 +5,7 @@ from typing import Generator from typing import Iterable from typing import List +from typing import Set import attr import networkx as nx @@ -45,7 +46,7 @@ def task_and_preceding_tasks( yield from preceding_tasks(task_name, dag) -def node_and_neighbors(dag: nx.DiGraph, node: str) -> Generator[str, None, None]: +def node_and_neighbors(dag: nx.DiGraph, node: str) -> Iterable[str]: """Yield node and neighbors which are first degree predecessors and successors. 
We cannot use ``dag.neighbors`` as it only considers successors as neighbors in a @@ -63,11 +64,11 @@ class TopologicalSorter: """ - dag = attr.ib(converter=nx.DiGraph) - priorities = attr.ib(factory=dict) - _dag_backup = attr.ib(default=None) + dag = attr.ib(type=nx.DiGraph) + priorities = attr.ib(factory=dict, type=Dict[str, int]) + _dag_backup = attr.ib(default=None, type=nx.DiGraph) _is_prepared = attr.ib(default=False, type=bool) - _nodes_out = attr.ib(factory=set) + _nodes_out = attr.ib(factory=set, type=Set[str]) @classmethod def from_dag(cls, dag: nx.DiGraph, paths: List[Path] = None) -> "TopologicalSorter": @@ -99,7 +100,7 @@ def prepare(self) -> None: self._is_prepared = True - def get_ready(self, n: int = 1): + def get_ready(self, n: int = 1) -> List[str]: """Get up to ``n`` tasks which are ready.""" if not self._is_prepared: raise ValueError("The TopologicalSorter needs to be prepared.") @@ -119,7 +120,7 @@ def is_active(self) -> bool: """Indicate whether there are still tasks left.""" return bool(self.dag.nodes) - def done(self, *nodes: Iterable[str]) -> None: + def done(self, *nodes: str) -> None: """Mark some tasks as done.""" self._nodes_out = self._nodes_out - set(nodes) self.dag.remove_nodes_from(nodes) diff --git a/src/_pytask/database.py b/src/_pytask/database.py index 859f8c42..a36d5231 100644 --- a/src/_pytask/database.py +++ b/src/_pytask/database.py @@ -1,5 +1,7 @@ """Implement the database managed with pony.""" from pathlib import Path +from typing import Any +from typing import Dict import click from _pytask.config import hookimpl @@ -11,7 +13,7 @@ db = orm.Database() -class State(db.Entity): +class State(db.Entity): # type: ignore """Represent the state of a node in relation to a task.""" task = orm.Required(str) @@ -21,7 +23,9 @@ class State(db.Entity): orm.PrimaryKey(task, node) -def create_database(provider, filename, create_db, create_tables): +def create_database( + provider: str, filename: str, create_db: bool, create_tables: bool +) -> None: """Create the database. 
Raises @@ -38,10 +42,10 @@ def create_database(provider, filename, create_db, create_tables): @orm.db_session -def create_or_update_state(first_key, second_key, state): +def create_or_update_state(first_key: str, second_key: str, state: str) -> None: """Create or update a state.""" try: - state_in_db = State[first_key, second_key] + state_in_db = State[first_key, second_key] # type: ignore except orm.ObjectNotFound: State(task=first_key, node=second_key, state=state) else: @@ -49,7 +53,7 @@ def create_or_update_state(first_key, second_key, state): @hookimpl -def pytask_extend_command_line_interface(cli): +def pytask_extend_command_line_interface(cli: click.Group) -> None: """Extend command line interface.""" additional_parameters = [ click.Option( @@ -84,7 +88,11 @@ def pytask_extend_command_line_interface(cli): @hookimpl -def pytask_parse_config(config, config_from_cli, config_from_file): +def pytask_parse_config( + config: Dict[str, Any], + config_from_cli: Dict[str, Any], + config_from_file: Dict[str, Any], +) -> None: """Parse the configuration.""" config["database_provider"] = get_first_non_none_value( config_from_cli, config_from_file, key="database_provider", default="sqlite" @@ -123,5 +131,5 @@ def pytask_parse_config(config, config_from_cli, config_from_file): @hookimpl -def pytask_post_parse(config): +def pytask_post_parse(config: Dict[str, Any]) -> None: create_database(**config["database"]) diff --git a/src/_pytask/debugging.py b/src/_pytask/debugging.py index cb41ba5d..7df19f78 100644 --- a/src/_pytask/debugging.py +++ b/src/_pytask/debugging.py @@ -2,20 +2,38 @@ import functools import pdb import sys +from types import FrameType +from types import TracebackType +from typing import Any +from typing import Dict +from typing import Generator +from typing import List +from typing import Optional +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING import click +import pluggy from _pytask.config import hookimpl from _pytask.console import console +from _pytask.nodes import MetaTask from _pytask.nodes import PythonFunctionTask from _pytask.outcomes import Exit +from _pytask.session import Session from _pytask.shared import convert_truthy_or_falsy_to_bool from _pytask.shared import get_first_non_none_value from _pytask.traceback import remove_internal_traceback_frames_from_exc_info from _pytask.traceback import render_exc_info +if TYPE_CHECKING: + from _pytask.capture import CaptureManager + from _pytask.live import LiveManager + + @hookimpl -def pytask_extend_command_line_interface(cli): +def pytask_extend_command_line_interface(cli: click.Group) -> None: """Extend command line interface.""" additional_parameters = [ click.Option( @@ -49,7 +67,11 @@ def pytask_extend_command_line_interface(cli): @hookimpl -def pytask_parse_config(config, config_from_cli, config_from_file): +def pytask_parse_config( + config: Dict[str, Any], + config_from_cli: Dict[str, Any], + config_from_file: Dict[str, Any], +) -> None: """Parse the configuration.""" config["pdb"] = get_first_non_none_value( config_from_cli, @@ -81,25 +103,24 @@ def pytask_parse_config(config, config_from_cli, config_from_file): ) -def _pdbcls_callback(x): +def _pdbcls_callback(x: Optional[str]) -> Optional[Tuple[str, str]]: """Validate the debugger class string passed to pdbcls.""" message = "'pdbcls' must be like IPython.terminal.debugger:TerminalPdb" if x in [None, "None", "none"]: - x = None + return None elif isinstance(x, str): if len(x.split(":")) != 2: raise ValueError(message) else: - x = 
tuple(x.split(":")) + return tuple(x.split(":")) # type: ignore else: raise ValueError(message) - return x @hookimpl(trylast=True) -def pytask_post_parse(config): +def pytask_post_parse(config: Dict[str, Any]) -> None: """Post parse the configuration. Register the plugins in this step to let other plugins influence the pdb or trace @@ -121,7 +142,7 @@ def pytask_post_parse(config): @hookimpl -def pytask_unconfigure(): +def pytask_unconfigure() -> None: """Return the resources. If the :func:`pdb.set_trace` function would not be returned, using breakpoints in @@ -134,20 +155,22 @@ def pytask_unconfigure(): class PytaskPDB: """Pseudo PDB that defers to the real pdb.""" - _pluginmanager = None - _config = None - _saved = [] - _recursive_debug = 0 - _wrapped_pdb_cls = None + _pluginmanager: Optional[pluggy.PluginManager] = None + _config: Optional[Dict[str, Any]] = None + _saved: List[Tuple[Any, ...]] = [] + _recursive_debug: int = 0 + _wrapped_pdb_cls: Optional[Tuple[Type[pdb.Pdb], Type[pdb.Pdb]]] = None @classmethod - def _is_capturing(cls, capman): + def _is_capturing(cls, capman: "CaptureManager") -> bool: if capman: return capman.is_capturing() return False @classmethod - def _import_pdb_cls(cls, capman, live_manager): + def _import_pdb_cls( + cls, capman: "CaptureManager", live_manager: "LiveManager" + ) -> Type[pdb.Pdb]: if not cls._config: import pdb @@ -186,7 +209,12 @@ def _import_pdb_cls(cls, capman, live_manager): return wrapped_cls @classmethod - def _get_pdb_wrapper_class(cls, pdb_cls, capman, live_manager): + def _get_pdb_wrapper_class( + cls, + pdb_cls: Type[pdb.Pdb], + capman: "CaptureManager", + live_manager: "LiveManager", + ) -> Type[pdb.Pdb]: # Type ignored because mypy doesn't support "dynamic" # inheritance like this. class PytaskPdbWrapper(pdb_cls): # type: ignore[valid-type,misc] @@ -194,13 +222,13 @@ class PytaskPdbWrapper(pdb_cls): # type: ignore[valid-type,misc] _pytask_live_manager = live_manager _continued = False - def do_debug(self, arg): + def do_debug(self, arg): # type: ignore cls._recursive_debug += 1 ret = super().do_debug(arg) cls._recursive_debug -= 1 return ret - def do_continue(self, arg): + def do_continue(self, arg): # type: ignore ret = super().do_continue(arg) if cls._recursive_debug == 0: assert cls._config is not None @@ -228,7 +256,7 @@ def do_continue(self, arg): do_c = do_cont = do_continue - def do_quit(self, arg): + def do_quit(self, arg): # type: ignore """Raise Exit outcome when quit command is used in pdb. This is a bit of a hack - it would be better if BdbQuit could be @@ -246,7 +274,7 @@ def do_quit(self, arg): do_q = do_quit do_exit = do_quit - def setup(self, f, tb): + def setup(self, f, tb): # type: ignore """Suspend on setup(). Needed after do_continue resumed, and entering another breakpoint again. @@ -262,7 +290,7 @@ def setup(self, f, tb): self._pytask_live_manager.pause() return ret - def get_stack(self, f, t): + def get_stack(self, f: FrameType, t: TracebackType) -> Tuple[str, int]: stack, i = super().get_stack(f, t) if f is None: # Find last non-hidden frame. 
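Editor's note: several ``# type: ignore`` comments in the debugging.py hunk above stem from subclassing ``pdb_cls``, a base class that is only known at runtime. A minimal sketch of that situation (``make_wrapper``, ``Wrapper`` and ``CustomPdb`` are illustrative names, not part of the patch):

```python
import pdb
from typing import Type


def make_wrapper(pdb_cls: Type[pdb.Pdb]) -> Type[pdb.Pdb]:
    # mypy cannot use a runtime variable as a base class, hence the targeted
    # ignore on the class statement, mirroring PytaskPdbWrapper above.
    class Wrapper(pdb_cls):  # type: ignore[valid-type,misc]
        pass

    return Wrapper


# At runtime the wrapper simply subclasses whatever class was passed in.
CustomPdb = make_wrapper(pdb.Pdb)
```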
@@ -274,7 +302,7 @@ def get_stack(self, f, t): return PytaskPdbWrapper @classmethod - def _init_pdb(cls, method, *args, **kwargs): # noqa: U100 + def _init_pdb(cls, method: str, *args: Any, **kwargs: Any) -> pdb.Pdb: # noqa: U100 """Initialize PDB debugging, dropping any IO capturing.""" if cls._pluginmanager is None: capman = None @@ -311,7 +339,7 @@ def _init_pdb(cls, method, *args, **kwargs): # noqa: U100 return _pdb @classmethod - def set_trace(cls, *args, **kwargs) -> None: + def set_trace(cls, *args: Any, **kwargs: Any) -> None: """Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing.""" frame = sys._getframe().f_back _pdb = cls._init_pdb("set_trace", *args, **kwargs) @@ -323,20 +351,22 @@ class PdbDebugger: @staticmethod @hookimpl(hookwrapper=True) - def pytask_execute_task(session, task): + def pytask_execute_task( + session: Session, task: MetaTask + ) -> Generator[None, None, None]: """Execute a task by wrapping the function with post-mortem debugger.""" if isinstance(task, PythonFunctionTask): wrap_function_for_post_mortem_debugging(session, task) yield -def wrap_function_for_post_mortem_debugging(session, task): +def wrap_function_for_post_mortem_debugging(session: Session, task: MetaTask) -> None: """Wrap the function for post-mortem debugging.""" task_function = task.function @functools.wraps(task_function) - def wrapper(*args, **kwargs): + def wrapper(*args: Any, **kwargs: Any) -> None: capman = session.config["pm"].get_plugin("capturemanager") live_manager = session.config["pm"].get_plugin("live_manager") try: @@ -382,14 +412,16 @@ class PdbTrace: @staticmethod @hookimpl(hookwrapper=True) - def pytask_execute_task(session, task): + def pytask_execute_task( + session: Session, task: MetaTask + ) -> Generator[None, None, None]: """Wrapping the task function with a tracer.""" if isinstance(task, PythonFunctionTask): wrap_function_for_tracing(session, task) yield -def wrap_function_for_tracing(session, task): +def wrap_function_for_tracing(session: Session, task: MetaTask) -> None: """Wrap the task function for tracing.""" _pdb = PytaskPDB._init_pdb("runcall") @@ -399,7 +431,7 @@ def wrap_function_for_tracing(session, task): # 3.7.4) runcall's first param is `func`, which means we'd get an exception if one # of the kwargs to task_function was called `func`. 
@functools.wraps(task_function) - def wrapper(*args, **kwargs): + def wrapper(*args: Any, **kwargs: Any) -> None: capman = session.config["pm"].get_plugin("capturemanager") live_manager = session.config["pm"].get_plugin("live_manager") @@ -428,7 +460,7 @@ def wrapper(*args, **kwargs): task.function = wrapper -def post_mortem(t) -> None: +def post_mortem(t: TracebackType) -> None: p = PytaskPDB._init_pdb("post_mortem") p.reset() p.interaction(None, t) diff --git a/src/_pytask/execute.py b/src/_pytask/execute.py index 7fc154d5..c95a3f61 100644 --- a/src/_pytask/execute.py +++ b/src/_pytask/execute.py @@ -1,6 +1,10 @@ import sys import time +from typing import Any +from typing import Dict +from typing import List +import networkx as nx from _pytask.config import hookimpl from _pytask.console import console from _pytask.dag import descending_tasks @@ -12,10 +16,12 @@ from _pytask.exceptions import NodeNotFoundError from _pytask.mark import Mark from _pytask.nodes import FilePathNode +from _pytask.nodes import MetaTask from _pytask.outcomes import Exit from _pytask.outcomes import Persisted from _pytask.outcomes import Skipped from _pytask.report import ExecutionReport +from _pytask.session import Session from _pytask.shared import get_first_non_none_value from _pytask.shared import reduce_node_name from _pytask.traceback import format_exception_without_traceback @@ -24,13 +30,17 @@ @hookimpl -def pytask_post_parse(config): +def pytask_post_parse(config: Dict[str, Any]) -> None: if config["show_errors_immediately"]: config["pm"].register(ShowErrorsImmediatelyPlugin) @hookimpl -def pytask_parse_config(config, config_from_cli, config_from_file): +def pytask_parse_config( + config: Dict[str, Any], + config_from_cli: Dict[str, Any], + config_from_file: Dict[str, Any], +) -> None: config["show_errors_immediately"] = get_first_non_none_value( config_from_cli, config_from_file, @@ -41,7 +51,7 @@ def pytask_parse_config(config, config_from_cli, config_from_file): @hookimpl -def pytask_execute(session): +def pytask_execute(session: Session) -> None: """Execute tasks.""" session.hook.pytask_execute_log_start(session=session) session.scheduler = session.hook.pytask_execute_create_scheduler(session=session) @@ -52,7 +62,7 @@ def pytask_execute(session): @hookimpl -def pytask_execute_log_start(session): +def pytask_execute_log_start(session: Session) -> None: """Start logging.""" session.execution_start = time.time() @@ -61,7 +71,7 @@ def pytask_execute_log_start(session): @hookimpl(trylast=True) -def pytask_execute_create_scheduler(session): +def pytask_execute_create_scheduler(session: Session) -> TopologicalSorter: """Create a scheduler based on topological sorting.""" scheduler = TopologicalSorter.from_dag(session.dag) scheduler.prepare() @@ -69,21 +79,25 @@ def pytask_execute_create_scheduler(session): @hookimpl -def pytask_execute_build(session): +def pytask_execute_build(session: Session) -> bool: """Execute tasks.""" - for name in session.scheduler.static_order(): - task = session.dag.nodes[name]["task"] - report = session.hook.pytask_execute_task_protocol(session=session, task=task) - session.execution_reports.append(report) - - if session.should_stop: - return True + if isinstance(session.scheduler, TopologicalSorter): + for name in session.scheduler.static_order(): + task = session.dag.nodes[name]["task"] + report = session.hook.pytask_execute_task_protocol( + session=session, task=task + ) + session.execution_reports.append(report) - return True + if session.should_stop: + return True + return 
True + else: + return None @hookimpl -def pytask_execute_task_protocol(session, task): +def pytask_execute_task_protocol(session: Session, task: MetaTask) -> ExecutionReport: """Follow the protocol to execute each task.""" session.hook.pytask_execute_task_log_start(session=session, task=task) try: @@ -105,7 +119,7 @@ def pytask_execute_task_protocol(session, task): @hookimpl(trylast=True) -def pytask_execute_task_setup(session, task): +def pytask_execute_task_setup(session: Session, task: MetaTask) -> None: """Set up the execution of a task. 1. Check whether all dependencies of a task are available. @@ -130,13 +144,13 @@ def pytask_execute_task_setup(session, task): @hookimpl -def pytask_execute_task(task): +def pytask_execute_task(task: MetaTask) -> None: """Execute task.""" task.execute() @hookimpl -def pytask_execute_task_teardown(session, task): +def pytask_execute_task_teardown(session: Session, task: MetaTask) -> None: """Check if each produced node was indeed produced.""" for product in session.dag.successors(task.name): node = session.dag.nodes[product]["node"] @@ -149,7 +163,9 @@ def pytask_execute_task_teardown(session, task): @hookimpl(trylast=True) -def pytask_execute_task_process_report(session, report): +def pytask_execute_task_process_report( + session: Session, report: ExecutionReport +) -> bool: """Process the execution report of a task. If a task failed, skip all subsequent tasks. Else, update the states of related @@ -185,7 +201,7 @@ def pytask_execute_task_process_report(session, report): @hookimpl(trylast=True) -def pytask_execute_task_log_end(report): +def pytask_execute_task_log_end(report: ExecutionReport) -> None: """Log task outcome.""" console.print(report.symbol, style=report.color, end="") @@ -193,13 +209,13 @@ def pytask_execute_task_log_end(report): class ShowErrorsImmediatelyPlugin: @staticmethod @hookimpl(tryfirst=True) - def pytask_execute_task_log_end(session, report): + def pytask_execute_task_log_end(session: Session, report: ExecutionReport) -> None: if not report.success: _print_errored_task_report(session, report) @hookimpl -def pytask_execute_log_end(session, reports): +def pytask_execute_log_end(session: Session, reports: List[ExecutionReport]) -> bool: session.execution_end = time.time() n_failed = len(reports) - sum(report.success for report in reports) @@ -240,7 +256,7 @@ def pytask_execute_log_end(session, reports): return True -def _print_errored_task_report(session, report): +def _print_errored_task_report(session: Session, report: ExecutionReport) -> None: """Print the traceback and the exception of an errored report.""" task_name = reduce_node_name(report.task, session.config["paths"]) if len(task_name) > console.width - 15: @@ -262,7 +278,7 @@ def _print_errored_task_report(session, report): console.print(content) -def _update_states_in_database(dag, task_name): +def _update_states_in_database(dag: nx.DiGraph, task_name: str) -> None: """Update the state for each node of a task in the database.""" for name in node_and_neighbors(dag, task_name): node = dag.nodes[name].get("task") or dag.nodes[name]["node"] diff --git a/src/_pytask/graph.py b/src/_pytask/graph.py index 08e23282..934c951a 100644 --- a/src/_pytask/graph.py +++ b/src/_pytask/graph.py @@ -3,6 +3,8 @@ from pathlib import Path from typing import Any from typing import Dict +from typing import List +from typing import TYPE_CHECKING import click import networkx as nx @@ -24,14 +26,22 @@ from rich.traceback import Traceback +if TYPE_CHECKING: + from typing import NoReturn + + 
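Editor's note: the ``TYPE_CHECKING`` block just added to graph.py mirrors the ones in build.py, clean.py and collect_command.py: ``NoReturn`` is imported only for the type checker and quoted in the signature. A self-contained sketch of the pattern (``main`` is an illustrative name):

```python
import sys
from typing import Any, TYPE_CHECKING

if TYPE_CHECKING:
    # Imported only for mypy; the quoted annotation below is never evaluated
    # at runtime, so the typing-only import carries no runtime cost.
    from typing import NoReturn


def main(**config_from_cli: Any) -> "NoReturn":
    # Commands that always end with sys.exit() are annotated as NoReturn.
    sys.exit(0)
```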
@hookimpl(tryfirst=True) -def pytask_extend_command_line_interface(cli: click.Group): +def pytask_extend_command_line_interface(cli: click.Group) -> None: """Extend the command line interface.""" cli.add_command(dag) @hookimpl -def pytask_parse_config(config, config_from_cli, config_from_file): +def pytask_parse_config( + config: Dict[str, Any], + config_from_cli: Dict[str, Any], + config_from_file: Dict[str, Any], +) -> None: """Parse configuration.""" config["output_path"] = get_first_non_none_value( config_from_cli, @@ -48,13 +58,13 @@ def pytask_parse_config(config, config_from_cli, config_from_file): ) -_HELP_TEXT_LAYOUT = ( +_HELP_TEXT_LAYOUT: str = ( "The layout determines the structure of the graph. Here you find an overview of " "all available layouts: https://graphviz.org/#roadmap." ) -_HELP_TEXT_OUTPUT = ( +_HELP_TEXT_OUTPUT: str = ( "The output path of the visualization. The format is inferred from the file " "extension." ) @@ -63,7 +73,7 @@ def pytask_parse_config(config, config_from_cli, config_from_file): @click.command() @click.option("-l", "--layout", type=str, default=None, help=_HELP_TEXT_LAYOUT) @click.option("-o", "--output-path", type=str, default=None, help=_HELP_TEXT_OUTPUT) -def dag(**config_from_cli): +def dag(**config_from_cli: Any) -> "NoReturn": """Create a visualization of the project's DAG.""" try: pm = get_plugin_manager() @@ -111,7 +121,7 @@ def dag(**config_from_cli): sys.exit(session.exit_code) -def build_dag(config_from_cli: Dict[str, Any]) -> "pydot.Dot": # noqa: F821 +def build_dag(config_from_cli: Dict[str, Any]) -> nx.DiGraph: """Build the DAG. This function is the programmatic interface to ``pytask dag`` and returns a @@ -168,7 +178,7 @@ def build_dag(config_from_cli: Dict[str, Any]) -> "pydot.Dot": # noqa: F821 return dag -def _refine_dag(session): +def _refine_dag(session: Session) -> nx.DiGraph: dag = _shorten_node_labels(session.dag, session.config["paths"]) dag = _add_root_node(dag) dag = _clean_dag(dag) @@ -217,7 +227,7 @@ def _create_session(config_from_cli: Dict[str, Any]) -> nx.DiGraph: return session -def _shorten_node_labels(dag, paths): +def _shorten_node_labels(dag: nx.DiGraph, paths: List[Path]) -> nx.DiGraph: node_names = dag.nodes short_names = reduce_names_of_multiple_nodes(node_names, dag, paths) old_to_new = dict(zip(node_names, short_names)) @@ -225,7 +235,7 @@ def _shorten_node_labels(dag, paths): return dag -def _add_root_node(dag): +def _add_root_node(dag: nx.DiGraph) -> nx.DiGraph: tasks_without_predecessor = [ name for name in dag.nodes @@ -239,7 +249,7 @@ def _add_root_node(dag): return dag -def _clean_dag(dag): +def _clean_dag(dag: nx.DiGraph) -> nx.DiGraph: """Clean the DAG.""" for node in dag.nodes: dag.nodes[node].clear() @@ -252,7 +262,7 @@ def _style_dag(dag: nx.DiGraph) -> nx.DiGraph: return dag -def _escape_node_names_with_colons(dag: nx.DiGraph): +def _escape_node_names_with_colons(dag: nx.DiGraph) -> nx.DiGraph: """Escape node names with colons. pydot cannot handle colons in node names since it messes up some syntax. Escaping diff --git a/src/_pytask/hookspecs.py b/src/_pytask/hookspecs.py index 0e38670a..7df991b8 100644 --- a/src/_pytask/hookspecs.py +++ b/src/_pytask/hookspecs.py @@ -4,10 +4,30 @@ the message send by the host and may send a response. 
""" +from pathlib import Path +from typing import Any +from typing import Callable +from typing import Dict +from typing import List +from typing import Optional +from typing import Tuple +from typing import TYPE_CHECKING + import click +import networkx as nx import pluggy +if TYPE_CHECKING: + from _pytask.session import Session + from _pytask.nodes import MetaTask, MetaNode + from _pytask.reports import ( + CollectionReport, + ExecutionReport, + ResolveDependencyReport, + ) + + hookspec = pluggy.HookspecMarker("pytask") @@ -46,7 +66,9 @@ def pytask_extend_command_line_interface(cli: click.Group) -> None: @hookspec(firstresult=True) -def pytask_configure(pm: pluggy.PluginManager, config_from_cli: dict) -> dict: +def pytask_configure( + pm: pluggy.PluginManager, config_from_cli: Dict[str, Any] +) -> Dict[str, Any]: """Configure pytask. The main hook implementation which controls the configuration and calls subordinated @@ -57,7 +79,9 @@ def pytask_configure(pm: pluggy.PluginManager, config_from_cli: dict) -> dict: @hookspec def pytask_parse_config( - config: dict, config_from_cli: dict, config_from_file: dict + config: Dict[str, Any], + config_from_cli: Dict[str, Any], + config_from_file: Dict[str, Any], ) -> None: """Parse configuration from the CLI or from file. @@ -71,7 +95,7 @@ def pytask_parse_config( @hookspec -def pytask_post_parse(config: dict) -> None: +def pytask_post_parse(config: Dict[str, Any]) -> None: """Post parsing. This hook allows to consolidate the configuration in case some plugins might be @@ -83,7 +107,7 @@ def pytask_post_parse(config: dict) -> None: @hookspec -def pytask_unconfigure(session): +def pytask_unconfigure(session: "Session") -> None: """Unconfigure a pytask session before the process is exited. The hook allows to return resources previously borrowed like :func:`pdb.set_trace` @@ -96,7 +120,7 @@ def pytask_unconfigure(session): @hookspec(firstresult=True) -def pytask_collect(session): +def pytask_collect(session: "Session") -> Any: """Collect tasks from paths. The main hook implementation which controls the collection and calls subordinated @@ -106,7 +130,7 @@ def pytask_collect(session): @hookspec(firstresult=True) -def pytask_ignore_collect(path, config): +def pytask_ignore_collect(path: Path, config: Dict[str, Any]) -> bool: """Ignore collected path. This hook is indicates for each directory and file whether it should be ignored. @@ -116,7 +140,7 @@ def pytask_ignore_collect(path, config): @hookspec -def pytask_collect_modify_tasks(session, tasks): +def pytask_collect_modify_tasks(session: "Session", tasks: "List[MetaTask]") -> None: """Modify tasks after they have been collected. This hook can be used to deselect tasks when they match a certain keyword or mark. @@ -125,7 +149,9 @@ def pytask_collect_modify_tasks(session, tasks): @hookspec(firstresult=True) -def pytask_collect_file_protocol(session, path, reports): +def pytask_collect_file_protocol( + session: "Session", path: Path, reports: "List[CollectionReport]" +) -> "List[CollectionReport]": """Start protocol to collect files. The protocol calls the subordinate hook :func:`pytask_collect_file` which might @@ -135,7 +161,9 @@ def pytask_collect_file_protocol(session, path, reports): @hookspec(firstresult=True) -def pytask_collect_file(session, path, reports): +def pytask_collect_file( + session: "Session", path: Path, reports: "List[CollectionReport]" +) -> "Optional[List[CollectionReport]]": """Collect tasks from a file. If you want to collect tasks from other files, modify this hook. 
@@ -144,27 +172,35 @@ def pytask_collect_file(session, path, reports): @hookspec -def pytask_collect_file_log(session, reports): +def pytask_collect_file_log( + session: "Session", reports: "List[CollectionReport]" +) -> None: """Perform logging at the end of collecting a file.""" @hookspec(firstresult=True) -def pytask_collect_task_protocol(session, path, name, obj): +def pytask_collect_task_protocol( + session: "Session", path: Path, name: str, obj: Any +) -> "Optional[CollectionReport]": """Start protocol to collect tasks.""" @hookspec -def pytask_collect_task_setup(session, path, name, obj): +def pytask_collect_task_setup( + session: "Session", path: Path, name: str, obj: Any +) -> None: """Steps before collecting a task.""" @hookspec(firstresult=True) -def pytask_collect_task(session, path, name, obj): +def pytask_collect_task( + session: "Session", path: Path, name: str, obj: Any +) -> "MetaTask": """Collect a single task.""" @hookspec -def pytask_collect_task_teardown(session, task): +def pytask_collect_task_teardown(session: "Session", task: "MetaTask") -> None: """Perform tear-down operations when a task was collected. Use this hook specification to, for example, perform checks on the collected task. @@ -173,12 +209,16 @@ def pytask_collect_task_teardown(session, task): @hookspec(firstresult=True) -def pytask_collect_node(session, path, node): +def pytask_collect_node( + session: "Session", path: Path, node: "MetaNode" +) -> "Optional[MetaNode]": """Collect a node which is a dependency or a product of a task.""" @hookspec(firstresult=True) -def pytask_collect_log(session, reports, tasks): +def pytask_collect_log( + session: "Session", reports: "List[CollectionReport]", tasks: "List[MetaTask]" +) -> None: """Log errors occurring during the collection. This hook reports errors during the collection. @@ -190,12 +230,14 @@ def pytask_collect_log(session, reports, tasks): @hookspec(firstresult=True) -def pytask_parametrize_task(session, name, obj): +def pytask_parametrize_task( + session: "Session", name: str, obj: Any +) -> List[Tuple[str, Callable[..., Any]]]: """Generate multiple tasks from name and object with parametrization.""" @hookspec -def pytask_parametrize_kwarg_to_marker(obj, kwargs): +def pytask_parametrize_kwarg_to_marker(obj: Any, kwargs: Dict[Any, Any]) -> None: """Add some keyword arguments as markers to object. This hook moves arguments defined in the parametrization to marks of the same @@ -209,7 +251,7 @@ def pytask_parametrize_kwarg_to_marker(obj, kwargs): @hookspec(firstresult=True) -def pytask_resolve_dependencies(session): +def pytask_resolve_dependencies(session: "Session") -> None: """Resolve dependencies. The main hook implementation which controls the resolution of dependencies and calls @@ -219,7 +261,9 @@ def pytask_resolve_dependencies(session): @hookspec(firstresult=True) -def pytask_resolve_dependencies_create_dag(session, tasks): +def pytask_resolve_dependencies_create_dag( + session: "Session", tasks: "List[MetaTask]" +) -> nx.DiGraph: """Create the DAG. This hook creates the DAG from tasks, dependencies and products. The DAG can be used @@ -229,7 +273,7 @@ def pytask_resolve_dependencies_create_dag(session, tasks): @hookspec -def pytask_resolve_dependencies_modify_dag(session, dag): +def pytask_resolve_dependencies_modify_dag(session: "Session", dag: nx.DiGraph) -> None: """Modify the DAG. 
This hook allows to make some changes to the DAG before it is validated and tasks @@ -239,7 +283,9 @@ def pytask_resolve_dependencies_modify_dag(session, dag): @hookspec(firstresult=True) -def pytask_resolve_dependencies_validate_dag(session, dag): +def pytask_resolve_dependencies_validate_dag( + session: "Session", dag: nx.DiGraph +) -> None: """Validate the DAG. This hook validates the DAG. For example, there can be cycles in the DAG if tasks, @@ -249,7 +295,9 @@ def pytask_resolve_dependencies_validate_dag(session, dag): @hookspec -def pytask_resolve_dependencies_select_execution_dag(session, dag): +def pytask_resolve_dependencies_select_execution_dag( + session: "Session", dag: nx.DiGraph +) -> None: """Select the subgraph which needs to be executed. This hook determines which of the tasks have to be re-run because something has @@ -259,7 +307,9 @@ def pytask_resolve_dependencies_select_execution_dag(session, dag): @hookspec -def pytask_resolve_dependencies_log(session, report): +def pytask_resolve_dependencies_log( + session: "Session", report: "ResolveDependencyReport" +) -> None: """Log errors during resolving dependencies.""" @@ -267,7 +317,7 @@ def pytask_resolve_dependencies_log(session, report): @hookspec(firstresult=True) -def pytask_execute(session): +def pytask_execute(session: "Session") -> Optional[Any]: """Loop over all tasks for the execution. The main hook implementation which controls the execution and calls subordinated @@ -277,7 +327,7 @@ def pytask_execute(session): @hookspec -def pytask_execute_log_start(session): +def pytask_execute_log_start(session: "Session") -> None: """Start logging of execution. This hook allows to provide a header with information before the execution starts. @@ -286,7 +336,7 @@ def pytask_execute_log_start(session): @hookspec(firstresult=True) -def pytask_execute_create_scheduler(session): +def pytask_execute_create_scheduler(session: "Session") -> Any: """Create a scheduler for the execution. The scheduler provides information on which tasks are able to be executed. Its @@ -296,7 +346,7 @@ def pytask_execute_create_scheduler(session): @hookspec(firstresult=True) -def pytask_execute_build(session): +def pytask_execute_build(session: "Session") -> Any: """Execute the build. This hook implements the main loop to execute tasks. @@ -305,7 +355,9 @@ def pytask_execute_build(session): @hookspec(firstresult=True) -def pytask_execute_task_protocol(session, task): +def pytask_execute_task_protocol( + session: "Session", task: "MetaTask" +) -> "ExecutionReport": """Run the protocol for executing a test. This hook runs all stages of the execution process, setup, execution, and teardown @@ -317,7 +369,7 @@ def pytask_execute_task_protocol(session, task): @hookspec(firstresult=True) -def pytask_execute_task_log_start(session, task): +def pytask_execute_task_log_start(session: "Session", task: "MetaTask") -> None: """Start logging of task execution. This hook can be used to provide more verbose output during the execution. @@ -326,7 +378,7 @@ def pytask_execute_task_log_start(session, task): @hookspec -def pytask_execute_task_setup(session, task): +def pytask_execute_task_setup(session: "Session", task: "MetaTask") -> None: """Set up the task execution. 
This hook is called before the task is executed and can provide an entry-point to @@ -337,12 +389,12 @@ def pytask_execute_task_setup(session, task): @hookspec(firstresult=True) -def pytask_execute_task(session, task): +def pytask_execute_task(session: "Session", task: "MetaTask") -> Optional[Any]: """Execute a task.""" @hookspec -def pytask_execute_task_teardown(session, task): +def pytask_execute_task_teardown(session: "Session", task: "MetaTask") -> None: """Tear down task execution. This hook is executed after the task has been executed. It allows to perform @@ -352,7 +404,9 @@ def pytask_execute_task_teardown(session, task): @hookspec(firstresult=True) -def pytask_execute_task_process_report(session, report): +def pytask_execute_task_process_report( + session: "Session", report: "ExecutionReport" +) -> Optional[Any]: """Process the report of a task. This hook allows to process each report generated by a task which is either based on @@ -365,12 +419,14 @@ def pytask_execute_task_process_report(session, report): @hookspec(firstresult=True) -def pytask_execute_task_log_end(session, report): +def pytask_execute_task_log_end(session: "Session", report: "ExecutionReport") -> None: """Log the end of a task execution.""" @hookspec -def pytask_execute_log_end(session, reports): +def pytask_execute_log_end( + session: "Session", reports: "List[ExecutionReport]" +) -> None: """Log the footer of the execution report.""" @@ -378,12 +434,14 @@ def pytask_execute_log_end(session, reports): @hookspec -def pytask_log_session_header(session): +def pytask_log_session_header(session: "Session") -> None: """Log session information at the begin of a run.""" @hookspec -def pytask_log_session_footer(session, infos, duration, color): +def pytask_log_session_footer( + session: "Session", infos: List[Tuple[Any, str, str]], duration: float, color: str +) -> None: """Log session information at the end of a run.""" @@ -391,7 +449,9 @@ def pytask_log_session_footer(session, infos, duration, color): @hookspec -def pytask_profile_add_info_on_task(session, tasks, profile): +def pytask_profile_add_info_on_task( + session: "Session", tasks: "List[MetaTask]", profile: Dict[str, Dict[Any, Any]] +) -> None: """Add information on task for profile. Hook implementations can add information to the ``profile`` dictionary. 
The @@ -402,5 +462,7 @@ def pytask_profile_add_info_on_task(session, tasks, profile): @hookspec -def pytask_profile_export_profile(session, profile): +def pytask_profile_export_profile( + session: "Session", profile: Dict[str, Dict[Any, Any]] +) -> None: """Export the profile.""" diff --git a/src/_pytask/live.py b/src/_pytask/live.py index cdeef89e..ce9102e3 100644 --- a/src/_pytask/live.py +++ b/src/_pytask/live.py @@ -1,10 +1,19 @@ from pathlib import Path +from typing import Any +from typing import Dict +from typing import Generator +from typing import List +from typing import Optional +from typing import Set from typing import Union import attr import click from _pytask.config import hookimpl from _pytask.console import console +from _pytask.nodes import MetaTask +from _pytask.report import CollectionReport +from _pytask.report import ExecutionReport from _pytask.shared import get_first_non_none_value from _pytask.shared import reduce_node_name from rich.live import Live @@ -14,7 +23,7 @@ @hookimpl -def pytask_extend_command_line_interface(cli): +def pytask_extend_command_line_interface(cli: click.Group) -> None: """Extend command line interface.""" additional_parameters = [ click.Option( @@ -28,7 +37,11 @@ def pytask_extend_command_line_interface(cli): @hookimpl -def pytask_parse_config(config, config_from_cli, config_from_file): +def pytask_parse_config( + config: Dict[str, Any], + config_from_cli: Dict[str, Any], + config_from_file: Dict[str, Any], +) -> None: config["n_entries_in_table"] = get_first_non_none_value( config_from_cli, config_from_file, @@ -55,7 +68,7 @@ def _parse_n_entries_in_table(value: Union[int, str, None]) -> int: @hookimpl -def pytask_post_parse(config): +def pytask_post_parse(config: Dict[str, Any]) -> None: live_manager = LiveManager() config["pm"].register(live_manager, "live_manager") @@ -84,29 +97,29 @@ class LiveManager: _live = Live(renderable=None, console=console, auto_refresh=True) - def start(self): + def start(self) -> None: self._live.start() - def stop(self, transient=None): + def stop(self, transient: Optional[bool] = None) -> None: if transient is not None: self._live.transient = transient self._live.stop() - def pause(self): + def pause(self) -> None: self._live.transient = True self.stop() - def resume(self): + def resume(self) -> None: if not self._live.renderable: return self._live.transient = False self.start() - def update(self, *args, **kwargs): + def update(self, *args: Any, **kwargs: Any) -> None: self._live.update(*args, **kwargs) @property - def is_started(self): + def is_started(self) -> bool: return self._live.is_started @@ -114,30 +127,30 @@ def is_started(self): class LiveExecution: _live_manager = attr.ib(type=LiveManager) - _paths = attr.ib(type=Path) + _paths = attr.ib(type=List[Path]) _n_entries_in_table = attr.ib(type=int) _verbose = attr.ib(type=int) - _running_tasks = attr.ib(factory=set) - _reports = attr.ib(factory=list) + _running_tasks = attr.ib(factory=set, type=Set[str]) + _reports = attr.ib(factory=list, type=List[Dict[str, str]]) @hookimpl(hookwrapper=True) - def pytask_execute_build(self): + def pytask_execute_build(self) -> Generator[None, None, None]: self._live_manager.start() yield self._update_table(reduce_table=False) self._live_manager.stop(transient=False) @hookimpl(tryfirst=True) - def pytask_execute_task_log_start(self, task): + def pytask_execute_task_log_start(self, task: MetaTask) -> bool: self.update_running_tasks(task) return True @hookimpl - def pytask_execute_task_log_end(self, report): + def 
pytask_execute_task_log_end(self, report: ExecutionReport) -> bool: self.update_reports(report) return True - def _generate_table(self, reduce_table: bool) -> Union[None, Table]: + def _generate_table(self, reduce_table: bool) -> Optional[Table]: """Generate the table. First, display all completed tasks and, then, all running tasks. @@ -171,16 +184,16 @@ def _generate_table(self, reduce_table: bool) -> Union[None, Table]: return table - def _update_table(self, reduce_table: bool = True): + def _update_table(self, reduce_table: bool = True) -> None: table = self._generate_table(reduce_table) self._live_manager.update(table) - def update_running_tasks(self, new_running_task): + def update_running_tasks(self, new_running_task: MetaTask) -> None: reduced_task_name = reduce_node_name(new_running_task, self._paths) self._running_tasks.add(reduced_task_name) self._update_table() - def update_reports(self, new_report): + def update_reports(self, new_report: ExecutionReport) -> None: reduced_task_name = reduce_node_name(new_report.task, self._paths) self._running_tasks.remove(reduced_task_name) self._reports.append( @@ -201,22 +214,22 @@ class LiveCollection: _n_errors = attr.ib(default=0, type=int) @hookimpl(hookwrapper=True) - def pytask_collect(self): + def pytask_collect(self) -> Generator[None, None, None]: self._live_manager.start() yield @hookimpl - def pytask_collect_file_log(self, reports): + def pytask_collect_file_log(self, reports: List[CollectionReport]) -> None: self._update_statistics(reports) self._update_status() @hookimpl(hookwrapper=True) - def pytask_collect_log(self): + def pytask_collect_log(self) -> Generator[None, None, None]: self._live_manager.update(None) self._live_manager.stop(transient=True) yield - def _update_statistics(self, reports): + def _update_statistics(self, reports: List[CollectionReport]) -> None: if reports is None: reports = [] for report in reports: @@ -225,11 +238,11 @@ def _update_statistics(self, reports): else: self._n_errors += 1 - def _update_status(self): + def _update_status(self) -> None: status = self._generate_status() self._live_manager.update(status) - def _generate_status(self): + def _generate_status(self) -> Status: msg = f"Collected {self._n_collected_tasks} tasks." if self._n_errors > 0: msg += f" {self._n_errors} errors." 
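The attr.ib(type=...) calls in live.py attach type metadata to attrs classes without switching to class-level annotations. Roughly, the pattern looks like this self-contained sketch; the _Tracker class and its fields are invented for illustration and are not part of pytask.

from typing import List
from typing import Set

import attr


@attr.s
class _Tracker:
    """A toy class mirroring how LiveExecution declares typed attributes."""

    running = attr.ib(factory=set, type=Set[str])
    finished = attr.ib(factory=list, type=List[str])


tracker = _Tracker()
tracker.running.add("task_module.py::task_example")
tracker.finished.append("task_module.py::task_other")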
diff --git a/src/_pytask/logging.py b/src/_pytask/logging.py index 70f6dcb7..2ba064e0 100644 --- a/src/_pytask/logging.py +++ b/src/_pytask/logging.py @@ -2,20 +2,39 @@ import platform import sys from typing import Any +from typing import Dict from typing import List from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union import _pytask import click import pluggy from _pytask.config import hookimpl from _pytask.console import console +from _pytask.session import Session from _pytask.shared import convert_truthy_or_falsy_to_bool from _pytask.shared import get_first_non_none_value +try: + from pluggy._manager import DistFacade +except ImportError: + from pluggy.manager import DistFacade + + +if TYPE_CHECKING and sys.version_info >= (3, 8): + from typing import TypedDict + + class _TimeUnit(TypedDict): + singular: str + plural: str + short: str + in_seconds: int + @hookimpl -def pytask_extend_command_line_interface(cli): +def pytask_extend_command_line_interface(cli: click.Group) -> None: show_locals_option = click.Option( ["--show-locals"], is_flag=True, @@ -26,7 +45,11 @@ def pytask_extend_command_line_interface(cli): @hookimpl -def pytask_parse_config(config, config_from_file, config_from_cli): +def pytask_parse_config( + config: Dict[str, Any], + config_from_file: Dict[str, Any], + config_from_cli: Dict[str, Any], +) -> None: config["show_locals"] = get_first_non_none_value( config_from_cli, config_from_file, @@ -37,7 +60,7 @@ def pytask_parse_config(config, config_from_file, config_from_cli): @hookimpl -def pytask_log_session_header(session): +def pytask_log_session_header(session: Session) -> None: """Log the header of a pytask session.""" console.rule("Start pytask session", style=None) console.print( @@ -56,7 +79,9 @@ def pytask_log_session_header(session): console.print(f"Plugins: {formatted_plugins_w_versions}") -def _format_plugin_names_and_versions(plugininfo) -> List[str]: +def _format_plugin_names_and_versions( + plugininfo: List[Tuple[str, DistFacade]] +) -> List[str]: """Format name and version of loaded plugins.""" values: List[str] = [] for _, dist in plugininfo: @@ -73,8 +98,8 @@ def _format_plugin_names_and_versions(plugininfo) -> List[str]: @hookimpl def pytask_log_session_footer( - infos: List[Tuple[Any]], duration: float, color: str -) -> str: + infos: List[Tuple[Any, str, str]], duration: float, color: str +) -> None: """Format the footer of the log message.""" message = _style_infos(infos) formatted_duration = _format_duration(duration) @@ -83,7 +108,7 @@ def pytask_log_session_footer( console.rule(message, style=color) -def _style_infos(infos: List[Tuple[Any]]) -> str: +def _style_infos(infos: List[Tuple[Any, str, str]]) -> str: """Style infos. Example @@ -102,7 +127,7 @@ def _style_infos(infos: List[Tuple[Any]]) -> str: return ", ".join(message) -_TIME_UNITS = [ +_TIME_UNITS: List["_TimeUnit"] = [ {"singular": "day", "plural": "days", "short": "d", "in_seconds": 86400}, {"singular": "hour", "plural": "hours", "short": "h", "in_seconds": 3600}, {"singular": "minute", "plural": "minutes", "short": "m", "in_seconds": 60}, @@ -110,7 +135,7 @@ def _style_infos(infos: List[Tuple[Any]]) -> str: ] -def _format_duration(duration): +def _format_duration(duration: float) -> str: duration_tuples = _humanize_time(duration, "seconds", short_label=False) # Remove seconds if the execution lasted days or hours. 
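The _TimeUnit TypedDict in logging.py is only visible to type checkers on Python 3.8+. A variant of that guard which also covers older interpreters via typing_extensions (mirroring the Literal guard in capture.py) could look like this sketch; the names _Unit and _UNITS are invented.

import sys
from typing import List
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    if sys.version_info >= (3, 8):
        from typing import TypedDict
    else:
        from typing_extensions import TypedDict

    class _Unit(TypedDict):
        singular: str
        in_seconds: int


# The annotation is a string, so nothing is evaluated at runtime.
_UNITS: "List[_Unit]" = [{"singular": "minute", "in_seconds": 60}]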
@@ -123,7 +148,9 @@ def _format_duration(duration): return formatted_duration -def _humanize_time(amount: int, unit: str, short_label: bool = False): +def _humanize_time( + amount: Union[int, float], unit: str, short_label: bool = False +) -> List[Tuple[int, str]]: """Humanize the time. Examples diff --git a/src/_pytask/mark/__init__.py b/src/_pytask/mark/__init__.py index 4647a889..b1a576e8 100644 --- a/src/_pytask/mark/__init__.py +++ b/src/_pytask/mark/__init__.py @@ -1,8 +1,13 @@ import sys from typing import AbstractSet +from typing import Any +from typing import Dict +from typing import Set +from typing import TYPE_CHECKING import attr import click +import networkx as nx from _pytask.config import hookimpl from _pytask.console import console from _pytask.dag import task_and_preceding_tasks @@ -14,6 +19,7 @@ from _pytask.mark.structures import MARK_GEN from _pytask.mark.structures import MarkDecorator from _pytask.mark.structures import MarkGenerator +from _pytask.nodes import MetaTask from _pytask.pluginmanager import get_plugin_manager from _pytask.session import Session from _pytask.shared import convert_truthy_or_falsy_to_bool @@ -21,6 +27,10 @@ from rich.table import Table +if TYPE_CHECKING: + from typing import NoReturn + + __all__ = [ "Expression", "Mark", @@ -32,7 +42,7 @@ @click.command() -def markers(**config_from_cli): +def markers(**config_from_cli: Any) -> "NoReturn": """Show all registered markers.""" config_from_cli["command"] = "markers" @@ -94,7 +104,11 @@ def pytask_extend_command_line_interface(cli: click.Group) -> None: @hookimpl -def pytask_parse_config(config, config_from_cli, config_from_file): +def pytask_parse_config( + config: Dict[str, Any], + config_from_cli: Dict[str, Any], + config_from_file: Dict[str, Any], +) -> None: """Parse marker related options.""" markers = _read_marker_mapping_from_ini(config_from_file.get("markers", "")) config["markers"] = {**markers, **config["markers"]} @@ -113,7 +127,7 @@ def pytask_parse_config(config, config_from_cli, config_from_file): MARK_GEN.config = config -def _read_marker_mapping_from_ini(string: str) -> dict: +def _read_marker_mapping_from_ini(string: str) -> Dict[str, str]: """Read marker descriptions from configuration file.""" # Split by newlines and remove empty strings. lines = filter(lambda x: bool(x), string.split("\n")) @@ -152,7 +166,7 @@ class KeywordMatcher: _names = attr.ib(type=AbstractSet[str]) @classmethod - def from_task(cls, task) -> "KeywordMatcher": + def from_task(cls, task: MetaTask) -> "KeywordMatcher": mapped_names = {task.name} # Add the names attached to the current function through direct assignment. 
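The click commands in this diff are annotated with a quoted "NoReturn" imported under TYPE_CHECKING because they always end in sys.exit(). A stripped-down, hypothetical command showing the same pattern (the command itself is not part of pytask):

import sys
from typing import Any
from typing import TYPE_CHECKING

import click

if TYPE_CHECKING:
    from typing import NoReturn


@click.command()
def example(**config_from_cli: Any) -> "NoReturn":
    """A made-up command used only to illustrate the annotation."""
    click.echo(f"Received {len(config_from_cli)} options.")
    sys.exit(0)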
@@ -175,7 +189,7 @@ def __call__(self, subname: str) -> bool: return False -def select_by_keyword(session, dag) -> set: +def select_by_keyword(session: Session, dag: nx.DiGraph) -> Set[str]: """Deselect tests by keywords.""" keywordexpr = session.config["expression"] if not keywordexpr: @@ -188,7 +202,7 @@ def select_by_keyword(session, dag) -> set: f"Wrong expression passed to '-k': {keywordexpr}: {e}" ) from None - remaining = set() + remaining: Set[str] = set() for task in session.tasks: if keywordexpr and expression.evaluate(KeywordMatcher.from_task(task)): remaining.update(task_and_preceding_tasks(task.name, dag)) @@ -204,10 +218,10 @@ class MarkMatcher: """ - own_mark_names = attr.ib() + own_mark_names = attr.ib(type=Set[str]) @classmethod - def from_task(cls, task) -> "MarkMatcher": + def from_task(cls, task: MetaTask) -> "MarkMatcher": mark_names = {mark.name for mark in task.markers} return cls(mark_names) @@ -215,7 +229,7 @@ def __call__(self, name: str) -> bool: return name in self.own_mark_names -def select_by_mark(session, dag) -> set: +def select_by_mark(session: Session, dag: nx.DiGraph) -> Set[str]: """Deselect tests by marks.""" matchexpr = session.config["marker_expression"] if not matchexpr: @@ -226,7 +240,7 @@ def select_by_mark(session, dag) -> set: except ParseError as e: raise ValueError(f"Wrong expression passed to '-m': {matchexpr}: {e}") from None - remaining = set() + remaining: Set[str] = set() for task in session.tasks: if expression.evaluate(MarkMatcher.from_task(task)): remaining.update(task_and_preceding_tasks(task.name, dag)) @@ -234,14 +248,16 @@ def select_by_mark(session, dag) -> set: return remaining -def _deselect_others_with_mark(session, remaining, mark): +def _deselect_others_with_mark( + session: Session, remaining: Set[str], mark: Mark +) -> None: for task in session.tasks: if task.name not in remaining: task.markers.append(mark) @hookimpl -def pytask_resolve_dependencies_modify_dag(session, dag): +def pytask_resolve_dependencies_modify_dag(session: Session, dag: nx.DiGraph) -> None: """Modify the tasks which are executed with expressions and markers.""" remaining = select_by_keyword(session, dag) if remaining is not None: diff --git a/src/_pytask/mark/expression.py b/src/_pytask/mark/expression.py index d6271951..72b60f43 100644 --- a/src/_pytask/mark/expression.py +++ b/src/_pytask/mark/expression.py @@ -30,10 +30,15 @@ from typing import Mapping from typing import Optional from typing import Sequence +from typing import TYPE_CHECKING import attr +if TYPE_CHECKING: + from typing import NoReturn + + __all__ = ["Expression", "ParseError"] @@ -122,7 +127,7 @@ def accept(self, type_: TokenType, *, reject: bool = False) -> Optional[Token]: self.reject((type_,)) return None - def reject(self, expected: Sequence[TokenType]): + def reject(self, expected: Sequence[TokenType]) -> "NoReturn": raise ParseError( self.current.pos + 1, "expected {}; got {}".format( @@ -163,7 +168,7 @@ def and_expr(s: Scanner) -> ast.expr: return ret -def not_expr(s: Scanner) -> ast.expr: +def not_expr(s: Scanner) -> Optional[ast.expr]: if s.accept(TokenType.NOT): return ast.UnaryOp(ast.Not(), not_expr(s)) if s.accept(TokenType.LPAREN): diff --git a/src/_pytask/mark/structures.py b/src/_pytask/mark/structures.py index 515aa38e..eede92ab 100644 --- a/src/_pytask/mark/structures.py +++ b/src/_pytask/mark/structures.py @@ -1,17 +1,20 @@ import warnings from typing import Any +from typing import Callable +from typing import Dict from typing import Iterable from typing import List 
from typing import Mapping from typing import Optional from typing import Sequence +from typing import Set from typing import Tuple from typing import Union import attr -def is_task_function(func) -> bool: +def is_task_function(func: Any) -> bool: return callable(func) and getattr(func, "__name__", "") != "" @@ -123,7 +126,7 @@ def kwargs(self) -> Mapping[str, Any]: def __repr__(self) -> str: return f"" - def with_args(self, *args: object, **kwargs: object) -> "MarkDecorator": + def with_args(self, *args: Any, **kwargs: Any) -> "MarkDecorator": """Return a MarkDecorator with extra arguments added. Unlike calling the MarkDecorator, ``with_args()`` can be used even if the sole @@ -133,7 +136,7 @@ def with_args(self, *args: object, **kwargs: object) -> "MarkDecorator": mark = Mark(self.name, args, kwargs) return self.__class__(self.mark.combined_with(mark)) - def __call__(self, *args: object, **kwargs: object): # noqa: F811 + def __call__(self, *args: Any, **kwargs: Any) -> "MarkDecorator": """Call the MarkDecorator.""" if args and not kwargs: func = args[0] @@ -143,7 +146,7 @@ def __call__(self, *args: object, **kwargs: object): # noqa: F811 return self.with_args(*args, **kwargs) -def get_unpacked_marks(obj) -> List[Mark]: +def get_unpacked_marks(obj: Callable[..., Any]) -> List[Mark]: """Obtain the unpacked marks that are stored on an object.""" mark_list = getattr(obj, "pytaskmark", []) if not isinstance(mark_list, list): @@ -152,7 +155,7 @@ def get_unpacked_marks(obj) -> List[Mark]: def normalize_mark_list(mark_list: Iterable[Union[Mark, MarkDecorator]]) -> List[Mark]: - """Normalizes marker decorating helpers to mark objects. + """Normalize marker decorating helpers to mark objects. Parameters ---------- @@ -163,23 +166,23 @@ def normalize_mark_list(mark_list: Iterable[Union[Mark, MarkDecorator]]) -> List List[Mark] """ - extracted = [ - getattr(mark, "mark", mark) for mark in mark_list - ] # unpack MarkDecorator + extracted = [getattr(mark, "mark", mark) for mark in mark_list] for mark in extracted: if not isinstance(mark, Mark): - raise TypeError(f"got {mark!r} instead of Mark") + raise TypeError(f"Got {mark!r} instead of Mark.") return [x for x in extracted if isinstance(x, Mark)] -def store_mark(obj, mark: Mark) -> None: +def store_mark(obj: Callable[..., Any], mark: Mark) -> None: """Store a Mark on an object. + This is used to implement the Mark declarations/decorators correctly. + """ assert isinstance(mark, Mark), mark - # Always reassign name to avoid updating pytaskmark in a reference that - # was only borrowed. - obj.pytaskmark = get_unpacked_marks(obj) + [mark] + # Always reassign name to avoid updating pytaskmark in a reference that was only + # borrowed. + obj.pytaskmark = get_unpacked_marks(obj) + [mark] # type: ignore class MarkGenerator: @@ -198,9 +201,9 @@ class MarkGenerator: """ - config = None - """Optional[dict]: The configuration.""" - markers = set() + config: Optional[Dict[str, Any]] = None + """Optional[Dict[str, Any]]: The configuration.""" + markers: Set[str] = set() """Set[str]: The set of markers.""" def __getattr__(self, name: str) -> MarkDecorator: diff --git a/src/_pytask/mark_utils.py b/src/_pytask/mark_utils.py index cdfd1483..28f330a1 100644 --- a/src/_pytask/mark_utils.py +++ b/src/_pytask/mark_utils.py @@ -3,14 +3,22 @@ The utility functions are stored here to be separate from the plugin. 
""" +from typing import Any +from typing import List +from typing import TYPE_CHECKING -def get_specific_markers_from_task(task, marker_name): +if TYPE_CHECKING: + from _pytask.nodes import MetaTask + from _pytask.mark import Mark + + +def get_specific_markers_from_task(task: "MetaTask", marker_name: str) -> "List[Mark]": """Get a specific group of markers from a task.""" return [marker for marker in task.markers if marker.name == marker_name] -def get_marks_from_obj(obj, marker_name): +def get_marks_from_obj(obj: Any, marker_name: str) -> "List[Mark]": """Get a specific group of markers from a task function.""" return [ marker @@ -19,6 +27,6 @@ def get_marks_from_obj(obj, marker_name): ] -def has_marker(obj, marker_name): +def has_marker(obj: Any, marker_name: str) -> bool: """Determine whether a task function has a certain marker.""" return any(marker.name == marker_name for marker in getattr(obj, "pytaskmark", [])) diff --git a/src/_pytask/nodes.py b/src/_pytask/nodes.py index 496bc093..af3ab85b 100644 --- a/src/_pytask/nodes.py +++ b/src/_pytask/nodes.py @@ -2,29 +2,40 @@ import functools import inspect import itertools -import pathlib from abc import ABCMeta from abc import abstractmethod from pathlib import Path from typing import Any +from typing import Callable from typing import Dict +from typing import Generator from typing import Iterable from typing import List +from typing import Optional +from typing import Set from typing import Tuple +from typing import TYPE_CHECKING from typing import Union import attr from _pytask.exceptions import NodeNotCollectedError from _pytask.exceptions import NodeNotFoundError from _pytask.mark_utils import get_marks_from_obj +from _pytask.session import Session -def depends_on(objects: Union[Any, Iterable[Any]]) -> Union[Any, Iterable[Any]]: +if TYPE_CHECKING: + from _pytask.mark import Mark + + +def depends_on( + objects: Union[Any, Iterable[Any], Dict[Any, Any]] +) -> Union[Any, Iterable[Any], Dict[Any, Any]]: """Specify dependencies for a task. Parameters ---------- - objects : Union[Any, Iterable[Any]] + objects : Union[Any, Iterable[Any], Dict[Any, Any]] Can be any valid Python object or an iterable of any Python objects. To be valid, it must be parsed by some hook implementation for the :func:`pytask.hookspecs.pytask_collect_node` entry-point. @@ -33,12 +44,14 @@ def depends_on(objects: Union[Any, Iterable[Any]]) -> Union[Any, Iterable[Any]]: return objects -def produces(objects: Union[Any, Iterable[Any]]) -> Union[Any, Iterable[Any]]: +def produces( + objects: Union[Any, Iterable[Any], Dict[Any, Any]] +) -> Union[Any, Iterable[Any], Dict[Any, Any]]: """Specify products of a task. Parameters ---------- - objects : Union[Any, Iterable[Any]] + objects : Union[Any, Iterable[Any], Dict[Any, Any]] Can be any valid Python object or an iterable of any Python objects. To be valid, it must be parsed by some hook implementation for the :func:`pytask.hookspecs.pytask_collect_node` entry-point. @@ -50,19 +63,36 @@ def produces(objects: Union[Any, Iterable[Any]]) -> Union[Any, Iterable[Any]]: class MetaNode(metaclass=ABCMeta): """Meta class for nodes.""" + name: str + path: Path + @abstractmethod - def state(self): - """Return a value which indicates whether a node has changed or not.""" - pass + def state(self) -> Optional[str]: + ... 
class MetaTask(MetaNode): """The base class for tasks.""" + base_name: str + name: str + markers: "List[Mark]" + depends_on: Dict[str, MetaNode] + produces: Dict[str, MetaNode] + path: Path + function: Optional[Callable[..., Any]] + attributes: Dict[Any, Any] + _report_sections: List[Tuple[str, str, str]] + @abstractmethod - def execute(self): - """Execute the task.""" - pass + def execute(self) -> None: + ... + + @abstractmethod + def add_report_section( + self, when: str, key: str, content: str # noqa: U100 + ) -> None: + ... @attr.s @@ -75,34 +105,36 @@ class PythonFunctionTask(MetaTask): """str: The unique identifier for a task.""" path = attr.ib(type=Path) """pathlib.Path: Path to the file where the task was defined.""" - function = attr.ib(type=callable) - """callable: The task function.""" - depends_on = attr.ib(factory=dict) - """Optional[List[MetaNode]]: A list of dependencies of task.""" - produces = attr.ib(factory=dict) - """List[MetaNode]: A list of products of task.""" - markers = attr.ib(factory=list) + function = attr.ib(type=Callable[..., Any]) + """Callable[..., Any]: The task function.""" + depends_on = attr.ib(factory=dict, type=Dict[str, MetaNode]) + """Dict[str, MetaNode]: A list of dependencies of task.""" + produces = attr.ib(factory=dict, type=Dict[str, MetaNode]) + """Dict[str, MetaNode]: A list of products of task.""" + markers = attr.ib(factory=list, type="List[Mark]") """Optional[List[Mark]]: A list of markers attached to the task function.""" - keep_dict = attr.ib(default=False) + keep_dict = attr.ib(factory=dict, type=Dict[str, bool]) """Dict[str, bool]: Should dictionaries for single nodes be preserved?""" - _report_sections = attr.ib(factory=list) - """List[Tuple[str]]: A list of reports with entries for when, what, and content.""" - attributes = attr.ib(factory=dict) + _report_sections = attr.ib(factory=list, type=List[Tuple[str, str, str]]) + """List[Tuple[str, str, str]]: Reports with entries for when, what, and content.""" + attributes = attr.ib(factory=dict, type=Dict[Any, Any]) """Dict[Any, Any]: A dictionary to store additional information of the task.""" @classmethod - def from_path_name_function_session(cls, path, name, function, session): + def from_path_name_function_session( + cls, path: Path, name: str, function: Callable[..., Any], session: Session + ) -> "PythonFunctionTask": """Create a task from a path, name, function, and session.""" keep_dictionary = {} objects = _extract_nodes_from_function_markers(function, depends_on) - nodes, keep_dict = _convert_objects_to_node_dictionary(objects, "depends_on") - keep_dictionary["depends_on"] = keep_dict + nodes, keep_dict_de = _convert_objects_to_node_dictionary(objects, "depends_on") + keep_dictionary["depends_on"] = keep_dict_de dependencies = _collect_nodes(session, path, name, nodes) objects = _extract_nodes_from_function_markers(function, produces) - nodes, keep_dict = _convert_objects_to_node_dictionary(objects, "produces") - keep_dictionary["produces"] = keep_dict + nodes, keep_dict_prod = _convert_objects_to_node_dictionary(objects, "produces") + keep_dictionary["produces"] = keep_dict_prod products = _collect_nodes(session, path, name, nodes) markers = [ @@ -122,16 +154,16 @@ def from_path_name_function_session(cls, path, name, function, session): keep_dict=keep_dictionary, ) - def execute(self): + def execute(self) -> None: """Execute the task.""" kwargs = self._get_kwargs_from_task_for_function() self.function(**kwargs) - def state(self): + def state(self) -> str: """Return the last 
modified date of the file where the task is defined.""" return str(self.path.stat().st_mtime) - def _get_kwargs_from_task_for_function(self): + def _get_kwargs_from_task_for_function(self) -> Dict[str, Any]: """Process dependencies and products to pass them as kwargs to the function.""" func_arg_names = set(inspect.signature(self.function).parameters) kwargs = {} @@ -148,7 +180,7 @@ def _get_kwargs_from_task_for_function(self): return kwargs - def add_report_section(self, when: str, key: str, content: str): + def add_report_section(self, when: str, key: str, content: str) -> None: if content: self._report_sections.append((when, key, content)) @@ -157,18 +189,18 @@ def add_report_section(self, when: str, key: str, content: str): class FilePathNode(MetaNode): """The class for a node which is a path.""" - name = attr.ib() + name = attr.ib(type=str) """str: Name of the node which makes it identifiable in the DAG.""" - value = attr.ib() + value = attr.ib(type=Path) """Any: Value passed to the decorator which can be requested inside the function.""" - path = attr.ib() + path = attr.ib(type=Path) """pathlib.Path: Path to the FilePathNode.""" @classmethod @functools.lru_cache() - def from_path(cls, path: pathlib.Path): + def from_path(cls, path: Path) -> "FilePathNode": """Instantiate class from path to file. The `lru_cache` decorator ensures that the same object is not collected twice. @@ -178,7 +210,7 @@ def from_path(cls, path: pathlib.Path): raise ValueError("FilePathNode must be instantiated from absolute path.") return cls(path.as_posix(), path, path) - def state(self): + def state(self) -> Optional[str]: """Return the last modified date for file path.""" if not self.path.exists(): raise NodeNotFoundError @@ -187,8 +219,8 @@ def state(self): def _collect_nodes( - session, path: Path, name: str, nodes: Dict[str, Union[str, Path]] -) -> Dict[str, Path]: + session: Session, path: Path, name: str, nodes: Dict[str, Union[str, Path]] +) -> Dict[str, MetaNode]: """Collect nodes for a task. Parameters @@ -204,7 +236,7 @@ def _collect_nodes( Returns ------- - Dict[str, Path] + Dict[str, MetaNode] A dictionary of node names and their paths. Raises @@ -230,7 +262,9 @@ def _collect_nodes( return collected_nodes -def _extract_nodes_from_function_markers(function, parser): +def _extract_nodes_from_function_markers( + function: Callable[..., Any], parser: Callable[..., Any] +) -> Generator[Any, None, None]: """Extract nodes from a marker. The parser is a functions which is used to document the marker with the correct @@ -244,7 +278,9 @@ def _extract_nodes_from_function_markers(function, parser): yield parsed -def _convert_objects_to_node_dictionary(objects, when): +def _convert_objects_to_node_dictionary( + objects: Any, when: str +) -> Tuple[Dict[Any, Any], bool]: """Convert objects to node dictionary.""" list_of_tuples, keep_dict = _convert_objects_to_list_of_tuples(objects, when) _check_that_names_are_not_used_multiple_times(list_of_tuples, when) @@ -252,7 +288,9 @@ def _convert_objects_to_node_dictionary(objects, when): return nodes, keep_dict -def _convert_objects_to_list_of_tuples(objects, when: str): +def _convert_objects_to_list_of_tuples( + objects: Union[Any, Tuple[Any, Any], List[Any], List[Tuple[Any, Any]]], when: str +) -> Tuple[List[Tuple[Any, ...]], bool]: """Convert objects to list of tuples. 
Examples @@ -293,7 +331,9 @@ def _convert_objects_to_list_of_tuples(objects, when: str): return out, keep_dict -def _check_that_names_are_not_used_multiple_times(list_of_tuples, when): +def _check_that_names_are_not_used_multiple_times( + list_of_tuples: List[Tuple[Any, ...]], when: str +) -> None: """Check that names of nodes are not assigned multiple times. Tuples in the list have either one or two elements. The first element in the two @@ -321,8 +361,8 @@ def _check_that_names_are_not_used_multiple_times(list_of_tuples, when): def _convert_nodes_to_dictionary( - list_of_tuples: List[Tuple[str]], -) -> Dict[str, Union[str, Path]]: + list_of_tuples: List[Tuple[Any, ...]] +) -> Dict[Any, Any]: """Convert nodes to dictionaries. Examples @@ -353,7 +393,7 @@ def _convert_nodes_to_dictionary( return nodes -def create_task_name(path: Path, base_name: str): +def create_task_name(path: Path, base_name: str) -> str: """Create the name of a task from a path and the task's base name. Examples @@ -366,7 +406,7 @@ def create_task_name(path: Path, base_name: str): return path.as_posix() + "::" + base_name -def find_duplicates(x: Iterable[Any]): +def find_duplicates(x: Iterable[Any]) -> Set[Any]: """Find duplicated entries in iterable. Examples diff --git a/src/_pytask/parameters.py b/src/_pytask/parameters.py index 15e804fd..48911fac 100644 --- a/src/_pytask/parameters.py +++ b/src/_pytask/parameters.py @@ -7,7 +7,7 @@ _CONFIG_OPTION = click.Option( ["-c", "--config"], type=click.Path(exists=True), help="Path to configuration file." ) -"""click.Option: An general option for the --config flag.""" +"""click.Option: An option for the --config flag.""" _IGNORE_OPTION = click.Option( ["--ignore"], @@ -19,13 +19,13 @@ ), callback=falsy_to_none_callback, ) -"""click.Option: An general option for the --ignore flag.""" +"""click.Option: An option for the --ignore flag.""" _PATH_ARGUMENT = click.Argument( ["paths"], nargs=-1, type=click.Path(exists=True), callback=falsy_to_none_callback ) -"""click.Option: An general paths argument.""" +"""click.Argument: An argument for paths.""" _VERBOSE_OPTION = click.Option( @@ -34,10 +34,11 @@ default=None, help="Make pytask verbose (>= 0) or quiet (< 0) [default: 0]", ) +"""click.Option: An option for the --verbose flag.""" @hookimpl(trylast=True) -def pytask_extend_command_line_interface(cli): +def pytask_extend_command_line_interface(cli: click.Group) -> None: """Register general markers.""" for command in ["build", "clean", "collect", "markers", "profile"]: cli.commands[command].params.append(_CONFIG_OPTION) diff --git a/src/_pytask/parametrize.py b/src/_pytask/parametrize.py index e59b267b..7d3938c7 100644 --- a/src/_pytask/parametrize.py +++ b/src/_pytask/parametrize.py @@ -5,9 +5,11 @@ import types from typing import Any from typing import Callable +from typing import Dict from typing import Iterable from typing import List from typing import Optional +from typing import Sequence from typing import Tuple from typing import Union @@ -17,26 +19,31 @@ from _pytask.mark import Mark from _pytask.mark import MARK_GEN as mark # noqa: N811 from _pytask.nodes import find_duplicates +from _pytask.session import Session def parametrize( - arg_names: Union[str, Tuple[str], List[str]], - arg_values: Iterable, + arg_names: Union[str, List[str], Tuple[str, ...]], + arg_values: Iterable[Union[Sequence[Any], Any]], *, ids: Optional[ - Union[Iterable[Union[bool, float, int, str, None]], Callable[[Any], Any]] + Union[Iterable[Union[None, str, float, int, bool]], Callable[..., Any]] ] = None, -): +) -> 
Tuple[ + Union[str, List[str], Tuple[str, ...]], + Iterable[Union[Sequence[Any], Any]], + Optional[Union[Iterable[Union[None, str, float, int, bool]], Callable[..., Any]]], +]: """Parametrize a task function. Parametrizing a task allows to execute the same task with different arguments. Parameters ---------- - arg_names : Union[str, Tuple[str], List[str]] + arg_names : Union[str, List[str], Tuple[str, ...]] The names of the arguments which can either be given as a comma-separated string, a tuple of strings, or a list of strings. - arg_values : Iterable + arg_values : Iterable[Union[Sequence[Any], Any]] The values which correspond to names in ``arg_names``. For one argument, it is a single iterable. For multiple argument names it is an iterable of iterables. ids @@ -56,7 +63,7 @@ def parametrize( @hookimpl -def pytask_parse_config(config): +def pytask_parse_config(config: Dict[str, Any]) -> None: config["markers"]["parametrize"] = ( "Call a task function multiple times passing in different arguments each turn. " "arg_values generally needs to be a list of values if arg_names specifies only " @@ -69,7 +76,9 @@ def pytask_parse_config(config): @hookimpl -def pytask_parametrize_task(session, name, obj): +def pytask_parametrize_task( + session: Session, name: str, obj: Callable[..., Any] +) -> List[Tuple[str, Callable[..., Any]]]: """Parametrize a task. This function takes a single Python function and all parametrize decorators and @@ -89,7 +98,7 @@ def pytask_parametrize_task(session, name, obj): if len(markers) > 1: raise NotImplementedError( "Multiple parametrizations are currently not implemented since it is " - "not possible to define products for tasks from a cartesian product." + "not possible to define products for tasks from a Cartesian product." ) base_arg_names, arg_names, arg_values = _parse_parametrize_markers( @@ -99,7 +108,7 @@ def pytask_parametrize_task(session, name, obj): product_arg_names = list(itertools.product(*arg_names)) product_arg_values = list(itertools.product(*arg_values)) - names_and_functions = [] + names_and_functions: List[Tuple[str, Callable[..., Any]]] = [] for names, values in zip(product_arg_names, product_arg_values): kwargs = dict( zip( @@ -109,8 +118,8 @@ def pytask_parametrize_task(session, name, obj): ) # Copy function and attributes to allow in-place changes. - func = _copy_func(obj) - func.pytaskmark = copy.deepcopy(obj.pytaskmark) + func = _copy_func(obj) # type: ignore + func.pytaskmark = copy.deepcopy(obj.pytaskmark) # type: ignore # Convert parametrized dependencies and products to decorator. 
session.hook.pytask_parametrize_kwarg_to_marker(obj=func, kwargs=kwargs) @@ -122,8 +131,8 @@ def pytask_parametrize_task(session, name, obj): name_ = f"{name}[{'-'.join(itertools.chain.from_iterable(names))}]" names_and_functions.append((name_, wrapped_func)) - names = [i[0] for i in names_and_functions] - duplicates = find_duplicates(names) + all_names = [i[0] for i in names_and_functions] + duplicates = find_duplicates(all_names) if duplicates: text = format_strings_as_flat_tree( duplicates, "Duplicated task ids", TASK_ICON @@ -139,7 +148,7 @@ def pytask_parametrize_task(session, name, obj): return names_and_functions -def _remove_parametrize_markers_from_func(obj): +def _remove_parametrize_markers_from_func(obj: Any) -> Tuple[Any, List[Mark]]: """Remove parametrize markers from the object.""" parametrize_markers = [i for i in obj.pytaskmark if i.name == "parametrize"] others = [i for i in obj.pytaskmark if i.name != "parametrize"] @@ -148,7 +157,9 @@ def _remove_parametrize_markers_from_func(obj): return obj, parametrize_markers -def _parse_parametrize_marker(marker: Mark, name: str) -> Tuple[Any, Any, Any]: +def _parse_parametrize_marker( + marker: Mark, name: str +) -> Tuple[Tuple[str, ...], List[Tuple[str, ...]], List[Tuple[Any, ...]]]: """Parse parametrize marker. Parameters @@ -160,12 +171,12 @@ def _parse_parametrize_marker(marker: Mark, name: str) -> Tuple[Any, Any, Any]: Returns ------- - base_arg_names : Tuple[str] + base_arg_names : Tuple[str, ...] Contains the names of the arguments. - processed_arg_names : List[Tuple[str]] + processed_arg_names : List[Tuple[str, ...]] Each tuple in the list represents the processed names of the arguments suffixed with a number indicating the iteration. - processed_arg_values : List[Tuple[Any]] + processed_arg_values : List[Tuple[Any, ...]] Each tuple in the list represents the values of the arguments for each iteration. @@ -186,7 +197,13 @@ def _parse_parametrize_marker(marker: Mark, name: str) -> Tuple[Any, Any, Any]: return parsed_arg_names, expanded_arg_names, parsed_arg_values -def _parse_parametrize_markers(markers: List[Mark], name: str): +def _parse_parametrize_markers( + markers: List[Mark], name: str +) -> Tuple[ + List[Tuple[str, ...]], + List[List[Tuple[str, ...]]], + List[List[Tuple[Any, ...]]], +]: """Parse parametrize markers.""" parsed_markers = [_parse_parametrize_marker(marker, name) for marker in markers] base_arg_names = [i[0] for i in parsed_markers] @@ -196,7 +213,9 @@ def _parse_parametrize_markers(markers: List[Mark], name: str): return base_arg_names, processed_arg_names, processed_arg_values -def _parse_arg_names(arg_names: Union[str, Tuple[str], List[str]]) -> Tuple[str]: +def _parse_arg_names( + arg_names: Union[str, List[str], Tuple[str, ...]] +) -> Tuple[str, ...]: """Parse arg_names argument of parametrize decorator. There are three allowed formats: @@ -209,12 +228,12 @@ def _parse_arg_names(arg_names: Union[str, Tuple[str], List[str]]) -> Tuple[str] Parameters ---------- - arg_names : Union[str, Tuple[str], List[str]] + arg_names : Union[str, List[str], Tuple[str, ...]] The names of the arguments which are parametrized. Returns ------- - out : Tuple[str] + out : Tuple[str, ...] The parsed arg_names. 
Example @@ -238,9 +257,14 @@ def _parse_arg_names(arg_names: Union[str, Tuple[str], List[str]]) -> Tuple[str] return out -def _parse_arg_values(arg_values: Iterable) -> List[Tuple[Any]]: +def _parse_arg_values( + arg_values: Iterable[Union[Sequence[Any], Any]] +) -> List[Tuple[Any, ...]]: """Parse the values provided for each argument name. + After processing the values, the return is a list where each value is an iteration + of the parametrization. Each iteration is a tuple of all parametrized arguments. + Example ------- >>> _parse_arg_values(["a", "b", "c"]) @@ -256,7 +280,7 @@ def _parse_arg_values(arg_values: Iterable) -> List[Tuple[Any]]: def _check_if_n_arg_names_matches_n_arg_values( - arg_names: Tuple[str], arg_values: List[Tuple[Any]], name: str + arg_names: Tuple[str, ...], arg_values: List[Tuple[Any, ...]], name: str ) -> None: """Check if the number of argument names matches the number of arguments.""" n_names = len(arg_names) @@ -280,19 +304,19 @@ def _check_if_n_arg_names_matches_n_arg_values( def _create_parametrize_ids_components( - arg_names: Tuple[str], - arg_values: List[Tuple[Any]], + arg_names: Tuple[str, ...], + arg_values: List[Tuple[Any, ...]], ids: Optional[ - Union[Iterable[Union[bool, float, int, str, None]], Callable[[Any], Any]] + Union[Iterable[Union[None, str, float, int, bool]], Callable[..., Any]] ], -): +) -> List[Tuple[str, ...]]: """Create the ids for each parametrization. Parameters ---------- - arg_names : Tuple[str] + arg_names : Tuple[str, ...] The names of the arguments of the parametrized function. - arg_values : List[Tuple[Any]] + arg_values : List[Tuple[Any, ...]] A list of tuples where each tuple is for one run. ids The ids associated with one parametrization. @@ -307,21 +331,23 @@ def _create_parametrize_ids_components( """ if isinstance(ids, Iterable): - parsed_ids = [(id_,) for id_ in ids] + raw_ids = [(id_,) for id_ in ids] - if len(parsed_ids) != len(arg_values): + if len(raw_ids) != len(arg_values): raise ValueError("The number of ids must match the number of runs.") if not all( isinstance(id_, (bool, int, float, str)) or id_ is None - for id_ in itertools.chain.from_iterable(parsed_ids) + for id_ in itertools.chain.from_iterable(raw_ids) ): raise ValueError( "Ids for parametrization can only be of type bool, float, int, str or " "None." ) - parsed_ids = [(str(id_),) for id_ in itertools.chain.from_iterable(parsed_ids)] + parsed_ids: List[Tuple[str, ...]] = [ + (str(id_),) for id_ in itertools.chain.from_iterable(raw_ids) + ] else: parsed_ids = [] @@ -336,7 +362,7 @@ def _create_parametrize_ids_components( def _arg_value_to_id_component( - arg_name: str, arg_value: Any, i: int, id_func: Union[Callable[[Any], Any], None] + arg_name: str, arg_value: Any, i: int, id_func: Union[Callable[..., Any], None] ) -> str: """Create id component from the name and value of the argument. @@ -353,7 +379,7 @@ def _arg_value_to_id_component( Value of the argument. i : int The ith iteration of the parametrization. - id_func : Union[Callable[[Any], Any], None] + id_func : Union[Callable[..., Any], None] A callable which maps argument values to :obj:`bool`, :obj:`float`, :obj:`int`, or :obj:`str` or anything else. Any object with a different dtype than the first will be mapped to an auto-generated id component. 
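For context, this is how the decorator typed above is used in a task module; the task name, file names, and ids below are invented for the example.

import pytask


@pytask.mark.parametrize(
    "produces, seed",
    [("out_0.txt", 0), ("out_1.txt", 1)],
    ids=["first", "second"],
)
def task_write_seed(produces, seed):
    # 'produces' arrives as a pathlib.Path pointing to the product file.
    produces.write_text(str(seed))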
@@ -375,7 +401,7 @@ def _arg_value_to_id_component( @hookimpl -def pytask_parametrize_kwarg_to_marker(obj, kwargs: dict) -> None: +def pytask_parametrize_kwarg_to_marker(obj: Any, kwargs: Dict[str, str]) -> None: """Add some parametrized keyword arguments as decorator.""" if callable(obj): for marker_name in ["depends_on", "produces"]: @@ -383,7 +409,7 @@ def pytask_parametrize_kwarg_to_marker(obj, kwargs: dict) -> None: mark.__getattr__(marker_name)(kwargs.pop(marker_name))(obj) -def _copy_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]: +def _copy_func(func: types.FunctionType) -> types.FunctionType: """Create a copy of a function. Based on https://stackoverflow.com/a/13503277/7523785. diff --git a/src/_pytask/path.py b/src/_pytask/path.py index 92dbfe84..477c7993 100644 --- a/src/_pytask/path.py +++ b/src/_pytask/path.py @@ -2,13 +2,13 @@ import functools import os from pathlib import Path -from typing import List +from typing import Sequence from typing import Union def relative_to( path: Union[str, Path], source: Union[str, Path], include_source: bool = True -) -> Union[str, Path]: +) -> Path: """Make a path relative to another path. In contrast to :meth:`pathlib.Path.relative_to`, this function allows to keep the @@ -34,7 +34,7 @@ def relative_to( def find_closest_ancestor( - path: Union[str, Path], potential_ancestors: List[Union[str, Path]] + path: Union[str, Path], potential_ancestors: Sequence[Union[str, Path]] ) -> Path: """Find the closest ancestor of a path. @@ -48,7 +48,6 @@ def find_closest_ancestor( Examples -------- - >>> from pathlib import Path >>> find_closest_ancestor(Path("folder", "file.py"), [Path("folder")]).as_posix() 'folder' @@ -57,8 +56,15 @@ def find_closest_ancestor( 'folder/subfolder' """ + if isinstance(path, str): + path = Path(path) + closest_ancestor = None for ancestor in potential_ancestors: + + if isinstance(ancestor, str): + ancestor = Path(ancestor) + if ancestor == path: closest_ancestor = path break @@ -84,9 +90,8 @@ def find_common_ancestor_of_nodes(*names: str) -> Path: def find_common_ancestor(*paths: Union[str, Path]) -> Path: """Find a common ancestor of many paths.""" - path = os.path.commonpath(paths) - path = Path(path) - return path + common_ancestor = Path(os.path.commonpath(paths)) + return common_ancestor @functools.lru_cache() diff --git a/src/_pytask/persist.py b/src/_pytask/persist.py index c7fdc462..d5e88cf3 100644 --- a/src/_pytask/persist.py +++ b/src/_pytask/persist.py @@ -1,4 +1,8 @@ """Implement the ability for tasks to persist.""" +from typing import Any +from typing import Dict +from typing import TYPE_CHECKING + from _pytask.config import hookimpl from _pytask.dag import node_and_neighbors from _pytask.enums import ColorCode @@ -7,8 +11,14 @@ from _pytask.outcomes import Persisted +if TYPE_CHECKING: + from _pytask.session import Session + from _pytask.nodes import MetaTask + from _pytask.report import ExecutionReport + + @hookimpl -def pytask_parse_config(config): +def pytask_parse_config(config: Dict[str, Any]) -> None: """Add the marker to the configuration.""" config["markers"]["persist"] = ( "Prevent execution of a task if all products exist and even if something has " @@ -20,7 +30,7 @@ def pytask_parse_config(config): @hookimpl -def pytask_execute_task_setup(session, task): +def pytask_execute_task_setup(session: "Session", task: "MetaTask") -> None: """Exit persisting tasks early. The decorator needs to be set and all nodes need to exist. 
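A small, hypothetical task showing the persist marker that this hook implements: if the product already exists, the task is skipped with the Persisted outcome even when its dependency changed. File names are made up.

import pytask


@pytask.mark.persist
@pytask.mark.depends_on("input.md")
@pytask.mark.produces("output.md")
def task_copy_file(depends_on, produces):
    produces.write_text(depends_on.read_text())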
@@ -44,7 +54,7 @@ def pytask_execute_task_setup(session, task): @hookimpl -def pytask_execute_task_process_report(report): +def pytask_execute_task_process_report(report: "ExecutionReport") -> None: """Set task status to success. Do not return ``True`` so that states will be updated in database. diff --git a/src/_pytask/pluginmanager.py b/src/_pytask/pluginmanager.py index efde434c..570d0da4 100644 --- a/src/_pytask/pluginmanager.py +++ b/src/_pytask/pluginmanager.py @@ -3,7 +3,7 @@ from _pytask import hookspecs -def get_plugin_manager(): +def get_plugin_manager() -> pluggy.PluginManager: """Get the plugin manager.""" pm = pluggy.PluginManager("pytask") pm.add_hookspecs(hookspecs) diff --git a/src/_pytask/profile.py b/src/_pytask/profile.py index 4aebbb05..0ba4a4f5 100644 --- a/src/_pytask/profile.py +++ b/src/_pytask/profile.py @@ -4,6 +4,16 @@ import sys import time from pathlib import Path +from types import TracebackType +from typing import Any +from typing import Dict +from typing import Generator +from typing import List +from typing import Optional +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING import click from _pytask.config import hookimpl @@ -14,7 +24,9 @@ from _pytask.exceptions import CollectionError from _pytask.exceptions import ConfigurationError from _pytask.nodes import FilePathNode +from _pytask.nodes import MetaTask from _pytask.pluginmanager import get_plugin_manager +from _pytask.report import ExecutionReport from _pytask.session import Session from _pytask.shared import get_first_non_none_value from _pytask.shared import reduce_node_name @@ -23,7 +35,11 @@ from rich.table import Table -class Runtime(db.Entity): +if TYPE_CHECKING: + from typing import NoReturn + + +class Runtime(db.Entity): # type: ignore """Record of runtimes of tasks.""" task = orm.PrimaryKey(str) @@ -32,13 +48,15 @@ class Runtime(db.Entity): @hookimpl(tryfirst=True) -def pytask_extend_command_line_interface(cli: click.Group): +def pytask_extend_command_line_interface(cli: click.Group) -> None: """Extend the command line interface.""" cli.add_command(profile) @hookimpl -def pytask_parse_config(config, config_from_cli): +def pytask_parse_config( + config: Dict[str, Any], config_from_cli: Dict[str, Any] +) -> None: """Parse the configuration.""" config["export"] = get_first_non_none_value( config_from_cli, key="export", default=None @@ -46,7 +64,7 @@ def pytask_parse_config(config, config_from_cli): @hookimpl -def pytask_post_parse(config): +def pytask_post_parse(config: Dict[str, Any]) -> None: """Register the export option.""" config["pm"].register(ExportNameSpace) config["pm"].register(DurationNameSpace) @@ -54,7 +72,7 @@ def pytask_post_parse(config): @hookimpl(hookwrapper=True) -def pytask_execute_task(task): +def pytask_execute_task(task: MetaTask) -> Generator[None, None, None]: """Attach the duration of the execution to the task.""" start = time.time() yield @@ -63,7 +81,7 @@ def pytask_execute_task(task): @hookimpl -def pytask_execute_task_process_report(report): +def pytask_execute_task_process_report(report: ExecutionReport) -> None: """Store runtime of successfully finishing tasks in database.""" task = report.task duration = task.attributes.get("duration") @@ -72,10 +90,10 @@ def pytask_execute_task_process_report(report): @orm.db_session -def _create_or_update_runtime(task_name, start, end): +def _create_or_update_runtime(task_name: str, start: float, end: float) -> None: """Create or update a runtime entry.""" try: - runtime 
= Runtime[task_name] + runtime = Runtime[task_name] # type: ignore except orm.ObjectNotFound: Runtime(task=task_name, date=start, duration=end - start) else: @@ -90,7 +108,7 @@ def _create_or_update_runtime(task_name, start, end): default=None, help="Export the profile in the specified format.", ) -def profile(**config_from_cli): +def profile(**config_from_cli: Any) -> "NoReturn": """Show profile information on collected tasks.""" config_from_cli["command"] = "profile" @@ -108,7 +126,10 @@ def profile(**config_from_cli): except (ConfigurationError, Exception): session = Session({}, None) session.exit_code = ExitCode.CONFIGURATION_FAILED - console.print(render_exc_info(*sys.exc_info(), config["show_locals"])) + exc_info: Tuple[ + Type[BaseException], BaseException, Optional[TracebackType] + ] = sys.exc_info() + console.print(render_exc_info(*exc_info, show_locals=config["show_locals"])) else: try: @@ -116,7 +137,9 @@ def profile(**config_from_cli): session.hook.pytask_collect(session=session) session.hook.pytask_resolve_dependencies(session=session) - profile = {task.name: {} for task in session.tasks} + profile: Dict[str, Dict[str, Any]] = { + task.name: {} for task in session.tasks + } session.hook.pytask_profile_add_info_on_task( session=session, tasks=session.tasks, profile=profile ) @@ -139,7 +162,9 @@ def profile(**config_from_cli): sys.exit(session.exit_code) -def _print_profile_table(profile, tasks, paths): +def _print_profile_table( + profile: Dict[str, Dict[str, Any]], tasks: List[MetaTask], paths: List[Path] +) -> None: """Print the profile table.""" name_to_task = {task.name: task for task in tasks} info_names = _get_info_names(profile) @@ -163,14 +188,16 @@ def _print_profile_table(profile, tasks, paths): class DurationNameSpace: @staticmethod @hookimpl - def pytask_profile_add_info_on_task(tasks, profile): + def pytask_profile_add_info_on_task( + tasks: List[MetaTask], profile: Dict[str, Dict[str, Any]] + ) -> None: runtimes = _collect_runtimes([task.name for task in tasks]) for name, duration in runtimes.items(): profile[name]["Last Duration (in s)"] = round(duration, 2) @orm.db_session -def _collect_runtimes(task_names): +def _collect_runtimes(task_names: List[str]) -> Dict[str, float]: """Collect runtimes.""" runtimes = [Runtime.get(task=task_name) for task_name in task_names] runtimes = [r for r in runtimes if r is not None] @@ -180,7 +207,9 @@ def _collect_runtimes(task_names): class FileSizeNameSpace: @staticmethod @hookimpl - def pytask_profile_add_info_on_task(session, tasks, profile): + def pytask_profile_add_info_on_task( + session: Session, tasks: List[MetaTask], profile: Dict[str, Dict[str, Any]] + ) -> None: for task in tasks: successors = list(session.dag.successors(task.name)) if successors: @@ -198,7 +227,7 @@ def pytask_profile_add_info_on_task(session, tasks, profile): ) -def _to_human_readable_size(bytes_, units=None): +def _to_human_readable_size(bytes_: int, units: Optional[List[str]] = None) -> str: """Convert bytes to a human readable size.""" units = [" bytes", "KB", "MB", "GB", "TB"] if units is None else units return ( @@ -208,7 +237,7 @@ def _to_human_readable_size(bytes_, units=None): ) -def _process_profile(profile): +def _process_profile(profile: Dict[str, Dict[str, Any]]) -> Dict[str, Dict[str, Any]]: """Process profile to make it ready for printing and storing.""" info_names = _get_info_names(profile) if info_names: @@ -227,7 +256,9 @@ def _process_profile(profile): class ExportNameSpace: @staticmethod @hookimpl(trylast=True) - def 
pytask_profile_export_profile(session, profile): + def pytask_profile_export_profile( + session: Session, profile: Dict[str, Dict[str, Any]] + ) -> None: extension = session.config["export"] if extension == "csv": @@ -240,7 +271,7 @@ def pytask_profile_export_profile(session, profile): raise ValueError(f"The export option '{extension}' cannot be handled.") -def _export_to_csv(profile): +def _export_to_csv(profile: Dict[str, Dict[str, Any]]) -> None: """Export profile to csv.""" info_names = _get_info_names(profile) path = Path.cwd().joinpath("profile.csv") @@ -252,14 +283,26 @@ def _export_to_csv(profile): writer.writerow((task_name, *info.values())) -def _export_to_json(profile): +def _export_to_json(profile: Dict[str, Dict[str, Any]]) -> None: """Export profile to json.""" json_ = json.dumps(profile) path = Path.cwd().joinpath("profile.json") path.write_text(json_) -def _get_info_names(profile): - """Get names of infos of tasks.""" - info_names = sorted(set().union(*(set(val) for val in profile.values()))) +def _get_info_names(profile: Dict[str, Dict[str, Any]]) -> List[str]: + """Get names of infos of tasks. + + Examples + -------- + >>> _get_info_names({"t1": {"time": 1}, "t2": {"time": 1, "size": "2GB"}}) + ['size', 'time'] + >>> _get_info_names({}) + [] + >>> _get_info_names({"t1": {}}) + [] + + """ + base: Set[str] = set() + info_names: List[str] = sorted(base.union(*(set(val) for val in profile.values()))) return info_names diff --git a/src/_pytask/report.py b/src/_pytask/report.py index 1193f694..aba5ac46 100644 --- a/src/_pytask/report.py +++ b/src/_pytask/report.py @@ -1,26 +1,45 @@ """This module contains everything related to reports.""" +from types import TracebackType +from typing import List +from typing import Optional +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + import attr from _pytask.traceback import remove_internal_traceback_frames_from_exc_info +if TYPE_CHECKING: + from _pytask.nodes import MetaNode, MetaTask + + +ExceptionInfo = Tuple[Type[BaseException], BaseException, Union[TracebackType, None]] + + @attr.s class CollectionReport: """A collection report for a task.""" - node = attr.ib(default=None) - exc_info = attr.ib(default=None) + node = attr.ib(default=None, type="MetaNode") + exc_info = attr.ib(default=None, type=ExceptionInfo) @classmethod - def from_node(cls, node): + def from_node(cls, node: "MetaNode") -> "CollectionReport": return cls(node=node) @classmethod - def from_exception(cls, exc_info, node=None): + def from_exception( + cls: "Type[CollectionReport]", + exc_info: ExceptionInfo, + node: "Optional[MetaNode]" = None, + ) -> "CollectionReport": exc_info = remove_internal_traceback_frames_from_exc_info(exc_info) return cls(exc_info=exc_info, node=node) @property - def successful(self): + def successful(self) -> bool: return self.exc_info is None @@ -28,10 +47,10 @@ def successful(self): class ResolvingDependenciesReport: """A report for an error while resolving dependencies.""" - exc_info = attr.ib() + exc_info = attr.ib(type=ExceptionInfo) @classmethod - def from_exception(cls, exc_info): + def from_exception(cls, exc_info: ExceptionInfo) -> "ResolvingDependenciesReport": exc_info = remove_internal_traceback_frames_from_exc_info(exc_info) return cls(exc_info) @@ -40,18 +59,20 @@ def from_exception(cls, exc_info): class ExecutionReport: """A report for an executed task.""" - task = attr.ib() + task = attr.ib(type="MetaTask") success = attr.ib(type=bool) - exc_info = 
attr.ib(default=None) - sections = attr.ib(factory=list) - symbol = attr.ib(default="?") - color = attr.ib(default=None) + exc_info = attr.ib(default=None, type=Optional[ExceptionInfo]) + sections = attr.ib(factory=list, type=List[Tuple[str, str, str]]) + symbol = attr.ib(default="?", type=str) + color = attr.ib(default=None, type=Optional[str]) @classmethod - def from_task_and_exception(cls, task, exc_info): + def from_task_and_exception( + cls, task: "MetaTask", exc_info: ExceptionInfo + ) -> "ExecutionReport": exc_info = remove_internal_traceback_frames_from_exc_info(exc_info) return cls(task, False, exc_info, task._report_sections) @classmethod - def from_task(cls, task): + def from_task(cls, task: "MetaTask") -> "ExecutionReport": return cls(task, True, None, task._report_sections) diff --git a/src/_pytask/resolve_dependencies.py b/src/_pytask/resolve_dependencies.py index d97f8245..30ec3d31 100644 --- a/src/_pytask/resolve_dependencies.py +++ b/src/_pytask/resolve_dependencies.py @@ -2,7 +2,9 @@ import sys from typing import Dict from typing import List +from typing import Optional from typing import Tuple +from typing import Union import networkx as nx from _pytask.config import hookimpl @@ -20,8 +22,11 @@ from _pytask.exceptions import ResolvingDependenciesError from _pytask.mark import Mark from _pytask.mark_utils import get_specific_markers_from_task +from _pytask.nodes import MetaNode +from _pytask.nodes import MetaTask from _pytask.path import find_common_ancestor_of_nodes from _pytask.report import ResolvingDependenciesReport +from _pytask.session import Session from _pytask.shared import reduce_names_of_multiple_nodes from _pytask.shared import reduce_node_name from _pytask.traceback import render_exc_info @@ -30,7 +35,7 @@ @hookimpl -def pytask_resolve_dependencies(session): +def pytask_resolve_dependencies(session: Session) -> Optional[bool]: """Create a directed acyclic graph (DAG) capturing dependencies between functions. 
Parameters @@ -65,7 +70,7 @@ def pytask_resolve_dependencies(session): @hookimpl -def pytask_resolve_dependencies_create_dag(tasks): +def pytask_resolve_dependencies_create_dag(tasks: List[MetaTask]) -> nx.DiGraph: """Create the DAG from tasks, dependencies and products.""" dag = nx.DiGraph() @@ -86,7 +91,7 @@ def pytask_resolve_dependencies_create_dag(tasks): @hookimpl -def pytask_resolve_dependencies_select_execution_dag(dag): +def pytask_resolve_dependencies_select_execution_dag(dag: nx.DiGraph) -> None: """Select the tasks which need to be executed.""" scheduler = TopologicalSorter.from_dag(dag) visited_nodes = [] @@ -103,13 +108,13 @@ def pytask_resolve_dependencies_select_execution_dag(dag): @hookimpl -def pytask_resolve_dependencies_validate_dag(dag): +def pytask_resolve_dependencies_validate_dag(dag: nx.DiGraph) -> None: """Validate the DAG.""" _check_if_root_nodes_are_available(dag) _check_if_tasks_have_the_same_products(dag) -def _have_task_or_neighbors_changed(task_name, dag): +def _have_task_or_neighbors_changed(task_name: str, dag: nx.DiGraph) -> bool: """Indicate whether dependencies or products of a task have changed.""" return any( _has_node_changed(task_name, dag.nodes[node]) @@ -118,7 +123,9 @@ def _have_task_or_neighbors_changed(task_name, dag): @orm.db_session -def _has_node_changed(task_name: str, node_dict): +def _has_node_changed( + task_name: str, node_dict: Dict[str, Union[MetaNode, MetaTask]] +) -> bool: """Indicate whether a single dependency or product has changed.""" node = node_dict.get("task") or node_dict["node"] try: @@ -127,7 +134,7 @@ def _has_node_changed(task_name: str, node_dict): out = True else: try: - state_in_db = State[task_name, node.name].state + state_in_db = State[task_name, node.name].state # type: ignore except orm.ObjectNotFound: out = True else: @@ -136,7 +143,7 @@ def _has_node_changed(task_name: str, node_dict): return out -def _check_if_dag_has_cycles(dag): +def _check_if_dag_has_cycles(dag: nx.DiGraph) -> None: """Check if DAG has cycles.""" try: cycles = nx.algorithms.cycles.find_cycle(dag) @@ -151,7 +158,7 @@ def _check_if_dag_has_cycles(dag): ) -def _format_cycles(cycles: List[Tuple[str]]) -> str: +def _format_cycles(cycles: List[Tuple[str, ...]]) -> str: """Format cycles as a paths connected by arrows.""" chain = [x for i, x in enumerate(itertools.chain(*cycles)) if i % 2 == 0] chain += [cycles[-1][1]] @@ -165,7 +172,7 @@ def _format_cycles(cycles: List[Tuple[str]]) -> str: return text -_TEMPLATE_ERROR = ( +_TEMPLATE_ERROR: str = ( "Some dependencies do not exist or are not produced by any task. 
See the following " "tree which shows which dependencies are missing for which tasks.\n\n{}" ) @@ -173,9 +180,9 @@ def _format_cycles(cycles: List[Tuple[str]]) -> str: _TEMPLATE_ERROR += "\n\n(Hint: Sometimes case sensitivity is at fault.)" -def _check_if_root_nodes_are_available(dag): +def _check_if_root_nodes_are_available(dag: nx.DiGraph) -> None: missing_root_nodes = [] - is_task_skipped = {} + is_task_skipped: Dict[str, bool] = {} for node in dag.nodes: is_node = "node" in dag.nodes[node] @@ -216,7 +223,9 @@ def _check_if_root_nodes_are_available(dag): raise ResolvingDependenciesError(_TEMPLATE_ERROR.format(text)) from None -def _check_if_tasks_are_skipped(node, dag, is_task_skipped): +def _check_if_tasks_are_skipped( + node: MetaNode, dag: nx.DiGraph, is_task_skipped: Dict[str, bool] +) -> Tuple[bool, Dict[str, bool]]: """Check for a given node whether it is only used by skipped tasks.""" are_all_tasks_skipped = [] for successor in dag.successors(node): @@ -227,7 +236,7 @@ def _check_if_tasks_are_skipped(node, dag, is_task_skipped): return all(are_all_tasks_skipped), is_task_skipped -def _check_if_task_is_skipped(task_name, dag): +def _check_if_task_is_skipped(task_name: str, dag: nx.DiGraph) -> bool: task = dag.nodes[task_name]["task"] is_skipped = get_specific_markers_from_task(task, "skip") @@ -241,7 +250,7 @@ def _check_if_task_is_skipped(task_name, dag): return is_any_true -def _skipif(condition: bool, *, reason: str) -> tuple: +def _skipif(condition: bool, *, reason: str) -> Tuple[bool, str]: """Shameless copy to circumvent circular imports.""" return condition, reason @@ -262,7 +271,7 @@ def _format_dictionary_to_tree(dict_: Dict[str, List[str]], title: str) -> str: return text -def _check_if_tasks_have_the_same_products(dag): +def _check_if_tasks_have_the_same_products(dag: nx.DiGraph) -> None: nodes_created_by_multiple_tasks = [] for node in dag.nodes: @@ -297,7 +306,9 @@ def _check_if_tasks_have_the_same_products(dag): @hookimpl -def pytask_resolve_dependencies_log(session, report): +def pytask_resolve_dependencies_log( + session: Session, report: ResolvingDependenciesReport +) -> None: """Log errors which happened while resolving dependencies.""" console.print() console.rule( diff --git a/src/_pytask/session.py b/src/_pytask/session.py index d60cc702..a2fe8109 100644 --- a/src/_pytask/session.py +++ b/src/_pytask/session.py @@ -1,44 +1,66 @@ +from typing import Any +from typing import Dict +from typing import List # noqa: F401 +from typing import Optional +from typing import TYPE_CHECKING + import attr +import networkx as nx from _pytask.enums import ExitCode +# Location was moved from pluggy v0.13.1 to v1.0.0. +try: + from pluggy.hooks import _HookRelay +except ImportError: + from pluggy._hooks import _HookRelay + + +if TYPE_CHECKING: + from _pytask.report import CollectionReport # noqa: F401 + from _pytask.report import ExecutionReport # noqa: F401 + from _ptytask.report import ResolvingDependenciesReport # noqa: F401 + from _pytask.nodes import MetaTask # noqa: F401 + + @attr.s class Session: """The session of pytask.""" - config = attr.ib(factory=dict) - """dict: A dictionary containing the configuration of the session.""" - hook = attr.ib(default=None) - """pluggy.hooks._HookRelay: Holds all hooks collected by pytask.""" - collection_reports = attr.ib(factory=list) - """Optional[List[pytask.report.ExecutionReport]]: Reports for collected items. 
+    config = attr.ib(factory=dict, type=Optional[Dict[str, Any]]) +    """Optional[Dict[str, Any]]: Configuration of the session.""" +    hook = attr.ib(default=None, type=Optional[_HookRelay]) +    """Optional[pluggy.hooks._HookRelay]: Holds all hooks collected by pytask.""" +    collection_reports = attr.ib(factory=list, type="List[CollectionReport]") +    """Optional[List[CollectionReport]]: Reports for collected items. The reports capture errors which happened while collecting tasks. """ - tasks = attr.ib(factory=list) - """Optional[List[pytask.nodes.MetaTask]]: List of collected tasks.""" - resolving_dependencies_report = attr.ib(factory=list) - """Optional[pytask.report.ResolvingDependenciesReport]: A report. - - Report when resolving dependencies failed. - - """ - execution_reports = attr.ib(factory=list) - """Optional[List[pytask.report.ExecutionReport]]: Reports for executed tasks.""" - exit_code = attr.ib(default=ExitCode.OK) +    tasks = attr.ib(factory=list, type="Optional[List[MetaTask]]") +    """Optional[List[MetaTask]]: List of collected tasks.""" +    dag = attr.ib(default=None, type=Optional[nx.DiGraph]) +    resolving_dependencies_report = attr.ib( +        default=None, type="ResolvingDependenciesReport" +    ) +    """Optional[ResolvingDependenciesReport]: A report for an error while +    resolving dependencies.""" +    execution_reports = attr.ib(factory=list, type="Optional[List[ExecutionReport]]") +    """Optional[List[ExecutionReport]]: Reports for executed tasks.""" +    exit_code = attr.ib(default=ExitCode.OK, type=ExitCode) - collection_start = attr.ib(default=None) - collection_end = attr.ib(default=None) - execution_start = attr.ib(default=None) - execution_end = attr.ib(default=None) +    collection_start = attr.ib(default=None, type=Optional[float]) +    collection_end = attr.ib(default=None, type=Optional[float]) +    execution_start = attr.ib(default=None, type=Optional[float]) +    execution_end = attr.ib(default=None, type=Optional[float]) - n_tasks_failed = attr.ib(default=0) +    n_tasks_failed = attr.ib(default=0, type=Optional[int]) """Optional[int]: Number of tests which have failed.""" - should_stop = attr.ib(default=False) +    scheduler = attr.ib(default=None, type=Any) +    should_stop = attr.ib(default=False, type=Optional[bool]) """Optional[bool]: Indicates whether the session should be stopped.""" @classmethod - def from_config(cls, config): +    def from_config(cls, config: Dict[str, Any]) -> "Session": """Construct the class from a config.""" return cls(config, config["pm"].hook) diff --git a/src/_pytask/shared.py b/src/_pytask/shared.py index 4f13674b..ea854595 100644 --- a/src/_pytask/shared.py +++ b/src/_pytask/shared.py @@ -1,9 +1,15 @@ """Functions which are used across various modules.""" import glob -from collections.abc import Sequence from pathlib import Path +from typing import Any +from typing import Callable +from typing import Dict from typing import List +from typing import Optional +from typing import Sequence +from typing import Union +import networkx as nx from _pytask.console import escape_squared_brackets from _pytask.nodes import create_task_name from _pytask.nodes import MetaNode @@ -13,7 +19,7 @@ from _pytask.path import relative_to -def to_list(scalar_or_iter): +def to_list(scalar_or_iter: Any) -> List[Any]: """Convert scalars and iterables to list. 
Parameters @@ -39,17 +45,22 @@ def to_list(scalar_or_iter): ) -def parse_paths(x): +def parse_paths(x: Optional[Any]) -> Optional[List[Path]]: """Parse paths.""" if x is not None: paths = [Path(p) for p in to_list(x)] paths = [ Path(p).resolve() for path in paths for p in glob.glob(path.as_posix()) ] - return paths + out = paths + else: + out = None + return out -def falsy_to_none_callback(ctx, param, value): # noqa: U100 +def falsy_to_none_callback( + ctx: Any, param: Any, value: Any # noqa: U100 +) -> Optional[Any]: """Convert falsy object to ``None``. Some click arguments accept multiple inputs and instead of ``None`` as a default if @@ -71,7 +82,12 @@ def falsy_to_none_callback(ctx, param, value): # noqa: U100 return value if value else None -def get_first_non_none_value(*configs, key, default=None, callback=None): +def get_first_non_none_value( + *configs: Dict[str, Any], + key: str, + default: Optional[Any] = None, + callback: Optional[Callable[..., Any]] = None, +) -> Any: """Get the first non-None value for a key from a list of dictionaries. This function allows to prioritize information from many configurations by changing @@ -94,18 +110,21 @@ def get_first_non_none_value(*configs, key, default=None, callback=None): return next((value for value in processed_values if value is not None), default) -def parse_value_or_multiline_option(value): +def parse_value_or_multiline_option( + value: Union[str, None] +) -> Union[None, str, List[str]]: """Parse option which can hold a single value or values separated by new lines.""" if value in ["none", "None", None, ""]: - value = None + return None elif isinstance(value, str) and "\n" in value: - value = [v.strip() for v in value.split("\n") if v.strip()] - elif isinstance(value, str) and "n" not in value: - value = value.strip() - return value + return [v.strip() for v in value.split("\n") if v.strip()] + elif isinstance(value, str): + return value.strip() + else: + raise ValueError(f"Input '{value}' is neither a 'str' nor 'None'.") -def convert_truthy_or_falsy_to_bool(x): +def convert_truthy_or_falsy_to_bool(x: Union[bool, str, None]) -> bool: """Convert truthy or falsy value in .ini to Python boolean.""" if x in [True, "True", "true", "1"]: out = True @@ -120,7 +139,7 @@ def convert_truthy_or_falsy_to_bool(x): return out -def reduce_node_name(node, paths: List[Path]): +def reduce_node_name(node: "MetaNode", paths: Sequence[Union[str, Path]]) -> str: """Reduce the node name. 
The whole name of the node - which includes the drive letter - can be very long @@ -149,7 +168,9 @@ def reduce_node_name(node, paths: List[Path]): return name -def reduce_names_of_multiple_nodes(names, dag, paths): +def reduce_names_of_multiple_nodes( + names: List[str], dag: nx.DiGraph, paths: Sequence[Union[str, Path]] +) -> List[str]: """Reduce the names of multiple nodes in the DAG.""" return [ reduce_node_name(dag.nodes[n].get("node") or dag.nodes[n].get("task"), paths) diff --git a/src/_pytask/skipping.py b/src/_pytask/skipping.py index a8b9ee9a..6a053f8d 100644 --- a/src/_pytask/skipping.py +++ b/src/_pytask/skipping.py @@ -1,4 +1,10 @@ """This module contains everything related to skipping tasks.""" +from typing import Any +from typing import Dict +from typing import Optional +from typing import Tuple +from typing import TYPE_CHECKING + from _pytask.config import hookimpl from _pytask.dag import descending_tasks from _pytask.enums import ColorCode @@ -10,18 +16,24 @@ from _pytask.traceback import remove_traceback_from_exc_info +if TYPE_CHECKING: + from _pytask.session import Session + from _pytask.nodes import MetaTask + from _pytask.report import ExecutionReport + + def skip_ancestor_failed(reason: str = "No reason provided.") -> str: """Function to parse information in ``@pytask.mark.skip_ancestor_failed``.""" return reason -def skipif(condition: bool, *, reason: str) -> tuple: +def skipif(condition: bool, *, reason: str) -> Tuple[bool, str]: """Function to parse information in ``@pytask.mark.skipif``.""" return condition, reason @hookimpl -def pytask_parse_config(config): +def pytask_parse_config(config: Dict[str, Any]) -> None: markers = { "skip": "Skip a task and all its subsequent tasks as well.", "skip_ancestor_failed": "Internal decorator applied to tasks whose ancestor " @@ -35,7 +47,7 @@ def pytask_parse_config(config): @hookimpl -def pytask_execute_task_setup(task): +def pytask_execute_task_setup(task: "MetaTask") -> None: """Take a short-cut for skipped tasks during setup with an exception.""" markers = get_specific_markers_from_task(task, "skip_unchanged") if markers: @@ -62,7 +74,9 @@ def pytask_execute_task_setup(task): @hookimpl -def pytask_execute_task_process_report(session, report): +def pytask_execute_task_process_report( + session: "Session", report: "ExecutionReport" +) -> Optional[bool]: """Process the execution reports for skipped tasks. This functions allows to turn skipped tasks to successful tasks. 
@@ -101,3 +115,5 @@ def pytask_execute_task_process_report(session, report): report.exc_info[1], (Skipped, SkippedUnchanged, SkippedAncestorFailed) ): return True + else: + return None diff --git a/src/_pytask/traceback.py b/src/_pytask/traceback.py index ad160cd4..eab34e9f 100644 --- a/src/_pytask/traceback.py +++ b/src/_pytask/traceback.py @@ -1,16 +1,29 @@ """Process tracebacks.""" from pathlib import Path from types import TracebackType +from typing import Generator +from typing import Tuple +from typing import Type +from typing import Union import _pytask import pluggy from rich.traceback import Traceback + _PLUGGY_DIRECTORY = Path(pluggy.__file__).parent _PYTASK_DIRECTORY = Path(_pytask.__file__).parent -def render_exc_info(exc_type, exc_value, traceback, show_locals=False): +ExceptionInfo = Tuple[Type[BaseException], BaseException, Union[TracebackType, None]] + + +def render_exc_info( + exc_type: Type[BaseException], + exc_value: BaseException, + traceback: Union[str, TracebackType], + show_locals: bool = False, +) -> Union[str, Traceback]: if isinstance(traceback, str): renderable = traceback else: @@ -21,17 +34,19 @@ def render_exc_info(exc_type, exc_value, traceback, show_locals=False): return renderable -def format_exception_without_traceback(exc_info): +def format_exception_without_traceback(exc_info: ExceptionInfo) -> str: """Format an exception without displaying the traceback.""" return f"[red bold]{exc_info[0].__name__}:[/] {exc_info[1]}" -def remove_traceback_from_exc_info(exc_info): +def remove_traceback_from_exc_info(exc_info: ExceptionInfo) -> ExceptionInfo: """Remove traceback from exception.""" return (*exc_info[:2], None) -def remove_internal_traceback_frames_from_exc_info(exc_info): +def remove_internal_traceback_frames_from_exc_info( + exc_info: ExceptionInfo, +) -> ExceptionInfo: """Remove internal traceback frames from exception info. If a non-internal traceback frame is found, return the traceback from the first @@ -46,7 +61,7 @@ def remove_internal_traceback_frames_from_exc_info(exc_info): return exc_info -def _is_internal_or_hidden_traceback_frame(frame): +def _is_internal_or_hidden_traceback_frame(frame: TracebackType) -> bool: """Returns ``True`` if traceback frame belongs to internal packages or is hidden. Internal packages are ``_pytask`` and ``pluggy``. A hidden frame is indicated by a @@ -61,7 +76,9 @@ def _is_internal_or_hidden_traceback_frame(frame): return any(root in path.parents for root in [_PLUGGY_DIRECTORY, _PYTASK_DIRECTORY]) -def _filter_internal_traceback_frames(frame): +def _filter_internal_traceback_frames( + frame: TracebackType, +) -> TracebackType: """Filter internal traceback frames from traceback. If the first external frame is visited, return the frame. Else return ``None``. 
@@ -73,7 +90,9 @@ def _filter_internal_traceback_frames(frame): return frame -def _yield_traceback_frames(frame): +def _yield_traceback_frames( + frame: TracebackType, +) -> Generator[TracebackType, None, None]: """Yield traceback frames.""" yield frame yield from _yield_traceback_frames(frame.tb_next) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/conftest.py b/tests/conftest.py index 3a176627..e4e71a16 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,3 +1,5 @@ +from pathlib import Path + import pytest from click.testing import CliRunner @@ -5,3 +7,8 @@ @pytest.fixture() def runner(): return CliRunner() + + +@pytest.fixture(autouse=True) +def _add_objects_to_doctest_namespace(doctest_namespace): + doctest_namespace["Path"] = Path diff --git a/tests/test_dag.py b/tests/test_dag.py index 5f45aad7..57d2f44a 100644 --- a/tests/test_dag.py +++ b/tests/test_dag.py @@ -20,10 +20,13 @@ class _DummyTask(MetaTask): base_name = "" def execute(self): - pass + ... def state(self): - pass + ... + + def add_report_section(self): + ... @pytest.fixture() diff --git a/tests/test_nodes.py b/tests/test_nodes.py index 8c0abf37..c90f5ea5 100644 --- a/tests/test_nodes.py +++ b/tests/test_nodes.py @@ -81,10 +81,13 @@ class Task(MetaTask): class Task(MetaTask): def execute(self): - pass + ... def state(self): - pass + ... + + def add_report_section(self): + ... task = Task() assert isinstance(task, MetaTask) @@ -208,10 +211,13 @@ class DummyTask(MetaTask): base_name = attr.ib() def state(): - pass + ... def execute(): - pass + ... + + def add_report_section(): + ... @attr.s diff --git a/tests/test_path.py b/tests/test_path.py index 7fd2f9e6..625e86d4 100644 --- a/tests/test_path.py +++ b/tests/test_path.py @@ -15,6 +15,7 @@ @pytest.mark.parametrize( "path, source, include_source, expected", [ + ("src/hello.py", "src", True, Path("src/hello.py")), (Path("src/hello.py"), Path("src"), True, Path("src/hello.py")), (Path("src/hello.py"), Path("src"), False, Path("hello.py")), ], @@ -28,6 +29,7 @@ def test_relative_to(path, source, include_source, expected): @pytest.mark.parametrize( "path, potential_ancestors, expected", [ + ("src/task.py", ["src", "bld"], Path("src")), (Path("src/task.py"), [Path("src"), Path("bld")], Path("src")), (Path("tasks/task.py"), [Path("src"), Path("bld")], None), (Path("src/ts/task.py"), [Path("src"), Path("src/ts")], Path("src/ts")), @@ -37,7 +39,7 @@ def test_relative_to(path, source, include_source, expected): ) def test_find_closest_ancestor(monkeypatch, path, potential_ancestors, expected): # Ensures that files are detected by an existing suffix not if they also exist. 
- monkeypatch.setattr("_pytask.nodes.pathlib.Path.is_file", lambda x: bool(x.suffix)) + monkeypatch.setattr("_pytask.nodes.Path.is_file", lambda x: bool(x.suffix)) result = find_closest_ancestor(path, potential_ancestors) assert result == expected diff --git a/tests/test_shared.py b/tests/test_shared.py index b8c1d80f..07ab2664 100644 --- a/tests/test_shared.py +++ b/tests/test_shared.py @@ -1,3 +1,5 @@ +from contextlib import ExitStack as does_not_raise # noqa: N813 + import pytest from _pytask.shared import convert_truthy_or_falsy_to_bool from _pytask.shared import parse_value_or_multiline_option @@ -37,16 +39,18 @@ def test_raise_error_convert_truthy_or_falsy_to_bool(value, expectation): @pytest.mark.unit @pytest.mark.parametrize( - "value, expected", + "value, expectation, expected", [ - (None, None), - ("None", None), - ("none", None), - ("first\nsecond", ["first", "second"]), - ("first", "first"), - ("", None), + (None, does_not_raise(), None), + ("None", does_not_raise(), None), + ("none", does_not_raise(), None), + ("first\nsecond", does_not_raise(), ["first", "second"]), + ("first", does_not_raise(), "first"), + ("", does_not_raise(), None), + (1, pytest.raises(ValueError, match="Input '1'"), None), ], ) -def test_parse_value_or_multiline_option(value, expected): - result = parse_value_or_multiline_option(value) - assert result == expected +def test_parse_value_or_multiline_option(value, expectation, expected): + with expectation: + result = parse_value_or_multiline_option(value) + assert result == expected diff --git a/tox.ini b/tox.ini index 16183b95..2c374f87 100644 --- a/tox.ini +++ b/tox.ini @@ -39,7 +39,7 @@ commands = changedir = docs/source conda_env = docs/rtd_environment.yml commands = - sphinx-build -WT -b html -d {envtmpdir}/doctrees . {envtmpdir}/html + sphinx-build -T -b html -d {envtmpdir}/doctrees . {envtmpdir}/html - sphinx-build -T -b linkcheck -d {envtmpdir}/doctrees . {envtmpdir}/linkcheck
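
For reference, the typing pattern applied throughout this patch (for example in src/_pytask/skipping.py and src/_pytask/session.py) imports types only under TYPE_CHECKING and refers to them through string annotations. A minimal, self-contained sketch follows; process_report is a hypothetical name used only for illustration, while _pytask.report.ExecutionReport is the class referenced in the diff.

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Evaluated only by the type checker, so there is no runtime import
    # and no risk of circular imports between modules.
    from _pytask.report import ExecutionReport


def process_report(report: "ExecutionReport") -> None:
    # The quoted annotation is resolved by mypy but never evaluated at runtime.
    ...

At runtime TYPE_CHECKING is False, so this sketch runs even in an environment where _pytask is not importable, while mypy still checks the annotation.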