
Commit

Merge pull request #658 from pytest-dev/ab/fix-typing
Fix typing
youtux authored Dec 5, 2024
2 parents 6cdd340 + f4413e5 commit 9bb4967
Showing 14 changed files with 333 additions and 160 deletions.
8 changes: 7 additions & 1 deletion CHANGES.rst
@@ -20,9 +20,15 @@ Deprecated

Removed
+++++++
* The following private attributes are not available anymore (`#658 <https://github.com/pytest-dev/pytest-bdd/pull/658>`_):
  * ``_pytest.reports.TestReport.scenario``; replaced by ``pytest_bdd.reporting.test_report_context`` WeakKeyDictionary (internal use)
  * ``__scenario__`` attribute of test functions generated by the ``@scenario`` (and ``@scenarios``) decorator; replaced by ``pytest_bdd.scenario.scenario_wrapper_template_registry`` WeakKeyDictionary (internal use)
  * ``_pytest.nodes.Item.__scenario_report__``; replaced by ``pytest_bdd.reporting.scenario_reports_registry`` WeakKeyDictionary (internal use)
  * ``_pytest_bdd_step_context`` attribute of internal test function markers; replaced by ``pytest_bdd.steps.step_function_context_registry`` WeakKeyDictionary (internal use)
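
A minimal sketch of what the new registries mean for code that used to read these attributes: lookups move from attribute access to a ``WeakKeyDictionary`` keyed by the pytest object, mirroring the ``cucumber_json.py`` hunk further down. The hook body below is illustrative only; the registry name and the ``.scenario`` field are taken from this PR.

    # Illustrative sketch, not part of this commit: reading the scenario report
    # through the new registry instead of the removed ``report.scenario`` attribute.
    from pytest_bdd.reporting import test_report_context_registry

    def pytest_runtest_logreport(report):
        try:
            scenario = test_report_context_registry[report].scenario  # was: report.scenario
        except KeyError:
            return  # not a pytest-bdd test, nothing registered for this report
        print(scenario["name"], scenario["feature"]["name"])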

Fixed
+++++
* Made type annotations stronger and removed most of the ``typing.Any`` usages and ``# type: ignore`` annotations. `#658 <https://github.com/pytest-dev/pytest-bdd/pull/658>`_
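
A generic illustration (not code from this commit) of why narrowing ``typing.Any`` to ``object`` strengthens checking: ``Any`` silences the type checker entirely, while ``object`` forces an explicit narrowing step before use, as in the ``inject_fixture`` signature change in ``compat.py`` below.

    from typing import Any

    def log_any(value: Any) -> None:
        print(value.upper())  # accepted by the type checker, may fail at runtime

    def log_object(value: object) -> None:
        if isinstance(value, str):  # ``object`` requires narrowing before use
            print(value.upper())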

Security
++++++++
@@ -137,7 +143,7 @@ Fixed

7.0.1
-----
- Fix errors occurring if `pytest_unconfigure` is called before `pytest_configure`. `#362 <https://github.com/pytest-dev/pytest-bdd/issues/362>`_ `#641 <https://github.com/pytest-dev/pytest-bdd/pull/641>`_
- Fix errors occurring if ``pytest_unconfigure`` is called before `pytest_configure`. `#362 <https://github.com/pytest-dev/pytest-bdd/issues/362>`_ `#641 <https://github.com/pytest-dev/pytest-bdd/pull/641>`_

7.0.0
----------
13 changes: 8 additions & 5 deletions src/pytest_bdd/compat.py
@@ -2,7 +2,6 @@

from collections.abc import Sequence
from importlib.metadata import version
from typing import Any

from _pytest.fixtures import FixtureDef, FixtureManager, FixtureRequest
from _pytest.nodes import Node
@@ -14,10 +13,12 @@

if pytest_version.release >= (8, 1):

def getfixturedefs(fixturemanager: FixtureManager, fixturename: str, node: Node) -> Sequence[FixtureDef] | None:
def getfixturedefs(
fixturemanager: FixtureManager, fixturename: str, node: Node
) -> Sequence[FixtureDef[object]] | None:
return fixturemanager.getfixturedefs(fixturename, node)

def inject_fixture(request: FixtureRequest, arg: str, value: Any) -> None:
def inject_fixture(request: FixtureRequest, arg: str, value: object) -> None:
"""Inject fixture into pytest fixture request.
:param request: pytest fixture request
@@ -38,10 +39,12 @@ def inject_fixture(request: FixtureRequest, arg: str, value: Any) -> None:

else:

def getfixturedefs(fixturemanager: FixtureManager, fixturename: str, node: Node) -> Sequence[FixtureDef] | None:
def getfixturedefs(
fixturemanager: FixtureManager, fixturename: str, node: Node
) -> Sequence[FixtureDef[object]] | None:
return fixturemanager.getfixturedefs(fixturename, node.nodeid) # type: ignore

def inject_fixture(request: FixtureRequest, arg: str, value: Any) -> None:
def inject_fixture(request: FixtureRequest, arg: str, value: object) -> None:
"""Inject fixture into pytest fixture request.
:param request: pytest fixture request
98 changes: 78 additions & 20 deletions src/pytest_bdd/cucumber_json.py
@@ -6,17 +6,69 @@
import math
import os
import time
import typing
from typing import TYPE_CHECKING, Literal, TypedDict

if typing.TYPE_CHECKING:
from typing import Any
from typing_extensions import NotRequired

from .reporting import FeatureDict, ScenarioReportDict, StepReportDict, test_report_context_registry

if TYPE_CHECKING:
from _pytest.config import Config
from _pytest.config.argparsing import Parser
from _pytest.reports import TestReport
from _pytest.terminal import TerminalReporter


class ResultElementDict(TypedDict):
status: Literal["passed", "failed", "skipped"]
duration: int # in nanoseconds
error_message: NotRequired[str]


class TagElementDict(TypedDict):
name: str
line: int


class MatchElementDict(TypedDict):
location: str


class StepElementDict(TypedDict):
keyword: str
name: str
line: int
match: MatchElementDict
result: ResultElementDict


class ScenarioElementDict(TypedDict):
keyword: str
id: str
name: str
line: int
description: str
tags: list[TagElementDict]
type: Literal["scenario"]
steps: list[StepElementDict]


class FeatureElementDict(TypedDict):
keyword: str
uri: str
name: str
id: str
line: int
description: str
language: str
tags: list[TagElementDict]
elements: list[ScenarioElementDict]


class FeaturesDict(TypedDict):
features: dict[str, FeatureElementDict]


def add_options(parser: Parser) -> None:
"""Add pytest-bdd options."""
group = parser.getgroup("bdd", "Cucumber JSON")
@@ -52,26 +104,32 @@ class LogBDDCucumberJSON:
def __init__(self, logfile: str) -> None:
logfile = os.path.expanduser(os.path.expandvars(logfile))
self.logfile = os.path.normpath(os.path.abspath(logfile))
self.features: dict[str, dict] = {}
self.features: dict[str, FeatureElementDict] = {}

def _get_result(self, step: dict[str, Any], report: TestReport, error_message: bool = False) -> dict[str, Any]:
def _get_result(self, step: StepReportDict, report: TestReport, error_message: bool = False) -> ResultElementDict:
"""Get scenario test run result.
:param step: `Step` step we get result for
:param report: pytest `Report` object
:return: `dict` in form {"status": "<passed|failed|skipped>", ["error_message": "<error_message>"]}
"""
result: dict[str, Any] = {}
if report.passed or not step["failed"]: # ignore setup/teardown
result = {"status": "passed"}
elif report.failed:
result = {"status": "failed", "error_message": str(report.longrepr) if error_message else ""}
elif report.skipped:
result = {"status": "skipped"}
result["duration"] = int(math.floor((10**9) * step["duration"])) # nanosec
return result

def _serialize_tags(self, item: dict[str, Any]) -> list[dict[str, Any]]:
status: Literal["passed", "failed", "skipped"]
res_message = None
if report.outcome == "passed" or not step["failed"]: # ignore setup/teardown
status = "passed"
elif report.outcome == "failed":
status = "failed"
res_message = str(report.longrepr) if error_message else ""
elif report.outcome == "skipped":
status = "skipped"
else:
raise ValueError(f"Unknown test outcome {report.outcome}")
res: ResultElementDict = {"status": status, "duration": int(math.floor((10**9) * step["duration"]))} # nanosec
if res_message is not None:
res["error_message"] = res_message
return res

def _serialize_tags(self, item: FeatureDict | ScenarioReportDict) -> list[TagElementDict]:
"""Serialize item's tags.
:param item: json-serialized `Scenario` or `Feature`.
@@ -87,16 +145,16 @@ def _serialize_tags(self, item: dict[str, Any]) -> list[dict[str, Any]]:

def pytest_runtest_logreport(self, report: TestReport) -> None:
try:
scenario = report.scenario
except AttributeError:
scenario = test_report_context_registry[report].scenario
except KeyError:
# skip reporting for non-bdd tests
return

if not scenario["steps"] or report.when != "call":
# skip if there isn't a result or scenario has no steps
return

def stepmap(step: dict[str, Any]) -> dict[str, Any]:
def stepmap(step: StepReportDict) -> StepElementDict:
error_message = False
if step["failed"] and not scenario.setdefault("failed", False):
scenario["failed"] = True
@@ -128,7 +186,7 @@ def stepmap(step: dict[str, Any]) -> dict[str, Any]:
self.features[scenario["feature"]["filename"]]["elements"].append(
{
"keyword": scenario["keyword"],
"id": report.item["name"],
"id": test_report_context_registry[report].name,
"name": scenario["name"],
"line": scenario["line_number"],
"description": scenario["description"],
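For orientation, a hypothetical payload shaped like the new ``FeaturesDict``/``FeatureElementDict`` TypedDicts above; every concrete value is invented for illustration, and only the field names and nesting follow the definitions in this commit.

    # Hypothetical example of the cucumber-json structure the TypedDicts describe.
    example_features = {
        "features": {
            "features/login.feature": {
                "keyword": "Feature",
                "uri": "features/login.feature",
                "name": "Login",
                "id": "login",
                "line": 1,
                "description": "",
                "language": "en",
                "tags": [{"name": "smoke", "line": 1}],
                "elements": [
                    {
                        "keyword": "Scenario",
                        "id": "test_successful_login",
                        "name": "Successful login",
                        "line": 3,
                        "description": "",
                        "type": "scenario",
                        "tags": [],
                        "steps": [
                            {
                                "keyword": "Given",
                                "name": "a registered user",
                                "line": 4,
                                "match": {"location": ""},
                                "result": {"status": "passed", "duration": 1200000},  # duration in nanoseconds
                            }
                        ],
                    }
                ],
            }
        }
    }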
3 changes: 2 additions & 1 deletion src/pytest_bdd/feature.py
@@ -28,6 +28,7 @@

import glob
import os.path
from collections.abc import Iterable

from .parser import Feature, FeatureParser

@@ -57,7 +58,7 @@ def get_feature(base_path: str, filename: str, encoding: str = "utf-8") -> Featu
return feature


def get_features(paths: list[str], encoding: str = "utf-8") -> list[Feature]:
def get_features(paths: Iterable[str], encoding: str = "utf-8") -> list[Feature]:
"""Get features for given paths.
:param list paths: `list` of paths (file or dirs)
26 changes: 18 additions & 8 deletions src/pytest_bdd/generation.py
@@ -7,24 +7,30 @@
from typing import TYPE_CHECKING, cast

from _pytest._io import TerminalWriter
from _pytest.python import Function
from mako.lookup import TemplateLookup # type: ignore

from .compat import getfixturedefs
from .feature import get_features
from .parser import Feature, ScenarioTemplate, Step
from .scenario import inject_fixturedefs_for_step, make_python_docstring, make_python_name, make_string_literal
from .scenario import (
inject_fixturedefs_for_step,
make_python_docstring,
make_python_name,
make_string_literal,
scenario_wrapper_template_registry,
)
from .steps import get_step_fixture_name
from .types import STEP_TYPES

if TYPE_CHECKING:
from collections.abc import Sequence
from typing import Any

from _pytest.config import Config
from _pytest.config.argparsing import Parser
from _pytest.fixtures import FixtureDef, FixtureManager
from _pytest.main import Session
from _pytest.python import Function
from _pytest.nodes import Node


template_lookup = TemplateLookup(directories=[os.path.join(os.path.dirname(__file__), "templates")])
@@ -127,23 +133,25 @@ def print_missing_code(scenarios: list[ScenarioTemplate], steps: list[Step]) ->


def _find_step_fixturedef(
fixturemanager: FixtureManager, item: Function, step: Step
) -> Sequence[FixtureDef[Any]] | None:
fixturemanager: FixtureManager, item: Node, step: Step
) -> Sequence[FixtureDef[object]] | None:
"""Find step fixturedef."""
with inject_fixturedefs_for_step(step=step, fixturemanager=fixturemanager, node=item):
bdd_name = get_step_fixture_name(step=step)
return getfixturedefs(fixturemanager, bdd_name, item)


def parse_feature_files(paths: list[str], **kwargs: Any) -> tuple[list[Feature], list[ScenarioTemplate], list[Step]]:
def parse_feature_files(
paths: list[str], encoding: str = "utf-8"
) -> tuple[list[Feature], list[ScenarioTemplate], list[Step]]:
"""Parse feature files of given paths.
:param paths: `list` of paths (file or dirs)
:return: `list` of `tuple` in form:
(`list` of `Feature` objects, `list` of `Scenario` objects, `list` of `Step` objects).
"""
features = get_features(paths, **kwargs)
features = get_features(paths, encoding=encoding)
scenarios = sorted(
itertools.chain.from_iterable(feature.scenarios.values() for feature in features),
key=lambda scenario: (scenario.feature.name or scenario.feature.filename, scenario.name),
@@ -182,7 +190,9 @@ def _show_missing_code_main(config: Config, session: Session) -> None:
features, scenarios, steps = parse_feature_files(config.option.features)

for item in session.items:
if scenario := getattr(item.obj, "__scenario__", None): # type: ignore
if not isinstance(item, Function):
continue
if (scenario := scenario_wrapper_template_registry.get(item.obj)) is not None:
if scenario in scenarios:
scenarios.remove(scenario)
for step in scenario.steps:
38 changes: 21 additions & 17 deletions src/pytest_bdd/gherkin_terminal_reporter.py
@@ -4,9 +4,9 @@

from _pytest.terminal import TerminalReporter

if typing.TYPE_CHECKING:
from typing import Any
from .reporting import test_report_context_registry

if typing.TYPE_CHECKING:
from _pytest.config import Config
from _pytest.config.argparsing import Parser
from _pytest.reports import TestReport
@@ -43,12 +43,12 @@ def configure(config: Config) -> None:
raise Exception("gherkin-terminal-reporter is not compatible with 'xdist' plugin.")


class GherkinTerminalReporter(TerminalReporter): # type: ignore
class GherkinTerminalReporter(TerminalReporter): # type: ignore[misc]
def __init__(self, config: Config) -> None:
super().__init__(config)
self.current_rule = None
self.current_rule: str | None = None

def pytest_runtest_logreport(self, report: TestReport) -> Any:
def pytest_runtest_logreport(self, report: TestReport) -> None:
rep = report
res = self.config.hook.pytest_report_teststatus(report=rep, config=self.config)
cat, letter, word = res
@@ -69,16 +69,21 @@ def pytest_runtest_logreport(self, report: TestReport) -> Any:
scenario_markup = word_markup
rule_markup = {"purple": True}

if self.verbosity <= 0 or not hasattr(report, "scenario"):
try:
scenario = test_report_context_registry[report].scenario
except KeyError:
scenario = None

if self.verbosity <= 0 or scenario is None:
return super().pytest_runtest_logreport(rep)

rule = report.scenario.get("rule")
rule = scenario.get("rule")
indent = " " if rule else ""

if self.verbosity == 1:
self.ensure_newline()
self._tw.write(f"{report.scenario['feature']['keyword']}: ", **feature_markup)
self._tw.write(report.scenario["feature"]["name"], **feature_markup)
self._tw.write(f"{scenario['feature']['keyword']}: ", **feature_markup)
self._tw.write(scenario["feature"]["name"], **feature_markup)
self._tw.write("\n")

if rule and rule["name"] != self.current_rule:
@@ -87,15 +92,15 @@ def pytest_runtest_logreport(self, report: TestReport) -> Any:
self._tw.write("\n")
self.current_rule = rule["name"]

self._tw.write(f"{indent} {report.scenario['keyword']}: ", **scenario_markup)
self._tw.write(report.scenario["name"], **scenario_markup)
self._tw.write(f"{indent} {scenario['keyword']}: ", **scenario_markup)
self._tw.write(scenario["name"], **scenario_markup)
self._tw.write(" ")
self._tw.write(word, **word_markup)
self._tw.write("\n")
elif self.verbosity > 1:
self.ensure_newline()
self._tw.write(f"{report.scenario['feature']['keyword']}: ", **feature_markup)
self._tw.write(report.scenario["feature"]["name"], **feature_markup)
self._tw.write(f"{scenario['feature']['keyword']}: ", **feature_markup)
self._tw.write(scenario["feature"]["name"], **feature_markup)
self._tw.write("\n")

if rule and rule["name"] != self.current_rule:
@@ -104,13 +109,12 @@ def pytest_runtest_logreport(self, report: TestReport) -> Any:
self._tw.write("\n")
self.current_rule = rule["name"]

self._tw.write(f"{indent} {report.scenario['keyword']}: ", **scenario_markup)
self._tw.write(report.scenario["name"], **scenario_markup)
self._tw.write(f"{indent} {scenario['keyword']}: ", **scenario_markup)
self._tw.write(scenario["name"], **scenario_markup)
self._tw.write("\n")
for step in report.scenario["steps"]:
for step in scenario["steps"]:
self._tw.write(f"{indent} {step['keyword']} {step['name']}\n", **scenario_markup)
self._tw.write(f"{indent} {word}", **word_markup)
self._tw.write("\n\n")

self.stats.setdefault(cat, []).append(rep)
return None