Commit b22d734

Merge pull request #41 from nicke46/nicke/active_outline

active_outline added to scenario description

HardNorth authored Oct 19, 2023
2 parents 7fa2fa0 + 727b639 commit b22d734

Showing 7 changed files with 53 additions and 23 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
```diff
@@ -1,6 +1,8 @@
 # Changelog
 
 ## [Unreleased]
+### Added
+- Scenario Outline scenarios will add markdown table with active params to scenario description, by @nicke46
 
 ## [4.0.0]
 ### Added
```
6 changes: 3 additions & 3 deletions behave.ini
```diff
@@ -1,6 +1,6 @@
 [report_portal]
-token =
+api_key =
 project =
 endpoint =
-step_based=
-launch_attributes=
+log_layout =
+launch_attributes =
```
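A quick way to sanity-check the renamed sample keys (`token` → `api_key`, `step_based` → `log_layout`) is to read the section with the standard library; a minimal sketch, assuming the file above is saved as `behave.ini` in the working directory:

```python
# Sketch: list the keys of the sample [report_portal] section.
from configparser import ConfigParser

parser = ConfigParser()
parser.read("behave.ini")
print(sorted(parser["report_portal"].keys()))
# Expected: ['api_key', 'endpoint', 'launch_attributes', 'log_layout', 'project']
```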
26 changes: 17 additions & 9 deletions behave_reportportal/behave_agent.py
```diff
@@ -108,15 +108,15 @@ def finish_launch(self, _, **kwargs):
         self._rp.close()
 
     @check_rp_enabled
-    def start_feature(self, _, feature, **kwargs):
+    def start_feature(self, context, feature, **kwargs):
         """Start feature in ReportPortal."""
         if feature.tags and "skip" in feature.tags:
             feature.skip("Marked with @skip")
         self._feature_id = self._rp.start_test_item(
             name=feature.name,
             start_time=timestamp(),
             item_type="SUITE",
-            description=self._item_description(feature),
+            description=self._item_description(context, feature),
             code_ref=self._code_ref(feature),
             attributes=self._attributes(feature),
             **kwargs,
@@ -138,7 +138,7 @@ def finish_feature(context, feature, status=None, **kwargs):
         )
 
     @check_rp_enabled
-    def start_scenario(self, _, scenario, **kwargs):
+    def start_scenario(self, context, scenario, **kwargs):
         """Start scenario in ReportPortal."""
         if scenario.tags and "skip" in scenario.tags:
             scenario.skip("Marked with @skip")
@@ -150,7 +150,7 @@ def start_scenario(self, _, scenario, **kwargs):
             code_ref=self._code_ref(scenario),
             attributes=self._attributes(scenario),
             parameters=self._get_parameters(scenario),
-            description=self._item_description(scenario),
+            description=self._item_description(context, scenario),
             test_case_id=self._test_case_id(scenario),
             **kwargs,
         )
@@ -282,11 +282,11 @@ def _finish_step_step_based(self, step, status=None, **kwargs):
         self._log_item_id = self._scenario_id
 
     def _finish_step_scenario_based(self, step, **kwargs):
+        step_content = self._build_step_content(step)
         self._rp.log(
             item_id=self._scenario_id,
             time=timestamp(),
-            message=f"[{step.keyword}]: {step.name}.\n"
-            f"{self._build_step_content(step)}",
+            message=f"[{step.keyword}]: {step.name}." + (f"\n\n{step_content}" if step_content else ""),
             level="INFO",
             **kwargs,
         )
```
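The reworked log call above drops the dangling newline that used to follow steps with no extra content, and separates the content with a blank line when there is any. A standalone sketch of the new formatting expression (the keyword/name values are hypothetical; in the agent, `step_content` comes from `self._build_step_content(step)`):

```python
# Sketch of the new message formatting; values are hypothetical.
step_keyword, step_name = "Given", "I want to calculate 1 and 2"

for step_content in ("", "docstring or table payload"):
    message = f"[{step_keyword}]: {step_name}." + (
        f"\n\n{step_content}" if step_content else ""
    )
    print(repr(message))
# '[Given]: I want to calculate 1 and 2.'
# '[Given]: I want to calculate 1 and 2.\n\ndocstring or table payload'
```

This is also why the expected message in `test_finish_failed_step_scenario_based` below loses its trailing `\n`.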
```diff
@@ -399,10 +399,18 @@ def _log_cleanups(self, context, scope):
         )
 
     @staticmethod
-    def _item_description(item):
+    def _item_description(context, item):
         desc = ""
         if item.description:
-            desc = "\n".join(item.description)
-        return f"Description:\n{desc}"
+            text_desc = "\n".join(item.description)
+            desc = f"Description:\n{text_desc}"
+        if context.active_outline:
+            pt = PrettyTable(field_names=context.active_outline.headings)
+            pt.add_row(context.active_outline.cells)
+            pt.set_style(MARKDOWN)
+            desc += ("\n\n" if desc else "")
+            desc += pt.get_string()
+        return desc
 
     @staticmethod
     def _get_parameters(scenario):
```
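The table-building branch of `_item_description` can be exercised in isolation; a minimal sketch with a hypothetical outline row (in a real run, the headings and cells come from behave's `context.active_outline`):

```python
# Sketch of the markdown table built for an active Scenario Outline row;
# the headings and cells below are hypothetical example values.
from prettytable import MARKDOWN, PrettyTable

headings = ["number_a", "number_b"]
cells = ["1", "2"]

pt = PrettyTable(field_names=headings)
pt.add_row(cells)
pt.set_style(MARKDOWN)
print(pt.get_string())
# Prints a markdown table along the lines of:
# | number_a | number_b |
# |:--------:|:--------:|
# |    1     |    2     |
```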
2 changes: 1 addition & 1 deletion behave_reportportal/behave_agent.pyi
```diff
@@ -120,7 +120,7 @@ class BehaveAgent:
     def _test_case_id(scenario: Scenario) -> str: ...
 
     @staticmethod
-    def _item_description(item: Union[Scenario, Feature]) -> str: ...
+    def _item_description(context: Context, item: Union[Scenario, Feature]) -> str: ...
 
     @staticmethod
     def convert_to_rp_status(behave_status: str) -> str: ...
```
4 changes: 2 additions & 2 deletions behave_reportportal/config.py
```diff
@@ -141,8 +141,8 @@ def __init__(
         if not self.api_key:
             warn(
                 message="Argument `api_key` is `None` or empty string, "
-                "that's not supposed to happen because Report "
-                "Portal is usually requires an authorization key. "
+                "that's not supposed to happen because ReportPortal "
+                "is usually requires an authorization key. "
                 "Please check your code.",
                 category=RuntimeWarning,
                 stacklevel=2
```
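The reworded warning fires whenever `api_key` resolves to `None` or an empty string. A sketch of observing it (constructing `Config` directly and the `endpoint`/`project` keyword names are assumptions for illustration; only the `api_key` check is confirmed by the diff):

```python
# Sketch: trigger the RuntimeWarning for a missing api_key.
# Config(...) kwargs other than api_key are assumed for illustration.
import warnings

from behave_reportportal.config import Config

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    Config(endpoint="http://localhost:8080", project="demo", api_key=None)

assert any(issubclass(w.category, RuntimeWarning) for w in caught)
```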
1 change: 1 addition & 0 deletions tests/features/passed.feature
```diff
@@ -33,6 +33,7 @@ Feature: Calculator functionality
     Then Result should be correct
 
   Scenario Outline: Scenario with examples
+    Description for Scenario with examples
     Given I want to calculate <number_a> and <number_b>
     When Use addition operation
     Then Result is <expected>
```
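Together with the new `_item_description` logic, each example run of this outline now gets a description combining the free-text description with the active row. For a hypothetical row `number_a=1, number_b=2, expected=3` (the Examples table itself is outside this hunk), the description sent to ReportPortal would look roughly like:

```
Description:
Description for Scenario with examples

| number_a | number_b | expected |
|:--------:|:--------:|:--------:|
|    1     |    2     |    3     |
```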
35 changes: 27 additions & 8 deletions tests/units/test_rp_agent.py
```diff
@@ -19,7 +19,7 @@
 import pytest
 from behave.model_core import Status
 from delayed_assert import assert_expectations, expect
-from prettytable import PrettyTable
+from prettytable import MARKDOWN, PrettyTable
 from reportportal_client import RPClient, BatchedRPClient, ThreadedRPClient
 from reportportal_client.logs import MAX_LOG_BATCH_PAYLOAD_SIZE
@@ -234,17 +234,34 @@ def test_init_valid_config(config):
 def test_item_description():
     mock_item = mock.Mock()
     mock_item.description = None
+    mock_context = mock.Mock()
+    mock_context.active_outline = None
     expect(
-        BehaveAgent._item_description(mock_item) is None,
-        "Description is not None",
+        BehaveAgent._item_description(mock_context, mock_item) == "",
+        "Description is not \"\"",
     )
     mock_item.description = ["a", "b"]
     expect(
-        BehaveAgent._item_description(mock_item) == "Description:\na\nb",
+        BehaveAgent._item_description(mock_context, mock_item) == "Description:\na\nb",
         f"Description is incorrect:\n"
-        f"Actual: {BehaveAgent._item_description(mock_item)}\n"
+        f"Actual: {BehaveAgent._item_description(mock_context, mock_item)}\n"
         f"Expected: Description:\na\nb",
     )
+    mock_context.active_outline = mock.Mock()
+    mock_context.active_outline.headings = ["number_a", "number_b"]
+    mock_context.active_outline.cells = ["1", "2"]
+
+    pt = PrettyTable(field_names=mock_context.active_outline.headings)
+    pt.add_row(mock_context.active_outline.cells)
+    pt.set_style(MARKDOWN)
+    table = pt.get_string()
+    expect(
+        BehaveAgent._item_description(mock_context, mock_item) == f"Description:\na\nb\n\n{table}",
+        f"Description is incorrect:\n"
+        f"Actual: {BehaveAgent._item_description(mock_context, mock_item)}\n"
+        f"Expected: Description:\na\nb\n\n{table}",
+    )
+
     assert_expectations()
@@ -368,6 +385,7 @@ def verify_start_feature(mock_feature, config):
     mock_rps = mock.create_autospec(RPClient)
     mock_rps.start_test_item.return_value = "feature_id"
     mock_context = mock.Mock()
+    mock_context.active_outline = None
     mock_feature.name = "feature_name"
     mock_feature.description = ["A", "B"]
     ba = BehaveAgent(config, mock_rps)
@@ -377,7 +395,7 @@
         name="feature_name",
         start_time=123,
         item_type="SUITE",
-        description=BehaveAgent._item_description(mock_feature),
+        description=BehaveAgent._item_description(mock_context, mock_feature),
         code_ref=BehaveAgent._code_ref(mock_feature),
         attributes=ba._attributes(mock_feature),
         some_key="some_value",
@@ -434,6 +452,7 @@ def verify_start_scenario(mock_scenario, config):
     mock_rps = mock.create_autospec(RPClient)
     mock_rps.start_test_item.return_value = "scenario_id"
     mock_context = mock.Mock()
+    mock_context.active_outline = None
     mock_scenario.name = "scenario_name"
     mock_scenario._row = None
     mock_scenario.description = ["A", "B"]
@@ -446,7 +465,7 @@
         start_time=123,
         item_type="STEP",
         parent_item_id="feature_id",
-        description=BehaveAgent._item_description(mock_scenario),
+        description=BehaveAgent._item_description(mock_context, mock_scenario),
         code_ref=BehaveAgent._code_ref(mock_scenario),
         parameters=BehaveAgent._get_parameters(mock_scenario),
         attributes=ba._attributes(mock_scenario),
@@ -695,7 +714,7 @@ def test_finish_failed_step_scenario_based(mock_timestamp, config):
             item_id="scenario_id",
             time=123,
             level="INFO",
-            message="[keyword]: name.\n",
+            message="[keyword]: name.",
         ),
     ]
     mock_rps.log.assert_has_calls(calls, any_order=True)
```