Broken and failed tests are not represented correctly in the Jenkins Allure report history #826

Closed

zmicek opened this issue Aug 16, 2018 · 1 comment

zmicek commented Aug 16, 2018

I'm submitting a ...

  • [x] bug report
  • [ ] feature request
  • [ ] support request => Please do not submit support requests here, see the note at the top of this template.

What is the current behavior?

Allure test history is shown correctly in the Allure report itself, but not in the Jenkins project trend chart. Failed tests are represented as broken tests, colored yellow instead of red as in the report (see the screenshots below).
Hovering over the yellow area shows the number of broken tests.
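
For context on the terminology: Allure reports a test as "failed" when an assertion fails and as "broken" when it raises an unexpected exception, so the two statuses should be colored differently. A minimal illustration of the distinction (hypothetical test names, not part of the reproduction script below):

# Hypothetical example of the two Allure statuses; not part of the original reproduction script.
def test_reported_as_failed():
    # An assertion error -> Allure status "failed" (red)
    assert 1 == 5


def test_reported_as_broken():
    # Any other exception -> Allure status "broken" (yellow)
    raise RuntimeError("unexpected error during the test")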

If the current behavior is a bug, please provide the steps to reproduce and if possible a minimal demo of the problem

I created a Jenkins job that runs a simple test script written in Python 3.6. The test script contains a few simple test functions that demonstrate basic Pytest functionality.
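
The job runs pytest with the allure-pytest adaptor so that Allure result files are produced for the Jenkins plugin to pick up. A minimal sketch of the invocation, assuming the reproduction script below is saved as test_example.py and results are written to allure-results (the exact job configuration may differ):

# run_tests.py - minimal sketch; assumes allure-pytest is installed.
# Equivalent to the shell step: pytest --alluredir=allure-results test_example.py
import pytest

if __name__ == "__main__":
    exit_code = pytest.main(["--alluredir=allure-results", "test_example.py"])
    raise SystemExit(exit_code)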

Failed tests are represented as broken:
[screenshot: Jenkins Allure trend chart showing the failed tests as broken (yellow)]

How it should look:
[screenshot: Allure report history showing the failed tests in red]

Environment:

Allure version: 2.6.0
Test framework: Allure Jenkins plugin
Allure adaptor: allure-pytest 2.5.0

Other information

Code that you can use to reproduce the issue:

import pytest
import time
import logging
import os
import sys
import allure

logger = logging.getLogger(__name__)
logging.getLogger().setLevel(logging.INFO)

# path to script dir and file
(script_dir, script_file) = os.path.split(os.path.abspath(__file__))

# Get the filename and remove the .py extension
model_name = os.path.splitext(script_file)[0]
file_path = model_name + '.log'

logging.basicConfig(filename=file_path, filemode='w', level=logging.DEBUG)
logging.info("MODULE BODY ROOT LOGGER: {}".format(logging.getLogger()))


#################################################
def test_command_line(vdev, vcfg):
    """Example with command line parameters."""
    logging.info(vdev)
    logging.info(vcfg)
    logging.info("Something")
    logging.info("Something")
    logging.info("Something")
    logger.info("Info")
    logger.error("Error")
    logger.warning("Warning")
    assert 1


#################################################
@pytest.mark.skip(reason="no way of currently testing this")
def test_skip():
    """Skipping using default marker."""
    pass


#################################################
@pytest.mark.skipif(sys.version_info < (3,6), reason="requires python3.6")
def test_function():
    """Skips test if Python version is below 3.6"""
    pass


#################################################
@pytest.mark.xfail()
def test_xfail():
    """Expected failure."""
    assert 1 == 2


#################################################
@pytest.mark.xfail()
def test_xpass():
    """Unexpected pass."""
    assert 1 == 1


#################################################
def test_skipping():
    """Skipping test from test function."""
    pytest.skip('Test is skipped')


#################################################
@pytest.mark.parametrize("a", [1, 2, 3])
@pytest.mark.parametrize("b", [1, 2, 3])
def test_mark_parametrize(a, b):
    """Parametrize example - each with everyone."""
    logging.info("{}, {}".format(a, b))


#################################################
@pytest.mark.parametrize("a, b", [(1, 2), ('F', 'R')], ids=["forward", "backward"])
def test_using_multiple_parameters(a, b):
    """Parametrization with 2 parameters defined with added IDs"""
    logger.info("{}, {}".format(a, b))


#################################################
@pytest.mark.parametrize("params", [pytest.param(("Case 1", "expected fail"), marks=pytest.mark.xfail),
                                    ("Case 2", "should not fail"),
                                    pytest.param(("Case 3", "expected fail"), marks=pytest.mark.xfail),
                                    ("Case 4", "should not fail")])
def test_parametrize_with_fails(params):
    """Test parametrization where some test cases are expected to fail."""
    test_case, failure = params
    if failure == "expected fail":
        assert 0
    else:
        assert 1


#################################################
@pytest.mark.parametrize('test_input,expected', [
    ('3+5', 8),
    pytest.param('1+7', 8, marks=pytest.mark.basic),
    pytest.param('1+3', 8, marks=pytest.mark.basic),
    pytest.param('2+4', 6, marks=pytest.mark.basic, id='basic_2+4'),
    pytest.param('6*9', 42, marks=[pytest.mark.basic, pytest.mark.xfail], id='basic_6*9'),
])
def test_eval(test_input, expected):
    """Set marks or test ID for individual parametrized test"""
    assert eval(test_input) == expected


#################################################
# scope='module'   => set up once per module (here once per param value, since the fixture is parametrized)
# scope='function' => set up once per test function
@pytest.fixture(scope="module",
                params=["smtp.gmail.com", "mail.python.org"])
def a(request):
    logging.info("This fixture is executed once in the module.")
    return request.param


@pytest.fixture
def b():
    return 'b'


def test_fixture_parametrize(a, b):
    """Parametrization using fixtures. This is solution if test require several model compilations."""
    logging.info("Example with parametrize using fixtures")
    logging.info("{}, {}".format(a, b))


#################################################
def test_fail():
    """Test that fails."""
    logger.info("Info")
    logger.error("Error")
    logger.warning("Warning")
    assert 1 == 5


#################################################
def test_approx_equal_abs_error():
    """Comparing of the 2 float numbers. Absolute error is used."""
    assert pytest.approx(1.0, abs=1e-6) == 1.0000001


def test_approx_equal_rel_error():
    """Comparing of the 2 float numbers. Relative error is used."""
    assert pytest.approx(1.0, rel=1e-6) == 1.0000001

    
#################################################
def test_assertion_message():
    """Example that shows how additional message can be added into the assertion"""
    a = 100
    b = 200
    assert a == b, "Error description goes here."
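
Note that test_command_line above expects vdev and vcfg fixtures that are not defined in the script; they presumably come from a conftest.py that was not attached to the issue. A minimal conftest.py sketch that makes the script runnable might look like this (option names and defaults are assumptions):

# conftest.py (hypothetical) - supplies the vdev/vcfg fixtures used by test_command_line
import pytest


def pytest_addoption(parser):
    # Option names and defaults are assumed; the original conftest.py was not attached to the issue.
    parser.addoption("--vdev", action="store", default="default-device",
                     help="device identifier passed to the tests")
    parser.addoption("--vcfg", action="store", default="default-config",
                     help="configuration name passed to the tests")


@pytest.fixture
def vdev(request):
    return request.config.getoption("--vdev")


@pytest.fixture
def vcfg(request):
    return request.config.getoption("--vcfg")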

baev (Member) commented Jan 15, 2019

Duplicate of jenkinsci/allure-plugin#215

baev marked this as a duplicate of jenkinsci/allure-plugin#215 on Jan 15, 2019
baev closed this as completed on Jan 15, 2019