diff --git a/qiskit/test/base.py b/qiskit/test/base.py
index 149104911848..4fec66f16a44 100644
--- a/qiskit/test/base.py
+++ b/qiskit/test/base.py
@@ -23,7 +23,6 @@
 """
 
 import inspect
-import itertools
 import logging
 import os
 import sys
@@ -33,64 +32,47 @@
 
 try:
     import fixtures
-    from testtools.compat import advance_iterator
-    from testtools import content
+    import testtools
 
     HAS_FIXTURES = True
 except ImportError:
     HAS_FIXTURES = False
 
-from qiskit.exceptions import MissingOptionalLibraryError
 from .decorators import enforce_subclasses_call
-from .runtest import RunTest, MultipleExceptions
 from .utils import Path, setup_test_logging
 
 __unittest = True  # Allows shorter stack trace for .assertDictAlmostEqual
 
 
-def _copy_content(content_object):
-    """Make a copy of the given content object.
+# If testtools is installed, use it as a (mostly) drop-in replacement for
+# unittest's TestCase.  This enables the fixtures used for capturing stdout,
+# stderr, and Python logging, attaching the output to stestr's result stream.
+if HAS_FIXTURES:
 
-    The content within ``content_object`` is iterated and saved. This is
-    useful when the source of the content is volatile, a log file in a
-    temporary directory for example.
+    class BaseTestCase(testtools.TestCase):
+        """Base test class."""
 
-    Args:
-        content_object (content.Content): A ``content.Content`` instance.
+        # testtools maintains its own versions of the assert functions, which
+        # mostly behave as value-adds over the stdlib unittest assertion
+        # methods.  However, for assertEqual and assertRaises modern unittest
+        # has diverged from the testtools forks and offers more (or different)
+        # options that are incompatible with the testtools versions.  Just use
+        # the stdlib versions so that our tests work as expected.
+        assertRaises = unittest.TestCase.assertRaises
+        assertEqual = unittest.TestCase.assertEqual
 
-    Returns:
-        content.Content: An instance with the same mime-type as
-            ``content_object`` and a non-volatile copy of its content.
-    """
-    content_bytes = list(content_object.iter_bytes())
 
-    def content_callback():
-        return content_bytes
+else:
 
-    return content.Content(content_object.content_type, content_callback)
+    class BaseTestCase(unittest.TestCase):
+        """Base test class."""
 
-
-def gather_details(source_dict, target_dict):
-    """Merge the details from ``source_dict`` into ``target_dict``.
-
-    ``gather_details`` evaluates all details in ``source_dict``. Do not use it
-    if the details are not ready to be evaluated.
-
-    :param source_dict: A dictionary of details will be gathered.
-    :param target_dict: A dictionary into which details will be gathered.
-    """
-    for name, content_object in source_dict.items():
-        new_name = name
-        disambiguator = itertools.count(1)
-        while new_name in target_dict:
-            new_name = "%s-%d" % (name, advance_iterator(disambiguator))
-        name = new_name
-        target_dict[name] = _copy_content(content_object)
+        pass
 
 
 @enforce_subclasses_call(["setUp", "setUpClass", "tearDown", "tearDownClass"])
-class BaseQiskitTestCase(unittest.TestCase):
+class BaseQiskitTestCase(BaseTestCase):
     """Additions for test cases for all Qiskit-family packages.
 
     The additions here are intended for all packages, not just Terra.  Terra-specific logic should
@@ -256,165 +238,6 @@ class FullQiskitTestCase(QiskitTestCase):
     If you derive directly from it, you may try and instantiate the class
     without satisfying its dependencies."""
 
-    run_tests_with = RunTest
-
-    def __init__(self, *args, **kwargs):
-        """Construct a TestCase."""
-        if not HAS_FIXTURES:
-            raise MissingOptionalLibraryError(
-                libname="testtools",
-                name="test runner",
-                pip_install="pip install testtools",
-            )
-        super().__init__(*args, **kwargs)
-        self.__RunTest = self.run_tests_with
-        self._reset()
-        self.__exception_handlers = []
-        self.exception_handlers = [
-            (unittest.SkipTest, self._report_skip),
-            (self.failureException, self._report_failure),
-            (unittest.case._UnexpectedSuccess, self._report_unexpected_success),
-            (Exception, self._report_error),
-        ]
-
-    def _reset(self):
-        """Reset the test case as if it had never been run."""
-        self._cleanups = []
-        self._unique_id_gen = itertools.count(1)
-        # Generators to ensure unique traceback ids.  Maps traceback label to
-        # iterators.
-        self._traceback_id_gens = {}
-        self.__details = None
-
-    def onException(self, exc_info, tb_label="traceback"):
-        """Called when an exception propagates from test code.
-
-        :seealso addOnException:
-        """
-        if exc_info[0] not in [unittest.SkipTest, unittest.case._UnexpectedSuccess]:
-            self._report_traceback(exc_info, tb_label=tb_label)
-        for handler in self.__exception_handlers:
-            handler(exc_info)
-
-    def _run_teardown(self, result):
-        """Run the tearDown function for this test."""
-        self.tearDown()
-
-    def _get_test_method(self):
-        method_name = getattr(self, "_testMethodName")
-        return getattr(self, method_name)
-
-    def _run_test_method(self, result):
-        """Run the test method for this test."""
-        return self._get_test_method()()
-
-    def useFixture(self, fixture):
-        """Use fixture in a test case.
-
-        The fixture will be setUp, and self.addCleanup(fixture.cleanUp) called.
-
-        Args:
-            fixture: The fixture to use.
-
-        Returns:
-            fixture: The fixture, after setting it up and scheduling a cleanup
-                for it.
-
-        Raises:
-            MultipleExceptions: When there is an error during fixture setUp
-            Exception: If an exception is raised during fixture setUp
-        """
-        try:
-            fixture.setUp()
-        except MultipleExceptions as e:
-            if fixtures is not None and e.args[-1][0] is fixtures.fixture.SetupError:
-                gather_details(e.args[-1][1].args[0], self.getDetails())
-            raise
-        except Exception:
-            exc_info = sys.exc_info()
-            try:
-                # fixture._details is not available if using the newer
-                # _setUp() API in Fixtures because it already cleaned up
-                # the fixture.  Ideally this whole try/except is not
-                # really needed any more, however, we keep this code to
-                # remain compatible with the older setUp().
-                if hasattr(fixture, "_details") and fixture._details is not None:
-                    gather_details(fixture.getDetails(), self.getDetails())
-            except Exception:
-                # Report the setUp exception, then raise the error during
-                # gather_details.
-                self._report_traceback(exc_info)
-                raise
-            else:
-                # Gather_details worked, so raise the exception setUp
-                # encountered.
-                def reraise(exc_class, exc_obj, exc_tb, _marker=object()):
-                    """Re-raise an exception received from sys.exc_info() or similar."""
-                    raise exc_obj.with_traceback(exc_tb)
-
-                reraise(*exc_info)
-        else:
-            self.addCleanup(fixture.cleanUp)
-            self.addCleanup(gather_details, fixture.getDetails(), self.getDetails())
-            return fixture
-
-    def _run_setup(self, result):
-        """Run the setUp function for this test."""
-        self.setUp()
-
-    def _add_reason(self, reason):
-        self.addDetail("reason", content.text_content(reason))
-
-    @staticmethod
-    def _report_error(self, result, err):
-        result.addError(self, details=self.getDetails())
-
-    @staticmethod
-    def _report_expected_failure(self, result, err):
-        result.addExpectedFailure(self, details=self.getDetails())
-
-    @staticmethod
-    def _report_failure(self, result, err):
-        result.addFailure(self, details=self.getDetails())
-
-    @staticmethod
-    def _report_skip(self, result, err):
-        if err.args:
-            reason = err.args[0]
-        else:
-            reason = "no reason given."
-        self._add_reason(reason)
-        result.addSkip(self, details=self.getDetails())
-
-    def _report_traceback(self, exc_info, tb_label="traceback"):
-        id_gen = self._traceback_id_gens.setdefault(tb_label, itertools.count(0))
-        while True:
-            tb_id = advance_iterator(id_gen)
-            if tb_id:
-                tb_label = "%s-%d" % (tb_label, tb_id)
-            if tb_label not in self.getDetails():
-                break
-        self.addDetail(
-            tb_label,
-            content.TracebackContent(
-                exc_info, self, capture_locals=getattr(self, "__testtools_tb_locals__", False)
-            ),
-        )
-
-    @staticmethod
-    def _report_unexpected_success(self, result, err):
-        result.addUnexpectedSuccess(self, details=self.getDetails())
-
-    def run(self, result=None):
-        self._reset()
-        try:
-            run_test = self.__RunTest(self, self.exception_handlers, last_resort=self._report_error)
-        except TypeError:
-            # Backwards compat: if we can't call the constructor
-            # with last_resort, try without that.
-            run_test = self.__RunTest(self, self.exception_handlers)
-        return run_test.run(result)
-
     def setUp(self):
         super().setUp()
         if os.environ.get("QISKIT_TEST_CAPTURE_STREAMS"):
@@ -424,42 +247,6 @@ def setUp(self):
             self.useFixture(fixtures.MonkeyPatch("sys.stderr", stderr))
             self.useFixture(fixtures.LoggerFixture(nuke_handlers=False, level=None))
 
-    def addDetail(self, name, content_object):
-        """Add a detail to be reported with this test's outcome.
-
-        :param name: The name to give this detail.
-        :param content_object: The content object for this detail. See
-            testtools.content for more detail.
-        """
-        if self.__details is None:
-            self.__details = {}
-        self.__details[name] = content_object
-
-    def addDetailUniqueName(self, name, content_object):
-        """Add a detail to the test, but ensure it's name is unique.
-
-        This method checks whether ``name`` conflicts with a detail that has
-        already been added to the test.  If it does, it will modify ``name`` to
-        avoid the conflict.
-
-        :param name: The name to give this detail.
-        :param content_object: The content object for this detail.  See
-            testtools.content for more detail.
- """ - existing_details = self.getDetails() - full_name = name - suffix = 1 - while full_name in existing_details: - full_name = "%s-%d" % (name, suffix) - suffix += 1 - self.addDetail(full_name, content_object) - - def getDetails(self): - """Get the details dict that will be reported with this test's outcome.""" - if self.__details is None: - self.__details = {} - return self.__details - def dicts_almost_equal(dict1, dict2, delta=None, places=None, default_value=0): """Test if two dictionaries with numeric values are almost equal. diff --git a/qiskit/test/runtest.py b/qiskit/test/runtest.py deleted file mode 100644 index 0747e6fa3210..000000000000 --- a/qiskit/test/runtest.py +++ /dev/null @@ -1,248 +0,0 @@ -# This code is part of Qiskit. -# -# (C) Copyright IBM 2017, 2018. -# -# This code is licensed under the Apache License, Version 2.0. You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. -# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. - -# Copyright (c) 2009-2010 testtools developers. - -# Forked from testing-cabal/testtools - -# pylint: disable=missing-return-type-doc,missing-type-doc,missing-type-doc,invalid-name -# pylint: disable=broad-except,attribute-defined-outside-init,unused-argument - -"""Individual test case execution.""" - -__all__ = [ - "MultipleExceptions", - "RunTest", -] - -import sys - -try: - from testtools.testresult import ExtendedToOriginalDecorator - - HAS_TESTTOOLS = True -except ImportError: - HAS_TESTTOOLS = False - -from qiskit.exceptions import MissingOptionalLibraryError - - -class MultipleExceptions(Exception): - """Represents many exceptions raised from some operation. - :ivar args: The sys.exc_info() tuples for each exception. - """ - - -class RunTest: - """An object to run a test. - RunTest objects are used to implement the internal logic involved in - running a test. TestCase.__init__ stores _RunTest as the class of RunTest - to execute. Passing the runTest= parameter to TestCase.__init__ allows a - different RunTest class to be used to execute the test. - Subclassing or replacing RunTest can be useful to add functionality to the - way that tests are run in a given project. - :ivar case: The test case that is to be run. - :ivar result: The result object a case is reporting to. - :ivar handlers: A list of (ExceptionClass, handler_function) for - exceptions that should be caught if raised from the user - code. Exceptions that are caught are checked against this list in - first to last order. There is a catch-all of 'Exception' at the end - of the list, so to add a new exception to the list, insert it at the - front (which ensures that it will be checked before any existing base - classes in the list. If you add multiple exceptions some of which are - subclasses of each other, add the most specific exceptions last (so - they come before their parent classes in the list). - :ivar exception_caught: An object returned when _run_user catches an - exception. - :ivar _exceptions: A list of caught exceptions, used to do the single - reporting of error/failure/skip etc. - """ - - def __init__(self, case, handlers=None, last_resort=None): - """Create a RunTest to run a case. - - Args: - case: A test case object. - handlers: Exception handlers for this RunTest. 
-                in self.handlers and can be modified later if needed.
-            last_resort: A handler of last resort: any exception which is
-                not handled by handlers will cause the last resort handler to be
-                called as last_resort(exc_info), and then the exception will be
-                raised - aborting the test run as this is inside the runner
-                machinery rather than the confined context of the test.
-        Raises:
-            MissingOptionalLibraryError: If test requirements aren't installed
-        """
-        if not HAS_TESTTOOLS:
-            raise MissingOptionalLibraryError(
-                libname="testtools",
-                name="test runner",
-                pip_install="pip install testtools",
-            )
-        self.case = case
-        self.handlers = handlers or []
-        self.exception_caught = object()
-        self._exceptions = []
-        self.last_resort = last_resort or (lambda case, result, exc: None)
-
-    def run(self, result=None):
-        """Run self.case reporting activity to result.
-        Args:
-            result: Optional testtools.TestResult to report activity to.
-
-        Returns:
-            The result object the test was run against.
-        """
-        if result is None:
-            actual_result = self.case.defaultTestResult()
-            actual_result.startTestRun()
-        else:
-            actual_result = result
-        try:
-            return self._run_one(actual_result)
-        finally:
-            if result is None:
-                actual_result.stopTestRun()
-
-    def _run_one(self, result):
-        """Run one test reporting to result.
-        :param result: A testtools.TestResult to report activity to.
-            This result object is decorated with an ExtendedToOriginalDecorator
-            to ensure that the latest TestResult API can be used with
-            confidence by client code.
-        :return: The result object the test was run against.
-        """
-        return self._run_prepared_result(ExtendedToOriginalDecorator(result))
-
-    def _run_prepared_result(self, result):
-        """Run one test reporting to result.
-        :param result: A testtools.TestResult to report activity to.
-        :return: The result object the test was run against.
-        """
-        result.startTest(self.case)
-        self.result = result
-        try:
-            self._exceptions = []
-            self.case.__testtools_tb_locals__ = getattr(result, "tb_locals", False)
-            self._run_core()
-            if self._exceptions:
-                # One or more caught exceptions, now trigger the test's
-                # reporting method for just one.
-                e = self._exceptions.pop()
-                for exc_class, handler in self.handlers:
-                    if isinstance(e, exc_class):
-                        handler(self.case, self.result, e)
-                        break
-                else:
-                    self.last_resort(self.case, self.result, e)
-                    raise e
-        finally:
-            result.stopTest(self.case)
-        return result
-
-    def _run_core(self):
-        """Run the user supplied test code."""
-        test_method = self.case._get_test_method()
-        skip_case = getattr(self.case, "__unittest_skip__", False)
-        if skip_case or getattr(test_method, "__unittest_skip__", False):
-            self.result.addSkip(
-                self.case,
-                reason=getattr(
-                    self.case if skip_case else test_method, "__unittest_skip_why__", None
-                ),
-            )
-            return
-
-        if self.exception_caught == self._run_user(self.case._run_setup, self.result):
-            # Don't run the test method if we failed getting here.
-            self._run_cleanups(self.result)
-            return
-        # Run everything from here on in. If any of the methods raise an
-        # exception we'll have failed.
-        failed = False
-        try:
-            if self.exception_caught == self._run_user(self.case._run_test_method, self.result):
-                failed = True
-        finally:
-            try:
-                if self.exception_caught == self._run_user(self.case._run_teardown, self.result):
-                    failed = True
-            finally:
-                try:
-                    if self.exception_caught == self._run_user(self._run_cleanups, self.result):
-                        failed = True
-                finally:
-                    if getattr(self.case, "force_failure", None):
-                        self._run_user(_raise_force_fail_error)
-                        failed = True
-        if not failed:
-            self.result.addSuccess(self.case, details=self.case.getDetails())
-
-    def _run_cleanups(self, result):
-        """Run the cleanups that have been added with addCleanup.
-        See the docstring for addCleanup for more information.
-        :return: None if all cleanups ran without error,
-            ``exception_caught`` if there was an error.
-        """
-        failing = False
-        while self.case._cleanups:
-            function, arguments, keywordArguments = self.case._cleanups.pop()
-            got_exception = self._run_user(function, *arguments, **keywordArguments)
-            if got_exception == self.exception_caught:
-                failing = True
-        if failing:
-            return self.exception_caught
-
-    def _run_user(self, fn, *args, **kwargs):
-        """Run a user supplied function.
-        Exceptions are processed by `_got_user_exception`.
-        :return: Either whatever 'fn' returns or ``exception_caught`` if
-            'fn' raised an exception.
-        """
-        try:
-            return fn(*args, **kwargs)
-        except Exception:
-            return self._got_user_exception(sys.exc_info())
-
-    def _got_user_exception(self, exc_info, tb_label="traceback"):
-        """Called when user code raises an exception.
-        If 'exc_info' is a `MultipleExceptions`, then we recurse into it
-        unpacking the errors that it's made up from.
-        :param exc_info: A sys.exc_info() tuple for the user error.
-        :param tb_label: An optional string label for the error. If
-            not specified, will default to 'traceback'.
-        :return: 'exception_caught' if we catch one of the exceptions that
-            have handlers in 'handlers', otherwise raise the error.
-        """
-        if exc_info[0] is MultipleExceptions:
-            for sub_exc_info in exc_info[1].args:
-                self._got_user_exception(sub_exc_info, tb_label)
-            return self.exception_caught
-        try:
-            e = exc_info[1]
-            self.case.onException(exc_info, tb_label=tb_label)
-        finally:
-            del exc_info
-        self._exceptions.append(e)
-        # Yes, this means we catch everything - we re-raise KeyBoardInterrupt
-        # etc later, after tearDown and cleanUp - since those may be cleaning up
-        # external processes.
-        return self.exception_caught
-
-
-def _raise_force_fail_error():
-    raise AssertionError("Forced Test Failure")
-
-
-# Signal that this is part of the testing framework, and that code from this
-# should not normally appear in tracebacks.
-__unittest = True
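
Note: the core pattern this patch adopts in base.py is small enough to sketch in isolation. The following is a minimal, self-contained illustration, not code from the patch: it selects testtools.TestCase as the base class when the optional test dependencies import cleanly, falls back to the stdlib otherwise, and pins the stdlib assertEqual/assertRaises either way. The ExampleTest class and the unittest.main() entry point are hypothetical additions for demonstration only.

# Standalone sketch of the conditional base-class pattern introduced above.
import unittest

try:
    import fixtures  # noqa: F401  # required alongside testtools for stream capture
    import testtools

    HAS_FIXTURES = True
except ImportError:
    HAS_FIXTURES = False

if HAS_FIXTURES:

    class BaseTestCase(testtools.TestCase):
        """testtools-backed base class (testtools.TestCase subclasses unittest.TestCase)."""

        # Prefer the stdlib assertions: modern unittest has diverged from the
        # testtools forks of these two methods.
        assertRaises = unittest.TestCase.assertRaises
        assertEqual = unittest.TestCase.assertEqual

else:

    class BaseTestCase(unittest.TestCase):
        """Plain unittest fallback when testtools/fixtures are not installed."""


class ExampleTest(BaseTestCase):  # hypothetical test, for illustration only
    def test_addition(self):
        self.assertEqual(1 + 1, 2)


if __name__ == "__main__":
    unittest.main()

Because testtools.TestCase is itself a unittest.TestCase subclass, tests written against BaseTestCase run unchanged under either branch; the testtools branch simply gains the fixtures-based stream capture that setUp enables when QISKIT_TEST_CAPTURE_STREAMS is set.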