diff --git a/doc/conf.py b/doc/conf.py index f69a53f0..732abe5e 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # pyperf documentation build configuration file, created by # sphinx-quickstart on Wed Jun 1 15:28:03 2016. @@ -42,9 +41,9 @@ master_doc = 'index' # General information about the project. -project = u'pyperf' -copyright = u'2016, Victor Stinner' -author = u'Victor Stinner' +project = 'pyperf' +copyright = '2016, Victor Stinner' +author = 'Victor Stinner' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -216,8 +215,8 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'pyperf.tex', u'pyperf Documentation', - u'Victor Stinner', 'manual'), + (master_doc, 'pyperf.tex', 'pyperf Documentation', + 'Victor Stinner', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of @@ -246,7 +245,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ - (master_doc, 'pyperf', u'pyperf Documentation', + (master_doc, 'pyperf', 'pyperf Documentation', [author], 1) ] @@ -260,7 +259,7 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'pyperf', u'pyperf Documentation', + (master_doc, 'pyperf', 'pyperf Documentation', author, 'pyperf', 'One line description of project.', 'Miscellaneous'), ] diff --git a/doc/examples/plot.py b/doc/examples/plot.py index 4acd6cb7..e000ca02 100755 --- a/doc/examples/plot.py +++ b/doc/examples/plot.py @@ -25,16 +25,14 @@ def plot_bench(args, bench): values = [] width = None for run_index, run in enumerate(bench.get_runs()): - index = 0 x = [] y = [] run_values = run.values if args.skip: run_values = run_values[args.skip:] - for value in run_values: + for index, value in enumerate(run_values): x.append(index) y.append(value) - index += 1 plt.plot(x, y, color='blue') values.extend(run_values) width = len(run_values) diff --git a/pyperf/__main__.py b/pyperf/__main__.py index aee19d18..f73a5c0e 100644 --- a/pyperf/__main__.py +++ b/pyperf/__main__.py @@ -330,14 +330,8 @@ def group_by_name(self): for suite in self.suites: benchmark = suite.get_benchmark(name) filename = format_filename(suite.filename) - if show_name: - if not show_filename: - title = name - else: - # name is displayed in the group title - title = filename - else: - title = None + # name is displayed in the group title + title = (filename if show_filename else name) if show_name else None benchmarks.append(GroupItem(benchmark, title, filename)) is_last = (index == (len(names) - 1)) diff --git a/pyperf/_bench.py b/pyperf/_bench.py index fae94bbf..a26b8844 100644 --- a/pyperf/_bench.py +++ b/pyperf/_bench.py @@ -86,7 +86,7 @@ def method(self): return method -class Run(object): +class Run: # Run is immutable, so it can be shared/exchanged between two benchmarks __slots__ = ('_warmups', '_values', '_metadata') @@ -131,10 +131,7 @@ def 
__init__(self, values, warmups=None, def _replace(self, values=None, warmups=True, metadata=None): if values is None: values = self._values - if warmups: - warmups = self._warmups - else: - warmups = None + warmups = self._warmups if warmups else None if metadata is None: # share metadata dict since Run metadata is immutable metadata = self._metadata @@ -286,10 +283,7 @@ def _extract_metadata(self, name): raise KeyError("run has no metadata %r" % name) info = get_metadata_info(name) - if info.unit: - metadata = dict(self._metadata, unit=info.unit) - else: - metadata = None + metadata = dict(self._metadata, unit=info.unit) if info.unit else None if not isinstance(value, NUMBER_TYPES): raise TypeError("metadata %r value is not an integer: got %s" @@ -319,7 +313,7 @@ def _update_metadata(self, metadata): return self._replace(metadata=metadata2) -class Benchmark(object): +class Benchmark: def __init__(self, runs): self._runs = [] # list of Run objects self._clear_runs_cache() @@ -627,7 +621,7 @@ def update_metadata(self, metadata): self._replace_runs(new_runs) -class BenchmarkSuite(object): +class BenchmarkSuite: def __init__(self, benchmarks, filename=None): if not benchmarks: raise ValueError("benchmarks must be a non-empty " @@ -724,10 +718,7 @@ def _json_load(cls, filename, data): @staticmethod def _load_open(filename): - if isinstance(filename, bytes): - suffix = b'.gz' - else: - suffix = u'.gz' + suffix = b'.gz' if isinstance(filename, bytes) else '.gz' if filename.endswith(suffix): # Use lazy import to limit imports on 'import pyperf' @@ -767,10 +758,7 @@ def loads(cls, string): @staticmethod def _dump_open(filename, replace): - if isinstance(filename, bytes): - suffix = b'.gz' - else: - suffix = u'.gz' + suffix = b'.gz' if isinstance(filename, bytes) else '.gz' if not replace and os.path.exists(filename): raise OSError(errno.EEXIST, "File already exists") diff --git a/pyperf/_cli.py b/pyperf/_cli.py index f8017018..1b87d5b6 100644 --- a/pyperf/_cli.py +++ 
b/pyperf/_cli.py @@ -20,10 +20,7 @@ def format_title(title, level=1, lines=None): empty_line(lines) lines.append(title) - if level == 1: - char = '=' - else: - char = '-' + char = '=' if level == 1 else '-' lines.append(char * len(title)) return lines @@ -77,10 +74,7 @@ def format_run(bench, run_index, run, common_metadata=None, raw=False, loops = run._get_calibration_loops() action = 'calibrate the number of loops: %s' % format_number(loops) lines.append("Run %s: %s" % (run_index, action)) - if raw: - name = 'raw calibrate' - else: - name = 'calibrate' + name = 'raw calibrate' if raw else 'calibrate' unit = bench.get_unit() format_value = bench.format_value for index, warmup in enumerate(run.warmups, 1): @@ -131,17 +125,11 @@ def format_run(bench, run_index, run, common_metadata=None, raw=False, lines.append("Run %s:" % run_index) if warmups and show_warmup: - if raw: - name = 'raw warmup' - else: - name = 'warmup' + name = 'raw warmup' if raw else 'warmup' for index, warmup in enumerate(warmups, 1): lines.append('- %s %s: %s' % (name, index, warmup)) - if raw: - name = 'raw value' - else: - name = 'value' + name = 'raw value' if raw else 'value' for index, value in enumerate(values, 1): lines.append('- %s %s: %s' % (name, index, value)) @@ -487,15 +475,9 @@ def format_result_value(bench): loops = None warmups = None for run in bench._runs: - if run._is_calibration_warmups(): - warmups = run._get_calibration_warmups() - elif run._is_recalibration_warmups(): + if run._is_calibration_warmups() or run._is_recalibration_warmups(): warmups = run._get_calibration_warmups() - elif run._is_recalibration_loops(): - loops = run._get_calibration_loops() - elif run._is_calibration_warmups(): - loops = run._get_calibration_loops() - elif run._is_calibration_loops(): + elif run._is_recalibration_loops() or run._is_calibration_warmups() or run._is_calibration_loops(): loops = run._get_calibration_loops() else: loops = None @@ -517,15 +499,9 @@ def format_result(bench): loops = 
None warmups = None for run in bench._runs: - if run._is_calibration_warmups(): - warmups = run._get_calibration_warmups() - elif run._is_recalibration_warmups(): + if run._is_calibration_warmups() or run._is_recalibration_warmups(): warmups = run._get_calibration_warmups() - elif run._is_recalibration_loops(): - loops = run._get_calibration_loops() - elif run._is_calibration_warmups(): - loops = run._get_calibration_loops() - elif run._is_calibration_loops(): + elif run._is_recalibration_loops() or run._is_calibration_warmups() or run._is_calibration_loops(): loops = run._get_calibration_loops() else: loops = None @@ -589,10 +565,7 @@ def multiline_output(args): @contextlib.contextmanager def catch_broken_pipe_error(file=None): - if file is None: - files = [sys.stdout, sys.stderr] - else: - files = [file] + files = [sys.stdout, sys.stderr] if file is None else [file] try: for file in files: @@ -604,7 +577,7 @@ def catch_broken_pipe_error(file=None): # was closed by the consumer for file in files: file.flush() - except IOError as exc: + except OSError as exc: if exc.errno != errno.EPIPE: raise # got a broken pipe error: ignore it @@ -613,7 +586,5 @@ def catch_broken_pipe_error(file=None): # close at exit which would log the error: # "Exception ignored in: ... BrokenPipeError: ..." 
for file in files: - try: + with contextlib.suppress(OSError): file.close() - except IOError: - pass diff --git a/pyperf/_collect_metadata.py b/pyperf/_collect_metadata.py index 33481502..77a16d3a 100644 --- a/pyperf/_collect_metadata.py +++ b/pyperf/_collect_metadata.py @@ -67,10 +67,7 @@ def collect_python_metadata(metadata): # 'bbd45126bc691f669c4ebdfbd74456cd274c6b92' # in 'Python 2.7.10 (bbd45126bc691f669c4ebdfbd74456cd274c6b92,' match = re.search(r'^[^(]+\(([a-f0-9]{6,}\+?),', sys.version) - if match: - revision = match.group(1) - else: - revision = None + revision = match.group(1) if match else None if revision: version = '%s revision %s' % (version, revision) metadata['python_version'] = version @@ -136,7 +133,7 @@ def read_proc(path): with open_text(path) as fp: for line in fp: yield line.rstrip() - except (OSError, IOError): + except OSError: return @@ -332,7 +329,7 @@ def get_cpu_temperature(path, cpu_temp): try: temp_label = read_first_line(template % 'label', error=True) - except IOError: + except OSError: break temp_input = read_first_line(template % 'input', error=True) diff --git a/pyperf/_compare.py b/pyperf/_compare.py index dc2afdc9..9b5669aa 100644 --- a/pyperf/_compare.py +++ b/pyperf/_compare.py @@ -53,7 +53,7 @@ def get_tags_for_result(result): return result.ref.benchmark.get_metadata().get("tags", []) -class CompareResult(object): +class CompareResult: def __init__(self, ref, changed, min_speed=None): # CompareData object self.ref = ref diff --git a/pyperf/_cpu_utils.py b/pyperf/_cpu_utils.py index f810df25..8a059016 100644 --- a/pyperf/_cpu_utils.py +++ b/pyperf/_cpu_utils.py @@ -3,6 +3,7 @@ import re from pyperf._utils import sysfs_path, proc_path, read_first_line, USE_PSUTIL +import contextlib try: if not USE_PSUTIL: @@ -27,10 +28,8 @@ def get_logical_cpu_count(): except ImportError: pass else: - try: + with contextlib.suppress(NotImplementedError): cpu_count = multiprocessing.cpu_count() - except NotImplementedError: - pass if cpu_count 
is not None and cpu_count < 1: return None diff --git a/pyperf/_linux_memory.py b/pyperf/_linux_memory.py index e8e8f55a..05c34360 100644 --- a/pyperf/_linux_memory.py +++ b/pyperf/_linux_memory.py @@ -13,8 +13,7 @@ # Need Linux 2.6.16 or newer. def read_smap_file(): total = 0 - fp = open(proc_path("self/smaps"), "rb") - with fp: + with open(proc_path("self/smaps"), "rb") as fp: for line in fp: # Include both Private_Clean and Private_Dirty sections. line = line.rstrip() @@ -54,7 +53,7 @@ def check_tracking_memory(): mem_thread = PeakMemoryUsageThread() try: mem_thread.get() - except IOError as exc: + except OSError as exc: path = proc_path("self/smaps") return "unable to read %s: %s" % (path, exc) diff --git a/pyperf/_manager.py b/pyperf/_manager.py index 3a1fcb86..1dcbd703 100644 --- a/pyperf/_manager.py +++ b/pyperf/_manager.py @@ -12,7 +12,7 @@ MAX_CALIBRATION = 5 -class Manager(object): +class Manager: """ Manager process which spawns worker processes to: - calibrate warmups diff --git a/pyperf/_metadata.py b/pyperf/_metadata.py index b7a7ba8e..e2ff96c5 100644 --- a/pyperf/_metadata.py +++ b/pyperf/_metadata.py @@ -137,7 +137,7 @@ def format_metadata(name, value): return info.formatter(value) -class Metadata(object): +class Metadata: def __init__(self, name, value): self._name = name self._value = value diff --git a/pyperf/_runner.py b/pyperf/_runner.py index cf4bbbe6..7066c6e4 100644 --- a/pyperf/_runner.py +++ b/pyperf/_runner.py @@ -98,25 +98,12 @@ def __init__(self, values=None, processes=None, import argparse has_jit = pyperf.python_has_jit() - if not values: - if has_jit: - # Since PyPy JIT has less processes: - # run more values per process - values = 10 - else: - values = 3 - if not processes: - if has_jit: - # Use less processes than non-JIT, because JIT requires more - # warmups and so each worker is slower - processes = 6 - else: - processes = 20 - - if metadata is not None: - self.metadata = metadata - else: - self.metadata = {} + # Since PyPy JIT 
has less processes: run more values per process + values = values or (10 if has_jit else 3) + # Use less processes than non-JIT, because JIT requires more warmups and so + # each worker is slower + processes = processes or (6 if has_jit else 20) + self.metadata = metadata or {} # Worker task identifier: count how many times _worker() was called, # see the --worker-task command line option @@ -144,10 +131,7 @@ def __init__(self, values=None, processes=None, self._program_args = (sys.argv[0],) self._show_name = show_name - if _argparser is not None: - parser = _argparser - else: - parser = argparse.ArgumentParser() + parser = _argparser if _argparser is not None else argparse.ArgumentParser() parser.description = 'Benchmark' parser.add_argument('--rigorous', action="store_true", help='Spend longer running tests ' @@ -587,10 +571,7 @@ async def main(): # asyncio.run gained the `loop_factory` arg only in Python 3.12. # we can go back to asyncio.run when Python 3.12 is the oldest # supported version for pyperf. 
- if loop_factory is None: - loop = asyncio.new_event_loop() - else: - loop = loop_factory() + loop = asyncio.new_event_loop() if loop_factory is None else loop_factory() asyncio.set_event_loop(loop) try: dt = loop.run_until_complete(main()) @@ -639,9 +620,8 @@ def _display_result(self, bench, checks=True): if args.pipe is not None: wpipe = WritePipe.from_subprocess(args.pipe) - with wpipe.open_text() as wfile: - with catch_broken_pipe_error(wfile): - bench.dump(wfile) + with wpipe.open_text() as wfile, catch_broken_pipe_error(wfile): + bench.dump(wfile) else: lines = format_benchmark(bench, checks=checks, diff --git a/pyperf/_system.py b/pyperf/_system.py index 15aa6c8b..9c80302a 100644 --- a/pyperf/_system.py +++ b/pyperf/_system.py @@ -74,7 +74,7 @@ def use_intel_pstate(): return (scaling_driver == 'intel_pstate') -class Operation(object): +class Operation: @staticmethod def available(): return True @@ -107,7 +107,7 @@ def check_permission_error(self, exc): def read_first_line(self, path): try: return read_first_line(path, error=True) - except IOError as exc: + except OSError as exc: self.check_permission_error(exc) return '' @@ -219,7 +219,7 @@ def write_msr(self, cpu, reg_num, value): os.write(fd, data) finally: os.close(fd) - except IOError as exc: + except OSError as exc: self.check_permission_error(exc) self.error("Failed to write %#x into MSR %#x using %s: %s" % (value, reg_num, path, exc)) @@ -233,10 +233,7 @@ def write_cpu(self, cpu, enabled): return False mask = (1 << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT) - if not enabled: - new_value = value | mask - else: - new_value = value & ~mask + new_value = value | mask if not enabled else value & ~mask if new_value == value: return True @@ -251,10 +248,7 @@ def write_cpu(self, cpu, enabled): def write(self, tune): enabled = (not tune) - if tune: - cpus = self.system.cpus - else: - cpus = range(self.system.logical_cpu_count) + cpus = self.system.cpus if tune else range(self.system.logical_cpu_count) for cpu in 
cpus: if not self.write_cpu(cpu, enabled): @@ -308,7 +302,7 @@ def write(self, tune): content = '0' if enable else '1' try: write_text(self.path, content) - except IOError as exc: + except OSError as exc: # don't log a permission error if the user is root: permission # error as root means that Turbo Boost is disabled in the BIOS if not is_root(): @@ -376,7 +370,7 @@ def write(self, tune): return try: write_text(self.path, new_governor) - except IOError as exc: + except OSError as exc: self.error("Failed to set the CPU scaling governor: %s" % exc) else: self.log_action("CPU scaling governor set to %s" % new_governor) @@ -495,7 +489,7 @@ def write(self, tune): try: write_text(self.path, new_value) - except IOError as exc: + except OSError as exc: self.check_permission_error(exc) self.error("Failed to write into %s: %s" % (self.path, exc)) else: @@ -552,7 +546,7 @@ def read_freq(self, filename): try: with open(filename, "rb") as fp: return fp.readline() - except IOError as exc: + except OSError as exc: self.check_permission_error(exc) return None @@ -579,7 +573,7 @@ def write_cpu(self, cpu, tune): filename = os.path.join(cpu_path, "scaling_min_freq") try: return self.write_freq(filename, freq) - except IOError as exc: + except OSError as exc: self.check_permission_error(exc) self.error("Unable to write scaling_max_freq of CPU %s: %s" % (cpu, exc)) @@ -768,7 +762,7 @@ def write_default(self, new_affinity): mask = format_cpus_as_mask(new_affinity) try: write_text(self.default_affinity_path, mask) - except IOError as exc: + except OSError as exc: self.check_permission_error(exc) self.error("Failed to write %r into %s: %s" % (mask, self.default_affinity_path, exc)) @@ -782,7 +776,7 @@ def write_irq(self, irq, cpus): try: write_text(path, mask) return True - except IOError as exc: + except OSError as exc: self.check_permission_error(exc) # EIO means that the IRQ doesn't support SMP affinity: # ignore the error @@ -926,10 +920,7 @@ def show(self): self.advice("Set max 
sample rate to %s" % self.BENCHMARK_RATE) def write(self, tune): - if tune: - new_rate = self.BENCHMARK_RATE - else: - new_rate = 100000 + new_rate = self.BENCHMARK_RATE if tune else 100000 max_sample_rate = self.read_max_sample_rate() if max_sample_rate == new_rate: @@ -937,7 +928,7 @@ def write(self, tune): try: write_text(self.path, str(new_rate)) - except IOError as exc: + except OSError as exc: self.check_permission_error(exc) self.error("Failed to write into %s: %s" % (self.path, exc)) else: diff --git a/pyperf/_timeit.py b/pyperf/_timeit.py index 64da6a51..a10c8a32 100644 --- a/pyperf/_timeit.py +++ b/pyperf/_timeit.py @@ -85,10 +85,7 @@ def __init__(self, stmt="pass", setup="pass", teardown="pass", else: raise ValueError("teardown is neither a string nor callable") - if PYPY: - template = PYPY_TEMPLATE - else: - template = TEMPLATE + template = PYPY_TEMPLATE if PYPY else TEMPLATE src = template.format(stmt=stmt, setup=setup, init=init, teardown=teardown) self.src = src # Save for traceback display diff --git a/pyperf/_utils.py b/pyperf/_utils.py index 87bb1a7f..051d9390 100644 --- a/pyperf/_utils.py +++ b/pyperf/_utils.py @@ -155,7 +155,7 @@ def read_first_line(path, error=False): with open_text(path) as fp: line = fp.readline() return line.rstrip() - except IOError: + except OSError: if error: raise else: @@ -193,9 +193,7 @@ def python_has_jit(): implementation_name = python_implementation() if implementation_name == 'pypy': return sys.pypy_translation_info["translation.jit"] - elif implementation_name in ['graalpython', 'graalpy']: - return True - elif hasattr(sys, "pyston_version_info") or "pyston_lite" in sys.modules: + elif implementation_name in ['graalpython', 'graalpy'] or (hasattr(sys, "pyston_version_info") or "pyston_lite" in sys.modules): return True return False @@ -282,7 +280,7 @@ def create_environ(inherit_environ, locale, copy_all): return env -class _Pipe(object): +class _Pipe: _OPEN_MODE = "r" def __init__(self, fd): @@ -333,10 +331,7 @@ 
def to_subprocess(self): @classmethod def from_subprocess(cls, arg): arg = int(arg) - if MS_WINDOWS: - fd = msvcrt.open_osfhandle(arg, os.O_WRONLY) - else: - fd = arg + fd = msvcrt.open_osfhandle(arg, os.O_WRONLY) if MS_WINDOWS else arg return cls(fd) def open_text(self): diff --git a/pyperf/_worker.py b/pyperf/_worker.py index 5a5e9896..cbb3fd9c 100644 --- a/pyperf/_worker.py +++ b/pyperf/_worker.py @@ -51,10 +51,7 @@ def _compute_values(self, values, nvalue, if self.loops <= 0: raise ValueError("loops must be >= 1") - if is_warmup: - value_name = 'Warmup' - else: - value_name = 'Value' + value_name = "Warmup" if is_warmup else "Value" task_func = self.task_func @@ -200,10 +197,7 @@ def calibrate_warmups(self): if self.loops < 1: raise ValueError("loops must be >= 1") - if self.args.recalibrate_warmups: - nwarmup = self.args.warmups - else: - nwarmup = 1 + nwarmup = self.args.warmups if self.args.recalibrate_warmups else 1 unit = self.metadata.get('unit') start = 0 @@ -242,10 +236,7 @@ def calibrate_loops(self): if not args.recalibrate_loops: self.loops = 1 - if args.warmups is not None: - nvalue = args.warmups - else: - nvalue = 1 + nvalue = args.warmups if args.warmups is not None else 1 nvalue += args.values self._compute_values(self.warmups, nvalue, is_warmup=True, diff --git a/pyperf/tests/__init__.py b/pyperf/tests/__init__.py index 2b3dc8f5..8ce1bdbe 100644 --- a/pyperf/tests/__init__.py +++ b/pyperf/tests/__init__.py @@ -55,7 +55,7 @@ def temporary_directory(): def benchmark_as_json(benchmark, compact=True): with temporary_file() as tmp_name: benchmark.dump(tmp_name, compact=compact) - with io.open(tmp_name, 'r', encoding='utf-8') as tmp: + with open(tmp_name, 'r', encoding='utf-8') as tmp: return tmp.read() diff --git a/pyperf/tests/replay.py b/pyperf/tests/replay.py index 8c5dd84d..fb9aea8f 100644 --- a/pyperf/tests/replay.py +++ b/pyperf/tests/replay.py @@ -14,7 +14,7 @@ def get_raw_values(filename, run_id): return (run, raw_values) -class 
Replay(object): +class Replay: def __init__(self, runner, filename): self.runner = runner self.args = runner.args diff --git a/pyperf/tests/test_metadata.py b/pyperf/tests/test_metadata.py index 4b3573bb..58a2481a 100644 --- a/pyperf/tests/test_metadata.py +++ b/pyperf/tests/test_metadata.py @@ -159,7 +159,7 @@ def mock_open(filename, *args, **kw): elif filename.startswith('/sys/devices/system/cpu/nohz_full'): data = nohz_full elif filename.startswith('/sys/devices/system/cpu/cpu2'): - raise IOError + raise OSError elif filename == '/sys/devices/system/cpu/cpuidle/current_driver': data = 'IDLE_DRV\n' else: @@ -194,7 +194,7 @@ def mock_open(filename, *args, **kw): elif filename == '/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor': data = 'GOVERNOR\n' elif filename.startswith('/sys/devices/system/cpu/cpu2'): - raise IOError + raise OSError else: raise ValueError("unexpect open: %r" % filename) return io.StringIO(data) diff --git a/pyperf/tests/test_perf_cli.py b/pyperf/tests/test_perf_cli.py index 2a19728b..b911f425 100644 --- a/pyperf/tests/test_perf_cli.py +++ b/pyperf/tests/test_perf_cli.py @@ -11,7 +11,7 @@ TELCO = os.path.join(TESTDIR, 'telco.json') -class BaseTestCase(object): +class BaseTestCase: maxDiff = 100 * 80 def create_bench(self, values, metadata=None): diff --git a/pyperf/tests/test_runner.py b/pyperf/tests/test_runner.py index afc66d8d..a0a7012d 100644 --- a/pyperf/tests/test_runner.py +++ b/pyperf/tests/test_runner.py @@ -249,9 +249,8 @@ def test_calibration_zero(self): def time_func(loops): return 0 - with self.assertRaises(SystemExit): - with tests.capture_stdout() as stdout: - runner.bench_time_func('bench', time_func) + with self.assertRaises(SystemExit), tests.capture_stdout() as stdout: + runner.bench_time_func('bench', time_func) self.assertIn('ERROR: failed to calibrate the number of loops', stdout.getvalue())