diff --git a/package/MDAnalysis/lib/NeighborSearch.py b/package/MDAnalysis/lib/NeighborSearch.py
index d09284773e..b1f5fa7185 100644
--- a/package/MDAnalysis/lib/NeighborSearch.py
+++ b/package/MDAnalysis/lib/NeighborSearch.py
@@ -44,8 +44,9 @@ class AtomNeighborSearch(object):
     :class:`~MDAnalysis.lib.distances.capped_distance`.
     """
 
-    def __init__(self, atom_group: AtomGroup,
-                 box: Optional[npt.ArrayLike] = None) -> None:
+    def __init__(
+        self, atom_group: AtomGroup, box: Optional[npt.ArrayLike] = None
+    ) -> None:
         """
 
         Parameters
@@ -62,10 +63,9 @@ def __init__(self, atom_group: AtomGroup,
         self._u = atom_group.universe
         self._box = box
 
-    def search(self, atoms: AtomGroup,
-               radius: float,
-               level: str = 'A'
-               ) -> Optional[Union[AtomGroup, ResidueGroup, SegmentGroup]]:
+    def search(
+        self, atoms: AtomGroup, radius: float, level: str = "A"
+    ) -> Optional[Union[AtomGroup, ResidueGroup, SegmentGroup]]:
         """
         Return all atoms/residues/segments that are within *radius* of the
         atoms in *atoms*.
@@ -102,17 +102,21 @@ def search(self, atoms: AtomGroup,
         except AttributeError:
             # For atom, take the position attribute
             position = atoms.position
-        pairs = capped_distance(position, self.atom_group.positions,
-                                radius, box=self._box, return_distances=False)
+        pairs = capped_distance(
+            position,
+            self.atom_group.positions,
+            radius,
+            box=self._box,
+            return_distances=False,
+        )
 
         if pairs.size > 0:
             unique_idx = unique_int_1d(np.asarray(pairs[:, 1], dtype=np.intp))
         return self._index2level(unique_idx, level)
 
-    def _index2level(self,
-                     indices: List[int],
-                     level: str
-                     ) -> Union[AtomGroup, ResidueGroup, SegmentGroup]:
+    def _index2level(
+        self, indices: List[int], level: str
+    ) -> Union[AtomGroup, ResidueGroup, SegmentGroup]:
         """Convert list of atom_indices in a AtomGroup to either the
         Atoms or segments/residues containing these atoms.
 
@@ -125,11 +129,13 @@ def _index2level(self,
           *radius* of *atoms*.
         """
         atomgroup = self.atom_group[indices]
-        if level == 'A':
+        if level == "A":
             return atomgroup
-        elif level == 'R':
+        elif level == "R":
             return atomgroup.residues
-        elif level == 'S':
+        elif level == "S":
             return atomgroup.segments
         else:
-            raise NotImplementedError('{0}: level not implemented'.format(level))
+            raise NotImplementedError(
+                "{0}: level not implemented".format(level)
+            )
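For context, a minimal usage sketch of the AtomNeighborSearch API reformatted above. It assumes the separate MDAnalysisTests package is installed for the PSF/DCD example datafiles; the selection, radius, and level are purely illustrative:

import MDAnalysis as mda
from MDAnalysis.lib.NeighborSearch import AtomNeighborSearch
from MDAnalysisTests.datafiles import PSF, DCD

u = mda.Universe(PSF, DCD)
# search the protein atoms for neighbours of the first residue
ns = AtomNeighborSearch(u.select_atoms("protein"), box=u.dimensions)
# level="A" returns atoms, "R" whole residues, "S" whole segments
nearby_residues = ns.search(u.residues[0].atoms, 5.0, level="R")
print(nearby_residues)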
diff --git a/package/MDAnalysis/lib/__init__.py b/package/MDAnalysis/lib/__init__.py
index a5bc6f8e87..cba6900d5b 100644
--- a/package/MDAnalysis/lib/__init__.py
+++ b/package/MDAnalysis/lib/__init__.py
@@ -27,8 +27,17 @@
 ================================================================
 """
 
-__all__ = ['log', 'transformations', 'util', 'mdamath', 'distances',
-           'NeighborSearch', 'formats', 'pkdtree', 'nsgrid']
+__all__ = [
+    "log",
+    "transformations",
+    "util",
+    "mdamath",
+    "distances",
+    "NeighborSearch",
+    "formats",
+    "pkdtree",
+    "nsgrid",
+]
 
 from . import log
 from . import transformations
@@ -39,6 +48,8 @@
 from . import formats
 from . import pkdtree
 from . import nsgrid
-from .picklable_file_io import (FileIOPicklable,
-                                BufferIOPicklable,
-                                TextIOPicklable)
+from .picklable_file_io import (
+    FileIOPicklable,
+    BufferIOPicklable,
+    TextIOPicklable,
+)
diff --git a/package/MDAnalysis/lib/_distopia.py b/package/MDAnalysis/lib/_distopia.py
index c2564bc2d2..297ce4a3b5 100644
--- a/package/MDAnalysis/lib/_distopia.py
+++ b/package/MDAnalysis/lib/_distopia.py
@@ -39,13 +39,15 @@
 
     # check for compatibility: currently needs to be >=0.2.0,<0.3.0 (issue
     # #4740) No distopia.__version__ available so we have to do some probing.
-    needed_funcs = ['calc_bonds_no_box_float', 'calc_bonds_ortho_float']
+    needed_funcs = ["calc_bonds_no_box_float", "calc_bonds_ortho_float"]
     has_distopia_020 = all([hasattr(distopia, func) for func in needed_funcs])
     if not has_distopia_020:
-        warnings.warn("Install 'distopia>=0.2.0,<0.3.0' to be used with this "
-                      "release of MDAnalysis. Your installed version of "
-                      "distopia >=0.3.0 will NOT be used.",
-                      category=RuntimeWarning)
+        warnings.warn(
+            "Install 'distopia>=0.2.0,<0.3.0' to be used with this "
+            "release of MDAnalysis. Your installed version of "
+            "distopia >=0.3.0 will NOT be used.",
+            category=RuntimeWarning,
+        )
         del distopia
         HAS_DISTOPIA = False
 
@@ -59,23 +61,22 @@
 def calc_bond_distance_ortho(
     coords1, coords2: np.ndarray, box: np.ndarray, results: np.ndarray
 ) -> None:
-    distopia.calc_bonds_ortho_float(
-        coords1, coords2, box[:3], results=results
-    )
+    distopia.calc_bonds_ortho_float(coords1, coords2, box[:3], results=results)
     # upcast is currently required, change for 3.0, see #3927
 
 
 def calc_bond_distance(
     coords1: np.ndarray, coords2: np.ndarray, results: np.ndarray
 ) -> None:
-    distopia.calc_bonds_no_box_float(
-        coords1, coords2, results=results
-    )
+    distopia.calc_bonds_no_box_float(coords1, coords2, results=results)
     # upcast is currently required, change for 3.0, see #3927
 
 
 def calc_bond_distance_triclinic(
-    coords1: np.ndarray, coords2: np.ndarray, box: np.ndarray, results: np.ndarray
+    coords1: np.ndarray,
+    coords2: np.ndarray,
+    box: np.ndarray,
+    results: np.ndarray,
 ) -> None:
     # redirect to serial backend
     warnings.warn(
diff --git a/package/MDAnalysis/lib/correlations.py b/package/MDAnalysis/lib/correlations.py
index 1ce0338c67..af14df99fd 100644
--- a/package/MDAnalysis/lib/correlations.py
+++ b/package/MDAnalysis/lib/correlations.py
@@ -135,12 +135,18 @@ def autocorrelation(list_of_sets, tau_max, window_step=1):
     """
 
     # check types
-    if (type(list_of_sets) != list and len(list_of_sets) != 0) or type(list_of_sets[0]) != set:
-        raise TypeError("list_of_sets must be a one-dimensional list of sets")  # pragma: no cover
+    if (type(list_of_sets) != list and len(list_of_sets) != 0) or type(
+        list_of_sets[0]
+    ) != set:
+        raise TypeError(
+            "list_of_sets must be a one-dimensional list of sets"
+        )  # pragma: no cover
 
     # Check dimensions of parameters
     if len(list_of_sets) < tau_max:
-        raise ValueError("tau_max cannot be greater than the length of list_of_sets") # pragma: no cover
+        raise ValueError(
+            "tau_max cannot be greater than the length of list_of_sets"
+        )  # pragma: no cover
 
     tau_timeseries = list(range(1, tau_max + 1))
     timeseries_data = [[] for _ in range(tau_max)]
@@ -157,7 +163,7 @@ def autocorrelation(list_of_sets, tau_max, window_step=1):
                 break
 
             # continuous: IDs that survive from t to t + tau and at every frame in between
-            Ntau = len(set.intersection(*list_of_sets[t:t + tau + 1]))
+            Ntau = len(set.intersection(*list_of_sets[t : t + tau + 1]))
             timeseries_data[tau - 1].append(Ntau / float(Nt))
 
     timeseries = [np.mean(x) for x in timeseries_data]
@@ -257,4 +263,3 @@ def correct_intermittency(list_of_sets, intermittency):
 
                 seen_frames_ago[element] = 0
     return list_of_sets
-
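The intersection call reformatted above is the core of the "continuous" survival count: an ID contributes to N_tau only if it is present at frame t, at frame t + tau, and at every frame in between. A toy sketch with made-up sets:

# three frames of IDs; values chosen only for illustration
list_of_sets = [{1, 2, 3}, {1, 3}, {1, 2, 3}]
t, tau = 0, 2
Nt = len(list_of_sets[t])                                     # 3 IDs present at frame t
Ntau = len(set.intersection(*list_of_sets[t : t + tau + 1]))  # only {1, 3} survive every frame
print(Ntau / float(Nt))                                       # ~0.667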
diff --git a/package/MDAnalysis/lib/distances.py b/package/MDAnalysis/lib/distances.py
index a6c30abacd..524b9f4063 100644
--- a/package/MDAnalysis/lib/distances.py
+++ b/package/MDAnalysis/lib/distances.py
@@ -123,6 +123,7 @@
 
 from typing import Union, Optional, Callable
 from typing import TYPE_CHECKING
+
 if TYPE_CHECKING:  # pragma: no cover
     from ..core.groups import AtomGroup
 from .util import check_coords, check_box
@@ -137,22 +138,31 @@
 # the cython parallel code (prange) in parallel.distances is
 # independent from the OpenMP code
 import importlib
+
 _distances = {}
-_distances['serial'] = importlib.import_module(".c_distances",
-                                         package="MDAnalysis.lib")
+_distances["serial"] = importlib.import_module(
+    ".c_distances", package="MDAnalysis.lib"
+)
 try:
-    _distances['openmp'] = importlib.import_module(".c_distances_openmp",
-                                          package="MDAnalysis.lib")
+    _distances["openmp"] = importlib.import_module(
+        ".c_distances_openmp", package="MDAnalysis.lib"
+    )
 except ImportError:
     pass
 
 if HAS_DISTOPIA:
-    _distances["distopia"] = importlib.import_module("._distopia",
-                             package="MDAnalysis.lib")
+    _distances["distopia"] = importlib.import_module(
+        "._distopia", package="MDAnalysis.lib"
+    )
 del importlib
 
-def _run(funcname: str, args: Optional[tuple] = None,
-         kwargs: Optional[dict] = None, backend: str = "serial") -> Callable:
+
+def _run(
+    funcname: str,
+    args: Optional[tuple] = None,
+    kwargs: Optional[dict] = None,
+    backend: str = "serial",
+) -> Callable:
     """Helper function to select a backend function `funcname`."""
     args = args if args is not None else tuple()
     kwargs = kwargs if kwargs is not None else dict()
@@ -160,38 +170,44 @@ def _run(funcname: str, args: Optional[tuple] = None,
     try:
         func = getattr(_distances[backend], funcname)
     except KeyError:
-        errmsg = (f"Function {funcname} not available with backend {backend} "
-                  f"try one of: {_distances.keys()}")
+        errmsg = (
+            f"Function {funcname} not available with backend {backend} "
+            f"try one of: {_distances.keys()}"
+        )
         raise ValueError(errmsg) from None
     return func(*args, **kwargs)
 
+
 # serial versions are always available (and are typically used within
 # the core and topology modules)
-from .c_distances import (_UINT64_MAX,
-                          calc_distance_array,
-                          calc_distance_array_ortho,
-                          calc_distance_array_triclinic,
-                          calc_self_distance_array,
-                          calc_self_distance_array_ortho,
-                          calc_self_distance_array_triclinic,
-                          coord_transform,
-                          calc_bond_distance,
-                          calc_bond_distance_ortho,
-                          calc_bond_distance_triclinic,
-                          calc_angle,
-                          calc_angle_ortho,
-                          calc_angle_triclinic,
-                          calc_dihedral,
-                          calc_dihedral_ortho,
-                          calc_dihedral_triclinic,
-                          ortho_pbc,
-                          triclinic_pbc)
+from .c_distances import (
+    _UINT64_MAX,
+    calc_distance_array,
+    calc_distance_array_ortho,
+    calc_distance_array_triclinic,
+    calc_self_distance_array,
+    calc_self_distance_array_ortho,
+    calc_self_distance_array_triclinic,
+    coord_transform,
+    calc_bond_distance,
+    calc_bond_distance_ortho,
+    calc_bond_distance_triclinic,
+    calc_angle,
+    calc_angle_ortho,
+    calc_angle_triclinic,
+    calc_dihedral,
+    calc_dihedral_ortho,
+    calc_dihedral_triclinic,
+    ortho_pbc,
+    triclinic_pbc,
+)
 
 from .c_distances_openmp import OPENMP_ENABLED as USED_OPENMP
 
 
-def _check_result_array(result: Optional[npt.NDArray],
-                        shape: tuple) -> npt.NDArray:
+def _check_result_array(
+    result: Optional[npt.NDArray], shape: tuple
+) -> npt.NDArray:
     """Check if the result array is ok to use.
 
     The `result` array must meet the following requirements:
@@ -221,24 +237,35 @@ def _check_result_array(result: Optional[npt.NDArray],
     if result is None:
         return np.zeros(shape, dtype=np.float64)
     if result.shape != shape:
-        raise ValueError("Result array has incorrect shape, should be {0}, got "
-                         "{1}.".format(shape, result.shape))
+        raise ValueError(
+            "Result array has incorrect shape, should be {0}, got "
+            "{1}.".format(shape, result.shape)
+        )
     if result.dtype != np.float64:
-        raise TypeError("Result array must be of type numpy.float64, got {}."
-                        "".format(result.dtype))
-# The following two lines would break a lot of tests. WHY?!
-#    if not coords.flags['C_CONTIGUOUS']:
-#        raise ValueError("{0} is not C-contiguous.".format(desc))
+        raise TypeError(
+            "Result array must be of type numpy.float64, got {}."
+            "".format(result.dtype)
+        )
+    # The following two lines would break a lot of tests. WHY?!
+    #    if not coords.flags['C_CONTIGUOUS']:
+    #        raise ValueError("{0} is not C-contiguous.".format(desc))
     return result
 
 
-@check_coords('reference', 'configuration', reduce_result_if_single=False,
-              check_lengths_match=False, allow_atomgroup=True)
-def distance_array(reference: Union[npt.NDArray, 'AtomGroup'],
-                   configuration: Union[npt.NDArray, 'AtomGroup'],
-                   box: Optional[npt.NDArray] = None,
-                   result: Optional[npt.NDArray] = None,
-                   backend: str = "serial") -> npt.NDArray:
+@check_coords(
+    "reference",
+    "configuration",
+    reduce_result_if_single=False,
+    check_lengths_match=False,
+    allow_atomgroup=True,
+)
+def distance_array(
+    reference: Union[npt.NDArray, "AtomGroup"],
+    configuration: Union[npt.NDArray, "AtomGroup"],
+    box: Optional[npt.NDArray] = None,
+    result: Optional[npt.NDArray] = None,
+    backend: str = "serial",
+) -> npt.NDArray:
     """Calculate all possible distances between a reference set and another
     configuration.
 
@@ -297,35 +324,45 @@ def distance_array(reference: Union[npt.NDArray, 'AtomGroup'],
 
     # check resulting array will not overflow UINT64_MAX
     if refnum * confnum > _UINT64_MAX:
-        raise ValueError(f"Size of resulting array {refnum * confnum} elements"
-                         " larger than size of maximum integer")
+        raise ValueError(
+            f"Size of resulting array {refnum * confnum} elements"
+            " larger than size of maximum integer"
+        )
 
     distances = _check_result_array(result, (refnum, confnum))
     if len(distances) == 0:
         return distances
     if box is not None:
         boxtype, box = check_box(box)
-        if boxtype == 'ortho':
-            _run("calc_distance_array_ortho",
-                 args=(reference, configuration, box, distances),
-                 backend=backend)
+        if boxtype == "ortho":
+            _run(
+                "calc_distance_array_ortho",
+                args=(reference, configuration, box, distances),
+                backend=backend,
+            )
         else:
-            _run("calc_distance_array_triclinic",
-                 args=(reference, configuration, box, distances),
-                 backend=backend)
+            _run(
+                "calc_distance_array_triclinic",
+                args=(reference, configuration, box, distances),
+                backend=backend,
+            )
     else:
-        _run("calc_distance_array",
-             args=(reference, configuration, distances),
-             backend=backend)
+        _run(
+            "calc_distance_array",
+            args=(reference, configuration, distances),
+            backend=backend,
+        )
 
     return distances
 
 
-@check_coords('reference', reduce_result_if_single=False, allow_atomgroup=True)
-def self_distance_array(reference: Union[npt.NDArray, 'AtomGroup'],
-                        box: Optional[npt.NDArray] = None,
-                        result: Optional[npt.NDArray] = None,
-                        backend: str = "serial") -> npt.NDArray:
+@check_coords("reference", reduce_result_if_single=False, allow_atomgroup=True)
+def self_distance_array(
+    reference: Union[npt.NDArray, "AtomGroup"],
+    box: Optional[npt.NDArray] = None,
+    result: Optional[npt.NDArray] = None,
+    backend: str = "serial",
+) -> npt.NDArray:
     """Calculate all possible distances within a configuration `reference`.
 
     If the optional argument `box` is supplied, the minimum image convention is
@@ -380,39 +417,55 @@ def self_distance_array(reference: Union[npt.NDArray, 'AtomGroup'],
     distnum = refnum * (refnum - 1) // 2
     # check resulting array will not overflow UINT64_MAX
     if distnum > _UINT64_MAX:
-        raise ValueError(f"Size of resulting array {distnum} elements larger"
-                         " than size of maximum integer")
+        raise ValueError(
+            f"Size of resulting array {distnum} elements larger"
+            " than size of maximum integer"
+        )
 
     distances = _check_result_array(result, (distnum,))
     if len(distances) == 0:
         return distances
     if box is not None:
         boxtype, box = check_box(box)
-        if boxtype == 'ortho':
-            _run("calc_self_distance_array_ortho",
-                 args=(reference, box, distances),
-                 backend=backend)
+        if boxtype == "ortho":
+            _run(
+                "calc_self_distance_array_ortho",
+                args=(reference, box, distances),
+                backend=backend,
+            )
         else:
-            _run("calc_self_distance_array_triclinic",
-                 args=(reference, box, distances),
-                 backend=backend)
+            _run(
+                "calc_self_distance_array_triclinic",
+                args=(reference, box, distances),
+                backend=backend,
+            )
     else:
-        _run("calc_self_distance_array",
-             args=(reference, distances),
-             backend=backend)
+        _run(
+            "calc_self_distance_array",
+            args=(reference, distances),
+            backend=backend,
+        )
 
     return distances
 
 
-@check_coords('reference', 'configuration', enforce_copy=False,
-              reduce_result_if_single=False, check_lengths_match=False,
-              allow_atomgroup=True)
-def capped_distance(reference: Union[npt.NDArray, 'AtomGroup'],
-                    configuration: Union[npt.NDArray, 'AtomGroup'],
-                    max_cutoff: float, min_cutoff: Optional[float] = None,
-                    box: Optional[npt.NDArray] = None,
-                    method: Optional[str] = None,
-                    return_distances: Optional[bool] = True):
+@check_coords(
+    "reference",
+    "configuration",
+    enforce_copy=False,
+    reduce_result_if_single=False,
+    check_lengths_match=False,
+    allow_atomgroup=True,
+)
+def capped_distance(
+    reference: Union[npt.NDArray, "AtomGroup"],
+    configuration: Union[npt.NDArray, "AtomGroup"],
+    max_cutoff: float,
+    min_cutoff: Optional[float] = None,
+    box: Optional[npt.NDArray] = None,
+    method: Optional[str] = None,
+    return_distances: Optional[bool] = True,
+):
     """Calculates pairs of indices corresponding to entries in the `reference`
     and `configuration` arrays which are separated by a distance lying within
     the specified cutoff(s). Optionally, these distances can be returned as
@@ -496,27 +549,43 @@ def capped_distance(reference: Union[npt.NDArray, 'AtomGroup'],
     if box is not None:
         box = np.asarray(box, dtype=np.float32)
         if box.shape[0] != 6:
-            raise ValueError("Box Argument is of incompatible type. The "
-                             "dimension should be either None or of the form "
-                             "[lx, ly, lz, alpha, beta, gamma]")
+            raise ValueError(
+                "Box Argument is of incompatible type. The "
+                "dimension should be either None or of the form "
+                "[lx, ly, lz, alpha, beta, gamma]"
+            )
 
     # The check_coords decorator made sure that reference and configuration
     # are arrays of positions. Mypy does not know about that so we have to
     # tell it.
     reference_positions: npt.NDArray = reference  # type: ignore
     configuration_positions: npt.NDArray = configuration  # type: ignore
-    function = _determine_method(reference_positions, configuration_positions,
-                                 max_cutoff, min_cutoff=min_cutoff,
-                                 box=box, method=method)
-    return function(reference, configuration,
-                    max_cutoff, min_cutoff=min_cutoff,
-                    box=box, return_distances=return_distances)
-
-
-def _determine_method(reference: npt.NDArray, configuration: npt.NDArray,
-                      max_cutoff: float, min_cutoff: Optional[float] = None,
-                      box: Optional[npt.NDArray] = None,
-                      method: Optional[str] = None) -> Callable:
+    function = _determine_method(
+        reference_positions,
+        configuration_positions,
+        max_cutoff,
+        min_cutoff=min_cutoff,
+        box=box,
+        method=method,
+    )
+    return function(
+        reference,
+        configuration,
+        max_cutoff,
+        min_cutoff=min_cutoff,
+        box=box,
+        return_distances=return_distances,
+    )
+
+
+def _determine_method(
+    reference: npt.NDArray,
+    configuration: npt.NDArray,
+    max_cutoff: float,
+    min_cutoff: Optional[float] = None,
+    box: Optional[npt.NDArray] = None,
+    method: Optional[str] = None,
+) -> Callable:
     """Guesses the fastest method for capped distance calculations based on the
     size of the coordinate sets and the relative size of the target volume.
 
@@ -554,46 +623,57 @@ def _determine_method(reference: npt.NDArray, configuration: npt.NDArray,
     .. versionchanged:: 1.1.0
        enabled nsgrid again
     """
-    methods = {'bruteforce': _bruteforce_capped,
-               'pkdtree': _pkdtree_capped,
-               'nsgrid': _nsgrid_capped,
+    methods = {
+        "bruteforce": _bruteforce_capped,
+        "pkdtree": _pkdtree_capped,
+        "nsgrid": _nsgrid_capped,
     }
 
     if method is not None:
         return methods[method.lower()]
 
     if len(reference) < 10 or len(configuration) < 10:
-        return methods['bruteforce']
+        return methods["bruteforce"]
     elif len(reference) * len(configuration) >= 1e8:
         # CAUTION : for large datasets, shouldnt go into 'bruteforce'
         # in any case. Arbitrary number, but can be characterized
-        return methods['nsgrid']
+        return methods["nsgrid"]
     else:
         if box is None:
-            min_dim = np.array([reference.min(axis=0),
-                                configuration.min(axis=0)])
-            max_dim = np.array([reference.max(axis=0),
-                                configuration.max(axis=0)])
+            min_dim = np.array(
+                [reference.min(axis=0), configuration.min(axis=0)]
+            )
+            max_dim = np.array(
+                [reference.max(axis=0), configuration.max(axis=0)]
+            )
             size = max_dim.max(axis=0) - min_dim.min(axis=0)
         elif np.all(box[3:] == 90.0):
             size = box[:3]
         else:
             tribox = triclinic_vectors(box)
             size = tribox.max(axis=0) - tribox.min(axis=0)
-        if np.any(max_cutoff > 0.3*size):
-            return methods['bruteforce']
+        if np.any(max_cutoff > 0.3 * size):
+            return methods["bruteforce"]
         else:
-            return methods['nsgrid']
-
-
-@check_coords('reference', 'configuration', enforce_copy=False,
-              reduce_result_if_single=False, check_lengths_match=False,
-              allow_atomgroup=True)
-def _bruteforce_capped(reference: Union[npt.NDArray, 'AtomGroup'],
-                       configuration: Union[npt.NDArray, 'AtomGroup'],
-                       max_cutoff: float, min_cutoff: Optional[float] = None,
-                       box: Optional[npt.NDArray] = None,
-                       return_distances: Optional[bool] = True):
+            return methods["nsgrid"]
+
+
+@check_coords(
+    "reference",
+    "configuration",
+    enforce_copy=False,
+    reduce_result_if_single=False,
+    check_lengths_match=False,
+    allow_atomgroup=True,
+)
+def _bruteforce_capped(
+    reference: Union[npt.NDArray, "AtomGroup"],
+    configuration: Union[npt.NDArray, "AtomGroup"],
+    max_cutoff: float,
+    min_cutoff: Optional[float] = None,
+    box: Optional[npt.NDArray] = None,
+    return_distances: Optional[bool] = True,
+):
     """Capped distance evaluations using a brute force method.
 
     Computes and returns an array containing pairs of indices corresponding to
@@ -658,8 +738,9 @@ def _bruteforce_capped(reference: Union[npt.NDArray, 'AtomGroup'],
     if len(reference) > 0 and len(configuration) > 0:
         _distances = distance_array(reference, configuration, box=box)
         if min_cutoff is not None:
-            mask = np.where((_distances <= max_cutoff) & \
-                            (_distances > min_cutoff))
+            mask = np.where(
+                (_distances <= max_cutoff) & (_distances > min_cutoff)
+            )
         else:
             mask = np.where((_distances <= max_cutoff))
         if mask[0].size > 0:
@@ -673,14 +754,22 @@ def _bruteforce_capped(reference: Union[npt.NDArray, 'AtomGroup'],
         return pairs
 
 
-@check_coords('reference', 'configuration', enforce_copy=False,
-              reduce_result_if_single=False, check_lengths_match=False,
-              allow_atomgroup=True)
-def _pkdtree_capped(reference: Union[npt.NDArray, 'AtomGroup'],
-                    configuration: Union[npt.NDArray, 'AtomGroup'],
-                    max_cutoff: float, min_cutoff: Optional[float] = None,
-                    box: Optional[npt.NDArray] = None,
-                    return_distances: Optional[bool] = True):
+@check_coords(
+    "reference",
+    "configuration",
+    enforce_copy=False,
+    reduce_result_if_single=False,
+    check_lengths_match=False,
+    allow_atomgroup=True,
+)
+def _pkdtree_capped(
+    reference: Union[npt.NDArray, "AtomGroup"],
+    configuration: Union[npt.NDArray, "AtomGroup"],
+    max_cutoff: float,
+    min_cutoff: Optional[float] = None,
+    box: Optional[npt.NDArray] = None,
+    return_distances: Optional[bool] = True,
+):
     """Capped distance evaluations using a KDtree method.
 
     Computes and returns an array containing pairs of indices corresponding to
@@ -738,7 +827,9 @@ def _pkdtree_capped(reference: Union[npt.NDArray, 'AtomGroup'],
        Can now accept an :class:`~MDAnalysis.core.groups.AtomGroup` as an
        argument in any position and checks inputs using type hinting.
     """
-    from .pkdtree import PeriodicKDTree  # must be here to avoid circular import
+    from .pkdtree import (
+        PeriodicKDTree,
+    )  # must be here to avoid circular import
 
     # Default return values (will be overwritten only if pairs are found):
     pairs = np.empty((0, 2), dtype=np.intp)
@@ -751,10 +842,11 @@ def _pkdtree_capped(reference: Union[npt.NDArray, 'AtomGroup'],
         _pairs = kdtree.search_tree(reference, max_cutoff)
         if _pairs.size > 0:
             pairs = _pairs
-            if (return_distances or (min_cutoff is not None)):
+            if return_distances or (min_cutoff is not None):
                 refA, refB = pairs[:, 0], pairs[:, 1]
-                distances = calc_bonds(reference[refA], configuration[refB],
-                                       box=box)
+                distances = calc_bonds(
+                    reference[refA], configuration[refB], box=box
+                )
                 if min_cutoff is not None:
                     mask = np.where(distances > min_cutoff)
                     pairs, distances = pairs[mask], distances[mask]
@@ -765,14 +857,22 @@ def _pkdtree_capped(reference: Union[npt.NDArray, 'AtomGroup'],
         return pairs
 
 
-@check_coords('reference', 'configuration', enforce_copy=False,
-              reduce_result_if_single=False, check_lengths_match=False,
-              allow_atomgroup=True)
-def _nsgrid_capped(reference: Union[npt.NDArray, 'AtomGroup'],
-                   configuration: Union[npt.NDArray, 'AtomGroup'],
-                   max_cutoff: float, min_cutoff: Optional[float] = None,
-                   box: Optional[npt.NDArray] = None,
-                   return_distances: Optional[bool] = True):
+@check_coords(
+    "reference",
+    "configuration",
+    enforce_copy=False,
+    reduce_result_if_single=False,
+    check_lengths_match=False,
+    allow_atomgroup=True,
+)
+def _nsgrid_capped(
+    reference: Union[npt.NDArray, "AtomGroup"],
+    configuration: Union[npt.NDArray, "AtomGroup"],
+    max_cutoff: float,
+    min_cutoff: Optional[float] = None,
+    box: Optional[npt.NDArray] = None,
+    return_distances: Optional[bool] = True,
+):
     """Capped distance evaluations using a grid-based search method.
 
     Computes and returns an array containing pairs of indices corresponding to
@@ -845,17 +945,19 @@ def _nsgrid_capped(reference: Union[npt.NDArray, 'AtomGroup'],
             lmax = all_coords.max(axis=0)
             lmin = all_coords.min(axis=0)
             # Using maximum dimension as the box size
-            boxsize = (lmax-lmin).max()
+            boxsize = (lmax - lmin).max()
             # to avoid failures for very close particles but with
             # larger cutoff
             boxsize = np.maximum(boxsize, 2 * max_cutoff)
-            pseudobox[:3] = boxsize + 2.2*max_cutoff
-            pseudobox[3:] = 90.
+            pseudobox[:3] = boxsize + 2.2 * max_cutoff
+            pseudobox[3:] = 90.0
             shiftref, shiftconf = reference.copy(), configuration.copy()
             # Extra padding near the origin
-            shiftref -= lmin - 0.1*max_cutoff
-            shiftconf -= lmin - 0.1*max_cutoff
-            gridsearch = FastNS(max_cutoff, shiftconf, box=pseudobox, pbc=False)
+            shiftref -= lmin - 0.1 * max_cutoff
+            shiftconf -= lmin - 0.1 * max_cutoff
+            gridsearch = FastNS(
+                max_cutoff, shiftconf, box=pseudobox, pbc=False
+            )
             results = gridsearch.search(shiftref)
         else:
             gridsearch = FastNS(max_cutoff, configuration, box=box)
@@ -874,15 +976,21 @@ def _nsgrid_capped(reference: Union[npt.NDArray, 'AtomGroup'],
         return pairs
 
 
-@check_coords('reference', enforce_copy=False,
-              reduce_result_if_single=False, check_lengths_match=False,
-              allow_atomgroup=True)
-def self_capped_distance(reference: Union[npt.NDArray, 'AtomGroup'],
-                         max_cutoff: float,
-                         min_cutoff: Optional[float] = None,
-                         box: Optional[npt.NDArray] = None,
-                         method: Optional[str] = None,
-                         return_distances: Optional[bool] = True):
+@check_coords(
+    "reference",
+    enforce_copy=False,
+    reduce_result_if_single=False,
+    check_lengths_match=False,
+    allow_atomgroup=True,
+)
+def self_capped_distance(
+    reference: Union[npt.NDArray, "AtomGroup"],
+    max_cutoff: float,
+    min_cutoff: Optional[float] = None,
+    box: Optional[npt.NDArray] = None,
+    method: Optional[str] = None,
+    return_distances: Optional[bool] = True,
+):
     """Calculates pairs of indices corresponding to entries in the `reference`
     array which are separated by a distance lying within the specified
     cutoff(s). Optionally, these distances can be returned as well.
@@ -968,24 +1076,38 @@ def self_capped_distance(reference: Union[npt.NDArray, 'AtomGroup'],
     if box is not None:
         box = np.asarray(box, dtype=np.float32)
         if box.shape[0] != 6:
-            raise ValueError("Box Argument is of incompatible type. The "
-                             "dimension should be either None or of the form "
-                             "[lx, ly, lz, alpha, beta, gamma]")
+            raise ValueError(
+                "Box Argument is of incompatible type. The "
+                "dimension should be either None or of the form "
+                "[lx, ly, lz, alpha, beta, gamma]"
+            )
     # The check_coords decorator made sure that reference is an
     # array of positions. Mypy does not know about that so we have to
     # tell it.
     reference_positions: npt.NDArray = reference  # type: ignore
-    function = _determine_method_self(reference_positions,
-                                      max_cutoff, min_cutoff=min_cutoff,
-                                      box=box, method=method)
-    return function(reference,  max_cutoff, min_cutoff=min_cutoff, box=box,
-                    return_distances=return_distances)
-
-
-def _determine_method_self(reference: npt.NDArray, max_cutoff: float,
-                           min_cutoff: Optional[float] = None,
-                           box: Optional[npt.NDArray] = None,
-                           method: Optional[str] = None):
+    function = _determine_method_self(
+        reference_positions,
+        max_cutoff,
+        min_cutoff=min_cutoff,
+        box=box,
+        method=method,
+    )
+    return function(
+        reference,
+        max_cutoff,
+        min_cutoff=min_cutoff,
+        box=box,
+        return_distances=return_distances,
+    )
+
+
+def _determine_method_self(
+    reference: npt.NDArray,
+    max_cutoff: float,
+    min_cutoff: Optional[float] = None,
+    box: Optional[npt.NDArray] = None,
+    method: Optional[str] = None,
+):
     """Guesses the fastest method for capped distance calculations based on the
     size of the `reference` coordinate set and the relative size of the target
     volume.
@@ -1020,16 +1142,17 @@ def _determine_method_self(reference: npt.NDArray, max_cutoff: float,
     .. versionchanged:: 1.0.2
        enabled nsgrid again
     """
-    methods = {'bruteforce': _bruteforce_capped_self,
-               'pkdtree': _pkdtree_capped_self,
-               'nsgrid': _nsgrid_capped_self,
+    methods = {
+        "bruteforce": _bruteforce_capped_self,
+        "pkdtree": _pkdtree_capped_self,
+        "nsgrid": _nsgrid_capped_self,
     }
 
     if method is not None:
         return methods[method.lower()]
 
     if len(reference) < 100:
-        return methods['bruteforce']
+        return methods["bruteforce"]
 
     if box is None:
         min_dim = np.array([reference.min(axis=0)])
@@ -1041,19 +1164,25 @@ def _determine_method_self(reference: npt.NDArray, max_cutoff: float,
         tribox = triclinic_vectors(box)
         size = tribox.max(axis=0) - tribox.min(axis=0)
 
-    if max_cutoff < 0.03*size.min():
-        return methods['pkdtree']
+    if max_cutoff < 0.03 * size.min():
+        return methods["pkdtree"]
     else:
-        return methods['nsgrid']
-
-
-@check_coords('reference', enforce_copy=False, reduce_result_if_single=False,
-              allow_atomgroup=True)
-def _bruteforce_capped_self(reference: Union[npt.NDArray, 'AtomGroup'],
-                            max_cutoff: float,
-                            min_cutoff: Optional[float] = None,
-                            box: Optional[npt.NDArray] = None,
-                            return_distances: Optional[bool] = True):
+        return methods["nsgrid"]
+
+
+@check_coords(
+    "reference",
+    enforce_copy=False,
+    reduce_result_if_single=False,
+    allow_atomgroup=True,
+)
+def _bruteforce_capped_self(
+    reference: Union[npt.NDArray, "AtomGroup"],
+    max_cutoff: float,
+    min_cutoff: Optional[float] = None,
+    box: Optional[npt.NDArray] = None,
+    return_distances: Optional[bool] = True,
+):
     """Capped distance evaluations using a brute force method.
 
     Computes and returns an array containing pairs of indices corresponding to
@@ -1130,13 +1259,19 @@ def _bruteforce_capped_self(reference: Union[npt.NDArray, 'AtomGroup'],
     return pairs
 
 
-@check_coords('reference', enforce_copy=False, reduce_result_if_single=False,
-              allow_atomgroup=True)
-def _pkdtree_capped_self(reference: Union[npt.NDArray, 'AtomGroup'],
-                         max_cutoff: float,
-                         min_cutoff: Optional[float] = None,
-                         box: Optional[npt.NDArray] = None,
-                         return_distances: Optional[bool] = True):
+@check_coords(
+    "reference",
+    enforce_copy=False,
+    reduce_result_if_single=False,
+    allow_atomgroup=True,
+)
+def _pkdtree_capped_self(
+    reference: Union[npt.NDArray, "AtomGroup"],
+    max_cutoff: float,
+    min_cutoff: Optional[float] = None,
+    box: Optional[npt.NDArray] = None,
+    return_distances: Optional[bool] = True,
+):
     """Capped distance evaluations using a KDtree method.
 
     Computes and returns an array containing pairs of indices corresponding to
@@ -1188,7 +1323,9 @@ def _pkdtree_capped_self(reference: Union[npt.NDArray, 'AtomGroup'],
        Can now accept an :class:`~MDAnalysis.core.groups.AtomGroup` as an
        argument in any position and checks inputs using type hinting.
     """
-    from .pkdtree import PeriodicKDTree  # must be here to avoid circular import
+    from .pkdtree import (
+        PeriodicKDTree,
+    )  # must be here to avoid circular import
 
     # Default return values (will be overwritten only if pairs are found):
     pairs = np.empty((0, 2), dtype=np.intp)
@@ -1203,9 +1340,11 @@ def _pkdtree_capped_self(reference: Union[npt.NDArray, 'AtomGroup'],
         _pairs = kdtree.search_pairs(max_cutoff)
         if _pairs.size > 0:
             pairs = _pairs
-            if (return_distances or (min_cutoff is not None)):
+            if return_distances or (min_cutoff is not None):
                 refA, refB = pairs[:, 0], pairs[:, 1]
-                distances = calc_bonds(reference[refA], reference[refB], box=box)
+                distances = calc_bonds(
+                    reference[refA], reference[refB], box=box
+                )
                 if min_cutoff is not None:
                     idx = distances > min_cutoff
                     pairs, distances = pairs[idx], distances[idx]
@@ -1214,13 +1353,19 @@ def _pkdtree_capped_self(reference: Union[npt.NDArray, 'AtomGroup'],
     return pairs
 
 
-@check_coords('reference', enforce_copy=False, reduce_result_if_single=False,
-              allow_atomgroup=True)
-def _nsgrid_capped_self(reference: Union[npt.NDArray, 'AtomGroup'],
-                        max_cutoff: float,
-                        min_cutoff: Optional[float] = None,
-                        box: Optional[npt.NDArray] = None,
-                        return_distances: Optional[bool] = True):
+@check_coords(
+    "reference",
+    enforce_copy=False,
+    reduce_result_if_single=False,
+    allow_atomgroup=True,
+)
+def _nsgrid_capped_self(
+    reference: Union[npt.NDArray, "AtomGroup"],
+    max_cutoff: float,
+    min_cutoff: Optional[float] = None,
+    box: Optional[npt.NDArray] = None,
+    return_distances: Optional[bool] = True,
+):
     """Capped distance evaluations using a grid-based search method.
 
     Computes and returns an array containing pairs of indices corresponding to
@@ -1286,19 +1431,19 @@ def _nsgrid_capped_self(reference: Union[npt.NDArray, 'AtomGroup'],
             lmax = reference.max(axis=0)
             lmin = reference.min(axis=0)
             # Using maximum dimension as the box size
-            boxsize = (lmax-lmin).max()
+            boxsize = (lmax - lmin).max()
             # to avoid failures of very close particles
             # but with larger cutoff
-            if boxsize < 2*max_cutoff:
+            if boxsize < 2 * max_cutoff:
                 # just enough box size so that NSGrid doesnot fails
-                sizefactor = 2.2*max_cutoff/boxsize
+                sizefactor = 2.2 * max_cutoff / boxsize
             else:
                 sizefactor = 1.2
-            pseudobox[:3] = sizefactor*boxsize
-            pseudobox[3:] = 90.
+            pseudobox[:3] = sizefactor * boxsize
+            pseudobox[3:] = 90.0
             shiftref = reference.copy()
             # Extra padding near the origin
-            shiftref -= lmin - 0.1*boxsize
+            shiftref -= lmin - 0.1 * boxsize
             gridsearch = FastNS(max_cutoff, shiftref, box=pseudobox, pbc=False)
             results = gridsearch.self_search()
         else:
@@ -1317,7 +1462,7 @@ def _nsgrid_capped_self(reference: Union[npt.NDArray, 'AtomGroup'],
     return pairs
 
 
-@check_coords('coords')
+@check_coords("coords")
 def transform_RtoS(coords, box, backend="serial"):
     """Transform an array of coordinates from real space to S space (a.k.a.
     lambda space)
@@ -1354,20 +1499,20 @@ def transform_RtoS(coords, box, backend="serial"):
     if len(coords) == 0:
         return coords
     boxtype, box = check_box(box)
-    if boxtype == 'ortho':
+    if boxtype == "ortho":
         box = np.diag(box)
     box = box.astype(np.float64)
 
     # Create inverse matrix of box
     # need order C here
-    inv = np.array(np.linalg.inv(box), order='C')
+    inv = np.array(np.linalg.inv(box), order="C")
 
     _run("coord_transform", args=(coords, inv), backend=backend)
 
     return coords
 
 
-@check_coords('coords')
+@check_coords("coords")
 def transform_StoR(coords, box, backend="serial"):
     """Transform an array of coordinates from S space into real space.
 
@@ -1403,7 +1548,7 @@ def transform_StoR(coords, box, backend="serial"):
     if len(coords) == 0:
         return coords
     boxtype, box = check_box(box)
-    if boxtype == 'ortho':
+    if boxtype == "ortho":
         box = np.diag(box)
     box = box.astype(np.float64)
 
@@ -1411,12 +1556,14 @@ def transform_StoR(coords, box, backend="serial"):
     return coords
 
 
-@check_coords('coords1', 'coords2', allow_atomgroup=True)
-def calc_bonds(coords1: Union[npt.NDArray, 'AtomGroup'],
-               coords2: Union[npt.NDArray, 'AtomGroup'],
-               box: Optional[npt.NDArray] = None,
-               result: Optional[npt.NDArray] = None,
-               backend: str = "serial") -> npt.NDArray:
+@check_coords("coords1", "coords2", allow_atomgroup=True)
+def calc_bonds(
+    coords1: Union[npt.NDArray, "AtomGroup"],
+    coords2: Union[npt.NDArray, "AtomGroup"],
+    box: Optional[npt.NDArray] = None,
+    result: Optional[npt.NDArray] = None,
+    backend: str = "serial",
+) -> npt.NDArray:
     """Calculates the bond lengths between pairs of atom positions from the two
     coordinate arrays `coords1` and `coords2`, which must contain the same
     number of coordinates. ``coords1[i]`` and ``coords2[i]`` represent the
@@ -1489,7 +1636,7 @@ def calc_bonds(coords1: Union[npt.NDArray, 'AtomGroup'],
         if box is not None:
             boxtype, box = check_box(box)
             if boxtype == "ortho":
-                if backend == 'distopia':
+                if backend == "distopia":
                     bondlengths = bondlengths.astype(np.float32)
                 _run(
                     "calc_bond_distance_ortho",
@@ -1503,25 +1650,27 @@ def calc_bonds(coords1: Union[npt.NDArray, 'AtomGroup'],
                     backend=backend,
                 )
         else:
-            if backend == 'distopia':
+            if backend == "distopia":
                 bondlengths = bondlengths.astype(np.float32)
             _run(
                 "calc_bond_distance",
                 args=(coords1, coords2, bondlengths),
                 backend=backend,
             )
-    if backend == 'distopia':
+    if backend == "distopia":
         bondlengths = bondlengths.astype(np.float64)
     return bondlengths
 
 
-@check_coords('coords1', 'coords2', 'coords3', allow_atomgroup=True)
-def calc_angles(coords1: Union[npt.NDArray, 'AtomGroup'],
-                coords2: Union[npt.NDArray, 'AtomGroup'],
-                coords3: Union[npt.NDArray, 'AtomGroup'],
-                box: Optional[npt.NDArray] = None,
-                result: Optional[npt.NDArray] = None,
-                backend: str = "serial") -> npt.NDArray:
+@check_coords("coords1", "coords2", "coords3", allow_atomgroup=True)
+def calc_angles(
+    coords1: Union[npt.NDArray, "AtomGroup"],
+    coords2: Union[npt.NDArray, "AtomGroup"],
+    coords3: Union[npt.NDArray, "AtomGroup"],
+    box: Optional[npt.NDArray] = None,
+    result: Optional[npt.NDArray] = None,
+    backend: str = "serial",
+) -> npt.NDArray:
     """Calculates the angles formed between triplets of atom positions from the
     three coordinate arrays `coords1`, `coords2`, and `coords3`. All coordinate
     arrays must contain the same number of coordinates.
@@ -1601,30 +1750,38 @@ def calc_angles(coords1: Union[npt.NDArray, 'AtomGroup'],
     if numatom > 0:
         if box is not None:
             boxtype, box = check_box(box)
-            if boxtype == 'ortho':
-                _run("calc_angle_ortho",
-                       args=(coords1, coords2, coords3, box, angles),
-                       backend=backend)
+            if boxtype == "ortho":
+                _run(
+                    "calc_angle_ortho",
+                    args=(coords1, coords2, coords3, box, angles),
+                    backend=backend,
+                )
             else:
-                _run("calc_angle_triclinic",
-                       args=(coords1, coords2, coords3, box, angles),
-                       backend=backend)
+                _run(
+                    "calc_angle_triclinic",
+                    args=(coords1, coords2, coords3, box, angles),
+                    backend=backend,
+                )
         else:
-            _run("calc_angle",
-                   args=(coords1, coords2, coords3, angles),
-                   backend=backend)
+            _run(
+                "calc_angle",
+                args=(coords1, coords2, coords3, angles),
+                backend=backend,
+            )
 
     return angles
 
 
-@check_coords('coords1', 'coords2', 'coords3', 'coords4', allow_atomgroup=True)
-def calc_dihedrals(coords1: Union[npt.NDArray, 'AtomGroup'],
-                   coords2: Union[npt.NDArray, 'AtomGroup'],
-                   coords3: Union[npt.NDArray, 'AtomGroup'],
-                   coords4: Union[npt.NDArray, 'AtomGroup'],
-                   box: Optional[npt.NDArray] = None,
-                   result: Optional[npt.NDArray] = None,
-                   backend: str = "serial") -> npt.NDArray:
+@check_coords("coords1", "coords2", "coords3", "coords4", allow_atomgroup=True)
+def calc_dihedrals(
+    coords1: Union[npt.NDArray, "AtomGroup"],
+    coords2: Union[npt.NDArray, "AtomGroup"],
+    coords3: Union[npt.NDArray, "AtomGroup"],
+    coords4: Union[npt.NDArray, "AtomGroup"],
+    box: Optional[npt.NDArray] = None,
+    result: Optional[npt.NDArray] = None,
+    backend: str = "serial",
+) -> npt.NDArray:
     r"""Calculates the dihedral angles formed between quadruplets of positions
     from the four coordinate arrays `coords1`, `coords2`, `coords3`, and
     `coords4`, which must contain the same number of coordinates.
@@ -1694,7 +1851,7 @@ def calc_dihedrals(coords1: Union[npt.NDArray, 'AtomGroup'],
         Array containing the dihedral angles formed by each quadruplet of
         coordinates. Values are returned in radians (rad). If four single
         coordinates were supplied, the dihedral angle is returned as a single
-        number instead of an array. The range of dihedral angle is 
+        number instead of an array. The range of dihedral angle is
         :math:`(-\pi, \pi)`.
 
 
@@ -1719,26 +1876,34 @@ def calc_dihedrals(coords1: Union[npt.NDArray, 'AtomGroup'],
     if numatom > 0:
         if box is not None:
             boxtype, box = check_box(box)
-            if boxtype == 'ortho':
-                _run("calc_dihedral_ortho",
-                     args=(coords1, coords2, coords3, coords4, box, dihedrals),
-                     backend=backend)
+            if boxtype == "ortho":
+                _run(
+                    "calc_dihedral_ortho",
+                    args=(coords1, coords2, coords3, coords4, box, dihedrals),
+                    backend=backend,
+                )
             else:
-                _run("calc_dihedral_triclinic",
-                     args=(coords1, coords2, coords3, coords4, box, dihedrals),
-                     backend=backend)
+                _run(
+                    "calc_dihedral_triclinic",
+                    args=(coords1, coords2, coords3, coords4, box, dihedrals),
+                    backend=backend,
+                )
         else:
-            _run("calc_dihedral",
-                 args=(coords1, coords2, coords3, coords4, dihedrals),
-                 backend=backend)
+            _run(
+                "calc_dihedral",
+                args=(coords1, coords2, coords3, coords4, dihedrals),
+                backend=backend,
+            )
 
     return dihedrals
 
 
-@check_coords('coords', allow_atomgroup=True)
-def apply_PBC(coords: Union[npt.NDArray, 'AtomGroup'],
-              box: Optional[npt.NDArray] = None,
-              backend: str = "serial") -> npt.NDArray:
+@check_coords("coords", allow_atomgroup=True)
+def apply_PBC(
+    coords: Union[npt.NDArray, "AtomGroup"],
+    box: Optional[npt.NDArray] = None,
+    backend: str = "serial",
+) -> npt.NDArray:
     """Moves coordinates into the primary unit cell.
 
     Parameters
@@ -1779,7 +1944,7 @@ def apply_PBC(coords: Union[npt.NDArray, 'AtomGroup'],
     if len(coords_array) == 0:
         return coords_array
     boxtype, box = check_box(box)
-    if boxtype == 'ortho':
+    if boxtype == "ortho":
         _run("ortho_pbc", args=(coords_array, box), backend=backend)
     else:
         _run("triclinic_pbc", args=(coords_array, box), backend=backend)
@@ -1787,7 +1952,7 @@ def apply_PBC(coords: Union[npt.NDArray, 'AtomGroup'],
     return coords_array
 
 
-@check_coords('vectors', enforce_copy=False, enforce_dtype=False)
+@check_coords("vectors", enforce_copy=False, enforce_dtype=False)
 def minimize_vectors(vectors: npt.NDArray, box: npt.NDArray) -> npt.NDArray:
     """Apply minimum image convention to an array of vectors
 
@@ -1822,7 +1987,7 @@ def minimize_vectors(vectors: npt.NDArray, box: npt.NDArray) -> npt.NDArray:
     # use box which is same precision as input vectors
     box = box.astype(vectors.dtype)
 
-    if boxtype == 'ortho':
+    if boxtype == "ortho":
         _minimize_vectors_ortho(vectors, box, output)
     else:
         _minimize_vectors_triclinic(vectors, box.ravel(), output)
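For context, a short sketch of two public entry points touched by the reformatting in this file, capped_distance and calc_bonds. The coordinates are random and the orthorhombic box is arbitrary, chosen only to make the snippet self-contained:

import numpy as np
from MDAnalysis.lib.distances import capped_distance, calc_bonds

rng = np.random.default_rng(0)
ref = (rng.random((1000, 3)) * 50.0).astype(np.float32)
conf = (rng.random((1000, 3)) * 50.0).astype(np.float32)
box = np.array([50.0, 50.0, 50.0, 90.0, 90.0, 90.0], dtype=np.float32)

# index pairs (and their distances) separated by less than 5 A;
# method="nsgrid" bypasses the automatic method heuristic and forces the grid search
pairs, dists = capped_distance(ref, conf, max_cutoff=5.0, box=box, method="nsgrid")

# element-wise distances between matching rows of the two coordinate sets
lengths = calc_bonds(ref, conf, box=box)
print(pairs.shape, dists.shape, lengths.shape)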
diff --git a/package/MDAnalysis/lib/formats/__init__.py b/package/MDAnalysis/lib/formats/__init__.py
index cf484ea477..2760c495d6 100644
--- a/package/MDAnalysis/lib/formats/__init__.py
+++ b/package/MDAnalysis/lib/formats/__init__.py
@@ -23,4 +23,4 @@
 from . import libmdaxdr
 from . import libdcd
 
-__all__ = ['libmdaxdr', 'libdcd']
+__all__ = ["libmdaxdr", "libdcd"]
diff --git a/package/MDAnalysis/lib/log.py b/package/MDAnalysis/lib/log.py
index 15100ef488..d63ec54782 100644
--- a/package/MDAnalysis/lib/log.py
+++ b/package/MDAnalysis/lib/log.py
@@ -101,7 +101,8 @@ def start_logging(logfile="MDAnalysis.log", version=version.__version__):
     """
     create("MDAnalysis", logfile=logfile)
     logging.getLogger("MDAnalysis").info(
-        "MDAnalysis %s STARTED logging to %r", version, logfile)
+        "MDAnalysis %s STARTED logging to %r", version, logfile
+    )
 
 
 def stop_logging():
@@ -136,7 +137,8 @@ def create(logger_name="MDAnalysis", logfile="MDAnalysis.log"):
     # handler that writes to logfile
     logfile_handler = logging.FileHandler(logfile)
     logfile_formatter = logging.Formatter(
-        '%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
+        "%(asctime)s %(name)-12s %(levelname)-8s %(message)s"
+    )
     logfile_handler.setFormatter(logfile_formatter)
     logger.addHandler(logfile_handler)
 
@@ -144,7 +146,7 @@ def create(logger_name="MDAnalysis", logfile="MDAnalysis.log"):
     console_handler = logging.StreamHandler()
     console_handler.setLevel(logging.INFO)
     # set a format which is simpler for console use
-    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
+    formatter = logging.Formatter("%(name)-12s: %(levelname)-8s %(message)s")
     console_handler.setFormatter(formatter)
     logger.addHandler(console_handler)
 
@@ -334,11 +336,11 @@ def __init__(self, *args, **kwargs):
         """"""
         # ^^^^ keep the empty doc string to avoid Sphinx doc errors with the
         # original doc string from tqdm.auto.tqdm
-        verbose = kwargs.pop('verbose', True)
+        verbose = kwargs.pop("verbose", True)
         # disable: Whether to disable the entire progressbar wrapper [default: False].
         # If set to None, disable on non-TTY.
         # disable should be the opposite of verbose unless it's None
         disable = verbose if verbose is None else not verbose
         # disable should take precedence over verbose if both are set
-        kwargs['disable'] = kwargs.pop('disable', disable)
+        kwargs["disable"] = kwargs.pop("disable", disable)
         super(ProgressBar, self).__init__(*args, **kwargs)
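The verbose/disable mapping commented above amounts to: verbose=True shows the bar, verbose=False suppresses it, and verbose=None defers to tqdm's non-TTY detection. A minimal sketch (the iterable is arbitrary):

from MDAnalysis.lib.log import ProgressBar

for _ in ProgressBar(range(100), verbose=False):   # bar suppressed
    pass
for _ in ProgressBar(range(100)):                  # default verbose=True, bar shown
    pass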
diff --git a/package/MDAnalysis/lib/mdamath.py b/package/MDAnalysis/lib/mdamath.py
index e904116a1a..cef449c8f2 100644
--- a/package/MDAnalysis/lib/mdamath.py
+++ b/package/MDAnalysis/lib/mdamath.py
@@ -61,8 +61,12 @@
 
 from ..exceptions import NoDataError
 from . import util
-from ._cutil import (make_whole, find_fragments, _sarrus_det_single,
-                     _sarrus_det_multiple)
+from ._cutil import (
+    make_whole,
+    find_fragments,
+    _sarrus_det_single,
+    _sarrus_det_multiple,
+)
 import numpy.typing as npt
 from typing import Union
 
@@ -127,7 +131,7 @@ def pdot(a: npt.NDArray, b: npt.NDArray) -> npt.NDArray:
     -------
     :class:`numpy.ndarray` of shape (N,)
     """
-    return np.einsum('ij,ij->i', a, b)
+    return np.einsum("ij,ij->i", a, b)
 
 
 def pnorm(a: npt.NDArray) -> npt.NDArray:
@@ -141,7 +145,7 @@ def pnorm(a: npt.NDArray) -> npt.NDArray:
     -------
     :class:`numpy.ndarray` of shape (N,)
     """
-    return pdot(a, a)**0.5
+    return pdot(a, a) ** 0.5
 
 
 def angle(a: npt.ArrayLike, b: npt.ArrayLike) -> float:
@@ -159,7 +163,9 @@ def angle(a: npt.ArrayLike, b: npt.ArrayLike) -> float:
     return np.arccos(x)
 
 
-def stp(vec1: npt.ArrayLike, vec2: npt.ArrayLike, vec3: npt.ArrayLike) -> float:
+def stp(
+    vec1: npt.ArrayLike, vec2: npt.ArrayLike, vec3: npt.ArrayLike
+) -> float:
     r"""Takes the scalar triple product of three vectors.
 
     Returns the volume *V* of the parallel epiped spanned by the three
@@ -195,7 +201,7 @@ def dihedral(ab: npt.ArrayLike, bc: npt.ArrayLike, cd: npt.ArrayLike) -> float:
        Moved into lib.mdamath
     """
     x = angle(normal(ab, bc), normal(bc, cd))
-    return (x if stp(ab, bc, cd) <= 0.0 else -x)
+    return x if stp(ab, bc, cd) <= 0.0 else -x
 
 
 def sarrus_det(matrix: npt.NDArray) -> Union[float, npt.NDArray]:
@@ -236,14 +242,18 @@ def sarrus_det(matrix: npt.NDArray) -> Union[float, npt.NDArray]:
     shape = m.shape
     ndim = m.ndim
     if ndim < 2 or shape[-2:] != (3, 3):
-        raise ValueError("Invalid matrix shape: must be (3, 3) or (..., 3, 3), "
-                         "got {}.".format(shape))
+        raise ValueError(
+            "Invalid matrix shape: must be (3, 3) or (..., 3, 3), "
+            "got {}.".format(shape)
+        )
     if ndim == 2:
         return _sarrus_det_single(m)
     return _sarrus_det_multiple(m.reshape((-1, 3, 3))).reshape(shape[:-2])
 
 
-def triclinic_box(x: npt.ArrayLike, y: npt.ArrayLike, z: npt.ArrayLike) -> npt.NDArray:
+def triclinic_box(
+    x: npt.ArrayLike, y: npt.ArrayLike, z: npt.ArrayLike
+) -> npt.NDArray:
     """Convert the three triclinic box vectors to
     ``[lx, ly, lz, alpha, beta, gamma]``.
 
@@ -306,8 +316,9 @@ def triclinic_box(x: npt.ArrayLike, y: npt.ArrayLike, z: npt.ArrayLike) -> npt.N
     return np.zeros(6, dtype=np.float32)
 
 
-def triclinic_vectors(dimensions: npt.ArrayLike,
-                      dtype: npt.DTypeLike = np.float32) -> npt.NDArray:
+def triclinic_vectors(
+    dimensions: npt.ArrayLike, dtype: npt.DTypeLike = np.float32
+) -> npt.NDArray:
     """Convert ``[lx, ly, lz, alpha, beta, gamma]`` to a triclinic matrix
     representation.
 
@@ -357,8 +368,9 @@ def triclinic_vectors(dimensions: npt.ArrayLike,
     dim = np.asarray(dimensions, dtype=np.float64)
     lx, ly, lz, alpha, beta, gamma = dim
     # Only positive edge lengths and angles in (0, 180) are allowed:
-    if not (np.all(dim > 0.0) and
-            alpha < 180.0 and beta < 180.0 and gamma < 180.0):
+    if not (
+        np.all(dim > 0.0) and alpha < 180.0 and beta < 180.0 and gamma < 180.0
+    ):
         # invalid box, return zero vectors:
         box_matrix = np.zeros((3, 3), dtype=dtype)
     # detect orthogonal boxes:
@@ -389,8 +401,9 @@ def triclinic_vectors(dimensions: npt.ArrayLike,
         box_matrix[1, 1] = ly * sin_gamma
         box_matrix[2, 0] = lz * cos_beta
         box_matrix[2, 1] = lz * (cos_alpha - cos_beta * cos_gamma) / sin_gamma
-        box_matrix[2, 2] = np.sqrt(lz * lz - box_matrix[2, 0] ** 2 -
-                                   box_matrix[2, 1] ** 2)
+        box_matrix[2, 2] = np.sqrt(
+            lz * lz - box_matrix[2, 0] ** 2 - box_matrix[2, 1] ** 2
+        )
         # The discriminant of the above square root is only negative or zero for
         # triplets of box angles that lead to an invalid box (i.e., the sum of
         # any two angles is less than or equal to the third).
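For orientation, a minimal usage sketch of the triclinic_box/triclinic_vectors pair reformatted above; the box values and the round trip shown are illustrative and not part of the patch.

import numpy as np
from MDAnalysis.lib.mdamath import triclinic_box, triclinic_vectors

# [lx, ly, lz, alpha, beta, gamma] in Angstroem and degrees (example values)
dimensions = np.array([10.0, 12.0, 15.0, 90.0, 90.0, 120.0])

# 3x3 matrix of box vectors (row-wise); float32 unless dtype is given
box_matrix = triclinic_vectors(dimensions)

# converting the three box vectors back recovers the original description
print(triclinic_box(box_matrix[0], box_matrix[1], box_matrix[2]))
# -> approximately [10. 12. 15. 90. 90. 120.]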
diff --git a/package/MDAnalysis/lib/picklable_file_io.py b/package/MDAnalysis/lib/picklable_file_io.py
index e27bca4b77..f8050b14e5 100644
--- a/package/MDAnalysis/lib/picklable_file_io.py
+++ b/package/MDAnalysis/lib/picklable_file_io.py
@@ -107,28 +107,30 @@ class FileIOPicklable(io.FileIO):
 
     .. versionadded:: 2.0.0
     """
-    def __init__(self, name, mode='r'):
+
+    def __init__(self, name, mode="r"):
         self._mode = mode
         super().__init__(name, mode)
 
-
     def __setstate__(self, state):
         name = state["name_val"]
-        self.__init__(name, mode='r')
+        self.__init__(name, mode="r")
         try:
             self.seek(state["tell_val"])
         except KeyError:
             pass
 
-
     def __reduce_ex__(self, prot):
-        if self._mode != 'r':
-            raise RuntimeError("Can only pickle files that were opened "
-                               "in read mode, not {}".format(self._mode))
-        return (self.__class__,
-                (self.name, self._mode),
-                {"name_val": self.name,
-                 "tell_val": self.tell()})
+        if self._mode != "r":
+            raise RuntimeError(
+                "Can only pickle files that were opened "
+                "in read mode, not {}".format(self._mode)
+            )
+        return (
+            self.__class__,
+            (self.name, self._mode),
+            {"name_val": self.name, "tell_val": self.tell()},
+        )
 
 
 class BufferIOPicklable(io.BufferedReader):
@@ -157,11 +159,11 @@ class BufferIOPicklable(io.BufferedReader):
 
     .. versionadded:: 2.0.0
     """
+
     def __init__(self, raw):
         super().__init__(raw)
         self.raw_class = raw.__class__
 
-
     def __setstate__(self, state):
         raw_class = state["raw_class"]
         name = state["name_val"]
@@ -172,11 +174,15 @@ def __setstate__(self, state):
     def __reduce_ex__(self, prot):
         # don't ask, for Python 3.12+ see:
         # https://github.com/python/cpython/pull/104370
-        return (self.raw_class,
-                (self.name,),
-                {"raw_class": self.raw_class,
-                 "name_val": self.name,
-                 "tell_val": self.tell()})
+        return (
+            self.raw_class,
+            (self.name,),
+            {
+                "raw_class": self.raw_class,
+                "name_val": self.name,
+                "tell_val": self.tell(),
+            },
+        )
 
 
 class TextIOPicklable(io.TextIOWrapper):
@@ -210,6 +216,7 @@ class TextIOPicklable(io.TextIOWrapper):
        so `universe.trajectory[i]` is not needed to seek to the
        original position.
     """
+
     def __init__(self, raw):
         super().__init__(raw)
         self.raw_class = raw.__class__
@@ -236,11 +243,15 @@ def __reduce_ex__(self, prot):
         except AttributeError:
             # This is kind of ugly--BZ2File does not save its name.
             name = self.buffer._fp.name
-        return (self.__class__.__new__,
-                (self.__class__,),
-                {"raw_class": self.raw_class,
-                 "name_val": name,
-                 "tell_val": curr_loc})
+        return (
+            self.__class__.__new__,
+            (self.__class__,),
+            {
+                "raw_class": self.raw_class,
+                "name_val": name,
+                "tell_val": curr_loc,
+            },
+        )
 
 
 class BZ2Picklable(bz2.BZ2File):
@@ -292,14 +303,17 @@ class BZ2Picklable(bz2.BZ2File):
 
     .. versionadded:: 2.0.0
     """
-    def __init__(self, name, mode='rb'):
+
+    def __init__(self, name, mode="rb"):
         self._bz_mode = mode
         super().__init__(name, mode)
 
     def __getstate__(self):
-        if not self._bz_mode.startswith('r'):
-            raise RuntimeError("Can only pickle files that were opened "
-                               "in read mode, not {}".format(self._bz_mode))
+        if not self._bz_mode.startswith("r"):
+            raise RuntimeError(
+                "Can only pickle files that were opened "
+                "in read mode, not {}".format(self._bz_mode)
+            )
         return {"name_val": self._fp.name, "tell_val": self.tell()}
 
     def __setstate__(self, args):
@@ -361,16 +375,18 @@ class GzipPicklable(gzip.GzipFile):
 
     .. versionadded:: 2.0.0
     """
-    def __init__(self, name, mode='rb'):
+
+    def __init__(self, name, mode="rb"):
         self._gz_mode = mode
         super().__init__(name, mode)
 
     def __getstate__(self):
-        if not self._gz_mode.startswith('r'):
-            raise RuntimeError("Can only pickle files that were opened "
-                               "in read mode, not {}".format(self._gz_mode))
-        return {"name_val": self.name,
-                "tell_val": self.tell()}
+        if not self._gz_mode.startswith("r"):
+            raise RuntimeError(
+                "Can only pickle files that were opened "
+                "in read mode, not {}".format(self._gz_mode)
+            )
+        return {"name_val": self.name, "tell_val": self.tell()}
 
     def __setstate__(self, args):
         name = args["name_val"]
@@ -382,7 +398,7 @@ def __setstate__(self, args):
             pass
 
 
-def pickle_open(name, mode='rt'):
+def pickle_open(name, mode="rt"):
     """Open file and return a stream with pickle function implemented.
 
     This function returns a FileIOPicklable object wrapped in a
@@ -443,18 +459,19 @@ def pickle_open(name, mode='rt'):
 
     .. versionadded:: 2.0.0
     """
-    if mode not in {'r', 'rt', 'rb'}:
-        raise ValueError("Only read mode ('r', 'rt', 'rb') "
-                         "files can be pickled.")
+    if mode not in {"r", "rt", "rb"}:
+        raise ValueError(
+            "Only read mode ('r', 'rt', 'rb') files can be pickled."
+        )
     name = os.fspath(name)
     raw = FileIOPicklable(name)
-    if mode == 'rb':
+    if mode == "rb":
         return BufferIOPicklable(raw)
-    elif mode in {'r', 'rt'}:
+    elif mode in {"r", "rt"}:
         return TextIOPicklable(raw)
 
 
-def bz2_pickle_open(name, mode='rb'):
+def bz2_pickle_open(name, mode="rb"):
     """Open a bzip2-compressed file in binary or text mode
     with pickle function implemented.
 
@@ -515,9 +532,10 @@ def bz2_pickle_open(name, mode='rb'):
 
     .. versionadded:: 2.0.0
     """
-    if mode not in {'r', 'rt', 'rb'}:
-        raise ValueError("Only read mode ('r', 'rt', 'rb') "
-                         "files can be pickled.")
+    if mode not in {"r", "rt", "rb"}:
+        raise ValueError(
+            "Only read mode ('r', 'rt', 'rb') files can be pickled."
+        )
     bz_mode = mode.replace("t", "")
     binary_file = BZ2Picklable(name, bz_mode)
     if "t" in mode:
@@ -526,7 +544,7 @@ def bz2_pickle_open(name, mode='rb'):
         return binary_file
 
 
-def gzip_pickle_open(name, mode='rb'):
+def gzip_pickle_open(name, mode="rb"):
     """Open a gzip-compressed file in binary or text mode
     with pickle function implemented.
 
@@ -587,9 +605,10 @@ def gzip_pickle_open(name, mode='rb'):
 
     .. versionadded:: 2.0.0
     """
-    if mode not in {'r', 'rt', 'rb'}:
-        raise ValueError("Only read mode ('r', 'rt', 'rb') "
-                         "files can be pickled.")
+    if mode not in {"r", "rt", "rb"}:
+        raise ValueError(
+            "Only read mode ('r', 'rt', 'rb') files can be pickled."
+        )
     gz_mode = mode.replace("t", "")
     binary_file = GzipPicklable(name, gz_mode)
     if "t" in mode:
diff --git a/package/MDAnalysis/lib/pkdtree.py b/package/MDAnalysis/lib/pkdtree.py
index f50d16da9f..952b4672e3 100644
--- a/package/MDAnalysis/lib/pkdtree.py
+++ b/package/MDAnalysis/lib/pkdtree.py
@@ -40,9 +40,7 @@
 import numpy.typing as npt
 from typing import Optional, ClassVar
 
-__all__ = [
-    'PeriodicKDTree'
-]
+__all__ = ["PeriodicKDTree"]
 
 
 class PeriodicKDTree(object):
@@ -64,7 +62,9 @@ class PeriodicKDTree(object):
 
     """
 
-    def __init__(self, box: Optional[npt.ArrayLike] = None, leafsize: int = 10) -> None:
+    def __init__(
+        self, box: Optional[npt.ArrayLike] = None, leafsize: int = 10
+    ) -> None:
         """
 
         Parameters
@@ -98,7 +98,9 @@ def pbc(self):
         """
         return self.box is not None
 
-    def set_coords(self, coords: npt.ArrayLike, cutoff: Optional[float] = None) -> None:
+    def set_coords(
+        self, coords: npt.ArrayLike, cutoff: Optional[float] = None
+    ) -> None:
         """Constructs KDTree from the coordinates
 
         Wrapping of coordinates to the primary unit cell is enforced
@@ -138,23 +140,26 @@ def set_coords(self, coords: npt.ArrayLike, cutoff: Optional[float] = None) -> N
         if self.pbc:
             self.cutoff = cutoff
             if cutoff is None:
-                raise RuntimeError('Provide a cutoff distance'
-                                   ' with tree.set_coords(...)')
+                raise RuntimeError(
+                    "Provide a cutoff distance with tree.set_coords(...)"
+                )
 
             # Bring the coordinates in the central cell
             self.coords = apply_PBC(coords, self.box)
             # generate duplicate images
-            self.aug, self.mapping = augment_coordinates(self.coords,
-                                                         self.box,
-                                                         cutoff)
+            self.aug, self.mapping = augment_coordinates(
+                self.coords, self.box, cutoff
+            )
             # Images + coords
             self.all_coords = np.concatenate([self.coords, self.aug])
             self.ckdt = cKDTree(self.all_coords, leafsize=self.leafsize)
         else:
             # if cutoff distance is provided for non PBC calculations
             if cutoff is not None:
-                raise RuntimeError('Donot provide cutoff distance for'
-                                   ' non PBC aware calculations')
+                raise RuntimeError(
+                    "Do not provide a cutoff distance for "
+                    "non-PBC aware calculations"
+                )
             self.coords = coords
             self.ckdt = cKDTree(self.coords, self.leafsize)
         self._built = True
@@ -175,37 +180,38 @@ def search(self, centers: npt.ArrayLike, radius: float) -> npt.NDArray:
         """
 
         if not self._built:
-            raise RuntimeError('Unbuilt tree. Run tree.set_coords(...)')
+            raise RuntimeError("Unbuilt tree. Run tree.set_coords(...)")
 
         centers = np.asarray(centers)
-        if centers.shape == (self.dim, ):
+        if centers.shape == (self.dim,):
             centers = centers.reshape((1, self.dim))
 
         # Sanity check
         if self.pbc:
             if self.cutoff is None:
                 raise ValueError(
-                    "Cutoff needs to be provided when working with PBC.")
+                    "Cutoff needs to be provided when working with PBC."
+                )
             if self.cutoff < radius:
-                raise RuntimeError('Set cutoff greater or equal to the radius.')
+                raise RuntimeError(
+                    "Set cutoff greater or equal to the radius."
+                )
             # Bring all query points to the central cell
             wrapped_centers = apply_PBC(centers, self.box)
-            indices = list(self.ckdt.query_ball_point(wrapped_centers,
-                                                      radius))
-            self._indices = np.array(list(
-                                     itertools.chain.from_iterable(indices)),
-                                     dtype=np.intp)
+            indices = list(self.ckdt.query_ball_point(wrapped_centers, radius))
+            self._indices = np.array(
+                list(itertools.chain.from_iterable(indices)), dtype=np.intp
+            )
             if self._indices.size > 0:
-                self._indices = undo_augment(self._indices,
-                                             self.mapping,
-                                             len(self.coords))
+                self._indices = undo_augment(
+                    self._indices, self.mapping, len(self.coords)
+                )
         else:
             wrapped_centers = np.asarray(centers)
-            indices = list(self.ckdt.query_ball_point(wrapped_centers,
-                                                      radius))
-            self._indices = np.array(list(
-                                     itertools.chain.from_iterable(indices)),
-                                     dtype=np.intp)
+            indices = list(self.ckdt.query_ball_point(wrapped_centers, radius))
+            self._indices = np.array(
+                list(itertools.chain.from_iterable(indices)), dtype=np.intp
+            )
         self._indices = np.asarray(unique_int_1d(self._indices))
         return self._indices
 
@@ -233,22 +239,27 @@ def search_pairs(self, radius: float) -> npt.NDArray:
           Indices of all the pairs which are within the specified radius
         """
         if not self._built:
-            raise RuntimeError(' Unbuilt Tree. Run tree.set_coords(...)')
+            raise RuntimeError(" Unbuilt Tree. Run tree.set_coords(...)")
 
         if self.pbc:
             if self.cutoff is None:
                 raise ValueError(
-                    "Cutoff needs to be provided when working with PBC.")
+                    "Cutoff needs to be provided when working with PBC."
+                )
             if self.cutoff < radius:
-                raise RuntimeError('Set cutoff greater or equal to the radius.')
+                raise RuntimeError(
+                    "Set cutoff greater or equal to the radius."
+                )
 
         pairs = np.array(list(self.ckdt.query_pairs(radius)), dtype=np.intp)
         if self.pbc:
             if len(pairs) > 1:
-                pairs[:, 0] = undo_augment(pairs[:, 0], self.mapping,
-                                           len(self.coords))
-                pairs[:, 1] = undo_augment(pairs[:, 1], self.mapping,
-                                           len(self.coords))
+                pairs[:, 0] = undo_augment(
+                    pairs[:, 0], self.mapping, len(self.coords)
+                )
+                pairs[:, 1] = undo_augment(
+                    pairs[:, 1], self.mapping, len(self.coords)
+                )
         if pairs.size > 0:
             # First sort the pairs then pick the unique pairs
             pairs = np.sort(pairs, axis=1)
@@ -287,34 +298,41 @@ class initialization
         """
 
         if not self._built:
-            raise RuntimeError('Unbuilt tree. Run tree.set_coords(...)')
+            raise RuntimeError("Unbuilt tree. Run tree.set_coords(...)")
 
         centers = np.asarray(centers)
-        if centers.shape == (self.dim, ):
+        if centers.shape == (self.dim,):
             centers = centers.reshape((1, self.dim))
 
         # Sanity check
         if self.pbc:
             if self.cutoff is None:
                 raise ValueError(
-                    "Cutoff needs to be provided when working with PBC.")
+                    "Cutoff needs to be provided when working with PBC."
+                )
             if self.cutoff < radius:
-                raise RuntimeError('Set cutoff greater or equal to the radius.')
+                raise RuntimeError(
+                    "Set cutoff greater or equal to the radius."
+                )
             # Bring all query points to the central cell
             wrapped_centers = apply_PBC(centers, self.box)
             other_tree = cKDTree(wrapped_centers, leafsize=self.leafsize)
             pairs = other_tree.query_ball_tree(self.ckdt, radius)
-            pairs = np.array([[i, j] for i, lst in enumerate(pairs) for j in lst],
-                                   dtype=np.intp)
+            pairs = np.array(
+                [[i, j] for i, lst in enumerate(pairs) for j in lst],
+                dtype=np.intp,
+            )
             if pairs.size > 0:
-                pairs[:, 1] = undo_augment(pairs[:, 1],
-                                             self.mapping,
-                                             len(self.coords))
+                pairs[:, 1] = undo_augment(
+                    pairs[:, 1], self.mapping, len(self.coords)
+                )
         else:
             other_tree = cKDTree(centers, leafsize=self.leafsize)
             pairs = other_tree.query_ball_tree(self.ckdt, radius)
-            pairs = np.array([[i, j] for i, lst in enumerate(pairs) for j in lst],
-                                   dtype=np.intp)
+            pairs = np.array(
+                [[i, j] for i, lst in enumerate(pairs) for j in lst],
+                dtype=np.intp,
+            )
         if pairs.size > 0:
             pairs = unique_rows(pairs)
         return pairs
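For context, a minimal sketch of the PeriodicKDTree workflow whose error paths are reformatted above; the box, coordinates, and cutoff are illustrative values, not part of the patch.

import numpy as np
from MDAnalysis.lib.pkdtree import PeriodicKDTree

box = np.array([10.0, 10.0, 10.0, 90.0, 90.0, 90.0], dtype=np.float32)
coords = np.random.uniform(0.0, 10.0, (100, 3)).astype(np.float32)

tree = PeriodicKDTree(box=box)
tree.set_coords(coords, cutoff=3.0)    # a cutoff is required with PBC
indices = tree.search(coords[0], 2.5)  # radius must not exceed the cutoff
pairs = tree.search_pairs(2.5)         # all pairs within 2.5 Angstroem
print(indices.shape, pairs.shape)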
diff --git a/package/MDAnalysis/lib/transformations.py b/package/MDAnalysis/lib/transformations.py
index 5c386a0104..27e5f01db9 100644
--- a/package/MDAnalysis/lib/transformations.py
+++ b/package/MDAnalysis/lib/transformations.py
@@ -162,16 +162,19 @@
    MDAnalysis.lib.transformations
 """
 
-import sys
+import math
 import os
+import sys
 import warnings
-import math
+
 import numpy as np
 from numpy.linalg import norm
 
-from .mdamath import angle as vecangle
 from MDAnalysis.lib.util import no_copy_shim
 
+from .mdamath import angle as vecangle
+
+
 def identity_matrix():
     """Return 4x4 identity/unit matrix.
 
@@ -316,13 +319,20 @@ def rotation_matrix(angle, direction, point=None):
         (
             (cosa, 0.0, 0.0),
             (0.0, cosa, 0.0),
-            (0.0, 0.0, cosa)), dtype=np.float64)
+            (0.0, 0.0, cosa),
+        ),
+        dtype=np.float64,
+    )
     R += np.outer(direction, direction) * (1.0 - cosa)
     direction *= sina
     R += np.array(
-        ((0.0, -direction[2], direction[1]),
-        (direction[2], 0.0, -direction[0]),
-        (-direction[1], direction[0], 0.0)), dtype=np.float64)
+        (
+            (0.0, -direction[2], direction[1]),
+            (direction[2], 0.0, -direction[0]),
+            (-direction[1], direction[0], 0.0),
+        ),
+        dtype=np.float64,
+    )
     M = np.identity(4)
     M[:3, :3] = R
     if point is not None:
@@ -367,11 +377,17 @@ def rotation_from_matrix(matrix):
     # rotation angle depending on direction
     cosa = (np.trace(R33) - 1.0) / 2.0
     if abs(direction[2]) > 1e-8:
-        sina = (R[1, 0] + (cosa - 1.0) * direction[0] * direction[1]) / direction[2]
+        sina = (
+            R[1, 0] + (cosa - 1.0) * direction[0] * direction[1]
+        ) / direction[2]
     elif abs(direction[1]) > 1e-8:
-        sina = (R[0, 2] + (cosa - 1.0) * direction[0] * direction[2]) / direction[1]
+        sina = (
+            R[0, 2] + (cosa - 1.0) * direction[0] * direction[2]
+        ) / direction[1]
     else:
-        sina = (R[2, 1] + (cosa - 1.0) * direction[1] * direction[2]) / direction[0]
+        sina = (
+            R[2, 1] + (cosa - 1.0) * direction[1] * direction[2]
+        ) / direction[0]
     angle = math.atan2(sina, cosa)
     return angle, direction, point
 
@@ -399,10 +415,14 @@ def scale_matrix(factor, origin=None, direction=None):
     if direction is None:
         # uniform scaling
         M = np.array(
-            ((factor, 0.0, 0.0, 0.0),
-            (0.0, factor, 0.0, 0.0),
-            (0.0, 0.0, factor, 0.0),
-            (0.0, 0.0, 0.0, 1.0)), dtype=np.float64)
+            (
+                (factor, 0.0, 0.0, 0.0),
+                (0.0, factor, 0.0, 0.0),
+                (0.0, 0.0, factor, 0.0),
+                (0.0, 0.0, 0.0, 1.0),
+            ),
+            dtype=np.float64,
+        )
         if origin is not None:
             M[:3, 3] = origin[:3]
             M[:3, 3] *= 1.0 - factor
@@ -462,8 +482,9 @@ def scale_from_matrix(matrix):
     return factor, origin, direction
 
 
-def projection_matrix(point, normal, direction=None,
-                      perspective=None, pseudo=False):
+def projection_matrix(
+    point, normal, direction=None, perspective=None, pseudo=False
+):
     """Return matrix to project onto plane defined by point and normal.
 
     Using either perspective point, projection direction, or none of both.
@@ -502,8 +523,7 @@ def projection_matrix(point, normal, direction=None,
     normal = unit_vector(normal[:3])
     if perspective is not None:
         # perspective projection
-        perspective = np.array(perspective[:3], dtype=np.float64,
-                                  copy=False)
+        perspective = np.array(perspective[:3], dtype=np.float64, copy=False)
         M[0, 0] = M[1, 1] = M[2, 2] = np.dot(perspective - point, normal)
         M[:3, :3] -= np.outer(perspective, normal)
         if pseudo:
@@ -516,7 +536,9 @@ def projection_matrix(point, normal, direction=None,
         M[3, 3] = np.dot(perspective, normal)
     elif direction is not None:
         # parallel projection
-        direction = np.array(direction[:3], dtype=np.float64, copy=no_copy_shim)
+        direction = np.array(
+            direction[:3], dtype=np.float64, copy=no_copy_shim
+        )
         scale = np.dot(direction, normal)
         M[:3, :3] -= np.outer(direction, normal) / scale
         M[:3, 3] = direction * (np.dot(point, normal) / scale)
@@ -593,10 +615,11 @@ def projection_from_matrix(matrix, pseudo=False):
         i = np.where(abs(np.real(l)) > 1e-8)[0]
         if not len(i):
             raise ValueError(
-                "no eigenvector not corresponding to eigenvalue 0")
+                "no eigenvector not corresponding to eigenvalue 0"
+            )
         point = np.real(V[:, i[-1]]).squeeze()
         point /= point[3]
-        normal = - M[3, :3]
+        normal = -M[3, :3]
         perspective = M[:3, 3] / np.dot(point[:3], normal)
         if pseudo:
             perspective -= normal
@@ -649,13 +672,15 @@ def clip_matrix(left, right, bottom, top, near, far, perspective=False):
             (-t / (right - left), 0.0, (right + left) / (right - left), 0.0),
             (0.0, -t / (top - bottom), (top + bottom) / (top - bottom), 0.0),
             (0.0, 0.0, -(far + near) / (far - near), t * far / (far - near)),
-            (0.0, 0.0, -1.0, 0.0))
+            (0.0, 0.0, -1.0, 0.0),
+        )
     else:
         M = (
             (2.0 / (right - left), 0.0, 0.0, (right + left) / (left - right)),
             (0.0, 2.0 / (top - bottom), 0.0, (top + bottom) / (bottom - top)),
             (0.0, 0.0, 2.0 / (far - near), (far + near) / (near - far)),
-            (0.0, 0.0, 0.0, 1.0))
+            (0.0, 0.0, 0.0, 1.0),
+        )
     return np.array(M, dtype=np.float64)
 
 
@@ -717,7 +742,9 @@ def shear_from_matrix(matrix):
     l, V = np.linalg.eig(M33)
     i = np.where(abs(np.real(l) - 1.0) < 1e-4)[0]
     if len(i) < 2:
-        raise ValueError("no two linear independent eigenvectors found {0!s}".format(l))
+        raise ValueError(
+            "no two linearly independent eigenvectors found {0!s}".format(l)
+        )
     V = np.real(V[:, i]).squeeze().T
     lenorm = -1.0
     for i0, i1 in ((0, 1), (0, 2), (1, 2)):
@@ -786,7 +813,7 @@ def decompose_matrix(matrix):
     if not np.linalg.det(P):
         raise ValueError("matrix is singular")
 
-    scale = np.zeros((3, ), dtype=np.float64)
+    scale = np.zeros((3,), dtype=np.float64)
     shear = [0, 0, 0]
     angles = [0, 0, 0]
 
@@ -824,15 +851,16 @@ def decompose_matrix(matrix):
         angles[0] = math.atan2(row[1, 2], row[2, 2])
         angles[2] = math.atan2(row[0, 1], row[0, 0])
     else:
-        #angles[0] = math.atan2(row[1, 0], row[1, 1])
+        # angles[0] = math.atan2(row[1, 0], row[1, 1])
         angles[0] = math.atan2(-row[2, 1], row[1, 1])
         angles[2] = 0.0
 
     return scale, shear, angles, translate, perspective
 
 
-def compose_matrix(scale=None, shear=None, angles=None, translate=None,
-                   perspective=None):
+def compose_matrix(
+    scale=None, shear=None, angles=None, translate=None, perspective=None
+):
     """Return transformation matrix from sequence of transformations.
 
     This is the inverse of the decompose_matrix function.
@@ -870,7 +898,7 @@ def compose_matrix(scale=None, shear=None, angles=None, translate=None,
         T[:3, 3] = translate[:3]
         M = np.dot(M, T)
     if angles is not None:
-        R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz')
+        R = euler_matrix(angles[0], angles[1], angles[2], "sxyz")
         M = np.dot(M, R)
     if shear is not None:
         Z = np.identity(4)
@@ -915,8 +943,10 @@ def orthogonalization_matrix(lengths, angles):
             (a * sinb * math.sqrt(1.0 - co * co), 0.0, 0.0, 0.0),
             (-a * sinb * co, b * sina, 0.0, 0.0),
             (a * cosb, b * cosa, c, 0.0),
-            (0.0, 0.0, 0.0, 1.0)),
-        dtype=np.float64)
+            (0.0, 0.0, 0.0, 1.0),
+        ),
+        dtype=np.float64,
+    )
 
 
 def superimposition_matrix(v0, v1, scaling=False, usesvd=True):
@@ -997,14 +1027,15 @@ def superimposition_matrix(v0, v1, scaling=False, usesvd=True):
         M[:3, :3] = R
     else:
         # compute symmetric matrix N
-        xx, yy, zz = np.einsum('ij,ij->i', v0 , v1)
-        xy, yz, zx = np.einsum('ij,ij->i', v0, np.roll(v1, -1, axis=0))
-        xz, yx, zy = np.einsum('ij,ij->i', v0, np.roll(v1, -2, axis=0))
+        xx, yy, zz = np.einsum("ij,ij->i", v0, v1)
+        xy, yz, zx = np.einsum("ij,ij->i", v0, np.roll(v1, -1, axis=0))
+        xz, yx, zy = np.einsum("ij,ij->i", v0, np.roll(v1, -2, axis=0))
         N = (
             (xx + yy + zz, 0.0, 0.0, 0.0),
             (yz - zy, xx - yy - zz, 0.0, 0.0),
             (zx - xz, xy + yx, -xx + yy - zz, 0.0),
-            (xy - yx, zx + xz, yz + zy, -xx - yy + zz))
+            (xy - yx, zx + xz, yz + zy, -xx - yy + zz),
+        )
         # quaternion: eigenvector corresponding to most positive eigenvalue
         l, V = np.linalg.eigh(N)
         q = V[:, np.argmax(l)]
@@ -1014,9 +1045,9 @@ def superimposition_matrix(v0, v1, scaling=False, usesvd=True):
 
     # scale: ratio of rms deviations from centroid
     if scaling:
-        M[:3, :3] *= math.sqrt(np.einsum('ij,ij->',v1,v1) / 
-                                np.einsum('ij,ij->',v0,v0))
-
+        M[:3, :3] *= math.sqrt(
+            np.einsum("ij,ij->", v1, v1) / np.einsum("ij,ij->", v0, v0)
+        )
 
     # translation
     M[:3, 3] = t1
@@ -1026,7 +1057,7 @@ def superimposition_matrix(v0, v1, scaling=False, usesvd=True):
     return M
 
 
-def euler_matrix(ai, aj, ak, axes='sxyz'):
+def euler_matrix(ai, aj, ak, axes="sxyz"):
     """Return homogeneous rotation matrix from Euler angles and axis sequence.
 
     ai, aj, ak : Euler's roll, pitch and yaw angles
@@ -1093,7 +1124,7 @@ def euler_matrix(ai, aj, ak, axes='sxyz'):
     return M
 
 
-def euler_from_matrix(matrix, axes='sxyz'):
+def euler_from_matrix(matrix, axes="sxyz"):
     """Return Euler angles from rotation matrix for specified axis sequence.
 
     axes : One of 24 axis sequences as string or encoded tuple
@@ -1155,7 +1186,7 @@ def euler_from_matrix(matrix, axes='sxyz'):
     return ax, ay, az
 
 
-def euler_from_quaternion(quaternion, axes='sxyz'):
+def euler_from_quaternion(quaternion, axes="sxyz"):
     """Return Euler angles from quaternion for specified axis sequence.
 
     >>> from MDAnalysis.lib.transformations import euler_from_quaternion
@@ -1168,7 +1199,7 @@ def euler_from_quaternion(quaternion, axes='sxyz'):
     return euler_from_matrix(quaternion_matrix(quaternion), axes)
 
 
-def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
+def quaternion_from_euler(ai, aj, ak, axes="sxyz"):
     """Return quaternion from Euler angles and axis sequence.
 
     ai, aj, ak : Euler's roll, pitch and yaw angles
@@ -1209,7 +1240,7 @@ def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
     sc = si * ck
     ss = si * sk
 
-    quaternion = np.empty((4, ), dtype=np.float64)
+    quaternion = np.empty((4,), dtype=np.float64)
     if repetition:
         quaternion[0] = cj * (cc - ss)
         quaternion[i] = cj * (cs + sc)
@@ -1236,7 +1267,7 @@ def quaternion_about_axis(angle, axis):
     True
 
     """
-    quaternion = np.zeros((4, ), dtype=np.float64)
+    quaternion = np.zeros((4,), dtype=np.float64)
     quaternion[1] = axis[0]
     quaternion[2] = axis[1]
     quaternion[3] = axis[2]
@@ -1272,11 +1303,28 @@ def quaternion_matrix(quaternion):
     q = np.outer(q, q)
     return np.array(
         (
-            (1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0], 0.0),
-            (q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0], 0.0),
-            (q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2], 0.0),
-            (0.0, 0.0, 0.0, 1.0)
-        ), dtype=np.float64)
+            (
+                1.0 - q[2, 2] - q[3, 3],
+                q[1, 2] - q[3, 0],
+                q[1, 3] + q[2, 0],
+                0.0,
+            ),
+            (
+                q[1, 2] + q[3, 0],
+                1.0 - q[1, 1] - q[3, 3],
+                q[2, 3] - q[1, 0],
+                0.0,
+            ),
+            (
+                q[1, 3] - q[2, 0],
+                q[2, 3] + q[1, 0],
+                1.0 - q[1, 1] - q[2, 2],
+                0.0,
+            ),
+            (0.0, 0.0, 0.0, 1.0),
+        ),
+        dtype=np.float64,
+    )
 
 
 def quaternion_from_matrix(matrix, isprecise=False):
@@ -1317,7 +1365,7 @@ def quaternion_from_matrix(matrix, isprecise=False):
     """
     M = np.array(matrix, dtype=np.float64, copy=no_copy_shim)[:4, :4]
     if isprecise:
-        q = np.empty((4, ), dtype=np.float64)
+        q = np.empty((4,), dtype=np.float64)
         t = np.trace(M)
         if t > M[3, 3]:
             q[0] = t
@@ -1347,11 +1395,14 @@ def quaternion_from_matrix(matrix, isprecise=False):
         m21 = M[2, 1]
         m22 = M[2, 2]
         # symmetric matrix K
-        K = np.array((
-            (m00 - m11 - m22, 0.0, 0.0, 0.0),
-            (m01 + m10, m11 - m00 - m22, 0.0, 0.0),
-            (m02 + m20, m12 + m21, m22 - m00 - m11, 0.0),
-            (m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22)))
+        K = np.array(
+            (
+                (m00 - m11 - m22, 0.0, 0.0, 0.0),
+                (m01 + m10, m11 - m00 - m22, 0.0, 0.0),
+                (m02 + m20, m12 + m21, m22 - m00 - m11, 0.0),
+                (m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22),
+            )
+        )
         K /= 3.0
         # quaternion is eigenvector of K that corresponds to largest eigenvalue
         l, V = np.linalg.eigh(K)
@@ -1379,7 +1430,10 @@ def quaternion_multiply(quaternion1, quaternion0):
             -x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0,
             x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0,
             -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0,
-            x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0), dtype=np.float64)
+            x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0,
+        ),
+        dtype=np.float64,
+    )
 
 
 def quaternion_conjugate(quaternion):
@@ -1395,9 +1449,9 @@ def quaternion_conjugate(quaternion):
 
     """
     return np.array(
-        (
-            quaternion[0], -quaternion[1],
-            -quaternion[2], -quaternion[3]), dtype=np.float64)
+        (quaternion[0], -quaternion[1], -quaternion[2], -quaternion[3]),
+        dtype=np.float64,
+    )
 
 
 def quaternion_inverse(quaternion):
@@ -1514,7 +1568,10 @@ def random_quaternion(rand=None):
             np.cos(t2) * r2,
             np.sin(t1) * r1,
             np.cos(t1) * r1,
-            np.sin(t2) * r2), dtype=np.float64)
+            np.sin(t2) * r2,
+        ),
+        dtype=np.float64,
+    )
 
 
 def random_rotation_matrix(rand=None):
@@ -1579,7 +1636,7 @@ def __init__(self, initial=None):
             initial = np.array(initial, dtype=np.float64)
             if initial.shape == (4, 4):
                 self._qdown = quaternion_from_matrix(initial)
-            elif initial.shape == (4, ):
+            elif initial.shape == (4,):
                 initial /= vector_norm(initial)
                 self._qdown = initial
             else:
@@ -1658,8 +1715,9 @@ def arcball_map_to_sphere(point, center, radius):
         (
             (point[0] - center[0]) / radius,
             (center[1] - point[1]) / radius,
-            0.0
-        ), dtype=np.float64
+            0.0,
+        ),
+        dtype=np.float64,
     )
     n = v[0] * v[0] + v[1] * v[1]
     if n > 1.0:
@@ -1705,15 +1763,18 @@ def arcball_nearest_axis(point, axes):
 _NEXT_AXIS = [1, 2, 0, 1]
 
 # map axes strings to/from tuples of inner axis, parity, repetition, frame
+# fmt: off
 _AXES2TUPLE = {
-    'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
-    'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
-    'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
-    'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
-    'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
-    'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
-    'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
-    'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
+    "sxyz": (0, 0, 0, 0), "sxyx": (0, 0, 1, 0), "sxzy": (0, 1, 0, 0),
+    "sxzx": (0, 1, 1, 0), "syzx": (1, 0, 0, 0), "syzy": (1, 0, 1, 0),
+    "syxz": (1, 1, 0, 0), "syxy": (1, 1, 1, 0), "szxy": (2, 0, 0, 0),
+    "szxz": (2, 0, 1, 0), "szyx": (2, 1, 0, 0), "szyz": (2, 1, 1, 0),
+    "rzyx": (0, 0, 0, 1), "rxyx": (0, 0, 1, 1), "ryzx": (0, 1, 0, 1),
+    "rxzx": (0, 1, 1, 1), "rxzy": (1, 0, 0, 1), "ryzy": (1, 0, 1, 1),
+    "rzxy": (1, 1, 0, 1), "ryxy": (1, 1, 1, 1), "ryxz": (2, 0, 0, 1),
+    "rzxz": (2, 0, 1, 1), "rxyz": (2, 1, 0, 1), "rzyz": (2, 1, 1, 1),
+}
+# fmt: on
 
 _TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
 
@@ -1878,7 +1939,7 @@ def is_same_transform(matrix0, matrix1):
     return np.allclose(matrix0, matrix1)
 
 
-def _import_module(module_name, warn=True, prefix='_py_', ignore='_'):
+def _import_module(module_name, warn=True, prefix="_py_", ignore="_"):
     """Try import all public attributes from module into global namespace.
 
     Existing attributes with name clashes are renamed with prefix.
@@ -1910,6 +1971,7 @@ def _import_module(module_name, warn=True, prefix='_py_', ignore='_'):
 
 # orbeckst --- some simple geometry
 
+
 def rotaxis(a, b):
     """Return the rotation axis to rotate vector a into b.
 
@@ -1935,7 +1997,7 @@ def rotaxis(a, b):
     return c / np.linalg.norm(c)
 
 
-_import_module('_transformations')
+_import_module("_transformations")
 
 # Documentation in HTML format can be generated with Epydoc
 __docformat__ = "restructuredtext en"
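A brief sketch of the Euler-angle and rotation helpers touched in this file; the angles are arbitrary example values, not taken from the patch.

import numpy as np
from MDAnalysis.lib.transformations import (
    euler_from_matrix,
    euler_matrix,
    rotation_matrix,
)

R = euler_matrix(0.1, 0.2, 0.3, axes="sxyz")  # 4x4 homogeneous rotation
print(np.allclose(euler_from_matrix(R, "sxyz"), (0.1, 0.2, 0.3)))  # True

Rz = rotation_matrix(np.pi / 2, (0, 0, 1))    # 90 degrees about z
print(np.allclose(Rz[:3, :3] @ [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]))  # True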
diff --git a/package/MDAnalysis/lib/util.py b/package/MDAnalysis/lib/util.py
index 7f576af0ad..d227763d9e 100644
--- a/package/MDAnalysis/lib/util.py
+++ b/package/MDAnalysis/lib/util.py
@@ -198,38 +198,38 @@
 __docformat__ = "restructuredtext en"
 
 
-import os
-import os.path
-import errno
-from contextlib import contextmanager
 import bz2
+import errno
+import functools
 import gzip
-import re
+import importlib
+import inspect
 import io
-import warnings
-import functools
-from functools import wraps
+import itertools
+import os
+import os.path
+import re
 import textwrap
+import warnings
 import weakref
-import importlib
-import itertools
+from contextlib import contextmanager
+from functools import wraps
 
 import mmtf
 import numpy as np
-
 from numpy.testing import assert_equal
-import inspect
-
-from .picklable_file_io import pickle_open, bz2_pickle_open, gzip_pickle_open
 
-from ..exceptions import StreamWarning, DuplicateWarning
+from ..exceptions import DuplicateWarning, StreamWarning
+from .picklable_file_io import bz2_pickle_open, gzip_pickle_open, pickle_open
 
 try:
     from ._cutil import unique_int_1d
 except ImportError:
-    raise ImportError("MDAnalysis not installed properly. "
-                      "This can happen if your C extensions "
-                      "have not been built.")
+    raise ImportError(
+        "MDAnalysis not installed properly. "
+        "This can happen if your C extensions "
+        "have not been built."
+    )
 
 
 def int_array_is_sorted(array):
@@ -280,7 +280,7 @@ def filename(name, ext=None, keep=False):
 
 
 @contextmanager
-def openany(datasource, mode='rt', reset=True):
+def openany(datasource, mode="rt", reset=True):
     """Context manager for :func:`anyopen`.
 
     Open the `datasource` and close it when the context of the :keyword:`with`
@@ -330,7 +330,7 @@ def openany(datasource, mode='rt', reset=True):
         stream.close()
 
 
-def anyopen(datasource, mode='rt', reset=True):
+def anyopen(datasource, mode="rt", reset=True):
     """Open datasource (gzipped, bzipped, uncompressed) and return a stream.
 
     `datasource` can be a filename or a stream (see :func:`isstream`). By
@@ -374,14 +374,14 @@ def anyopen(datasource, mode='rt', reset=True):
        :class:`MDAnalysis.lib.picklable_file_io`.
 
     """
-    read_handlers = {'bz2': bz2_pickle_open,
-                     'gz': gzip_pickle_open,
-                     '': pickle_open}
-    write_handlers = {'bz2': bz2.open,
-                      'gz': gzip.open,
-                      '': open}
-
-    if mode.startswith('r'):
+    read_handlers = {
+        "bz2": bz2_pickle_open,
+        "gz": gzip_pickle_open,
+        "": pickle_open,
+    }
+    write_handlers = {"bz2": bz2.open, "gz": gzip.open, "": open}
+
+    if mode.startswith("r"):
         if isstream(datasource):
             stream = datasource
             try:
@@ -395,20 +395,30 @@ def anyopen(datasource, mode='rt', reset=True):
                     try:
                         stream.seek(0)
                     except (AttributeError, IOError):
-                        warnings.warn("Stream {0}: not guaranteed to be at the beginning."
-                                      "".format(filename),
-                                      category=StreamWarning)
+                        warnings.warn(
+                            "Stream {0}: not guaranteed to be at the beginning."
+                            "".format(filename),
+                            category=StreamWarning,
+                        )
         else:
             stream = None
             filename = datasource
-            for ext in ('bz2', 'gz', ''):  # file == '' should be last
+            for ext in ("bz2", "gz", ""):  # file == '' should be last
                 openfunc = read_handlers[ext]
                 stream = _get_stream(datasource, openfunc, mode=mode)
                 if stream is not None:
                     break
             if stream is None:
-                raise IOError(errno.EIO, "Cannot open file or stream in mode={mode!r}.".format(**vars()), repr(filename))
-    elif mode.startswith('w') or mode.startswith('a'):  # append 'a' not tested...
+                raise IOError(
+                    errno.EIO,
+                    "Cannot open file or stream in mode={mode!r}.".format(
+                        **vars()
+                    ),
+                    repr(filename),
+                )
+    elif mode.startswith("w") or mode.startswith(
+        "a"
+    ):  # append 'a' not tested...
         if isstream(datasource):
             stream = datasource
             try:
@@ -419,16 +429,26 @@ def anyopen(datasource, mode='rt', reset=True):
             stream = None
             filename = datasource
             name, ext = os.path.splitext(filename)
-            if ext.startswith('.'):
+            if ext.startswith("."):
                 ext = ext[1:]
-            if not ext in ('bz2', 'gz'):
-                ext = ''  # anything else but bz2 or gz is just a normal file
+            if not ext in ("bz2", "gz"):
+                ext = ""  # anything else but bz2 or gz is just a normal file
             openfunc = write_handlers[ext]
             stream = openfunc(datasource, mode=mode)
             if stream is None:
-                raise IOError(errno.EIO, "Cannot open file or stream in mode={mode!r}.".format(**vars()), repr(filename))
+                raise IOError(
+                    errno.EIO,
+                    "Cannot open file or stream in mode={mode!r}.".format(
+                        **vars()
+                    ),
+                    repr(filename),
+                )
     else:
-        raise NotImplementedError("Sorry, mode={mode!r} is not implemented for {datasource!r}".format(**vars()))
+        raise NotImplementedError(
+            "Sorry, mode={mode!r} is not implemented for {datasource!r}".format(
+                **vars()
+            )
+        )
     try:
         stream.name = filename
     except (AttributeError, TypeError):
@@ -436,7 +456,7 @@ def anyopen(datasource, mode='rt', reset=True):
     return stream
 
 
-def _get_stream(filename, openfunction=open, mode='r'):
+def _get_stream(filename, openfunction=open, mode="r"):
     """Return open stream if *filename* can be opened with *openfunction* or else ``None``."""
     try:
         stream = openfunction(filename, mode=mode)
@@ -444,10 +464,10 @@ def _get_stream(filename, openfunction=open, mode='r'):
         # An exception might be raised due to two reasons, first the openfunction is unable to open the file, in this
         # case we have to ignore the error and return None. Second is when openfunction can't open the file because
         # either the file isn't there or the permissions don't allow access.
-        if errno.errorcode[err.errno] in ['ENOENT', 'EACCES']:
+        if errno.errorcode[err.errno] in ["ENOENT", "EACCES"]:
             raise sys.exc_info()[1] from err
         return None
-    if mode.startswith('r'):
+    if mode.startswith("r"):
         # additional check for reading (eg can we uncompress) --- is this needed?
         try:
             stream.readline()
@@ -490,7 +510,7 @@ def greedy_splitext(p):
 
     """
     path, root = os.path.split(p)
-    extension = ''
+    extension = ""
     while True:
         root, ext = os.path.splitext(root)
         extension = ext + extension
@@ -535,7 +555,8 @@ def isstream(obj):
     signature_methods = ("close",)
     alternative_methods = (
         ("read", "readline", "readlines"),
-        ("write", "writeline", "writelines"))
+        ("write", "writeline", "writelines"),
+    )
 
     # Must have ALL the signature methods
     for m in signature_methods:
@@ -544,7 +565,8 @@ def isstream(obj):
     # Must have at least one complete set of alternative_methods
     alternative_results = [
         np.all([hasmethod(obj, m) for m in alternatives])
-        for alternatives in alternative_methods]
+        for alternatives in alternative_methods
+    ]
     return np.any(alternative_results)
 
 
@@ -569,9 +591,11 @@ def which(program):
        Please use shutil.which instead.
     """
     # Can't use decorator because it's declared after this method
-    wmsg = ("This method is deprecated as of MDAnalysis version 2.7.0 "
-            "and will be removed in version 3.0.0. Please use shutil.which "
-            "instead.")
+    wmsg = (
+        "This method is deprecated as of MDAnalysis version 2.7.0 "
+        "and will be removed in version 3.0.0. Please use shutil.which "
+        "instead."
+    )
     warnings.warn(wmsg, DeprecationWarning)
 
     def is_exe(fpath):
@@ -681,8 +705,9 @@ def __init__(self, stream, filename, reset=True, close=False):
         # on __del__ and super on python 3. Let's warn the user and ensure the
         # class works normally.
         if isinstance(stream, NamedStream):
-            warnings.warn("Constructed NamedStream from a NamedStream",
-                          RuntimeWarning)
+            warnings.warn(
+                "Constructed NamedStream from a NamedStream", RuntimeWarning
+            )
             stream = stream.stream
         self.stream = stream
         self.name = filename
@@ -699,9 +724,11 @@ def reset(self):
             try:
                 self.stream.seek(0)  # typical file objects
             except (AttributeError, IOError):
-                warnings.warn("NamedStream {0}: not guaranteed to be at the beginning."
-                              "".format(self.name),
-                              category=StreamWarning)
+                warnings.warn(
+                    "NamedStream {0}: not guaranteed to be at the beginning."
+                    "".format(self.name),
+                    category=StreamWarning,
+                )
 
     # access the stream
     def __getattr__(self, x):
@@ -724,9 +751,9 @@ def __exit__(self, *args):
         # NOTE: By default (close=False) we only reset the stream and NOT close it; this makes
         #       it easier to use it as a drop-in replacement for a filename that might
         #       be opened repeatedly (at least in MDAnalysis)
-        #try:
+        # try:
         #    return self.stream.__exit__(*args)
-        #except AttributeError:
+        # except AttributeError:
         #    super(NamedStream, self).__exit__(*args)
         self.close()
 
@@ -932,7 +959,9 @@ def realpath(*args):
     """
     if None in args:
         return None
-    return os.path.realpath(os.path.expanduser(os.path.expandvars(os.path.join(*args))))
+    return os.path.realpath(
+        os.path.expanduser(os.path.expandvars(os.path.join(*args)))
+    )
 
 
 def get_ext(filename):
@@ -1008,9 +1037,11 @@ def format_from_filename_extension(filename):
     try:
         root, ext = get_ext(filename)
     except Exception:
-        errmsg = (f"Cannot determine file format for file '{filename}'.\n"
-                  f"           You can set the format explicitly with "
-                  f"'Universe(..., format=FORMAT)'.")
+        errmsg = (
+            f"Cannot determine file format for file '{filename}'.\n"
+            f"           You can set the format explicitly with "
+            f"'Universe(..., format=FORMAT)'."
+        )
         raise TypeError(errmsg) from None
     format = check_compressed_format(root, ext)
     return format
@@ -1049,16 +1080,21 @@ def guess_format(filename):
             format = format_from_filename_extension(filename.name)
         except AttributeError:
             # format is None so we need to complain:
-            errmsg = (f"guess_format requires an explicit format specifier "
-                      f"for stream {filename}")
+            errmsg = (
+                f"guess_format requires an explicit format specifier "
+                f"for stream {filename}"
+            )
             raise ValueError(errmsg) from None
     else:
         # iterator, list, filename: simple extension checking... something more
         # complicated is left for the ambitious.
         # Note: at the moment the upper-case extension *is* the format specifier
         # and list of filenames is handled by ChainReader
-        format = (format_from_filename_extension(filename)
-                  if not iterable(filename) else 'CHAIN')
+        format = (
+            format_from_filename_extension(filename)
+            if not iterable(filename)
+            else "CHAIN"
+        )
 
     return format.upper()
 
@@ -1069,7 +1105,7 @@ def iterable(obj):
     if isinstance(obj, (str, NamedStream)):
         return False  # avoid iterating over characters of a string
 
-    if hasattr(obj, 'next'):
+    if hasattr(obj, "next"):
         return True  # any iterator will do
     try:
         len(obj)  # anything else that might work
@@ -1098,8 +1134,10 @@ def asiterable(obj):
 #: ``(?P<repeat>\d?)(?P<format>[IFELAX])(?P<numfmt>(?P<length>\d+)(\.(?P<decimals>\d+))?)?``
 #:
 #: .. _FORTRAN edit descriptor: http://www.cs.mtu.edu/~shene/COURSES/cs201/NOTES/chap05/format.html
-FORTRAN_format_regex = (r"(?P<repeat>\d+?)(?P<format>[IFEAX])"
-                        r"(?P<numfmt>(?P<length>\d+)(\.(?P<decimals>\d+))?)?")
+FORTRAN_format_regex = (
+    r"(?P<repeat>\d+?)(?P<format>[IFEAX])"
+    r"(?P<numfmt>(?P<length>\d+)(\.(?P<decimals>\d+))?)?"
+)
 _FORTRAN_format_pattern = re.compile(FORTRAN_format_regex)
 
 
@@ -1114,7 +1152,8 @@ class FixedcolumnEntry(object):
     Reads from line[start:stop] and converts according to
     typespecifier.
     """
-    convertors = {'I': int, 'F': float, 'E': float, 'A': strip}
+
+    convertors = {"I": int, "F": float, "E": float, "A": strip}
 
     def __init__(self, start, stop, typespecifier):
         """
@@ -1138,10 +1177,12 @@ def __init__(self, start, stop, typespecifier):
     def read(self, line):
         """Read the entry from `line` and convert to appropriate type."""
         try:
-            return self.convertor(line[self.start:self.stop])
+            return self.convertor(line[self.start : self.stop])
         except ValueError:
-            errmsg = (f"{self}: Failed to read&convert "
-                      f"{line[self.start:self.stop]}")
+            errmsg = (
+                f"{self}: Failed to read&convert "
+                f"{line[self.start:self.stop]}"
+            )
             raise ValueError(errmsg) from None
 
     def __len__(self):
@@ -1149,7 +1190,9 @@ def __len__(self):
         return self.stop - self.start
 
     def __repr__(self):
-        return "FixedcolumnEntry({0:d},{1:d},{2!r})".format(self.start, self.stop, self.typespecifier)
+        return "FixedcolumnEntry({0:d},{1:d},{2!r})".format(
+            self.start, self.stop, self.typespecifier
+        )
 
 
 class FORTRANReader(object):
@@ -1189,18 +1232,22 @@ def __init__(self, fmt):
                serial,TotRes,resName,name,x,y,z,chainID,resSeq,tempFactor = atomformat.read(line)
 
         """
-        self.fmt = fmt.split(',')
-        descriptors = [self.parse_FORTRAN_format(descriptor) for descriptor in self.fmt]
+        self.fmt = fmt.split(",")
+        descriptors = [
+            self.parse_FORTRAN_format(descriptor) for descriptor in self.fmt
+        ]
         start = 0
         self.entries = []
         for d in descriptors:
-            if d['format'] != 'X':
-                for x in range(d['repeat']):
-                    stop = start + d['length']
-                    self.entries.append(FixedcolumnEntry(start, stop, d['format']))
+            if d["format"] != "X":
+                for x in range(d["repeat"]):
+                    stop = start + d["length"]
+                    self.entries.append(
+                        FixedcolumnEntry(start, stop, d["format"])
+                    )
                     start = stop
             else:
-                start += d['totallength']
+                start += d["totallength"]
 
     def read(self, line):
         """Parse `line` according to the format string and return list of values.
@@ -1268,24 +1315,28 @@ def parse_FORTRAN_format(self, edit_descriptor):
         m = _FORTRAN_format_pattern.match(edit_descriptor.upper())
         if m is None:
             try:
-                m = _FORTRAN_format_pattern.match("1" + edit_descriptor.upper())
+                m = _FORTRAN_format_pattern.match(
+                    "1" + edit_descriptor.upper()
+                )
                 if m is None:
                     raise ValueError  # really no idea what the descriptor is supposed to mean
             except:
-                raise ValueError("unrecognized FORTRAN format {0!r}".format(edit_descriptor))
+                raise ValueError(
+                    "unrecognized FORTRAN format {0!r}".format(edit_descriptor)
+                )
         d = m.groupdict()
-        if d['repeat'] == '':
-            d['repeat'] = 1
-        if d['format'] == 'X':
-            d['length'] = 1
-        for k in ('repeat', 'length', 'decimals'):
+        if d["repeat"] == "":
+            d["repeat"] = 1
+        if d["format"] == "X":
+            d["length"] = 1
+        for k in ("repeat", "length", "decimals"):
             try:
                 d[k] = int(d[k])
             except ValueError:  # catches ''
                 d[k] = 0
             except TypeError:  # keep None
                 pass
-        d['totallength'] = d['repeat'] * d['length']
+        d["totallength"] = d["repeat"] * d["length"]
         return d
 
     def __len__(self):
@@ -1331,14 +1382,14 @@ def fixedwidth_bins(delta, xmin, xmax):
 
     """
     if not np.all(xmin < xmax):
-        raise ValueError('Boundaries are not sane: should be xmin < xmax.')
+        raise ValueError("Boundaries are not sane: should be xmin < xmax.")
     _delta = np.asarray(delta, dtype=np.float64)
     _xmin = np.asarray(xmin, dtype=np.float64)
     _xmax = np.asarray(xmax, dtype=np.float64)
     _length = _xmax - _xmin
     N = np.ceil(_length / _delta).astype(np.int_)  # number of bins
     dx = 0.5 * (N * _delta - _length)  # add half of the excess to each end
-    return {'Nbins': N, 'delta': _delta, 'min': _xmin - dx, 'max': _xmax + dx}
+    return {"Nbins": N, "delta": _delta, "min": _xmin - dx, "max": _xmax + dx}
 
 
 def get_weights(atoms, weights):
@@ -1382,16 +1433,20 @@ def get_weights(atoms, weights):
 
     if iterable(weights):
         if len(np.asarray(weights, dtype=object).shape) != 1:
-            raise ValueError("weights must be a 1D array, not with shape "
-                            "{0}".format(np.asarray(weights,
-                             dtype=object).shape))
+            raise ValueError(
+                "weights must be a 1D array, but got an array with "
+                "shape {0}".format(np.asarray(weights, dtype=object).shape)
+            )
         elif len(weights) != len(atoms):
-            raise ValueError("weights (length {0}) must be of same length as "
-                             "the atoms ({1})".format(
-                                 len(weights), len(atoms)))
+            raise ValueError(
+                "weights (length {0}) must be of the same length as "
+                "the atoms ({1})".format(len(weights), len(atoms))
+            )
     elif weights is not None:
-        raise ValueError("weights must be {'mass', None} or an iterable of the "
-                         "same size as the atomgroup.")
+        raise ValueError(
+            "weights must be {'mass', None} or an iterable of the "
+            "same size as the atomgroup."
+        )
 
     return weights
 
@@ -1402,25 +1457,45 @@ def get_weights(atoms, weights):
 #: translation table for 3-letter codes --> 1-letter codes
 #: .. SeeAlso:: :data:`alternative_inverse_aa_codes`
 canonical_inverse_aa_codes = {
-    'ALA': 'A', 'CYS': 'C', 'ASP': 'D', 'GLU': 'E',
-    'PHE': 'F', 'GLY': 'G', 'HIS': 'H', 'ILE': 'I',
-    'LYS': 'K', 'LEU': 'L', 'MET': 'M', 'ASN': 'N',
-    'PRO': 'P', 'GLN': 'Q', 'ARG': 'R', 'SER': 'S',
-    'THR': 'T', 'VAL': 'V', 'TRP': 'W', 'TYR': 'Y'}
+    "ALA": "A",
+    "CYS": "C",
+    "ASP": "D",
+    "GLU": "E",
+    "PHE": "F",
+    "GLY": "G",
+    "HIS": "H",
+    "ILE": "I",
+    "LYS": "K",
+    "LEU": "L",
+    "MET": "M",
+    "ASN": "N",
+    "PRO": "P",
+    "GLN": "Q",
+    "ARG": "R",
+    "SER": "S",
+    "THR": "T",
+    "VAL": "V",
+    "TRP": "W",
+    "TYR": "Y",
+}
 #: translation table for 1-letter codes --> *canonical* 3-letter codes.
 #: The table is used for :func:`convert_aa_code`.
-amino_acid_codes = {one: three for three,
-                    one in canonical_inverse_aa_codes.items()}
+amino_acid_codes = {
+    one: three for three, one in canonical_inverse_aa_codes.items()
+}
 #: non-default charge state amino acids or special charge state descriptions
 #: (Not fully synchronized with :class:`MDAnalysis.core.selection.ProteinSelection`.)
+# fmt: off
 alternative_inverse_aa_codes = {
-    'HISA': 'H', 'HISB': 'H', 'HSE': 'H', 'HSD': 'H', 'HID': 'H', 'HIE': 'H', 'HIS1': 'H',
-    'HIS2': 'H',
-    'ASPH': 'D', 'ASH': 'D',
-    'GLUH': 'E', 'GLH': 'E',
-    'LYSH': 'K', 'LYN': 'K',
-    'ARGN': 'R',
-    'CYSH': 'C', 'CYS1': 'C', 'CYS2': 'C'}
+    "HISA": "H", "HISB": "H", "HSE": "H", "HSD": "H", "HID": "H", "HIE": "H",
+    "HIS1": "H", "HIS2": "H",
+    "ASPH": "D", "ASH": "D",
+    "GLUH": "E", "GLH": "E",
+    "LYSH": "K", "LYN": "K",
+    "ARGN": "R",
+    "CYSH": "C", "CYS1": "C", "CYS2": "C",
+}
+# fmt: on
 #: lookup table from 3/4 letter resnames to 1-letter codes. Note that non-standard residue names
 #: for tautomers or different protonation states such as HSE are converted to canonical 1-letter codes ("H").
 #: The table is used for :func:`convert_aa_code`.
@@ -1460,14 +1535,17 @@ def convert_aa_code(x):
     try:
         return d[x.upper()]
     except KeyError:
-        errmsg = (f"No conversion for {x} found (1 letter -> 3 letter or 3/4 "
-                  f"letter -> 1 letter)")
+        errmsg = (
+            f"No conversion for {x} found (1 letter -> 3 letter or 3/4 "
+            f"letter -> 1 letter)"
+        )
         raise ValueError(errmsg) from None
 
 
 #: Regular expression to match and parse a residue-atom selection; will match
 #: "LYS300:HZ1" or "K300:HZ1" or "K300" or "4GB300:H6O" or "4GB300" or "YaA300".
-RESIDUE = re.compile(r"""
+RESIDUE = re.compile(
+    r"""
                  (?P<aa>([ACDEFGHIKLMNPQRSTVWY])   # 1-letter amino acid
                         |                          #   or
                         ([0-9A-Z][a-zA-Z][A-Z][A-Z]?)    # 3-letter or 4-letter residue name
@@ -1479,7 +1557,9 @@ def convert_aa_code(x):
                    \s*
                    (?P<atom>\w+)                   # atom name
                  )?                                # possibly one
-            """, re.VERBOSE | re.IGNORECASE)
+            """,
+    re.VERBOSE | re.IGNORECASE,
+)
 
 
 # from GromacsWrapper cbook.IndexBuilder
@@ -1514,14 +1594,18 @@ def parse_residue(residue):
     # XXX: use _translate_residue() ....
     m = RESIDUE.match(residue)
     if not m:
-        raise ValueError("Selection {residue!r} is not valid (only 1/3/4 letter resnames, resid required).".format(**vars()))
-    resid = int(m.group('resid'))
-    residue = m.group('aa')
+        raise ValueError(
+            "Selection {residue!r} is not valid (only 1/3/4 letter resnames, resid required).".format(
+                **vars()
+            )
+        )
+    resid = int(m.group("resid"))
+    residue = m.group("aa")
     if len(residue) == 1:
         resname = convert_aa_code(residue)  # only works for AA
     else:
         resname = residue  # use 3-letter for any resname
-    atomname = m.group('atom')
+    atomname = m.group("atom")
     return (resname, resid, atomname)
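
For orientation while reviewing the table and regex changes above, a short usage
sketch of the two helpers (assuming the MDAnalysis.lib.util import path); the
values follow from the code tables and the RESIDUE pattern in this hunk.

    from MDAnalysis.lib.util import convert_aa_code, parse_residue

    convert_aa_code("K")       # -> 'LYS'  (1-letter to canonical 3-letter)
    convert_aa_code("HSE")     # -> 'H'    (alternative protonation state to 1-letter)
    parse_residue("K300:HZ1")  # -> ('LYS', 300, 'HZ1')
    parse_residue("4GB300")    # -> ('4GB', 300, None), the atom part is optional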
 
 
@@ -1592,7 +1676,7 @@ def cached_lookup(func):
         def wrapper(self, *args, **kwargs):
             try:
                 if universe_validation:  # Universe-level cache validation
-                    u_cache = self.universe._cache.setdefault('_valid', dict())
+                    u_cache = self.universe._cache.setdefault("_valid", dict())
                     # A WeakSet is used so that keys from out-of-scope/deleted
                     # objects don't clutter it.
                     valid_caches = u_cache.setdefault(key, weakref.WeakSet())
@@ -1661,20 +1745,21 @@ def unique_rows(arr, return_index=False):
     # This seems to fail if arr.flags['OWNDATA'] is False
     # this can occur when second dimension was created through broadcasting
     # eg: idx = np.array([1, 2])[None, :]
-    if not arr.flags['OWNDATA']:
+    if not arr.flags["OWNDATA"]:
         arr = arr.copy()
 
     m = arr.shape[1]
 
     if return_index:
-        u, r_idx = np.unique(arr.view(dtype=np.dtype([(str(i), arr.dtype)
-                                                      for i in range(m)])),
-                             return_index=True)
+        u, r_idx = np.unique(
+            arr.view(dtype=np.dtype([(str(i), arr.dtype) for i in range(m)])),
+            return_index=True,
+        )
         return u.view(arr.dtype).reshape(-1, m), r_idx
     else:
-        u = np.unique(arr.view(
-            dtype=np.dtype([(str(i), arr.dtype) for i in range(m)])
-        ))
+        u = np.unique(
+            arr.view(dtype=np.dtype([(str(i), arr.dtype) for i in range(m)]))
+        )
         return u.view(arr.dtype).reshape(-1, m)
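
The call above relies on viewing each row as one structured element so that
np.unique can deduplicate whole rows; a small sketch of the resulting behaviour,
assuming the MDAnalysis.lib.util import path:

    import numpy as np
    from MDAnalysis.lib.util import unique_rows

    arr = np.array([[0, 1], [1, 2], [0, 1], [2, 3]], dtype=np.intp)
    unique_rows(arr)
    # rows deduplicated (np.unique sorts them): [[0, 1], [1, 2], [2, 3]]
    u, idx = unique_rows(arr, return_index=True)
    # idx gives the index of the first occurrence of each unique row in arr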
 
 
@@ -1733,20 +1818,25 @@ def blocks_of(a, n, m):
     # based on:
     # http://stackoverflow.com/a/10862636
     # but generalised to handle non square blocks.
-    if not a.flags['C_CONTIGUOUS']:
+    if not a.flags["C_CONTIGUOUS"]:
         raise ValueError("Input array is not C contiguous.")
 
     nblocks = a.shape[0] // n
     nblocks2 = a.shape[1] // m
 
     if not nblocks == nblocks2:
-        raise ValueError("Must divide into same number of blocks in both"
-                         " directions.  Got {} by {}"
-                         "".format(nblocks, nblocks2))
+        raise ValueError(
+            "Must divide into same number of blocks in both"
+            " directions.  Got {} by {}"
+            "".format(nblocks, nblocks2)
+        )
 
     new_shape = (nblocks, n, m)
-    new_strides = (n * a.strides[0] + m * a.strides[1],
-                   a.strides[0], a.strides[1])
+    new_strides = (
+        n * a.strides[0] + m * a.strides[1],
+        a.strides[0],
+        a.strides[1],
+    )
 
     return np.lib.stride_tricks.as_strided(a, new_shape, new_strides)
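
The stride arithmetic above picks out the n-by-m blocks sitting on the diagonal
and returns them as a zero-copy view; a sketch under that reading, assuming the
MDAnalysis.lib.util import path:

    import numpy as np
    from MDAnalysis.lib.util import blocks_of

    a = np.arange(16).reshape(4, 4)
    blocks_of(a, 2, 2)
    # view of the two 2x2 diagonal blocks:
    # [[[ 0,  1], [ 4,  5]],
    #  [[10, 11], [14, 15]]]
    # Because it is a view, writing into the blocks modifies a in place.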
 
@@ -1769,11 +1859,11 @@ def group_same_or_consecutive_integers(arr):
     >>> group_same_or_consecutive_integers(arr)
     [array([2, 3, 4]), array([ 7,  8,  9, 10, 11]), array([15, 16])]
     """
-    return np.split(arr, np.where(np.ediff1d(arr)-1 > 0)[0] + 1)
+    return np.split(arr, np.where(np.ediff1d(arr) - 1 > 0)[0] + 1)
 
 
 class Namespace(dict):
-    """Class to allow storing attributes in new namespace. """
+    """Class to allow storing attributes in new namespace."""
 
     def __getattr__(self, key):
         # a.this causes a __getattr__ call for key = 'this'
@@ -1850,7 +1940,7 @@ def flatten_dict(d, parent_key=tuple()):
     items = []
     for k, v in d.items():
         if type(k) != tuple:
-            new_key = parent_key + (k, )
+            new_key = parent_key + (k,)
         else:
             new_key = parent_key + k
         if isinstance(v, dict):
@@ -1886,10 +1976,12 @@ def static_variables(**kwargs):
 
     .. versionadded:: 0.19.0
     """
+
     def static_decorator(func):
         for kwarg in kwargs:
             setattr(func, kwarg, kwargs[kwarg])
         return func
+
     return static_decorator
 
 
@@ -1906,6 +1998,7 @@ def static_decorator(func):
 # method. Of course, as it is generally the case with Python warnings, this is
 # *not threadsafe*.
 
+
 @static_variables(warned=False)
 def warn_if_not_unique(groupmethod):
     """Decorator triggering a :class:`~MDAnalysis.exceptions.DuplicateWarning`
@@ -1925,6 +2018,7 @@ def warn_if_not_unique(groupmethod):
 
     .. versionadded:: 0.19.0
     """
+
     @wraps(groupmethod)
     def wrapper(group, *args, **kwargs):
         # Proceed as usual if the calling group is unique or a DuplicateWarning
@@ -1933,7 +2027,8 @@ def wrapper(group, *args, **kwargs):
             return groupmethod(group, *args, **kwargs)
         # Otherwise, throw a DuplicateWarning and execute the method.
         method_name = ".".join(
-            (group.__class__.__name__, groupmethod.__name__))
+            (group.__class__.__name__, groupmethod.__name__)
+        )
         # Try to get the group's variable name(s):
         caller_locals = inspect.currentframe().f_back.f_locals.items()
         group_names = []
@@ -1950,8 +2045,10 @@ def wrapper(group, *args, **kwargs):
         else:
             group_name = " a.k.a. ".join(sorted(group_names))
         group_repr = repr(group)
-        msg = ("{}(): {} {} contains duplicates. Results might be biased!"
-               "".format(method_name, group_name, group_repr))
+        msg = (
+            "{}(): {} {} contains duplicates. Results might be biased!"
+            "".format(method_name, group_name, group_repr)
+        )
         warnings.warn(message=msg, category=DuplicateWarning, stacklevel=2)
         warn_if_not_unique.warned = True
         try:
@@ -1959,6 +2056,7 @@ def wrapper(group, *args, **kwargs):
         finally:
             warn_if_not_unique.warned = False
         return result
+
     return wrapper
 
 
@@ -2080,17 +2178,20 @@ def check_coords(*coord_names, **options):
        Can now accept an :class:`AtomGroup` as input, and added option
        allow_atomgroup with default False to retain old behaviour
     """
-    enforce_copy = options.get('enforce_copy', True)
-    enforce_dtype = options.get('enforce_dtype', True)
-    allow_single = options.get('allow_single', True)
-    convert_single = options.get('convert_single', True)
-    reduce_result_if_single = options.get('reduce_result_if_single', True)
-    check_lengths_match = options.get('check_lengths_match',
-                                      len(coord_names) > 1)
-    allow_atomgroup = options.get('allow_atomgroup', False)
+    enforce_copy = options.get("enforce_copy", True)
+    enforce_dtype = options.get("enforce_dtype", True)
+    allow_single = options.get("allow_single", True)
+    convert_single = options.get("convert_single", True)
+    reduce_result_if_single = options.get("reduce_result_if_single", True)
+    check_lengths_match = options.get(
+        "check_lengths_match", len(coord_names) > 1
+    )
+    allow_atomgroup = options.get("allow_atomgroup", False)
     if not coord_names:
-        raise ValueError("Decorator check_coords() cannot be used without "
-                         "positional arguments.")
+        raise ValueError(
+            "Decorator check_coords() cannot be used without "
+            "positional arguments."
+        )
 
     def check_coords_decorator(func):
         fname = func.__name__
@@ -2105,18 +2206,22 @@ def check_coords_decorator(func):
         # arguments:
         for name in coord_names:
             if name not in posargnames:
-                raise ValueError("In decorator check_coords(): Name '{}' "
-                                 "doesn't correspond to any positional "
-                                 "argument of the decorated function {}()."
-                                 "".format(name, func.__name__))
+                raise ValueError(
+                    "In decorator check_coords(): Name '{}' "
+                    "doesn't correspond to any positional "
+                    "argument of the decorated function {}()."
+                    "".format(name, func.__name__)
+                )
 
         def _check_coords(coords, argname):
             is_single = False
             if isinstance(coords, np.ndarray):
                 if allow_single:
                     if (coords.ndim not in (1, 2)) or (coords.shape[-1] != 3):
-                        errmsg = (f"{fname}(): {argname}.shape must be (3,) or "
-                                  f"(n, 3), got {coords.shape}")
+                        errmsg = (
+                            f"{fname}(): {argname}.shape must be (3,) or "
+                            f"(n, 3), got {coords.shape}"
+                        )
                         raise ValueError(errmsg)
                     if coords.ndim == 1:
                         is_single = True
@@ -2124,17 +2229,22 @@ def _check_coords(coords, argname):
                             coords = coords[None, :]
                 else:
                     if (coords.ndim != 2) or (coords.shape[1] != 3):
-                        errmsg = (f"{fname}(): {argname}.shape must be (n, 3) "
-                                  f"got {coords.shape}")
+                        errmsg = (
+                            f"{fname}(): {argname}.shape must be (n, 3), "
+                            f"got {coords.shape}"
+                        )
                         raise ValueError(errmsg)
                 if enforce_dtype:
                     try:
                         coords = coords.astype(
-                            np.float32, order='C', copy=enforce_copy)
+                            np.float32, order="C", copy=enforce_copy
+                        )
                     except ValueError:
-                        errmsg = (f"{fname}(): {argname}.dtype must be"
-                                  f"convertible to float32, got"
-                                  f" {coords.dtype}.")
+                        errmsg = (
+                            f"{fname}(): {argname}.dtype must be "
+                            f"convertible to float32, got"
+                            f" {coords.dtype}."
+                        )
                         raise TypeError(errmsg) from None
                 # coordinates should now be the right shape
                 ncoord = coords.shape[0]
@@ -2143,15 +2253,19 @@ def _check_coords(coords, argname):
                     coords = coords.positions  # homogenise to a numpy array
                     ncoord = coords.shape[0]
                     if not allow_atomgroup:
-                        err = TypeError("AtomGroup or other class with a"
-                                        "`.positions` method supplied as an"
-                                        "argument, but allow_atomgroup is"
-                                        " False")
+                        err = TypeError(
+                            "AtomGroup or other class with a "
+                            "`.positions` method supplied as an "
+                            "argument, but allow_atomgroup is"
+                            " False"
+                        )
                         raise err
                 except AttributeError:
-                    raise TypeError(f"{fname}(): Parameter '{argname}' must be"
-                                    f" a numpy.ndarray or an AtomGroup,"
-                                    f" got {type(coords)}.")
+                    raise TypeError(
+                        f"{fname}(): Parameter '{argname}' must be"
+                        f" a numpy.ndarray or an AtomGroup,"
+                        f" got {type(coords)}."
+                    )
 
             return coords, is_single, ncoord
 
@@ -2164,11 +2278,11 @@ def wrapper(*args, **kwargs):
                 if len(args) > nargs:
                     # too many arguments, invoke call:
                     return func(*args, **kwargs)
-                for name in posargnames[:len(args)]:
+                for name in posargnames[: len(args)]:
                     if name in kwargs:
                         # duplicate argument, invoke call:
                         return func(*args, **kwargs)
-                for name in posargnames[len(args):]:
+                for name in posargnames[len(args) :]:
                     if name not in kwargs:
                         # missing argument, invoke call:
                         return func(*args, **kwargs)
@@ -2184,33 +2298,38 @@ def wrapper(*args, **kwargs):
             for name in coord_names:
                 idx = posargnames.index(name)
                 if idx < len(args):
-                    args[idx], is_single, ncoord = _check_coords(args[idx],
-                                                                 name)
+                    args[idx], is_single, ncoord = _check_coords(
+                        args[idx], name
+                    )
                     all_single &= is_single
                     ncoords.append(ncoord)
                 else:
-                    kwargs[name], is_single, ncoord = _check_coords(kwargs[name],
-                                                                    name)
+                    kwargs[name], is_single, ncoord = _check_coords(
+                        kwargs[name], name
+                    )
                     all_single &= is_single
                     ncoords.append(ncoord)
             if check_lengths_match and ncoords:
                 if ncoords.count(ncoords[0]) != len(ncoords):
-                    raise ValueError("{}(): {} must contain the same number of "
-                                     "coordinates, got {}."
-                                     "".format(fname, ", ".join(coord_names),
-                                               ncoords))
+                    raise ValueError(
+                        "{}(): {} must contain the same number of "
+                        "coordinates, got {}."
+                        "".format(fname, ", ".join(coord_names), ncoords)
+                    )
             # If all input coordinate arrays were 1-d, so should be the output:
             if all_single and reduce_result_if_single:
                 return func(*args, **kwargs)[0]
             return func(*args, **kwargs)
+
         return wrapper
+
     return check_coords_decorator
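
Since the decorator body is heavily re-wrapped in this hunk, a compact sketch of
what the logic does may help review. The decorated function below is illustrative
only; the conversion rules (promote (3,) to (1, 3), enforce C-contiguous float32,
reduce a single-coordinate result back to 1-d) follow the code above.

    import numpy as np
    from MDAnalysis.lib.util import check_coords

    @check_coords("coords")  # "coords" must name a positional argument
    def shift_up(coords):
        # coords arrives here as a C-contiguous float32 array of shape (n, 3)
        return coords + np.array([0.0, 0.0, 1.0], dtype=np.float32)

    shift_up(np.zeros(3))        # (3,) input is promoted; result reduced to (3,)
    shift_up(np.zeros((4, 3)))   # (n, 3) input keeps its shape, here (4, 3)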
 
 
 def check_atomgroup_not_empty(groupmethod):
     """Decorator triggering a ``ValueError`` if the underlying group is empty.
 
-    Avoids downstream errors in computing properties of empty atomgroups. 
+    Avoids downstream errors in computing properties of empty atomgroups.
 
     Raises
     ------
@@ -2221,6 +2340,7 @@ def check_atomgroup_not_empty(groupmethod):
 
     .. versionadded:: 2.4.0
     """
+
     @wraps(groupmethod)
     def wrapper(group, *args, **kwargs):
         # Throw error if the group is empty.
@@ -2230,6 +2350,7 @@ def wrapper(group, *args, **kwargs):
         else:
             result = groupmethod(group, *args, **kwargs)
         return result
+
     return wrapper
 
 
@@ -2241,6 +2362,7 @@ def wrapper(group, *args, **kwargs):
 # From numpy/lib/utils.py 1.14.5 (used under the BSD 3-clause licence,
 # https://www.numpy.org/license.html#license) and modified
 
+
 def _set_function_name(func, name):
     func.__name__ = name
     return func
@@ -2260,13 +2382,21 @@ class _Deprecate(object):
     .. versionadded:: 0.19.0
     """
 
-    def __init__(self, old_name=None, new_name=None,
-                 release=None, remove=None, message=None):
+    def __init__(
+        self,
+        old_name=None,
+        new_name=None,
+        release=None,
+        remove=None,
+        message=None,
+    ):
         self.old_name = old_name
         self.new_name = new_name
         if release is None:
-            raise ValueError("deprecate: provide release in which "
-                             "feature was deprecated.")
+            raise ValueError(
+                "deprecate: provide release in which "
+                "feature was deprecated."
+            )
         self.release = str(release)
         self.remove = str(remove) if remove is not None else remove
         self.message = message
@@ -2291,14 +2421,16 @@ def __call__(self, func, *args, **kwargs):
             depdoc = "`{0}` is deprecated!".format(old_name)
         else:
             depdoc = "`{0}` is deprecated, use `{1}` instead!".format(
-                old_name, new_name)
+                old_name, new_name
+            )
 
         warn_message = depdoc
 
         remove_text = ""
         if remove is not None:
             remove_text = "`{0}` will be removed in release {1}.".format(
-                old_name, remove)
+                old_name, remove
+            )
             warn_message += "\n" + remove_text
         if message is not None:
             warn_message += "\n" + message
@@ -2322,13 +2454,15 @@ def newfunc(*args, **kwds):
         except TypeError:
             doc = ""
 
-        deprecation_text = dedent_docstring("""\n\n
+        deprecation_text = dedent_docstring(
+            """\n\n
         .. deprecated:: {0}
            {1}
            {2}
-        """.format(release,
-                   message if message else depdoc,
-                   remove_text))
+        """.format(
+                release, message if message else depdoc, remove_text
+            )
+        )
 
         doc = "{0}\n\n{1}\n{2}\n".format(depdoc, doc, deprecation_text)
 
@@ -2435,6 +2569,8 @@ def func():
         return _Deprecate(*args, **kwargs)(fn)
     else:
         return _Deprecate(*args, **kwargs)
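
The _Deprecate machinery above is normally reached through the deprecate()
helper; a minimal sketch (release is the only required keyword, as the
ValueError above enforces; the replacement name is hypothetical):

    from MDAnalysis.lib.util import deprecate

    @deprecate(release="2.8.0", remove="3.0.0",
               message="Use some_new_helper() instead.")
    def some_old_helper():
        """Do the old thing."""

    some_old_helper()
    # calling it warns that the function is deprecated; the ".. deprecated::"
    # note is prepended to the docstring when the decorator is applied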
+
+
 #
 # ------------------------------------------------------------------
 
@@ -2515,13 +2651,16 @@ def check_box(box):
     if box is None:
         raise ValueError("Box is None")
     from .mdamath import triclinic_vectors  # avoid circular import
-    box = np.asarray(box, dtype=np.float32, order='C')
+
+    box = np.asarray(box, dtype=np.float32, order="C")
     if box.shape != (6,):
-        raise ValueError("Invalid box information. Must be of the form "
-                         "[lx, ly, lz, alpha, beta, gamma].")
-    if np.all(box[3:] == 90.):
-        return 'ortho', box[:3]
-    return 'tri_vecs', triclinic_vectors(box)
+        raise ValueError(
+            "Invalid box information. Must be of the form "
+            "[lx, ly, lz, alpha, beta, gamma]."
+        )
+    if np.all(box[3:] == 90.0):
+        return "ortho", box[:3]
+    return "tri_vecs", triclinic_vectors(box)
 
 
 def store_init_arguments(func):
@@ -2560,6 +2699,7 @@ def wrapper(self, *args, **kwargs):
                     else:
                         self._kwargs[key] = arg
         return func(self, *args, **kwargs)
+
     return wrapper
 
 
@@ -2592,12 +2732,12 @@ def atoi(s: str) -> int:
     34
     >>> atoi('foo')
     0
- 
+
 
     .. versionadded:: 2.8.0
     """
     try:
-        return int(''.join(itertools.takewhile(str.isdigit, s.strip())))
+        return int("".join(itertools.takewhile(str.isdigit, s.strip())))
     except ValueError:
         return 0
 
@@ -2609,8 +2749,8 @@ def is_installed(modulename: str):
     ----------
     modulename : str
         name of the module to be tested
-        
-     
+
+
     .. versionadded:: 2.8.0
     """
-    return importlib.util.find_spec(modulename) is not None      
+    return importlib.util.find_spec(modulename) is not None
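
For completeness, the documented behaviour of the two small helpers whose
trailing whitespace is cleaned up above:

    from MDAnalysis.lib.util import atoi, is_installed

    atoi("34f4")            # -> 34, leading digits only
    atoi("foo")             # -> 0, no leading digits
    is_installed("numpy")   # -> True if a module spec for "numpy" can be found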
diff --git a/package/pyproject.toml b/package/pyproject.toml
index 72a372ccef..05bce42486 100644
--- a/package/pyproject.toml
+++ b/package/pyproject.toml
@@ -132,7 +132,8 @@ tables\.py
 | due\.py
 | setup\.py
 | MDAnalysis/auxiliary/.*\.py
-| visualization/.*\.py
+| MDAnalysis/visualization/.*\.py
+| MDAnalysis/lib/.*\.py
 | MDAnalysis/transformations/.*\.py
 )
 '''
diff --git a/testsuite/MDAnalysisTests/lib/test_augment.py b/testsuite/MDAnalysisTests/lib/test_augment.py
index bb9d5f54d4..455e890251 100644
--- a/testsuite/MDAnalysisTests/lib/test_augment.py
+++ b/testsuite/MDAnalysisTests/lib/test_augment.py
@@ -14,71 +14,105 @@
 # MDAnalysis: A Python package for the rapid analysis of molecular dynamics
 # simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
 # Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
-# doi: 10.25080/majora-629e541a-00e
+# doi: 10.25080/majora-629e541a-00e
 #
 # N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
 # MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
 # J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
 
-import os
-import pytest
-import numpy as np
-from numpy.testing import assert_almost_equal, assert_equal
+import os
 
+import numpy as np
+import pytest
 from MDAnalysis.lib._augment import augment_coordinates, undo_augment
 from MDAnalysis.lib.distances import apply_PBC, transform_StoR
+from numpy.testing import assert_almost_equal, assert_equal
 
 # Find images for several query points,
 # here in fractional coordinates
 # Every element of qres tuple is (query, images)
 qres = (
-       ([0.1, 0.5, 0.5], [[1.1, 0.5, 0.5]]),     # box face
-       ([0.5, 0.5, 0.5], []),                    # box center
-       ([0.5, -0.1, 0.5], [[0.5, -0.1, 0.5]]),   # box face
-       ([0.1, 0.1, 0.5], [[1.1, 0.1, 0.5],
-                          [0.1, 1.1, 0.5],
-                          [1.1, 1.1, 0.5]]),     # box edge
-       ([0.5, -0.1, 1.1], [[0.5, -0.1, 0.1],
-                           [0.5, 0.9, 1.1],
-                           [0.5, -0.1, 1.1]]),   # box edge
-       ([0.1, 0.1, 0.1], [[1.1, 0.1, 0.1],
-                          [0.1, 1.1, 0.1],
-                          [0.1, 0.1, 1.1],
-                          [0.1, 1.1, 1.1],
-                          [1.1, 1.1, 0.1],
-                          [1.1, 0.1, 1.1],
-                          [1.1, 1.1, 1.1]]),     # box vertex
-       ([0.1, -0.1, 1.1], [[1.1, 0.9, 0.1],
-                           [0.1, -0.1, 0.1],
-                           [0.1, 0.9, 1.1],
-                           [0.1, -0.1, 1.1],
-                           [1.1, -0.1, 0.1],
-                           [1.1, 0.9, 1.1],
-                           [1.1, -0.1, 1.1]]),   # box vertex
-       ([2.1, -3.1, 0.1], [[1.1, 0.9, 0.1],
-                           [0.1, -0.1, 0.1],
-                           [0.1, 0.9, 1.1],
-                           [0.1, -0.1, 1.1],
-                           [1.1, -0.1, 0.1],
-                           [1.1, 0.9, 1.1],
-                           [1.1, -0.1, 1.1]]),   # box vertex
-       ([[0.1, 0.5, 0.5],
-         [0.5, -0.1, 0.5]], [[1.1, 0.5, 0.5],
-                             [0.5, -0.1, 0.5]])  # multiple queries
-       )
+    ([0.1, 0.5, 0.5], [[1.1, 0.5, 0.5]]),  # box face
+    ([0.5, 0.5, 0.5], []),  # box center
+    ([0.5, -0.1, 0.5], [[0.5, -0.1, 0.5]]),  # box face
+    (
+        [0.1, 0.1, 0.5],
+        [
+            [1.1, 0.1, 0.5],
+            [0.1, 1.1, 0.5],
+            [1.1, 1.1, 0.5],
+        ],
+    ),  # box edge
+    (
+        [0.5, -0.1, 1.1],
+        [
+            [0.5, -0.1, 0.1],
+            [0.5, 0.9, 1.1],
+            [0.5, -0.1, 1.1],
+        ],
+    ),  # box edge
+    (
+        [0.1, 0.1, 0.1],
+        [
+            [1.1, 0.1, 0.1],
+            [0.1, 1.1, 0.1],
+            [0.1, 0.1, 1.1],
+            [0.1, 1.1, 1.1],
+            [1.1, 1.1, 0.1],
+            [1.1, 0.1, 1.1],
+            [1.1, 1.1, 1.1],
+        ],
+    ),  # box vertex
+    (
+        [0.1, -0.1, 1.1],
+        [
+            [1.1, 0.9, 0.1],
+            [0.1, -0.1, 0.1],
+            [0.1, 0.9, 1.1],
+            [0.1, -0.1, 1.1],
+            [1.1, -0.1, 0.1],
+            [1.1, 0.9, 1.1],
+            [1.1, -0.1, 1.1],
+        ],
+    ),  # box vertex
+    (
+        [2.1, -3.1, 0.1],
+        [
+            [1.1, 0.9, 0.1],
+            [0.1, -0.1, 0.1],
+            [0.1, 0.9, 1.1],
+            [0.1, -0.1, 1.1],
+            [1.1, -0.1, 0.1],
+            [1.1, 0.9, 1.1],
+            [1.1, -0.1, 1.1],
+        ],
+    ),  # box vertex
+    (
+        [
+            [0.1, 0.5, 0.5],
+            [0.5, -0.1, 0.5],
+        ],
+        [
+            [1.1, 0.5, 0.5],
+            [0.5, -0.1, 0.5],
+        ],
+    ),  # multiple queries
+)
 
 
-@pytest.mark.xfail(os.name == "nt",
-                   reason="see gh-3248")
-@pytest.mark.parametrize('b', (
-                         np.array([10, 10, 10, 90, 90, 90], dtype=np.float32),
-                         np.array([10, 10, 10, 45, 60, 90], dtype=np.float32)
-                         ))
-@pytest.mark.parametrize('q, res', qres)
+@pytest.mark.xfail(os.name == "nt", reason="see gh-3248")
+@pytest.mark.parametrize(
+    "b",
+    (
+        np.array([10, 10, 10, 90, 90, 90], dtype=np.float32),
+        np.array([10, 10, 10, 45, 60, 90], dtype=np.float32),
+    ),
+)
+@pytest.mark.parametrize("q, res", qres)
 def test_augment(b, q, res):
     radius = 1.5
     q = transform_StoR(np.array(q, dtype=np.float32), b)
-    if q.shape == (3, ):
+    if q.shape == (3,):
         q = q.reshape((1, 3))
     q = apply_PBC(q, b)
     aug, mapping = augment_coordinates(q, b, radius)
@@ -94,18 +128,21 @@ def test_augment(b, q, res):
     assert_almost_equal(aug, cs, decimal=5)
 
 
-@pytest.mark.parametrize('b', (
-                         np.array([10, 10, 10, 90, 90, 90], dtype=np.float32),
-                         np.array([10, 10, 10, 45, 60, 90], dtype=np.float32)
-                         ))
-@pytest.mark.parametrize('qres', qres)
+@pytest.mark.parametrize(
+    "b",
+    (
+        np.array([10, 10, 10, 90, 90, 90], dtype=np.float32),
+        np.array([10, 10, 10, 45, 60, 90], dtype=np.float32),
+    ),
+)
+@pytest.mark.parametrize("qres", qres)
 def test_undoaugment(b, qres):
     radius = 1.5
     q = transform_StoR(np.array(qres[0], dtype=np.float32), b)
-    if q.shape == (3, ):
+    if q.shape == (3,):
         q = q.reshape((1, 3))
     q = apply_PBC(q, b)
     aug, mapping = augment_coordinates(q, b, radius)
     for idx, val in enumerate(aug):
-        imageid = np.asarray([len(q) + idx], dtype=np.intp)
+        imageid = np.asarray([len(q) + idx], dtype=np.intp)
         assert_equal(mapping[idx], undo_augment(imageid, mapping, len(q))[0])
diff --git a/testsuite/MDAnalysisTests/lib/test_cutil.py b/testsuite/MDAnalysisTests/lib/test_cutil.py
index 9f710984df..47c4d7f905 100644
--- a/testsuite/MDAnalysisTests/lib/test_cutil.py
+++ b/testsuite/MDAnalysisTests/lib/test_cutil.py
@@ -25,18 +25,23 @@
 from numpy.testing import assert_equal
 
 from MDAnalysis.lib._cutil import (
-    unique_int_1d, find_fragments, _in2d,
+    unique_int_1d,
+    find_fragments,
+    _in2d,
 )
 
 
-@pytest.mark.parametrize('values', (
-    [],  # empty array
-    [1, 1, 1, 1, ],  # all identical
-    [2, 3, 5, 7, ],  # all different, monotonic
-    [5, 2, 7, 3, ],  # all different, non-monotonic
-    [1, 2, 2, 4, 4, 6, ],  # duplicates, monotonic
-    [1, 2, 2, 6, 4, 4, ],  # duplicates, non-monotonic
-))
+@pytest.mark.parametrize(
+    "values",
+    (
+        [],  # empty array
+        [1, 1, 1, 1],  # all identical
+        [2, 3, 5, 7],  # all different, monotonic
+        [5, 2, 7, 3],  # all different, non-monotonic
+        [1, 2, 2, 4, 4, 6],  # duplicates, monotonic
+        [1, 2, 2, 6, 4, 4],  # duplicates, non-monotonic
+    ),
+)
 def test_unique_int_1d(values):
     array = np.array(values, dtype=np.intp)
     ref = np.unique(array)
@@ -46,16 +51,21 @@ def test_unique_int_1d(values):
     assert res.dtype == ref.dtype
 
 
-@pytest.mark.parametrize('edges,ref', [
-    ([[0, 1], [1, 2], [2, 3], [3, 4]],
-     [[0, 1, 2, 3, 4]]),  # linear chain
-    ([[0, 1], [1, 2], [2, 3], [3, 4], [4, 10]],
-     [[0, 1, 2, 3, 4]]),  # unused edge (4, 10)
-    ([[0, 1], [1, 2], [2, 3]],
-     [[0, 1, 2, 3], [4]]),  # lone atom
-    ([[0, 1], [1, 2], [2, 0], [3, 4], [4, 3]],
-     [[0, 1, 2], [3, 4]]),  # circular
-])
+@pytest.mark.parametrize(
+    "edges,ref",
+    [
+        ([[0, 1], [1, 2], [2, 3], [3, 4]], [[0, 1, 2, 3, 4]]),  # linear chain
+        (
+            [[0, 1], [1, 2], [2, 3], [3, 4], [4, 10]],
+            [[0, 1, 2, 3, 4]],
+        ),  # unused edge (4, 10)
+        ([[0, 1], [1, 2], [2, 3]], [[0, 1, 2, 3], [4]]),  # lone atom
+        (
+            [[0, 1], [1, 2], [2, 0], [3, 4], [4, 3]],
+            [[0, 1, 2], [3, 4]],
+        ),  # circular
+    ],
+)
 def test_find_fragments(edges, ref):
     atoms = np.arange(5)
 
@@ -75,13 +85,21 @@ def test_in2d():
     assert_equal(result, np.array([False, True, False]))
 
 
-@pytest.mark.parametrize('arr1,arr2', [
-    (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.intp),
-     np.array([[1, 2], [3, 4]], dtype=np.intp)),
-    (np.array([[1, 2], [3, 4]], dtype=np.intp),
-     np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.intp)),
-])
+@pytest.mark.parametrize(
+    "arr1,arr2",
+    [
+        (
+            np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.intp),
+            np.array([[1, 2], [3, 4]], dtype=np.intp),
+        ),
+        (
+            np.array([[1, 2], [3, 4]], dtype=np.intp),
+            np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.intp),
+        ),
+    ],
+)
 def test_in2d_VE(arr1, arr2):
-    with pytest.raises(ValueError,
-                       match=r'Both arrays must be \(n, 2\) arrays'):
+    with pytest.raises(
+        ValueError, match=r"Both arrays must be \(n, 2\) arrays"
+    ):
         _in2d(arr1, arr2)
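
As the parametrised cases above encode, unique_int_1d is expected to agree with
np.unique on intp input; a one-line sketch of that contract:

    import numpy as np
    from MDAnalysis.lib._cutil import unique_int_1d

    values = np.array([1, 2, 2, 6, 4, 4], dtype=np.intp)
    assert np.array_equal(unique_int_1d(values), np.unique(values))  # [1, 2, 4, 6]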
diff --git a/testsuite/MDAnalysisTests/lib/test_distances.py b/testsuite/MDAnalysisTests/lib/test_distances.py
index 0586ba071f..8844ef9b84 100644
--- a/testsuite/MDAnalysisTests/lib/test_distances.py
+++ b/testsuite/MDAnalysisTests/lib/test_distances.py
@@ -20,19 +20,18 @@
 # MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
 # J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
 #
-import sys
-from unittest.mock import Mock, patch
-import pytest
-import numpy as np
-from numpy.testing import assert_equal, assert_almost_equal, assert_allclose
 import itertools
+import sys
 from itertools import combinations_with_replacement as comb
+from unittest.mock import Mock, patch
 
 import MDAnalysis
-from MDAnalysis.lib import distances
+import numpy as np
+import pytest
+from MDAnalysis.lib import distances, mdamath
 from MDAnalysis.lib.distances import HAS_DISTOPIA
-from MDAnalysis.lib import mdamath
-from MDAnalysis.tests.datafiles import PSF, DCD, TRIC
+from MDAnalysis.tests.datafiles import DCD, PSF, TRIC
+from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
 
 
 class TestCheckResultArray(object):
@@ -52,25 +51,30 @@ def test_check_result_array_wrong_shape(self):
         wrong_shape = (1,) + self.ref.shape
         with pytest.raises(ValueError) as err:
             res = distances._check_result_array(self.ref, wrong_shape)
-            assert err.msg == ("Result array has incorrect shape, should be "
-                               "{0}, got {1}.".format(self.ref.shape,
-                                                      wrong_shape))
+            assert err.msg == (
+                "Result array has incorrect shape, should be "
+                "{0}, got {1}.".format(self.ref.shape, wrong_shape)
+            )
 
     def test_check_result_array_wrong_dtype(self):
         wrong_dtype = np.int64
         ref_wrong_dtype = self.ref.astype(wrong_dtype)
         with pytest.raises(TypeError) as err:
-            res = distances._check_result_array(ref_wrong_dtype, self.ref.shape)
-            assert err.msg == ("Result array must be of type numpy.float64, "
-                               "got {}.".format(wrong_dtype))
+            res = distances._check_result_array(
+                ref_wrong_dtype, self.ref.shape
+            )
+            assert err.msg == (
+                "Result array must be of type numpy.float64, "
+                "got {}.".format(wrong_dtype)
+            )
 
 
-@pytest.mark.parametrize('coord_dtype', (np.float32, np.float64))
+@pytest.mark.parametrize("coord_dtype", (np.float32, np.float64))
 def test_transform_StoR_pass(coord_dtype):
     box = np.array([10, 7, 3, 45, 60, 90], dtype=np.float32)
     s = np.array([[0.5, -0.1, 0.5]], dtype=coord_dtype)
 
-    original_r = np.array([[ 5.75,  0.36066014, 0.75]], dtype=np.float32)
+    original_r = np.array([[5.75, 0.36066014, 0.75]], dtype=np.float32)
 
     test_r = distances.transform_StoR(s, box)
 
@@ -81,10 +85,11 @@ class TestCappedDistances(object):
 
     npoints_1 = (1, 100)
 
-    boxes_1 = (np.array([10, 20, 30, 90, 90, 90], dtype=np.float32),  # ortho
-               np.array([10, 20, 30, 30, 45, 60], dtype=np.float32),  # tri_box
-               None,  # Non Periodic
-               )
+    boxes_1 = (
+        np.array([10, 20, 30, 90, 90, 90], dtype=np.float32),  # ortho
+        np.array([10, 20, 30, 30, 45, 60], dtype=np.float32),  # tri_box
+        None,  # Non Periodic
+    )
 
     @pytest.fixture()
     def query_1(self):
@@ -110,7 +115,7 @@ def query_2_atomgroup(self, query_2):
         u.atoms.positions = q2
         return u.atoms
 
-    method_1 = ('bruteforce', 'pkdtree', 'nsgrid')
+    method_1 = ("bruteforce", "pkdtree", "nsgrid")
 
     min_cutoff_1 = (None, 0.1)
 
@@ -118,90 +123,112 @@ def test_capped_distance_noresults(self):
         point1 = np.array([0.1, 0.1, 0.1], dtype=np.float32)
         point2 = np.array([0.95, 0.1, 0.1], dtype=np.float32)
 
-        pairs, dists = distances.capped_distance(point1,
-                                                 point2, max_cutoff=0.2)
+        pairs, dists = distances.capped_distance(
+            point1, point2, max_cutoff=0.2
+        )
 
         assert_equal(len(pairs), 0)
 
-    @pytest.mark.parametrize('query', ['query_1', 'query_2',
-                             'query_1_atomgroup', 'query_2_atomgroup'])
-    @pytest.mark.parametrize('npoints', npoints_1)
-    @pytest.mark.parametrize('box', boxes_1)
-    @pytest.mark.parametrize('method', method_1)
-    @pytest.mark.parametrize('min_cutoff', min_cutoff_1)
-    def test_capped_distance_checkbrute(self, npoints, box, method,
-                                        min_cutoff, query, request):
+    @pytest.mark.parametrize(
+        "query",
+        ["query_1", "query_2", "query_1_atomgroup", "query_2_atomgroup"],
+    )
+    @pytest.mark.parametrize("npoints", npoints_1)
+    @pytest.mark.parametrize("box", boxes_1)
+    @pytest.mark.parametrize("method", method_1)
+    @pytest.mark.parametrize("min_cutoff", min_cutoff_1)
+    def test_capped_distance_checkbrute(
+        self, npoints, box, method, min_cutoff, query, request
+    ):
         q = request.getfixturevalue(query)
         np.random.seed(90003)
-        points = (np.random.uniform(low=0, high=1.0,
-                  size=(npoints, 3))*(self.boxes_1[0][:3])).astype(np.float32)
+        points = (
+            np.random.uniform(low=0, high=1.0, size=(npoints, 3))
+            * (self.boxes_1[0][:3])
+        ).astype(np.float32)
         max_cutoff = 2.5
         # capped distance should be able to handle array of vectors
         # as well as single vectors.
-        pairs, dist = distances.capped_distance(q, points, max_cutoff,
-                                                min_cutoff=min_cutoff, box=box,
-                                                method=method)
+        pairs, dist = distances.capped_distance(
+            q,
+            points,
+            max_cutoff,
+            min_cutoff=min_cutoff,
+            box=box,
+            method=method,
+        )
 
-        if pairs.shape != (0, ):
+        if pairs.shape != (0,):
             found_pairs = pairs[:, 1]
         else:
             found_pairs = list()
 
         if isinstance(q, np.ndarray):
-            if(q.shape[0] == 3):
+            if q.shape[0] == 3:
                 q = q.reshape((1, 3))
 
         dists = distances.distance_array(q, points, box=box)
 
         if min_cutoff is None:
-            min_cutoff = 0.
+            min_cutoff = 0.0
         indices = np.where((dists <= max_cutoff) & (dists > min_cutoff))
 
         assert_equal(np.sort(found_pairs, axis=0), np.sort(indices[1], axis=0))
 
     # for coverage
-    @pytest.mark.parametrize('query', ['query_1', 'query_2',
-                             'query_1_atomgroup', 'query_2_atomgroup'])
-    @pytest.mark.parametrize('npoints', npoints_1)
-    @pytest.mark.parametrize('box', boxes_1)
-    @pytest.mark.parametrize('method', method_1)
-    @pytest.mark.parametrize('min_cutoff', min_cutoff_1)
-    def test_capped_distance_return(self, npoints, box, query, request,
-                                    method, min_cutoff):
+    @pytest.mark.parametrize(
+        "query",
+        ["query_1", "query_2", "query_1_atomgroup", "query_2_atomgroup"],
+    )
+    @pytest.mark.parametrize("npoints", npoints_1)
+    @pytest.mark.parametrize("box", boxes_1)
+    @pytest.mark.parametrize("method", method_1)
+    @pytest.mark.parametrize("min_cutoff", min_cutoff_1)
+    def test_capped_distance_return(
+        self, npoints, box, query, request, method, min_cutoff
+    ):
         q = request.getfixturevalue(query)
         np.random.seed(90003)
-        points = (np.random.uniform(low=0, high=1.0,
-                  size=(npoints, 3))*(self.boxes_1[0][:3])).astype(np.float32)
+        points = (
+            np.random.uniform(low=0, high=1.0, size=(npoints, 3))
+            * (self.boxes_1[0][:3])
+        ).astype(np.float32)
         max_cutoff = 0.3
         # capped distance should be able to handle array of vectors
         # as well as single vectors.
-        pairs = distances.capped_distance(q, points, max_cutoff,
-                                          min_cutoff=min_cutoff, box=box,
-                                          method=method,
-                                          return_distances=False)
+        pairs = distances.capped_distance(
+            q,
+            points,
+            max_cutoff,
+            min_cutoff=min_cutoff,
+            box=box,
+            method=method,
+            return_distances=False,
+        )
 
-        if pairs.shape != (0, ):
+        if pairs.shape != (0,):
             found_pairs = pairs[:, 1]
         else:
             found_pairs = list()
 
         if isinstance(q, np.ndarray):
-            if(q.shape[0] == 3):
+            if q.shape[0] == 3:
                 q = q.reshape((1, 3))
 
         dists = distances.distance_array(q, points, box=box)
 
         if min_cutoff is None:
-            min_cutoff = 0.
+            min_cutoff = 0.0
         indices = np.where((dists <= max_cutoff) & (dists > min_cutoff))
 
-        assert_equal(np.sort(found_pairs, axis=0),
-                     np.sort(indices[1], axis=0))
+        assert_equal(np.sort(found_pairs, axis=0), np.sort(indices[1], axis=0))
 
     def points_or_ag_self_capped(self, npoints, atomgroup=False):
         np.random.seed(90003)
-        points = (np.random.uniform(low=0, high=1.0,
-                  size=(npoints, 3))*(self.boxes_1[0][:3])).astype(np.float32)
+        points = (
+            np.random.uniform(low=0, high=1.0, size=(npoints, 3))
+            * (self.boxes_1[0][:3])
+        ).astype(np.float32)
         if atomgroup:
             u = MDAnalysis.Universe.empty(points.shape[0], trajectory=True)
             u.atoms.positions = points
@@ -209,20 +236,25 @@ def points_or_ag_self_capped(self, npoints, atomgroup=False):
         else:
             return points
 
-    @pytest.mark.parametrize('npoints', npoints_1)
-    @pytest.mark.parametrize('box', boxes_1)
-    @pytest.mark.parametrize('method', method_1)
-    @pytest.mark.parametrize('min_cutoff', min_cutoff_1)
-    @pytest.mark.parametrize('ret_dist', (False, True))
-    @pytest.mark.parametrize('atomgroup', (False, True))
-    def test_self_capped_distance(self, npoints, box, method, min_cutoff,
-                                  ret_dist, atomgroup):
+    @pytest.mark.parametrize("npoints", npoints_1)
+    @pytest.mark.parametrize("box", boxes_1)
+    @pytest.mark.parametrize("method", method_1)
+    @pytest.mark.parametrize("min_cutoff", min_cutoff_1)
+    @pytest.mark.parametrize("ret_dist", (False, True))
+    @pytest.mark.parametrize("atomgroup", (False, True))
+    def test_self_capped_distance(
+        self, npoints, box, method, min_cutoff, ret_dist, atomgroup
+    ):
         points = self.points_or_ag_self_capped(npoints, atomgroup=atomgroup)
         max_cutoff = 0.2
-        result = distances.self_capped_distance(points, max_cutoff,
-                                                min_cutoff=min_cutoff, box=box,
-                                                method=method,
-                                                return_distances=ret_dist)
+        result = distances.self_capped_distance(
+            points,
+            max_cutoff,
+            min_cutoff=min_cutoff,
+            box=box,
+            method=method,
+            return_distances=ret_dist,
+        )
         if ret_dist:
             pairs, cdists = result
         else:
@@ -251,50 +283,70 @@ def test_self_capped_distance(self, npoints, box, method, min_cutoff,
                 if min_cutoff is not None:
                     assert d_ref > min_cutoff
 
-    @pytest.mark.parametrize('box', (None,
-                                     np.array([1, 1, 1,  90, 90, 90],
-                                              dtype=np.float32),
-                                     np.array([1, 1, 1, 60, 75, 80],
-                                              dtype=np.float32)))
-    @pytest.mark.parametrize('npoints,cutoff,meth',
-                             [(1, 0.02, '_bruteforce_capped_self'),
-                              (1, 0.2, '_bruteforce_capped_self'),
-                              (600, 0.02, '_pkdtree_capped_self'),
-                              (600, 0.2, '_nsgrid_capped_self')])
+    @pytest.mark.parametrize(
+        "box",
+        (
+            None,
+            np.array([1, 1, 1, 90, 90, 90], dtype=np.float32),
+            np.array([1, 1, 1, 60, 75, 80], dtype=np.float32),
+        ),
+    )
+    @pytest.mark.parametrize(
+        "npoints,cutoff,meth",
+        [
+            (1, 0.02, "_bruteforce_capped_self"),
+            (1, 0.2, "_bruteforce_capped_self"),
+            (600, 0.02, "_pkdtree_capped_self"),
+            (600, 0.2, "_nsgrid_capped_self"),
+        ],
+    )
     def test_method_selfselection(self, box, npoints, cutoff, meth):
         np.random.seed(90003)
-        points = (np.random.uniform(low=0, high=1.0,
-                  size=(npoints, 3))).astype(np.float32)
+        points = (
+            np.random.uniform(low=0, high=1.0, size=(npoints, 3))
+        ).astype(np.float32)
         method = distances._determine_method_self(points, cutoff, box=box)
         assert_equal(method.__name__, meth)
 
-    @pytest.mark.parametrize('box', (None,
-                                     np.array([1, 1, 1,  90, 90, 90],
-                                              dtype=np.float32),
-                                     np.array([1, 1, 1, 60, 75, 80],
-                                              dtype=np.float32)))
-    @pytest.mark.parametrize('npoints,cutoff,meth',
-                             [(1, 0.02, '_bruteforce_capped'),
-                              (1, 0.2, '_bruteforce_capped'),
-                              (200, 0.02, '_nsgrid_capped'),
-                              (200, 0.35, '_bruteforce_capped'),
-                              (10000, 0.35, '_nsgrid_capped')])
+    @pytest.mark.parametrize(
+        "box",
+        (
+            None,
+            np.array([1, 1, 1, 90, 90, 90], dtype=np.float32),
+            np.array([1, 1, 1, 60, 75, 80], dtype=np.float32),
+        ),
+    )
+    @pytest.mark.parametrize(
+        "npoints,cutoff,meth",
+        [
+            (1, 0.02, "_bruteforce_capped"),
+            (1, 0.2, "_bruteforce_capped"),
+            (200, 0.02, "_nsgrid_capped"),
+            (200, 0.35, "_bruteforce_capped"),
+            (10000, 0.35, "_nsgrid_capped"),
+        ],
+    )
     def test_method_selection(self, box, npoints, cutoff, meth):
         np.random.seed(90003)
-        points = (np.random.uniform(low=0, high=1.0,
-                  size=(npoints, 3)).astype(np.float32))
+        points = np.random.uniform(low=0, high=1.0, size=(npoints, 3)).astype(
+            np.float32
+        )
         method = distances._determine_method(points, points, cutoff, box=box)
         assert_equal(method.__name__, meth)
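
Grounded in the assertions above, a hedged sketch of the capped-distance call
these tests exercise: pairs[:, 0] indexes the query, pairs[:, 1] indexes the
configuration, and only pairs within the cutoff are returned.

    import numpy as np
    from MDAnalysis.lib import distances

    box = np.array([10, 20, 30, 90, 90, 90], dtype=np.float32)
    q = np.array([[1.0, 1.0, 1.0]], dtype=np.float32)
    points = (np.random.uniform(0, 1, size=(100, 3)) * box[:3]).astype(np.float32)

    pairs, dist = distances.capped_distance(q, points, max_cutoff=2.5, box=box)
    # pairs[:, 0] indexes rows of q, pairs[:, 1] indexes rows of points;
    # dist holds the corresponding separations, all <= 2.5 under the given box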
 
 
 @pytest.fixture()
 def ref_system():
-    box = np.array([1., 1., 2., 90., 90., 90], dtype=np.float32)
+    box = np.array([1.0, 1.0, 2.0, 90.0, 90.0, 90], dtype=np.float32)
+    # fmt: off
     points = np.array(
         [
             [0, 0, 0], [1, 1, 2], [1, 0, 2],  # identical under PBC
             [0.5, 0.5, 1.5],
-        ], dtype=np.float32)
+        ],
+        dtype=np.float32,
+    )
+    # fmt: on
     ref = points[0:1]
     conf = points[1:]
 
@@ -307,11 +359,15 @@ def ref_system_universe(ref_system):
     u = MDAnalysis.Universe.empty(points.shape[0], trajectory=True)
     u.atoms.positions = points
     u.trajectory.ts.dimensions = box
-    return (box, u.atoms, u.select_atoms("index 0"),
-            u.select_atoms("index 1 to 3"))
+    return (
+        box,
+        u.atoms,
+        u.select_atoms("index 0"),
+        u.select_atoms("index 1 to 3"),
+    )
 
 
-@pytest.mark.parametrize('backend', ['serial', 'openmp'])
+@pytest.mark.parametrize("backend", ["serial", "openmp"])
 class TestDistanceArray(object):
     @staticmethod
     def _dist(x, ref):
@@ -320,65 +376,87 @@ def _dist(x, ref):
         return np.sqrt(np.dot(r, r))
 
     # test both AtomGroup and numpy array
-    @pytest.mark.parametrize('pos', ['ref_system', 'ref_system_universe'])
+    @pytest.mark.parametrize("pos", ["ref_system", "ref_system_universe"])
     def test_noPBC(self, backend, ref_system, pos, request):
         _, points, reference, _ = ref_system  # reference values
         _, all, ref, _ = request.getfixturevalue(pos)
 
         d = distances.distance_array(ref, all, backend=backend)
-        assert_almost_equal(d, np.array([[
-            self._dist(points[0], reference[0]),
-            self._dist(points[1], reference[0]),
-            self._dist(points[2], reference[0]),
-            self._dist(points[3], reference[0])]
-        ]))
+        assert_almost_equal(
+            d,
+            np.array(
+                [
+                    [
+                        self._dist(points[0], reference[0]),
+                        self._dist(points[1], reference[0]),
+                        self._dist(points[2], reference[0]),
+                        self._dist(points[3], reference[0]),
+                    ]
+                ]
+            ),
+        )
 
     # cycle through combinations of numpy array and AtomGroup
-    @pytest.mark.parametrize('pos0', ['ref_system', 'ref_system_universe'])
-    @pytest.mark.parametrize('pos1', ['ref_system', 'ref_system_universe'])
-    def test_noPBC_mixed_combinations(self, backend, ref_system, pos0, pos1,
-                                      request):
+    @pytest.mark.parametrize("pos0", ["ref_system", "ref_system_universe"])
+    @pytest.mark.parametrize("pos1", ["ref_system", "ref_system_universe"])
+    def test_noPBC_mixed_combinations(
+        self, backend, ref_system, pos0, pos1, request
+    ):
         _, points, reference, _ = ref_system  # reference values
         _, _, ref_val, _ = request.getfixturevalue(pos0)
         _, points_val, _, _ = request.getfixturevalue(pos1)
-        d = distances.distance_array(ref_val, points_val,
-                                     backend=backend)
-        assert_almost_equal(d, np.array([[
-            self._dist(points[0], reference[0]),
-            self._dist(points[1], reference[0]),
-            self._dist(points[2], reference[0]),
-            self._dist(points[3], reference[0])]
-        ]))
+        d = distances.distance_array(ref_val, points_val, backend=backend)
+        assert_almost_equal(
+            d,
+            np.array(
+                [
+                    [
+                        self._dist(points[0], reference[0]),
+                        self._dist(points[1], reference[0]),
+                        self._dist(points[2], reference[0]),
+                        self._dist(points[3], reference[0]),
+                    ]
+                ]
+            ),
+        )
 
     # test both AtomGroup and numpy array
-    @pytest.mark.parametrize('pos', ['ref_system', 'ref_system_universe'])
+    @pytest.mark.parametrize("pos", ["ref_system", "ref_system_universe"])
     def test_PBC(self, backend, ref_system, pos, request):
         box, points, _, _ = ref_system
         _, all, ref, _ = request.getfixturevalue(pos)
 
         d = distances.distance_array(ref, all, box=box, backend=backend)
 
-        assert_almost_equal(d, np.array([[0., 0., 0., self._dist(points[3],
-                            ref=[1, 1, 2])]]))
+        assert_almost_equal(
+            d,
+            np.array([[0.0, 0.0, 0.0, self._dist(points[3], ref=[1, 1, 2])]]),
+        )
 
     # cycle through combinations of numpy array and AtomGroup
-    @pytest.mark.parametrize('pos0', ['ref_system', 'ref_system_universe'])
-    @pytest.mark.parametrize('pos1', ['ref_system', 'ref_system_universe'])
-    def test_PBC_mixed_combinations(self, backend, ref_system, pos0, pos1,
-                                    request):
+    @pytest.mark.parametrize("pos0", ["ref_system", "ref_system_universe"])
+    @pytest.mark.parametrize("pos1", ["ref_system", "ref_system_universe"])
+    def test_PBC_mixed_combinations(
+        self, backend, ref_system, pos0, pos1, request
+    ):
         box, points, _, _ = ref_system
         _, _, ref_val, _ = request.getfixturevalue(pos0)
         _, points_val, _, _ = request.getfixturevalue(pos1)
-        d = distances.distance_array(ref_val, points_val,
-                                     box=box,
-                                     backend=backend)
+        d = distances.distance_array(
+            ref_val, points_val, box=box, backend=backend
+        )
         assert_almost_equal(
-            d, np.array([[0., 0., 0., self._dist(points[3], ref=[1, 1, 2])]]))
+            d,
+            np.array([[0.0, 0.0, 0.0, self._dist(points[3], ref=[1, 1, 2])]]),
+        )
 
     def test_PBC2(self, backend):
         a = np.array([7.90146923, -13.72858524, 3.75326586], dtype=np.float32)
         b = np.array([-1.36250901, 13.45423985, -0.36317623], dtype=np.float32)
-        box = np.array([5.5457325, 5.5457325, 5.5457325, 90., 90., 90.], dtype=np.float32)
+        box = np.array(
+            [5.5457325, 5.5457325, 5.5457325, 90.0, 90.0, 90.0],
+            dtype=np.float32,
+        )
 
         def mindist(a, b, box):
             x = a - b
@@ -387,24 +465,32 @@ def mindist(a, b, box):
         ref = mindist(a, b, box[:3])
         val = distances.distance_array(a, b, box=box, backend=backend)[0, 0]
 
-        assert_almost_equal(val, ref, decimal=6,
-                            err_msg="Issue 151 not correct (PBC in distance array)")
+        assert_almost_equal(
+            val,
+            ref,
+            decimal=6,
+            err_msg="Issue 151 not correct (PBC in distance array)",
+        )
+
 
 def test_distance_array_overflow_exception():
     class FakeArray(np.ndarray):
         shape = (4294967296, 3)  # upper limit is sqrt(UINT64_MAX)
         ndim = 2
+
     dummy_array = FakeArray([1, 2, 3])
-    box = np.array([100, 100, 100, 90., 90., 90.], dtype=np.float32)
+    box = np.array([100, 100, 100, 90.0, 90.0, 90.0], dtype=np.float32)
     with pytest.raises(ValueError, match="Size of resulting array"):
         distances.distance_array.__wrapped__(dummy_array, dummy_array, box=box)
 
+
 def test_self_distance_array_overflow_exception():
     class FakeArray(np.ndarray):
         shape = (6074001001, 3)  # solution of x**2 -x = 2*UINT64_MAX
         ndim = 2
+
     dummy_array = FakeArray([1, 2, 3])
-    box = np.array([100, 100, 100, 90., 90., 90.], dtype=np.float32)
+    box = np.array([100, 100, 100, 90.0, 90.0, 90.0], dtype=np.float32)
     with pytest.raises(ValueError, match="Size of resulting array"):
         distances.self_distance_array.__wrapped__(dummy_array, box=box)
 
@@ -428,7 +514,8 @@ def Triclinic_Universe():
     universe = MDAnalysis.Universe(TRIC)
     return universe
 
-@pytest.mark.parametrize('backend', ['serial', 'openmp'])
+
+@pytest.mark.parametrize("backend", ["serial", "openmp"])
 class TestDistanceArrayDCD_TRIC(object):
     # reasonable precision so that tests succeed on 32 and 64 bit machines
     # (the reference values were obtained on 64 bit)
@@ -446,12 +533,21 @@ def test_simple(self, DCD_Universe, backend):
         trajectory[10]
         x1 = U.atoms.positions
         d = distances.distance_array(x0, x1, backend=backend)
-        assert_equal(d.shape, (3341, 3341), "wrong shape (should be"
-                     "(Natoms,Natoms))")
-        assert_almost_equal(d.min(), 0.11981228170520701, self.prec,
-                            err_msg="wrong minimum distance value")
-        assert_almost_equal(d.max(), 53.572192429459619, self.prec,
-                            err_msg="wrong maximum distance value")
+        assert_equal(
+            d.shape, (3341, 3341), "wrong shape (should be (Natoms,Natoms))"
+        )
+        assert_almost_equal(
+            d.min(),
+            0.11981228170520701,
+            self.prec,
+            err_msg="wrong minimum distance value",
+        )
+        assert_almost_equal(
+            d.max(),
+            53.572192429459619,
+            self.prec,
+            err_msg="wrong maximum distance value",
+        )
 
     def test_outarray(self, DCD_Universe, backend):
         U = DCD_Universe
@@ -463,12 +559,23 @@ def test_outarray(self, DCD_Universe, backend):
         natoms = len(U.atoms)
         d = np.zeros((natoms, natoms), np.float64)
         distances.distance_array(x0, x1, result=d, backend=backend)
-        assert_equal(d.shape, (natoms, natoms), "wrong shape, should be"
-                     " (Natoms,Natoms) entries")
-        assert_almost_equal(d.min(), 0.11981228170520701, self.prec,
-                            err_msg="wrong minimum distance value")
-        assert_almost_equal(d.max(), 53.572192429459619, self.prec,
-                            err_msg="wrong maximum distance value")
+        assert_equal(
+            d.shape,
+            (natoms, natoms),
+            "wrong shape, should be" " (Natoms,Natoms) entries",
+        )
+        assert_almost_equal(
+            d.min(),
+            0.11981228170520701,
+            self.prec,
+            err_msg="wrong minimum distance value",
+        )
+        assert_almost_equal(
+            d.max(),
+            53.572192429459619,
+            self.prec,
+            err_msg="wrong maximum distance value",
+        )
 
     def test_periodic(self, DCD_Universe, backend):
         # boring with the current dcd as that has no PBC
@@ -478,14 +585,26 @@ def test_periodic(self, DCD_Universe, backend):
         x0 = U.atoms.positions
         trajectory[10]
         x1 = U.atoms.positions
-        d = distances.distance_array(x0, x1, box=U.coord.dimensions,
-                                     backend=backend)
-        assert_equal(d.shape, (3341, 3341), "should be square matrix with"
-                     " Natoms entries")
-        assert_almost_equal(d.min(), 0.11981228170520701, self.prec,
-                            err_msg="wrong minimum distance value with PBC")
-        assert_almost_equal(d.max(), 53.572192429459619, self.prec,
-                            err_msg="wrong maximum distance value with PBC")
+        d = distances.distance_array(
+            x0, x1, box=U.coord.dimensions, backend=backend
+        )
+        assert_equal(
+            d.shape,
+            (3341, 3341),
+            "should be square matrix with" " Natoms entries",
+        )
+        assert_almost_equal(
+            d.min(),
+            0.11981228170520701,
+            self.prec,
+            err_msg="wrong minimum distance value with PBC",
+        )
+        assert_almost_equal(
+            d.max(),
+            53.572192429459619,
+            self.prec,
+            err_msg="wrong maximum distance value with PBC",
+        )
 
     def test_atomgroup_simple(self, DCD_Universe, DCD_Universe2, backend):
         # need two copies as moving ts updates underlying array on atomgroup
@@ -499,53 +618,77 @@ def test_atomgroup_simple(self, DCD_Universe, DCD_Universe2, backend):
         trajectory2[10]
         x1 = U2.select_atoms("all")
         d = distances.distance_array(x0, x1, backend=backend)
-        assert_equal(d.shape, (3341, 3341), "wrong shape (should be"
-                     " (Natoms,Natoms))")
-        assert_almost_equal(d.min(), 0.11981228170520701, self.prec,
-                            err_msg="wrong minimum distance value")
-        assert_almost_equal(d.max(), 53.572192429459619, self.prec,
-                            err_msg="wrong maximum distance value")
+        assert_equal(
+            d.shape, (3341, 3341), "wrong shape (should be" " (Natoms,Natoms))"
+        )
+        assert_almost_equal(
+            d.min(),
+            0.11981228170520701,
+            self.prec,
+            err_msg="wrong minimum distance value",
+        )
+        assert_almost_equal(
+            d.max(),
+            53.572192429459619,
+            self.prec,
+            err_msg="wrong maximum distance value",
+        )
 
     # check no box and ortho box types and some slices
-    @pytest.mark.parametrize('box', [None, [50, 50, 50, 90, 90, 90]])
-    @pytest.mark.parametrize("sel, np_slice", [("all", np.s_[:, :]),
-                             ("index 0 to 8 ", np.s_[0:9, :]),
-                             ("index 9", np.s_[8, :])])
-    def test_atomgroup_matches_numpy(self, DCD_Universe, backend, sel,
-                                     np_slice, box):
+    @pytest.mark.parametrize("box", [None, [50, 50, 50, 90, 90, 90]])
+    @pytest.mark.parametrize(
+        "sel, np_slice",
+        [
+            ("all", np.s_[:, :]),
+            ("index 0 to 8 ", np.s_[0:9, :]),
+            ("index 9", np.s_[8, :]),
+        ],
+    )
+    def test_atomgroup_matches_numpy(
+        self, DCD_Universe, backend, sel, np_slice, box
+    ):
         U = DCD_Universe
         x0_ag = U.select_atoms(sel)
         x0_arr = U.atoms.positions[np_slice]
         x1_ag = U.select_atoms(sel)
         x1_arr = U.atoms.positions[np_slice]
-        d_ag = distances.distance_array(x0_ag, x1_ag, box=box,
-                                        backend=backend)
-        d_arr = distances.distance_array(x0_arr, x1_arr, box=box,
-                                         backend=backend)
-        assert_allclose(d_ag, d_arr,
-                        err_msg="AtomGroup and NumPy distances do not match")
+        d_ag = distances.distance_array(x0_ag, x1_ag, box=box, backend=backend)
+        d_arr = distances.distance_array(
+            x0_arr, x1_arr, box=box, backend=backend
+        )
+        assert_allclose(
+            d_ag, d_arr, err_msg="AtomGroup and NumPy distances do not match"
+        )
 
     # check triclinic box and some slices
-    @pytest.mark.parametrize("sel, np_slice", [("all", np.s_[:, :]),
-                             ("index 0 to 8 ", np.s_[0:9, :]),
-                             ("index 9", np.s_[8, :])])
-    def test_atomgroup_matches_numpy_tric(self, Triclinic_Universe, backend,
-                                          sel, np_slice):
+    @pytest.mark.parametrize(
+        "sel, np_slice",
+        [
+            ("all", np.s_[:, :]),
+            ("index 0 to 8 ", np.s_[0:9, :]),
+            ("index 9", np.s_[8, :]),
+        ],
+    )
+    def test_atomgroup_matches_numpy_tric(
+        self, Triclinic_Universe, backend, sel, np_slice
+    ):
         U = Triclinic_Universe
         x0_ag = U.select_atoms(sel)
         x0_arr = U.atoms.positions[np_slice]
         x1_ag = U.select_atoms(sel)
         x1_arr = U.atoms.positions[np_slice]
-        d_ag = distances.distance_array(x0_ag, x1_ag, box=U.coord.dimensions,
-                                        backend=backend)
-        d_arr = distances.distance_array(x0_arr, x1_arr,
-                                         box=U.coord.dimensions,
-                                         backend=backend)
-        assert_allclose(d_ag, d_arr,
-                        err_msg="AtomGroup and NumPy distances do not match")
+        d_ag = distances.distance_array(
+            x0_ag, x1_ag, box=U.coord.dimensions, backend=backend
+        )
+        d_arr = distances.distance_array(
+            x0_arr, x1_arr, box=U.coord.dimensions, backend=backend
+        )
+        assert_allclose(
+            d_ag, d_arr, err_msg="AtomGroup and NumPy distances do not match"
+        )
 
 
-@pytest.mark.parametrize('backend', ['serial', 'openmp'])
+@pytest.mark.parametrize("backend", ["serial", "openmp"])
 class TestSelfDistanceArrayDCD_TRIC(object):
     prec = 5
 
@@ -556,11 +699,21 @@ def test_simple(self, DCD_Universe, backend):
         x0 = U.atoms.positions
         d = distances.self_distance_array(x0, backend=backend)
         N = 3341 * (3341 - 1) / 2
-        assert_equal(d.shape, (N,), "wrong shape (should be (Natoms*(Natoms-1)/2,))")
-        assert_almost_equal(d.min(), 0.92905562402529318, self.prec,
-                            err_msg="wrong minimum distance value")
-        assert_almost_equal(d.max(), 52.4702570624190590, self.prec,
-                            err_msg="wrong maximum distance value")
+        assert_equal(
+            d.shape, (N,), "wrong shape (should be (Natoms*(Natoms-1)/2,))"
+        )
+        assert_almost_equal(
+            d.min(),
+            0.92905562402529318,
+            self.prec,
+            err_msg="wrong minimum distance value",
+        )
+        assert_almost_equal(
+            d.max(),
+            52.4702570624190590,
+            self.prec,
+            err_msg="wrong maximum distance value",
+        )
 
     def test_outarray(self, DCD_Universe, backend):
         U = DCD_Universe
@@ -571,11 +724,21 @@ def test_outarray(self, DCD_Universe, backend):
         N = natoms * (natoms - 1) // 2
         d = np.zeros((N,), np.float64)
         distances.self_distance_array(x0, result=d, backend=backend)
-        assert_equal(d.shape, (N,), "wrong shape (should be (Natoms*(Natoms-1)/2,))")
-        assert_almost_equal(d.min(), 0.92905562402529318, self.prec,
-                            err_msg="wrong minimum distance value")
-        assert_almost_equal(d.max(), 52.4702570624190590, self.prec,
-                            err_msg="wrong maximum distance value")
+        assert_equal(
+            d.shape, (N,), "wrong shape (should be (Natoms*(Natoms-1)/2,))"
+        )
+        assert_almost_equal(
+            d.min(),
+            0.92905562402529318,
+            self.prec,
+            err_msg="wrong minimum distance value",
+        )
+        assert_almost_equal(
+            d.max(),
+            52.4702570624190590,
+            self.prec,
+            err_msg="wrong maximum distance value",
+        )
 
     def test_periodic(self, DCD_Universe, backend):
         # boring with the current dcd as that has no PBC
@@ -585,13 +748,24 @@ def test_periodic(self, DCD_Universe, backend):
         x0 = U.atoms.positions
         natoms = len(U.atoms)
         N = natoms * (natoms - 1) / 2
-        d = distances.self_distance_array(x0, box=U.coord.dimensions,
-                                          backend=backend)
-        assert_equal(d.shape, (N,), "wrong shape (should be (Natoms*(Natoms-1)/2,))")
-        assert_almost_equal(d.min(), 0.92905562402529318, self.prec,
-                            err_msg="wrong minimum distance value with PBC")
-        assert_almost_equal(d.max(), 52.4702570624190590, self.prec,
-                            err_msg="wrong maximum distance value with PBC")
+        d = distances.self_distance_array(
+            x0, box=U.coord.dimensions, backend=backend
+        )
+        assert_equal(
+            d.shape, (N,), "wrong shape (should be (Natoms*(Natoms-1)/2,))"
+        )
+        assert_almost_equal(
+            d.min(),
+            0.92905562402529318,
+            self.prec,
+            err_msg="wrong minimum distance value with PBC",
+        )
+        assert_almost_equal(
+            d.max(),
+            52.4702570624190590,
+            self.prec,
+            err_msg="wrong maximum distance value with PBC",
+        )
 
     def test_atomgroup_simple(self, DCD_Universe, backend):
         U = DCD_Universe
@@ -600,49 +774,68 @@ def test_atomgroup_simple(self, DCD_Universe, backend):
         x0 = U.select_atoms("all")
         d = distances.self_distance_array(x0, backend=backend)
         N = 3341 * (3341 - 1) / 2
-        assert_equal(d.shape, (N,), "wrong shape (should be"
-                     " (Natoms*(Natoms-1)/2,))")
-        assert_almost_equal(d.min(), 0.92905562402529318, self.prec,
-                            err_msg="wrong minimum distance value")
-        assert_almost_equal(d.max(), 52.4702570624190590, self.prec,
-                            err_msg="wrong maximum distance value")
+        assert_equal(
+            d.shape, (N,), "wrong shape (should be" " (Natoms*(Natoms-1)/2,))"
+        )
+        assert_almost_equal(
+            d.min(),
+            0.92905562402529318,
+            self.prec,
+            err_msg="wrong minimum distance value",
+        )
+        assert_almost_equal(
+            d.max(),
+            52.4702570624190590,
+            self.prec,
+            err_msg="wrong maximum distance value",
+        )
 
     # check no box and ortho box types and some slices
-    @pytest.mark.parametrize('box', [None, [50, 50, 50, 90, 90, 90]])
-    @pytest.mark.parametrize("sel, np_slice", [("all", np.s_[:, :]),
-                             ("index 0 to 8 ", np.s_[0:9, :]),
-                             ("index 9", np.s_[8, :])])
-    def test_atomgroup_matches_numpy(self, DCD_Universe, backend,
-                                     sel, np_slice, box):
+    @pytest.mark.parametrize("box", [None, [50, 50, 50, 90, 90, 90]])
+    @pytest.mark.parametrize(
+        "sel, np_slice",
+        [
+            ("all", np.s_[:, :]),
+            ("index 0 to 8 ", np.s_[0:9, :]),
+            ("index 9", np.s_[8, :]),
+        ],
+    )
+    def test_atomgroup_matches_numpy(
+        self, DCD_Universe, backend, sel, np_slice, box
+    ):
         U = DCD_Universe
 
         x0_ag = U.select_atoms(sel)
         x0_arr = U.atoms.positions[np_slice]
-        d_ag = distances.self_distance_array(x0_ag, box=box,
-                                             backend=backend)
-        d_arr = distances.self_distance_array(x0_arr, box=box,
-                                              backend=backend)
-        assert_allclose(d_ag, d_arr,
-                        err_msg="AtomGroup and NumPy distances do not match")
+        d_ag = distances.self_distance_array(x0_ag, box=box, backend=backend)
+        d_arr = distances.self_distance_array(x0_arr, box=box, backend=backend)
+        assert_allclose(
+            d_ag, d_arr, err_msg="AtomGroup and NumPy distances do not match"
+        )
 
     # check triclinic box and some slices
-    @pytest.mark.parametrize("sel, np_slice", [
-                            ("index 0 to 8 ", np.s_[0:9, :]),
-                            ("index 9", np.s_[8, :])])
-    def test_atomgroup_matches_numpy_tric(self, Triclinic_Universe, backend,
-                                          sel, np_slice):
+    @pytest.mark.parametrize(
+        "sel, np_slice",
+        [("index 0 to 8 ", np.s_[0:9, :]), ("index 9", np.s_[8, :])],
+    )
+    def test_atomgroup_matches_numpy_tric(
+        self, Triclinic_Universe, backend, sel, np_slice
+    ):
         U = Triclinic_Universe
         x0_ag = U.select_atoms(sel)
         x0_arr = U.atoms.positions[np_slice]
-        d_ag = distances.self_distance_array(x0_ag, box=U.coord.dimensions,
-                                             backend=backend)
-        d_arr = distances.self_distance_array(x0_arr, box=U.coord.dimensions,
-                                              backend=backend)
-        assert_allclose(d_ag, d_arr,
-                        err_msg="AtomGroup and NumPy distances do not match")
+        d_ag = distances.self_distance_array(
+            x0_ag, box=U.coord.dimensions, backend=backend
+        )
+        d_arr = distances.self_distance_array(
+            x0_arr, box=U.coord.dimensions, backend=backend
+        )
+        assert_allclose(
+            d_ag, d_arr, err_msg="AtomGroup and NumPy distances do not match"
+        )
 
 
-@pytest.mark.parametrize('backend', ['serial', 'openmp'])
+@pytest.mark.parametrize("backend", ["serial", "openmp"])
 class TestTriclinicDistances(object):
     """Unit tests for the Triclinic PBC functions.
     Tests:
@@ -686,7 +879,7 @@ def S_mol_single(TRIC):
         S_mol2 = TRIC.atoms[390].position
         return S_mol1, S_mol2
 
-    @pytest.mark.parametrize('S_mol', [S_mol, S_mol_single], indirect=True)
+    @pytest.mark.parametrize("S_mol", [S_mol, S_mol_single], indirect=True)
     def test_transforms(self, S_mol, tri_vec_box, box, backend):
         # To check the cython coordinate transform, the same operation is done in numpy
         # Is a matrix multiplication of Coords x tri_vec_box = NewCoords, so can use np.dot
@@ -697,26 +890,48 @@ def test_transforms(self, S_mol, tri_vec_box, box, backend):
         R_mol2 = distances.transform_StoR(S_mol2, box, backend=backend)
         R_np2 = np.dot(S_mol2, tri_vec_box)
 
-        assert_almost_equal(R_mol1, R_np1, self.prec, err_msg="StoR transform failed for S_mol1")
-        assert_almost_equal(R_mol2, R_np2, self.prec, err_msg="StoR transform failed for S_mol2")
+        assert_almost_equal(
+            R_mol1,
+            R_np1,
+            self.prec,
+            err_msg="StoR transform failed for S_mol1",
+        )
+        assert_almost_equal(
+            R_mol2,
+            R_np2,
+            self.prec,
+            err_msg="StoR transform failed for S_mol2",
+        )
 
         # Round trip test
         S_test1 = distances.transform_RtoS(R_mol1, box, backend=backend)
         S_test2 = distances.transform_RtoS(R_mol2, box, backend=backend)
 
-        assert_almost_equal(S_test1, S_mol1, self.prec, err_msg="Round trip 1 failed in transform")
-        assert_almost_equal(S_test2, S_mol2, self.prec, err_msg="Round trip 2 failed in transform")
+        assert_almost_equal(
+            S_test1,
+            S_mol1,
+            self.prec,
+            err_msg="Round trip 1 failed in transform",
+        )
+        assert_almost_equal(
+            S_test2,
+            S_mol2,
+            self.prec,
+            err_msg="Round trip 2 failed in transform",
+        )
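# Illustrative sketch (not part of the patch) of the transform checked above,
# using a hypothetical box: transform_StoR maps fractional (scaled) coordinates
# S to real coordinates R via R = S . tri_vec_box, and transform_RtoS inverts it.
import numpy as np
from MDAnalysis.lib import distances, mdamath

box = np.array([10.0, 10.0, 10.0, 90.0, 60.0, 90.0], dtype=np.float32)  # hypothetical
tri_vec_box = mdamath.triclinic_vectors(box)

S = np.array([[0.25, 0.50, 0.75]], dtype=np.float32)  # fractional coordinates
R = distances.transform_StoR(S, box)                  # real-space coordinates
np.testing.assert_allclose(R, S @ tri_vec_box, rtol=1e-5)
np.testing.assert_allclose(distances.transform_RtoS(R, box), S, rtol=1e-5, atol=1e-6)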
 
     def test_selfdist(self, S_mol, box, tri_vec_box, backend):
         S_mol1, S_mol2 = S_mol
         R_coords = distances.transform_StoR(S_mol1, box, backend=backend)
         # Transform functions are tested elsewhere so taken as working here
-        dists = distances.self_distance_array(R_coords, box=box, backend=backend)
+        dists = distances.self_distance_array(
+            R_coords, box=box, backend=backend
+        )
         # Manually calculate self_distance_array
         manual = np.zeros(len(dists), dtype=np.float64)
         distpos = 0
         for i, Ri in enumerate(R_coords):
-            for Rj in R_coords[i + 1:]:
+            for Rj in R_coords[i + 1 :]:
                 Rij = Rj - Ri
                 Rij -= round(Rij[2] / tri_vec_box[2][2]) * tri_vec_box[2]
                 Rij -= round(Rij[1] / tri_vec_box[1][1]) * tri_vec_box[1]
@@ -725,18 +940,24 @@ def test_selfdist(self, S_mol, box, tri_vec_box, backend):
                 manual[distpos] = Rij  # and done, phew
                 distpos += 1
 
-        assert_almost_equal(dists, manual, self.prec,
-                            err_msg="self_distance_array failed with input 1")
+        assert_almost_equal(
+            dists,
+            manual,
+            self.prec,
+            err_msg="self_distance_array failed with input 1",
+        )
 
         # Do it again for input 2 (has wider separation in points)
         R_coords = distances.transform_StoR(S_mol2, box, backend=backend)
         # Transform functions are tested elsewhere so taken as working here
-        dists = distances.self_distance_array(R_coords, box=box, backend=backend)
+        dists = distances.self_distance_array(
+            R_coords, box=box, backend=backend
+        )
         # Manually calculate self_distance_array
         manual = np.zeros(len(dists), dtype=np.float64)
         distpos = 0
         for i, Ri in enumerate(R_coords):
-            for Rj in R_coords[i + 1:]:
+            for Rj in R_coords[i + 1 :]:
                 Rij = Rj - Ri
                 Rij -= round(Rij[2] / tri_vec_box[2][2]) * tri_vec_box[2]
                 Rij -= round(Rij[1] / tri_vec_box[1][1]) * tri_vec_box[1]
@@ -745,8 +966,12 @@ def test_selfdist(self, S_mol, box, tri_vec_box, backend):
                 manual[distpos] = Rij  # and done, phew
                 distpos += 1
 
-        assert_almost_equal(dists, manual, self.prec,
-                            err_msg="self_distance_array failed with input 2")
+        assert_almost_equal(
+            dists,
+            manual,
+            self.prec,
+            err_msg="self_distance_array failed with input 2",
+        )
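# Compact restatement (not part of the patch) of the manual reference loop in
# test_selfdist above. triclinic_vectors returns a lower-triangular matrix
# (a along x, b in the xy plane, c carrying the only z component), so the
# separation vector can be reduced one box vector at a time: c, then b, then a.
import numpy as np
from MDAnalysis.lib import mdamath

def min_image_triclinic(ri, rj, tri_vec_box):
    """Sequential reduction used as the numpy reference (adequate for the
    mildly skewed cells in these tests)."""
    rij = rj - ri
    rij -= round(rij[2] / tri_vec_box[2][2]) * tri_vec_box[2]
    rij -= round(rij[1] / tri_vec_box[1][1]) * tri_vec_box[1]
    rij -= round(rij[0] / tri_vec_box[0][0]) * tri_vec_box[0]
    return np.linalg.norm(rij)

# hypothetical usage with an arbitrary triclinic box
tvb = mdamath.triclinic_vectors([10.0, 10.0, 10.0, 80.0, 85.0, 90.0])
d = min_image_triclinic(np.zeros(3), np.array([9.0, 9.0, 9.0]), tvb)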
 
     def test_distarray(self, S_mol, tri_vec_box, box, backend):
         S_mol1, S_mol2 = S_mol
@@ -755,7 +980,9 @@ def test_distarray(self, S_mol, tri_vec_box, box, backend):
         R_mol2 = distances.transform_StoR(S_mol2, box, backend=backend)
 
         # Try with box
-        dists = distances.distance_array(R_mol1, R_mol2, box=box, backend=backend)
+        dists = distances.distance_array(
+            R_mol1, R_mol2, box=box, backend=backend
+        )
         # Manually calculate distance_array
         manual = np.zeros((len(R_mol1), len(R_mol2)))
         for i, Ri in enumerate(R_mol1):
@@ -767,36 +994,46 @@ def test_distarray(self, S_mol, tri_vec_box, box, backend):
                 Rij = np.linalg.norm(Rij)  # find norm of Rij vector
                 manual[i][j] = Rij
 
-        assert_almost_equal(dists, manual, self.prec,
-                            err_msg="distance_array failed with box")
+        assert_almost_equal(
+            dists, manual, self.prec, err_msg="distance_array failed with box"
+        )
 
     def test_pbc_dist(self, S_mol, box, backend):
         S_mol1, S_mol2 = S_mol
         results = np.array([[37.629944]])
-        dists = distances.distance_array(S_mol1, S_mol2, box=box, backend=backend)
+        dists = distances.distance_array(
+            S_mol1, S_mol2, box=box, backend=backend
+        )
 
-        assert_almost_equal(dists, results, self.prec,
-                            err_msg="distance_array failed to retrieve PBC distance")
+        assert_almost_equal(
+            dists,
+            results,
+            self.prec,
+            err_msg="distance_array failed to retrieve PBC distance",
+        )
 
     def test_pbc_wrong_wassenaar_distance(self, backend):
         box = [2, 2, 2, 60, 60, 60]
         tri_vec_box = mdamath.triclinic_vectors(box)
         a, b, c = tri_vec_box
         point_a = a + b
-        point_b = .5 * point_a
-        dist = distances.distance_array(point_a, point_b, box=box, backend=backend)
+        point_b = 0.5 * point_a
+        dist = distances.distance_array(
+            point_a, point_b, box=box, backend=backend
+        )
         assert_almost_equal(dist[0, 0], 1)
         # check that our distance is different from the wassenaar distance as
         # expected.
         assert np.linalg.norm(point_a - point_b) != dist[0, 0]
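# Standalone sketch (not part of the patch) of the scenario above: in this
# strongly tilted cell the plain Euclidean separation is sqrt(3), while the
# true minimum-image distance returned by distance_array is exactly 1.
import numpy as np
from MDAnalysis.lib import distances, mdamath

box = [2, 2, 2, 60, 60, 60]
a, b, c = mdamath.triclinic_vectors(box)
point_a = a + b
point_b = 0.5 * point_a

naive = np.linalg.norm(point_a - point_b)                        # no PBC applied
pbc = distances.distance_array(point_a, point_b, box=box)[0, 0]  # minimum image
assert not np.isclose(naive, pbc)
np.testing.assert_almost_equal(pbc, 1.0, decimal=5)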
 
 
-@pytest.mark.parametrize("box",
+@pytest.mark.parametrize(
+    "box",
     [
         None,
-        np.array([10., 15., 20., 90., 90., 90.]), # otrho
-        np.array([10., 15., 20., 70.53571, 109.48542, 70.518196]), # TRIC
-    ]
+        np.array([10.0, 15.0, 20.0, 90.0, 90.0, 90.0]),  # ortho
+        np.array([10.0, 15.0, 20.0, 70.53571, 109.48542, 70.518196]),  # TRIC
+    ],
 )
 def test_issue_3725(box):
     """
@@ -806,10 +1043,10 @@ def test_issue_3725(box):
     random_coords = np.random.uniform(-50, 50, (1000, 3))
 
     self_da_serial = distances.self_distance_array(
-        random_coords, box=box, backend='serial'
+        random_coords, box=box, backend="serial"
     )
     self_da_openmp = distances.self_distance_array(
-        random_coords, box=box, backend='openmp'
+        random_coords, box=box, backend="openmp"
     )
 
     np.testing.assert_allclose(self_da_serial, self_da_openmp)
@@ -823,10 +1060,12 @@ def conv_dtype_if_ndarr(a, dtype):
 
 
 def convert_position_dtype_if_ndarray(a, b, c, d, dtype):
-    return (conv_dtype_if_ndarr(a, dtype),
-            conv_dtype_if_ndarr(b, dtype),
-            conv_dtype_if_ndarr(c, dtype),
-            conv_dtype_if_ndarr(d, dtype))
+    return (
+        conv_dtype_if_ndarr(a, dtype),
+        conv_dtype_if_ndarr(b, dtype),
+        conv_dtype_if_ndarr(c, dtype),
+        conv_dtype_if_ndarr(d, dtype),
+    )
 
 
 def distopia_conditional_backend():
@@ -848,29 +1087,33 @@ def test_HAS_DISTOPIA_incompatible_distopia():
     # 0.3.0 functions (from
     # https://github.com/MDAnalysis/distopia/blob/main/distopia/__init__.py
     # __all__):
-    mock_distopia_030 = Mock(spec=[
-        'calc_bonds_ortho',
-        'calc_bonds_no_box',
-        'calc_bonds_triclinic',
-        'calc_angles_no_box',
-        'calc_angles_ortho',
-        'calc_angles_triclinic',
-        'calc_dihedrals_no_box',
-        'calc_dihedrals_ortho',
-        'calc_dihedrals_triclinic',
-        'calc_distance_array_no_box',
-        'calc_distance_array_ortho',
-        'calc_distance_array_triclinic',
-        'calc_self_distance_array_no_box',
-        'calc_self_distance_array_ortho',
-        'calc_self_distance_array_triclinic',
-    ])
+    mock_distopia_030 = Mock(
+        spec=[
+            "calc_bonds_ortho",
+            "calc_bonds_no_box",
+            "calc_bonds_triclinic",
+            "calc_angles_no_box",
+            "calc_angles_ortho",
+            "calc_angles_triclinic",
+            "calc_dihedrals_no_box",
+            "calc_dihedrals_ortho",
+            "calc_dihedrals_triclinic",
+            "calc_distance_array_no_box",
+            "calc_distance_array_ortho",
+            "calc_distance_array_triclinic",
+            "calc_self_distance_array_no_box",
+            "calc_self_distance_array_ortho",
+            "calc_self_distance_array_triclinic",
+        ]
+    )
     with patch.dict("sys.modules", {"distopia": mock_distopia_030}):
-        with pytest.warns(RuntimeWarning,
-                          match="Install 'distopia>=0.2.0,<0.3.0' to"):
+        with pytest.warns(
+            RuntimeWarning, match="Install 'distopia>=0.2.0,<0.3.0' to"
+        ):
             import MDAnalysis.lib._distopia
         assert not MDAnalysis.lib._distopia.HAS_DISTOPIA
 
+
 class TestCythonFunctions(object):
     # Unit tests for calc_bonds calc_angles and calc_dihedrals in lib.distances
     # Tests both numerical results as well as input types as Cython will silently
@@ -880,23 +1123,61 @@ class TestCythonFunctions(object):
     @staticmethod
     @pytest.fixture()
     def box():
-        return np.array([10., 10., 10., 90., 90., 90.], dtype=np.float32)
+        return np.array([10.0, 10.0, 10.0, 90.0, 90.0, 90.0], dtype=np.float32)
 
     @staticmethod
     @pytest.fixture()
     def triclinic_box():
-        box_vecs = np.array([[10., 0., 0.], [1., 10., 0., ], [1., 0., 10.]],
-                            dtype=np.float32)
+        box_vecs = np.array(
+            [
+                [10.0, 0.0, 0.0],
+                [1.0, 10.0, 0.0],
+                [1.0, 0.0, 10.0],
+            ],
+            dtype=np.float32,
+        )
         return mdamath.triclinic_box(box_vecs[0], box_vecs[1], box_vecs[2])
 
     @staticmethod
     @pytest.fixture()
     def positions():
         # dummy atom data
-        a = np.array([[0., 0., 0.], [0., 0., 0.], [0., 11., 0.], [1., 1., 1.]], dtype=np.float32)
-        b = np.array([[0., 0., 0.], [1., 1., 1.], [0., 0., 0.], [29., -21., 99.]], dtype=np.float32)
-        c = np.array([[0., 0., 0.], [2., 2., 2.], [11., 0., 0.], [1., 9., 9.]], dtype=np.float32)
-        d = np.array([[0., 0., 0.], [3., 3., 3.], [11., -11., 0.], [65., -65., 65.]], dtype=np.float32)
+        a = np.array(
+            [
+                [0.0, 0.0, 0.0],
+                [0.0, 0.0, 0.0],
+                [0.0, 11.0, 0.0],
+                [1.0, 1.0, 1.0],
+            ],
+            dtype=np.float32,
+        )
+        b = np.array(
+            [
+                [0.0, 0.0, 0.0],
+                [1.0, 1.0, 1.0],
+                [0.0, 0.0, 0.0],
+                [29.0, -21.0, 99.0],
+            ],
+            dtype=np.float32,
+        )
+        c = np.array(
+            [
+                [0.0, 0.0, 0.0],
+                [2.0, 2.0, 2.0],
+                [11.0, 0.0, 0.0],
+                [1.0, 9.0, 9.0],
+            ],
+            dtype=np.float32,
+        )
+        d = np.array(
+            [
+                [0.0, 0.0, 0.0],
+                [3.0, 3.0, 3.0],
+                [11.0, -11.0, 0.0],
+                [65.0, -65.0, 65.0],
+            ],
+            dtype=np.float32,
+        )
         return a, b, c, d
 
     @staticmethod
@@ -904,8 +1185,10 @@ def positions():
     def positions_atomgroups(positions):
         a, b, c, d = positions
         arrs = [a, b, c, d]
-        universes = [MDAnalysis.Universe.empty(arr.shape[0],
-                     trajectory=True) for arr in arrs]
+        universes = [
+            MDAnalysis.Universe.empty(arr.shape[0], trajectory=True)
+            for arr in arrs
+        ]
         for u, a in zip(universes, arrs):
             u.atoms.positions = a
         return tuple([u.atoms for u in universes])
@@ -914,14 +1197,15 @@ def positions_atomgroups(positions):
     @pytest.fixture()
     def wronglength():
         # has a different length to other inputs and should raise ValueError
-        return np.array([[0., 0., 0.], [3., 3., 3.]],
-                        dtype=np.float32)
+        return np.array([[0.0, 0.0, 0.0], [3.0, 3.0, 3.0]], dtype=np.float32)
 
     # coordinate shifts for single coord tests
-    shifts = [((0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)),  # no shifting
-              ((1, 0, 0), (0, 1, 1), (0, 0, 1), (1, 1, 0)),  # single box lengths
-              ((-1, 0, 1), (0, -1, 0), (1, 0, 1), (-1, -1, -1)),  # negative single
-              ((4, 3, -2), (-2, 2, 2), (-5, 2, 2), (0, 2, 2))]  # multiple boxlengths
+    shifts = [
+        ((0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)),  # no shifting
+        ((1, 0, 0), (0, 1, 1), (0, 0, 1), (1, 1, 0)),  # single box lengths
+        ((-1, 0, 1), (0, -1, 0), (1, 0, 1), (-1, -1, -1)),  # negative single
+        ((4, 3, -2), (-2, 2, 2), (-5, 2, 2), (0, 2, 2)),  # multiple boxlengths
+    ]
 
     @pytest.mark.parametrize("dtype", (np.float32, np.float64))
     @pytest.mark.parametrize("pos", ["positions", "positions_atomgroups"])
@@ -930,27 +1214,51 @@ def test_bonds(self, box, backend, dtype, pos, request):
         a, b, c, d = request.getfixturevalue(pos)
         a, b, c, d = convert_position_dtype_if_ndarray(a, b, c, d, dtype)
         dists = distances.calc_bonds(a, b, backend=backend)
-        assert_equal(len(dists), 4, err_msg="calc_bonds results have wrong length")
+        assert_equal(
+            len(dists), 4, err_msg="calc_bonds results have wrong length"
+        )
         dists_pbc = distances.calc_bonds(a, b, box=box, backend=backend)
-        #tests 0 length
-        assert_almost_equal(dists[0], 0.0, self.prec, err_msg="Zero length calc_bonds fail")
-        assert_almost_equal(dists[1], 1.7320508075688772, self.prec,
-                            err_msg="Standard length calc_bonds fail")  # arbitrary length check
+        # tests 0 length
+        assert_almost_equal(
+            dists[0], 0.0, self.prec, err_msg="Zero length calc_bonds fail"
+        )
+        assert_almost_equal(
+            dists[1],
+            1.7320508075688772,
+            self.prec,
+            err_msg="Standard length calc_bonds fail",
+        )  # arbitrary length check
         # PBC checks, 2 without, 2 with
-        assert_almost_equal(dists[2], 11.0, self.prec,
-                            err_msg="PBC check #1 w/o box")  # pbc check 1, subtract single box length
-        assert_almost_equal(dists_pbc[2], 1.0, self.prec,
-                            err_msg="PBC check #1 with box")
-        assert_almost_equal(dists[3], 104.26888318, self.prec,  # pbc check 2, subtract multiple box
-                            err_msg="PBC check #2 w/o box")  # lengths in all directions
-        assert_almost_equal(dists_pbc[3], 3.46410072, self.prec,
-                            err_msg="PBC check #w with box")
+        assert_almost_equal(
+            dists[2], 11.0, self.prec, err_msg="PBC check #1 w/o box"
+        )  # pbc check 1, subtract single box length
+        assert_almost_equal(
+            dists_pbc[2], 1.0, self.prec, err_msg="PBC check #1 with box"
+        )
+        assert_almost_equal(
+            dists[3],
+            104.26888318,
+            self.prec,
+            err_msg="PBC check #2 w/o box",
+        )  # pbc check 2, subtract multiple box lengths in all directions
+        assert_almost_equal(
+            dists_pbc[3],
+            3.46410072,
+            self.prec,
+            err_msg="PBC check #w with box",
+        )
 
     @pytest.mark.parametrize("backend", distopia_conditional_backend())
     def test_bonds_badbox(self, positions, backend):
         a, b, c, d = positions
-        badbox1 = np.array([10., 10., 10.], dtype=np.float64)
-        badbox2 = np.array([[10., 10.], [10., 10., ]], dtype=np.float32)
+        badbox1 = np.array([10.0, 10.0, 10.0], dtype=np.float64)
+        badbox2 = np.array(
+            [
+                [10.0, 10.0],
+                [10.0, 10.0],
+            ],
+            dtype=np.float32,
+        )
 
         with pytest.raises(ValueError):
             distances.calc_bonds(a, b, box=badbox1, backend=backend)
@@ -968,18 +1276,25 @@ def test_bonds_badresult(self, positions, backend):
     @pytest.mark.parametrize("dtype", (np.float32, np.float64))
     @pytest.mark.parametrize("pos", ["positions", "positions_atomgroups"])
     @pytest.mark.parametrize("backend", distopia_conditional_backend())
-    def test_bonds_triclinic(self, triclinic_box, backend, dtype, pos, request):
+    def test_bonds_triclinic(
+        self, triclinic_box, backend, dtype, pos, request
+    ):
         a, b, c, d = request.getfixturevalue(pos)
         a, b, c, d = convert_position_dtype_if_ndarray(a, b, c, d, dtype)
         dists = distances.calc_bonds(a, b, box=triclinic_box, backend=backend)
         reference = np.array([0.0, 1.7320508, 1.4142136, 2.82842712])
-        assert_almost_equal(dists, reference, self.prec, err_msg="calc_bonds with triclinic box failed")
+        assert_almost_equal(
+            dists,
+            reference,
+            self.prec,
+            err_msg="calc_bonds with triclinic box failed",
+        )
 
     @pytest.mark.parametrize("shift", shifts)
     @pytest.mark.parametrize("periodic", [True, False])
     @pytest.mark.parametrize("backend", distopia_conditional_backend())
     def test_bonds_single_coords(self, shift, periodic, backend):
-        box = np.array([10, 20, 30, 90., 90., 90.], dtype=np.float32)
+        box = np.array([10, 20, 30, 90.0, 90.0, 90.0], dtype=np.float32)
 
         coords = np.array([[1, 1, 1], [3, 1, 1]], dtype=np.float32)
 
@@ -989,7 +1304,9 @@ def test_bonds_single_coords(self, shift, periodic, backend):
         coords[1] += shift2 * box[:3]
 
         box = box if periodic else None
-        result = distances.calc_bonds(coords[0], coords[1], box, backend=backend)
+        result = distances.calc_bonds(
+            coords[0], coords[1], box, backend=backend
+        )
 
         reference = 2.0 if periodic else np.linalg.norm(coords[0] - coords[1])
 
@@ -1003,15 +1320,29 @@ def test_angles(self, backend, dtype, pos, request):
         a, b, c, d = convert_position_dtype_if_ndarray(a, b, c, d, dtype)
         angles = distances.calc_angles(a, b, c, backend=backend)
         # Check calculated values
-        assert_equal(len(angles), 4, err_msg="calc_angles results have wrong length")
+        assert_equal(
+            len(angles), 4, err_msg="calc_angles results have wrong length"
+        )
         #        assert_almost_equal(angles[0], 0.0, self.prec,
         #                           err_msg="Zero length angle calculation failed") # What should this be?
-        assert_almost_equal(angles[1], np.pi, self.prec,
-                            err_msg="180 degree angle calculation failed")
-        assert_almost_equal(np.rad2deg(angles[2]), 90., self.prec,
-                            err_msg="Ninety degree angle in calc_angles failed")
-        assert_almost_equal(angles[3], 0.098174833, self.prec,
-                            err_msg="Small angle failed in calc_angles")
+        assert_almost_equal(
+            angles[1],
+            np.pi,
+            self.prec,
+            err_msg="180 degree angle calculation failed",
+        )
+        assert_almost_equal(
+            np.rad2deg(angles[2]),
+            90.0,
+            self.prec,
+            err_msg="Ninety degree angle in calc_angles failed",
+        )
+        assert_almost_equal(
+            angles[3],
+            0.098174833,
+            self.prec,
+            err_msg="Small angle failed in calc_angles",
+        )
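# Illustrative sketch (not part of the patch) of the convention checked above:
# calc_angles(a, b, c) returns the angle at the middle position b, in radians,
# matching mdamath.angle(a - b, c - b), the numpy reference used further below.
import numpy as np
from MDAnalysis.lib import distances, mdamath

a = np.array([[1.0, 0.0, 0.0]], dtype=np.float32)
b = np.array([[0.0, 0.0, 0.0]], dtype=np.float32)
c = np.array([[0.0, 1.0, 0.0]], dtype=np.float32)

theta = distances.calc_angles(a, b, c)  # -> array([pi / 2])
np.testing.assert_almost_equal(theta[0], 0.5 * np.pi, decimal=6)
np.testing.assert_almost_equal(theta[0], mdamath.angle(a[0] - b[0], c[0] - b[0]), decimal=6)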
 
     @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_angles_bad_result(self, positions, backend):
@@ -1044,7 +1375,7 @@ def test_angles_single_coords(self, case, shift, periodic, backend):
         def manual_angle(x, y, z):
             return mdamath.angle(y - x, y - z)
 
-        box = np.array([10, 20, 30, 90., 90., 90.], dtype=np.float32)
+        box = np.array([10, 20, 30, 90.0, 90.0, 90.0], dtype=np.float32)
         (a, b, c), ref = case
 
         shift1, shift2, shift3, _ = shift
@@ -1066,12 +1397,25 @@ def test_dihedrals(self, backend, dtype, pos, request):
         a, b, c, d = convert_position_dtype_if_ndarray(a, b, c, d, dtype)
         dihedrals = distances.calc_dihedrals(a, b, c, d, backend=backend)
         # Check calculated values
-        assert_equal(len(dihedrals), 4, err_msg="calc_dihedrals results have wrong length")
+        assert_equal(
+            len(dihedrals),
+            4,
+            err_msg="calc_dihedrals results have wrong length",
+        )
         assert np.isnan(dihedrals[0]), "Zero length dihedral failed"
         assert np.isnan(dihedrals[1]), "Straight line dihedral failed"
-        assert_almost_equal(dihedrals[2], np.pi, self.prec, err_msg="180 degree dihedral failed")
-        assert_almost_equal(dihedrals[3], -0.50714064, self.prec,
-                            err_msg="arbitrary dihedral angle failed")
+        assert_almost_equal(
+            dihedrals[2],
+            np.pi,
+            self.prec,
+            err_msg="180 degree dihedral failed",
+        )
+        assert_almost_equal(
+            dihedrals[3],
+            -0.50714064,
+            self.prec,
+            err_msg="arbitrary dihedral angle failed",
+        )
 
     @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_dihedrals_wronglength(self, positions, wronglength, backend):
@@ -1094,44 +1438,52 @@ def test_dihedrals_bad_result(self, positions, backend):
         badresult = np.zeros(len(a) - 1)  # Bad result array
 
         with pytest.raises(ValueError):
-            distances.calc_dihedrals(a, b, c, d, result=badresult, backend=backend)
+            distances.calc_dihedrals(
+                a, b, c, d, result=badresult, backend=backend
+            )
 
     @pytest.mark.parametrize(
         "case",
         [
             (
                 np.array(
-                    [[1, 2, 1], [1, 1, 1], [2, 1, 1], [2, 2, 1]], dtype=np.float32
+                    [[1, 2, 1], [1, 1, 1], [2, 1, 1], [2, 2, 1]],
+                    dtype=np.float32,
                 ),
                 0.0,
             ),  # 0 degree angle (cis)
             (
                 np.array(
-                    [[1, 2, 1], [1, 1, 1], [2, 1, 1], [2, 0, 1]], dtype=np.float32
+                    [[1, 2, 1], [1, 1, 1], [2, 1, 1], [2, 0, 1]],
+                    dtype=np.float32,
                 ),
                 np.pi,
             ),  # 180 degree (trans)
             (
                 np.array(
-                    [[1, 2, 1], [1, 1, 1], [2, 1, 1], [2, 1, 2]], dtype=np.float32
+                    [[1, 2, 1], [1, 1, 1], [2, 1, 1], [2, 1, 2]],
+                    dtype=np.float32,
                 ),
                 0.5 * np.pi,
             ),  # 90 degree
             (
                 np.array(
-                    [[1, 2, 1], [1, 1, 1], [2, 1, 1], [2, 1, 0]], dtype=np.float32
+                    [[1, 2, 1], [1, 1, 1], [2, 1, 1], [2, 1, 0]],
+                    dtype=np.float32,
                 ),
                 0.5 * np.pi,
             ),  # other 90 degree
             (
                 np.array(
-                    [[1, 2, 1], [1, 1, 1], [2, 1, 1], [2, 2, 2]], dtype=np.float32
+                    [[1, 2, 1], [1, 1, 1], [2, 1, 1], [2, 2, 2]],
+                    dtype=np.float32,
                 ),
                 0.25 * np.pi,
             ),  # 45 degree
             (
                 np.array(
-                    [[1, 2, 1], [1, 1, 1], [2, 1, 1], [2, 0, 2]], dtype=np.float32
+                    [[1, 2, 1], [1, 1, 1], [2, 1, 1], [2, 0, 2]],
+                    dtype=np.float32,
                 ),
                 0.75 * np.pi,
             ),  # 135
@@ -1144,7 +1496,7 @@ def test_dihedrals_single_coords(self, case, shift, periodic, backend):
         def manual_dihedral(a, b, c, d):
             return mdamath.dihedral(b - a, c - b, d - c)
 
-        box = np.array([10., 10., 10., 90., 90., 90.], dtype=np.float32)
+        box = np.array([10.0, 10.0, 10.0, 90.0, 90.0, 90.0], dtype=np.float32)
 
         (a, b, c, d), ref = case
 
@@ -1181,7 +1533,9 @@ def test_numpy_compliance_angles(self, positions, backend):
         angles = distances.calc_angles(a, b, c, backend=backend)
         vec1 = a - b
         vec2 = c - b
-        angles_numpy = np.array([mdamath.angle(x, y) for x, y in zip(vec1, vec2)])
+        angles_numpy = np.array(
+            [mdamath.angle(x, y) for x, y in zip(vec1, vec2)]
+        )
         # numpy 0 angle returns NaN rather than 0
         assert_almost_equal(
             angles[1:],
@@ -1198,12 +1552,18 @@ def test_numpy_compliance_dihedrals(self, positions, backend):
         ab = a - b
         bc = b - c
         cd = c - d
-        dihedrals_numpy = np.array([mdamath.dihedral(x, y, z) for x, y, z in zip(ab, bc, cd)])
-        assert_almost_equal(dihedrals, dihedrals_numpy, self.prec,
-                            err_msg="Cython dihedrals didn't match numpy calculations")
+        dihedrals_numpy = np.array(
+            [mdamath.dihedral(x, y, z) for x, y, z in zip(ab, bc, cd)]
+        )
+        assert_almost_equal(
+            dihedrals,
+            dihedrals_numpy,
+            self.prec,
+            err_msg="Cython dihedrals didn't match numpy calculations",
+        )
 
 
-@pytest.mark.parametrize('backend', ['serial', 'openmp'])
+@pytest.mark.parametrize("backend", ["serial", "openmp"])
 class Test_apply_PBC(object):
     prec = 6
 
@@ -1237,23 +1597,30 @@ def Triclinic_universe_ag_box(self, Triclinic_Universe):
         box = U.dimensions
         return atoms, box
 
-    @pytest.mark.parametrize('pos', ['DCD_universe_pos', 'DCD_universe_ag'])
+    @pytest.mark.parametrize("pos", ["DCD_universe_pos", "DCD_universe_ag"])
     def test_ortho_PBC(self, backend, pos, request, DCD_universe_pos):
         positions = request.getfixturevalue(pos)
-        box = np.array([2.5, 2.5, 3.5, 90., 90., 90.], dtype=np.float32)
+        box = np.array([2.5, 2.5, 3.5, 90.0, 90.0, 90.0], dtype=np.float32)
         with pytest.raises(ValueError):
             cyth1 = distances.apply_PBC(positions, box[:3], backend=backend)
         cyth2 = distances.apply_PBC(positions, box, backend=backend)
-        reference = (DCD_universe_pos -
-                     np.floor(DCD_universe_pos / box[:3]) * box[:3])
+        reference = (
+            DCD_universe_pos - np.floor(DCD_universe_pos / box[:3]) * box[:3]
+        )
 
-        assert_almost_equal(cyth2, reference, self.prec,
-                            err_msg="Ortho apply_PBC #2 failed comparison with np")
+        assert_almost_equal(
+            cyth2,
+            reference,
+            self.prec,
+            err_msg="Ortho apply_PBC #2 failed comparison with np",
+        )
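# Standalone sketch (not part of the patch) of the orthorhombic reference used
# in test_ortho_PBC above: apply_PBC wraps every coordinate into [0, L) per
# component, i.e. x -> x - floor(x / L) * L. The coordinates are hypothetical.
import numpy as np
from MDAnalysis.lib import distances

box = np.array([2.5, 2.5, 3.5, 90.0, 90.0, 90.0], dtype=np.float32)
coords = np.array([[3.0, -0.1, 7.2], [0.4, 2.6, -3.6]], dtype=np.float32)

wrapped = distances.apply_PBC(coords, box)
reference = coords - np.floor(coords / box[:3]) * box[:3]
np.testing.assert_almost_equal(wrapped, reference, decimal=5)
assert np.all((wrapped >= 0.0) & (wrapped < box[:3]))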
 
-    @pytest.mark.parametrize('pos', ['Triclinic_universe_pos_box',
-                             'Triclinic_universe_ag_box'])
+    @pytest.mark.parametrize(
+        "pos", ["Triclinic_universe_pos_box", "Triclinic_universe_ag_box"]
+    )
     def test_tric_PBC(self, backend, pos, request):
         positions, box = request.getfixturevalue(pos)
+
         def numpy_PBC(coords, box):
             # need this to allow both AtomGroup and array
             if isinstance(coords, MDAnalysis.core.AtomGroup):
@@ -1271,8 +1638,12 @@ def numpy_PBC(coords, box):
 
         reference = numpy_PBC(positions, box)
 
-        assert_almost_equal(cyth1, reference, decimal=4,
-                            err_msg="Triclinic apply_PBC failed comparison with np")
+        assert_almost_equal(
+            cyth1,
+            reference,
+            decimal=4,
+            err_msg="Triclinic apply_PBC failed comparison with np",
+        )
 
         box = np.array([10, 7, 3, 45, 60, 90], dtype=np.float32)
         r = np.array([5.75, 0.36066014, 0.75], dtype=np.float32)
@@ -1283,14 +1654,19 @@ def numpy_PBC(coords, box):
     def test_coords_strictly_in_central_image_ortho(self, backend):
         box = np.array([10.1, 10.1, 10.1, 90.0, 90.0, 90.0], dtype=np.float32)
         # coordinates just below lower or exactly at the upper box boundaries:
-        coords = np.array([[-1.0e-7, -1.0e-7, -1.0e-7],
-                           [-1.0e-7, -1.0e-7,  box[2]],
-                           [-1.0e-7,  box[1], -1.0e-7],
-                           [ box[0], -1.0e-7, -1.0e-7],
-                           [ box[0],  box[1], -1.0e-7],
-                           [ box[0], -1.0e-7,  box[2]],
-                           [-1.0e-7,  box[1],  box[2]],
-                           [ box[0],  box[1],  box[2]]], dtype=np.float32)
+        coords = np.array(
+            [
+                [-1.0e-7, -1.0e-7, -1.0e-7],
+                [-1.0e-7, -1.0e-7, box[2]],
+                [-1.0e-7, box[1], -1.0e-7],
+                [box[0], -1.0e-7, -1.0e-7],
+                [box[0], box[1], -1.0e-7],
+                [box[0], -1.0e-7, box[2]],
+                [-1.0e-7, box[1], box[2]],
+                [box[0], box[1], box[2]],
+            ],
+            dtype=np.float32,
+        )
         # Check that all test coordinates actually lie below the lower or
         # exactly at the upper box boundary:
         assert np.all((coords < 0.0) | (coords == box[:3]))
@@ -1301,22 +1677,33 @@ def test_coords_strictly_in_central_image_ortho(self, backend):
 
     def test_coords_in_central_image_tric(self, backend):
         # Triclinic box corresponding to this box matrix:
-        tbx = np.array([[10.1      ,  0.       ,  0.       ],
-                        [ 1.0100002, 10.1      ,  0.       ],
-                        [ 1.0100006,  1.0100021, 10.1      ]],
-                       dtype=np.float32)
+        tbx = np.array(
+            [
+                [10.1, 0.0, 0.0],
+                [1.0100002, 10.1, 0.0],
+                [1.0100006, 1.0100021, 10.1],
+            ],
+            dtype=np.float32,
+        )
         box = mdamath.triclinic_box(*tbx)
         # coordinates just below lower or exactly at the upper box boundaries:
-        coords = np.array([[  -1.0e-7,   -1.0e-7,   -1.0e-7],
-                           [tbx[0, 0],   -1.0e-7,   -1.0e-7],
-                           [   1.01  , tbx[1, 1],   -1.0e-7],
-                           [   1.01  ,    1.01  , tbx[2, 2]],
-                           [tbx[0, 0] + tbx[1, 0], tbx[1, 1], -1.0e-7],
-                           [tbx[0, 0] + tbx[2, 0], 1.01, tbx[2, 2]],
-                           [2.02, tbx[1, 1] + tbx[2, 1], tbx[2, 2]],
-                           [tbx[0, 0] + tbx[1, 0] + tbx[2, 0],
-                            tbx[1, 1] + tbx[2, 1], tbx[2, 2]]],
-                          dtype=np.float32)
+        coords = np.array(
+            [
+                [-1.0e-7, -1.0e-7, -1.0e-7],
+                [tbx[0, 0], -1.0e-7, -1.0e-7],
+                [1.01, tbx[1, 1], -1.0e-7],
+                [1.01, 1.01, tbx[2, 2]],
+                [tbx[0, 0] + tbx[1, 0], tbx[1, 1], -1.0e-7],
+                [tbx[0, 0] + tbx[2, 0], 1.01, tbx[2, 2]],
+                [2.02, tbx[1, 1] + tbx[2, 1], tbx[2, 2]],
+                [
+                    tbx[0, 0] + tbx[1, 0] + tbx[2, 0],
+                    tbx[1, 1] + tbx[2, 1],
+                    tbx[2, 2],
+                ],
+            ],
+            dtype=np.float32,
+        )
         relcoords = distances.transform_RtoS(coords, box)
         # Check that all test coordinates actually lie below the lower or
         # exactly at the upper box boundary:
@@ -1328,11 +1715,12 @@ def test_coords_in_central_image_tric(self, backend):
         assert np.all(relres < 1.0)
 
 
-@pytest.mark.parametrize('backend', ['serial', 'openmp'])
+@pytest.mark.parametrize("backend", ["serial", "openmp"])
 class TestPeriodicAngles(object):
     """Test case for properly considering minimum image convention when calculating angles and dihedrals
     (Issue 172)
     """
+
     @staticmethod
     @pytest.fixture()
     def positions():
@@ -1361,7 +1749,12 @@ def test_angles(self, positions, backend):
         test4 = distances.calc_angles(a2, b2, c2, box=box, backend=backend)
 
         for val in [test1, test2, test3, test4]:
-            assert_almost_equal(ref, val, self.prec, err_msg="Min image in angle calculation failed")
+            assert_almost_equal(
+                ref,
+                val,
+                self.prec,
+                err_msg="Min image in angle calculation failed",
+            )
 
     def test_dihedrals(self, positions, backend):
         a, b, c, d, box = positions
@@ -1377,10 +1770,18 @@ def test_dihedrals(self, positions, backend):
         test2 = distances.calc_dihedrals(a, b2, c, d, box=box, backend=backend)
         test3 = distances.calc_dihedrals(a, b, c2, d, box=box, backend=backend)
         test4 = distances.calc_dihedrals(a, b, c, d2, box=box, backend=backend)
-        test5 = distances.calc_dihedrals(a2, b2, c2, d2, box=box, backend=backend)
+        test5 = distances.calc_dihedrals(
+            a2, b2, c2, d2, box=box, backend=backend
+        )
 
         for val in [test1, test2, test3, test4, test5]:
-            assert_almost_equal(ref, val, self.prec, err_msg="Min image in dihedral calculation failed")
+            assert_almost_equal(
+                ref,
+                val,
+                self.prec,
+                err_msg="Min image in dihedral calculation failed",
+            )
+
 
 class TestInputUnchanged(object):
     """Tests ensuring that the following functions in MDAnalysis.lib.distances
@@ -1397,87 +1798,100 @@ class TestInputUnchanged(object):
       * apply_PBC
     """
 
-    boxes = ([1.0, 1.0, 1.0, 90.0, 90.0, 90.0],  # orthorhombic
-             [1.0, 1.0, 1.0, 80.0, 80.0, 80.0],  # triclinic
-             None)  # no PBC
+    boxes = (
+        [1.0, 1.0, 1.0, 90.0, 90.0, 90.0],  # orthorhombic
+        [1.0, 1.0, 1.0, 80.0, 80.0, 80.0],  # triclinic
+        None,  # no PBC
+    )
 
     @staticmethod
     @pytest.fixture()
     def coords():
         # input coordinates, some outside the [1, 1, 1] box:
-        return [np.array([[0.1, 0.1, 0.1], [-0.9, -0.9, -0.9]], dtype=np.float32),
-                np.array([[0.1, 0.1, 1.9], [-0.9, -0.9,  0.9]], dtype=np.float32),
-                np.array([[0.1, 1.9, 1.9], [-0.9,  0.9,  0.9]], dtype=np.float32),
-                np.array([[0.1, 1.9, 0.1], [-0.9,  0.9, -0.9]], dtype=np.float32)]
+        return [
+            np.array([[0.1, 0.1, 0.1], [-0.9, -0.9, -0.9]], dtype=np.float32),
+            np.array([[0.1, 0.1, 1.9], [-0.9, -0.9, 0.9]], dtype=np.float32),
+            np.array([[0.1, 1.9, 1.9], [-0.9, 0.9, 0.9]], dtype=np.float32),
+            np.array([[0.1, 1.9, 0.1], [-0.9, 0.9, -0.9]], dtype=np.float32),
+        ]
 
     @staticmethod
     @pytest.fixture()
     def coords_atomgroups(coords):
-        universes = [MDAnalysis.Universe.empty(arr.shape[0], trajectory=True)
-                     for arr in coords]
+        universes = [
+            MDAnalysis.Universe.empty(arr.shape[0], trajectory=True)
+            for arr in coords
+        ]
         for u, a in zip(universes, coords):
             u.atoms.positions = a
         return [u.atoms for u in universes]
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_input_unchanged_distance_array(self, coords, box, backend):
         crds = coords[:2]
         refs = [crd.copy() for crd in crds]
-        res = distances.distance_array(crds[0], crds[1], box=box,
-                                       backend=backend)
+        res = distances.distance_array(
+            crds[0], crds[1], box=box, backend=backend
+        )
         assert_equal(crds, refs)
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
-    def test_input_unchanged_distance_array_atomgroup(self, coords_atomgroups,
-                                                      box, backend):
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
+    def test_input_unchanged_distance_array_atomgroup(
+        self, coords_atomgroups, box, backend
+    ):
         crds = coords_atomgroups[:2]
         refs = [crd.positions.copy() for crd in crds]
-        res = distances.distance_array(crds[0], crds[1], box=box,
-                                       backend=backend)
+        res = distances.distance_array(
+            crds[0], crds[1], box=box, backend=backend
+        )
         assert_equal([crd.positions for crd in crds], refs)
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_input_unchanged_self_distance_array(self, coords, box, backend):
         crd = coords[0]
         ref = crd.copy()
         res = distances.self_distance_array(crd, box=box, backend=backend)
         assert_equal(crd, ref)
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
-    def test_input_unchanged_self_distance_array_atomgroup(self,
-                                                           coords_atomgroups,
-                                                           box, backend):
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
+    def test_input_unchanged_self_distance_array_atomgroup(
+        self, coords_atomgroups, box, backend
+    ):
         crd = coords_atomgroups[0]
         ref = crd.positions.copy()
         res = distances.self_distance_array(crd, box=box, backend=backend)
         assert_equal(crd.positions, ref)
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('met', ["bruteforce", "pkdtree", "nsgrid", None])
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("met", ["bruteforce", "pkdtree", "nsgrid", None])
     def test_input_unchanged_capped_distance(self, coords, box, met):
         crds = coords[:2]
         refs = [crd.copy() for crd in crds]
-        res = distances.capped_distance(crds[0], crds[1], max_cutoff=0.3,
-                                        box=box, method=met)
+        res = distances.capped_distance(
+            crds[0], crds[1], max_cutoff=0.3, box=box, method=met
+        )
         assert_equal(crds, refs)
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('met', ["bruteforce", "pkdtree", "nsgrid", None])
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("met", ["bruteforce", "pkdtree", "nsgrid", None])
     def test_input_unchanged_self_capped_distance(self, coords, box, met):
         crd = coords[0]
         ref = crd.copy()
         r_cut = 0.25
-        res = distances.self_capped_distance(crd, max_cutoff=r_cut, box=box,
-                                             method=met)
+        res = distances.self_capped_distance(
+            crd, max_cutoff=r_cut, box=box, method=met
+        )
         assert_equal(crd, ref)
 
-    @pytest.mark.parametrize('box', boxes[:2])
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
-    def test_input_unchanged_transform_RtoS_and_StoR(self, coords, box, backend):
+    @pytest.mark.parametrize("box", boxes[:2])
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
+    def test_input_unchanged_transform_RtoS_and_StoR(
+        self, coords, box, backend
+    ):
         crd = coords[0]
         ref = crd.copy()
         res = distances.transform_RtoS(crd, box, backend=backend)
@@ -1505,61 +1919,69 @@ def test_input_unchanged_calc_bonds_atomgroup(
         res = distances.calc_bonds(crds[0], crds[1], box=box, backend=backend)
         assert_equal([crd.positions for crd in crds], refs)
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_input_unchanged_calc_angles(self, coords, box, backend):
         crds = coords[:3]
         refs = [crd.copy() for crd in crds]
-        res = distances.calc_angles(crds[0], crds[1], crds[2], box=box,
-                                    backend=backend)
+        res = distances.calc_angles(
+            crds[0], crds[1], crds[2], box=box, backend=backend
+        )
         assert_equal(crds, refs)
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
-    def test_input_unchanged_calc_angles_atomgroup(self, coords_atomgroups,
-                                                   box, backend):
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
+    def test_input_unchanged_calc_angles_atomgroup(
+        self, coords_atomgroups, box, backend
+    ):
         crds = coords_atomgroups[:3]
         refs = [crd.positions.copy() for crd in crds]
-        res = distances.calc_angles(crds[0], crds[1], crds[2], box=box,
-                                    backend=backend)
+        res = distances.calc_angles(
+            crds[0], crds[1], crds[2], box=box, backend=backend
+        )
         assert_equal([crd.positions for crd in crds], refs)
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_input_unchanged_calc_dihedrals(self, coords, box, backend):
         crds = coords
         refs = [crd.copy() for crd in crds]
-        res = distances.calc_dihedrals(crds[0], crds[1], crds[2], crds[3],
-                                       box=box, backend=backend)
+        res = distances.calc_dihedrals(
+            crds[0], crds[1], crds[2], crds[3], box=box, backend=backend
+        )
         assert_equal(crds, refs)
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
-    def test_input_unchanged_calc_dihedrals_atomgroup(self, coords_atomgroups,
-                                                      box, backend):
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
+    def test_input_unchanged_calc_dihedrals_atomgroup(
+        self, coords_atomgroups, box, backend
+    ):
         crds = coords_atomgroups
         refs = [crd.positions.copy() for crd in crds]
-        res = distances.calc_dihedrals(crds[0], crds[1], crds[2], crds[3],
-                                       box=box, backend=backend)
+        res = distances.calc_dihedrals(
+            crds[0], crds[1], crds[2], crds[3], box=box, backend=backend
+        )
         assert_equal([crd.positions for crd in crds], refs)
 
-    @pytest.mark.parametrize('box', boxes[:2])
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes[:2])
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_input_unchanged_apply_PBC(self, coords, box, backend):
         crd = coords[0]
         ref = crd.copy()
         res = distances.apply_PBC(crd, box, backend=backend)
         assert_equal(crd, ref)
 
-    @pytest.mark.parametrize('box', boxes[:2])
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
-    def test_input_unchanged_apply_PBC_atomgroup(self, coords_atomgroups, box,
-                                                 backend):
+    @pytest.mark.parametrize("box", boxes[:2])
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
+    def test_input_unchanged_apply_PBC_atomgroup(
+        self, coords_atomgroups, box, backend
+    ):
         crd = coords_atomgroups[0]
         ref = crd.positions.copy()
         res = distances.apply_PBC(crd, box, backend=backend)
         assert_equal(crd.positions, ref)
 
+
 class TestEmptyInputCoordinates(object):
     """Tests ensuring that the following functions in MDAnalysis.lib.distances
     do not choke on empty input coordinate arrays:
@@ -1578,9 +2000,11 @@ class TestEmptyInputCoordinates(object):
     max_cut = 0.25  # max_cutoff parameter for *capped_distance()
     min_cut = 0.0  # optional min_cutoff parameter for *capped_distance()
 
-    boxes = ([1.0, 1.0, 1.0, 90.0, 90.0, 90.0],  # orthorhombic
-             [1.0, 1.0, 1.0, 80.0, 80.0, 80.0],  # triclinic
-             None)  # no PBC
+    boxes = (
+        [1.0, 1.0, 1.0, 90.0, 90.0, 90.0],  # orthorhombic
+        [1.0, 1.0, 1.0, 80.0, 80.0, 80.0],  # triclinic
+        None,  # no PBC
+    )
 
     @staticmethod
     @pytest.fixture()
@@ -1588,60 +2012,73 @@ def empty_coord():
         # empty coordinate array:
         return np.empty((0, 3), dtype=np.float32)
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_empty_input_distance_array(self, empty_coord, box, backend):
-        res = distances.distance_array(empty_coord, empty_coord, box=box,
-                                       backend=backend)
+        res = distances.distance_array(
+            empty_coord, empty_coord, box=box, backend=backend
+        )
         assert_equal(res, np.empty((0, 0), dtype=np.float64))
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_empty_input_self_distance_array(self, empty_coord, box, backend):
-        res = distances.self_distance_array(empty_coord, box=box,
-                                            backend=backend)
+        res = distances.self_distance_array(
+            empty_coord, box=box, backend=backend
+        )
         assert_equal(res, np.empty((0,), dtype=np.float64))
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('min_cut', [min_cut, None])
-    @pytest.mark.parametrize('ret_dist', [False, True])
-    @pytest.mark.parametrize('met', ["bruteforce", "pkdtree", "nsgrid", None])
-    def test_empty_input_capped_distance(self, empty_coord, min_cut, box, met,
-                                         ret_dist):
-        res = distances.capped_distance(empty_coord, empty_coord,
-                                        max_cutoff=self.max_cut,
-                                        min_cutoff=min_cut, box=box, method=met,
-                                        return_distances=ret_dist)
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("min_cut", [min_cut, None])
+    @pytest.mark.parametrize("ret_dist", [False, True])
+    @pytest.mark.parametrize("met", ["bruteforce", "pkdtree", "nsgrid", None])
+    def test_empty_input_capped_distance(
+        self, empty_coord, min_cut, box, met, ret_dist
+    ):
+        res = distances.capped_distance(
+            empty_coord,
+            empty_coord,
+            max_cutoff=self.max_cut,
+            min_cutoff=min_cut,
+            box=box,
+            method=met,
+            return_distances=ret_dist,
+        )
         if ret_dist:
             assert_equal(res[0], np.empty((0, 2), dtype=np.int64))
             assert_equal(res[1], np.empty((0,), dtype=np.float64))
         else:
             assert_equal(res, np.empty((0, 2), dtype=np.int64))
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('min_cut', [min_cut, None])
-    @pytest.mark.parametrize('ret_dist', [False, True])
-    @pytest.mark.parametrize('met', ["bruteforce", "pkdtree", "nsgrid", None])
-    def test_empty_input_self_capped_distance(self, empty_coord, min_cut, box,
-                                              met, ret_dist):
-        res = distances.self_capped_distance(empty_coord,
-                                             max_cutoff=self.max_cut,
-                                             min_cutoff=min_cut, box=box,
-                                             method=met, return_distances=ret_dist)
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("min_cut", [min_cut, None])
+    @pytest.mark.parametrize("ret_dist", [False, True])
+    @pytest.mark.parametrize("met", ["bruteforce", "pkdtree", "nsgrid", None])
+    def test_empty_input_self_capped_distance(
+        self, empty_coord, min_cut, box, met, ret_dist
+    ):
+        res = distances.self_capped_distance(
+            empty_coord,
+            max_cutoff=self.max_cut,
+            min_cutoff=min_cut,
+            box=box,
+            method=met,
+            return_distances=ret_dist,
+        )
         if ret_dist:
             assert_equal(res[0], np.empty((0, 2), dtype=np.int64))
             assert_equal(res[1], np.empty((0,), dtype=np.float64))
         else:
             assert_equal(res, np.empty((0, 2), dtype=np.int64))
 
-    @pytest.mark.parametrize('box', boxes[:2])
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes[:2])
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_empty_input_transform_RtoS(self, empty_coord, box, backend):
         res = distances.transform_RtoS(empty_coord, box, backend=backend)
         assert_equal(res, empty_coord)
 
-    @pytest.mark.parametrize('box', boxes[:2])
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes[:2])
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_empty_input_transform_StoR(self, empty_coord, box, backend):
         res = distances.transform_StoR(empty_coord, box, backend=backend)
         assert_equal(res, empty_coord)
@@ -1649,26 +2086,34 @@ def test_empty_input_transform_StoR(self, empty_coord, box, backend):
     @pytest.mark.parametrize("box", boxes)
     @pytest.mark.parametrize("backend", distopia_conditional_backend())
     def test_empty_input_calc_bonds(self, empty_coord, box, backend):
-        res = distances.calc_bonds(empty_coord, empty_coord, box=box,
-                                   backend=backend)
+        res = distances.calc_bonds(
+            empty_coord, empty_coord, box=box, backend=backend
+        )
         assert_equal(res, np.empty((0,), dtype=np.float64))
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_empty_input_calc_angles(self, empty_coord, box, backend):
-        res = distances.calc_angles(empty_coord, empty_coord, empty_coord,
-                                    box=box, backend=backend)
+        res = distances.calc_angles(
+            empty_coord, empty_coord, empty_coord, box=box, backend=backend
+        )
         assert_equal(res, np.empty((0,), dtype=np.float64))
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_empty_input_calc_dihedrals(self, empty_coord, box, backend):
-        res = distances.calc_dihedrals(empty_coord, empty_coord, empty_coord,
-                                       empty_coord, box=box, backend=backend)
+        res = distances.calc_dihedrals(
+            empty_coord,
+            empty_coord,
+            empty_coord,
+            empty_coord,
+            box=box,
+            backend=backend,
+        )
         assert_equal(res, np.empty((0,), dtype=np.float64))
 
-    @pytest.mark.parametrize('box', boxes[:2])
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes[:2])
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_empty_input_apply_PBC(self, empty_coord, box, backend):
         res = distances.apply_PBC(empty_coord, box, backend=backend)
         assert_equal(res, empty_coord)
@@ -1704,46 +2149,60 @@ class TestOutputTypes(object):
       * apply_PBC:
         - numpy.ndarray (shape=input.shape, dtype=numpy.float32)
     """
+
     max_cut = 0.25  # max_cutoff parameter for *capped_distance()
     min_cut = 0.0  # optional min_cutoff parameter for *capped_distance()
 
-    boxes = ([1.0, 1.0, 1.0, 90.0, 90.0, 90.0],  # orthorhombic
-             [1.0, 1.0, 1.0, 80.0, 80.0, 80.0],  # triclinic
-             None)  # no PBC
+    boxes = (
+        [1.0, 1.0, 1.0, 90.0, 90.0, 90.0],  # orthorhombic
+        [1.0, 1.0, 1.0, 80.0, 80.0, 80.0],  # triclinic
+        None,  # no PBC
+    )
 
-    coords = [np.empty((0, 3), dtype=np.float32),  # empty coord array
-              np.array([[0.1, 0.1, 0.1]], dtype=np.float32),  # coord array
-              np.array([0.1, 0.1, 0.1], dtype=np.float32),  # single coord
-              np.array([[-1.1, -1.1, -1.1]], dtype=np.float32)]  # outside box
+    coords = [
+        np.empty((0, 3), dtype=np.float32),  # empty coord array
+        np.array([[0.1, 0.1, 0.1]], dtype=np.float32),  # coord array
+        np.array([0.1, 0.1, 0.1], dtype=np.float32),  # single coord
+        np.array([[-1.1, -1.1, -1.1]], dtype=np.float32),  # outside box
+    ]
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('incoords', list(comb(coords, 2)))
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("incoords", list(comb(coords, 2)))
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_output_type_distance_array(self, incoords, box, backend):
         res = distances.distance_array(*incoords, box=box, backend=backend)
         assert type(res) == np.ndarray
-        assert res.shape == (incoords[0].shape[0] % 2, incoords[1].shape[0] % 2)
+        assert res.shape == (
+            incoords[0].shape[0] % 2,
+            incoords[1].shape[0] % 2,
+        )
         assert res.dtype.type == np.float64
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('incoords', coords)
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("incoords", coords)
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_output_type_self_distance_array(self, incoords, box, backend):
         res = distances.self_distance_array(incoords, box=box, backend=backend)
         assert type(res) == np.ndarray
         assert res.shape == (0,)
         assert res.dtype.type == np.float64
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('min_cut', [min_cut, None])
-    @pytest.mark.parametrize('ret_dist', [False, True])
-    @pytest.mark.parametrize('incoords', list(comb(coords, 2)))
-    @pytest.mark.parametrize('met', ["bruteforce", "pkdtree", "nsgrid", None])
-    def test_output_type_capped_distance(self, incoords, min_cut, box, met,
-                                         ret_dist):
-        res = distances.capped_distance(*incoords, max_cutoff=self.max_cut,
-                                        min_cutoff=min_cut, box=box, method=met,
-                                        return_distances=ret_dist)
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("min_cut", [min_cut, None])
+    @pytest.mark.parametrize("ret_dist", [False, True])
+    @pytest.mark.parametrize("incoords", list(comb(coords, 2)))
+    @pytest.mark.parametrize("met", ["bruteforce", "pkdtree", "nsgrid", None])
+    def test_output_type_capped_distance(
+        self, incoords, min_cut, box, met, ret_dist
+    ):
+        res = distances.capped_distance(
+            *incoords,
+            max_cutoff=self.max_cut,
+            min_cutoff=min_cut,
+            box=box,
+            method=met,
+            return_distances=ret_dist,
+        )
         if ret_dist:
             pairs, dist = res
         else:
@@ -1757,18 +2216,22 @@ def test_output_type_capped_distance(self, incoords, min_cut, box, met,
             assert dist.dtype.type == np.float64
             assert dist.shape == (pairs.shape[0],)
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('min_cut', [min_cut, None])
-    @pytest.mark.parametrize('ret_dist', [False, True])
-    @pytest.mark.parametrize('incoords', coords)
-    @pytest.mark.parametrize('met', ["bruteforce", "pkdtree", "nsgrid", None])
-    def test_output_type_self_capped_distance(self, incoords, min_cut, box,
-                                              met, ret_dist):
-        res = distances.self_capped_distance(incoords,
-                                                     max_cutoff=self.max_cut,
-                                                     min_cutoff=min_cut,
-                                                     box=box, method=met,
-                                                     return_distances=ret_dist)
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize("min_cut", [min_cut, None])
+    @pytest.mark.parametrize("ret_dist", [False, True])
+    @pytest.mark.parametrize("incoords", coords)
+    @pytest.mark.parametrize("met", ["bruteforce", "pkdtree", "nsgrid", None])
+    def test_output_type_self_capped_distance(
+        self, incoords, min_cut, box, met, ret_dist
+    ):
+        res = distances.self_capped_distance(
+            incoords,
+            max_cutoff=self.max_cut,
+            min_cutoff=min_cut,
+            box=box,
+            method=met,
+            return_distances=ret_dist,
+        )
         if ret_dist:
             pairs, dist = res
         else:
@@ -1782,18 +2245,18 @@ def test_output_type_self_capped_distance(self, incoords, min_cut, box,
             assert dist.dtype.type == np.float64
             assert dist.shape == (pairs.shape[0],)
 
-    @pytest.mark.parametrize('box', boxes[:2])
-    @pytest.mark.parametrize('incoords', coords)
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes[:2])
+    @pytest.mark.parametrize("incoords", coords)
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_output_dtype_transform_RtoS(self, incoords, box, backend):
         res = distances.transform_RtoS(incoords, box, backend=backend)
         assert type(res) == np.ndarray
         assert res.dtype.type == np.float32
         assert res.shape == incoords.shape
 
-    @pytest.mark.parametrize('box', boxes[:2])
-    @pytest.mark.parametrize('incoords', coords)
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes[:2])
+    @pytest.mark.parametrize("incoords", coords)
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_output_dtype_transform_RtoS(self, incoords, box, backend):
         res = distances.transform_RtoS(incoords, box, backend=backend)
         assert type(res) == np.ndarray
@@ -1801,7 +2264,9 @@ def test_output_dtype_transform_RtoS(self, incoords, box, backend):
         assert res.shape == incoords.shape
 
     @pytest.mark.parametrize("box", boxes)
-    @pytest.mark.parametrize("incoords", [2 * [coords[0]]] + list(comb(coords[1:], 2)))
+    @pytest.mark.parametrize(
+        "incoords", [2 * [coords[0]]] + list(comb(coords[1:], 2))
+    )
     @pytest.mark.parametrize("backend", distopia_conditional_backend())
     def test_output_type_calc_bonds(self, incoords, box, backend):
         res = distances.calc_bonds(*incoords, box=box, backend=backend)
@@ -1814,10 +2279,11 @@ def test_output_type_calc_bonds(self, incoords, box, backend):
             coord = [crd for crd in incoords if crd.ndim == maxdim][0]
             assert res.shape == (coord.shape[0],)
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('incoords',
-                             [3 * [coords[0]]] + list(comb(coords[1:], 3)))
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize(
+        "incoords", [3 * [coords[0]]] + list(comb(coords[1:], 3))
+    )
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_output_type_calc_angles(self, incoords, box, backend):
         res = distances.calc_angles(*incoords, box=box, backend=backend)
         maxdim = max([crd.ndim for crd in incoords])
@@ -1829,10 +2295,11 @@ def test_output_type_calc_angles(self, incoords, box, backend):
             coord = [crd for crd in incoords if crd.ndim == maxdim][0]
             assert res.shape == (coord.shape[0],)
 
-    @pytest.mark.parametrize('box', boxes)
-    @pytest.mark.parametrize('incoords',
-                             [4 * [coords[0]]] + list(comb(coords[1:], 4)))
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes)
+    @pytest.mark.parametrize(
+        "incoords", [4 * [coords[0]]] + list(comb(coords[1:], 4))
+    )
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_output_type_calc_dihedrals(self, incoords, box, backend):
         res = distances.calc_dihedrals(*incoords, box=box, backend=backend)
         maxdim = max([crd.ndim for crd in incoords])
@@ -1844,9 +2311,9 @@ def test_output_type_calc_dihedrals(self, incoords, box, backend):
             coord = [crd for crd in incoords if crd.ndim == maxdim][0]
             assert res.shape == (coord.shape[0],)
 
-    @pytest.mark.parametrize('box', boxes[:2])
-    @pytest.mark.parametrize('incoords', coords)
-    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
+    @pytest.mark.parametrize("box", boxes[:2])
+    @pytest.mark.parametrize("incoords", coords)
+    @pytest.mark.parametrize("backend", ["serial", "openmp"])
     def test_output_type_apply_PBC(self, incoords, box, backend):
         res = distances.apply_PBC(incoords, box, backend=backend)
         assert type(res) == np.ndarray
@@ -1864,37 +2331,60 @@ def backend_selection_pos():
 
         return positions, result
 
-    @pytest.mark.parametrize('backend', [
-        "serial", "Serial", "SeRiAL", "SERIAL",
-        "openmp", "OpenMP", "oPENmP", "OPENMP",
-    ])
+    @pytest.mark.parametrize(
+        "backend",
+        [
+            "serial",
+            "Serial",
+            "SeRiAL",
+            "SERIAL",
+            "openmp",
+            "OpenMP",
+            "oPENmP",
+            "OPENMP",
+        ],
+    )
     def test_case_insensitivity(self, backend, backend_selection_pos):
         positions, result = backend_selection_pos
         try:
-            distances._run("calc_self_distance_array", args=(positions, result),
-                           backend=backend)
+            distances._run(
+                "calc_self_distance_array",
+                args=(positions, result),
+                backend=backend,
+            )
         except RuntimeError:
             pytest.fail("Failed to understand backend {0}".format(backend))
 
     def test_wront_backend(self, backend_selection_pos):
         positions, result = backend_selection_pos
         with pytest.raises(ValueError):
-            distances._run("calc_self_distance_array", args=(positions, result),
-                           backend="not implemented stuff")
+            distances._run(
+                "calc_self_distance_array",
+                args=(positions, result),
+                backend="not implemented stuff",
+            )
+
 
 def test_used_openmpflag():
     assert isinstance(distances.USED_OPENMP, bool)
 
 
 # test both orthogonal and triclinic boxes
-@pytest.mark.parametrize('box', (np.eye(3) * 10, np.array([[10, 0, 0], [2, 10, 0], [2, 2, 10]])))
+@pytest.mark.parametrize(
+    "box", (np.eye(3) * 10, np.array([[10, 0, 0], [2, 10, 0], [2, 2, 10]]))
+)
 # try shifts of -2 to +2 in each dimension, and all combinations of shifts
-@pytest.mark.parametrize('shift', itertools.product(range(-2, 3), range(-2, 3), range(-2, 3)))
-@pytest.mark.parametrize('dtype', (np.float32, np.float64))
+@pytest.mark.parametrize(
+    "shift", itertools.product(range(-2, 3), range(-2, 3), range(-2, 3))
+)
+@pytest.mark.parametrize("dtype", (np.float32, np.float64))
 def test_minimize_vectors(box, shift, dtype):
     # test vectors pointing in all directions
     # these currently all obey the minimum image convention as they're much smaller than the box
-    vec = np.array(list(itertools.product(range(-1, 2), range(-1, 2), range(-1, 2))), dtype=dtype)
+    vec = np.array(
+        list(itertools.product(range(-1, 2), range(-1, 2), range(-1, 2))),
+        dtype=dtype,
+    )
     box = box.astype(dtype)
 
     # box is 3x3 representation
diff --git a/testsuite/MDAnalysisTests/lib/test_log.py b/testsuite/MDAnalysisTests/lib/test_log.py
index cab2994a87..541660ca4c 100644
--- a/testsuite/MDAnalysisTests/lib/test_log.py
+++ b/testsuite/MDAnalysisTests/lib/test_log.py
@@ -32,22 +32,22 @@ def test_output(self, capsys):
         for i in ProgressBar(list(range(10))):
             pass
         out, err = capsys.readouterr()
-        expected = u'100%|██████████'
-        actual = err.strip().split('\r')[-1]
+        expected = "100%|██████████"
+        actual = err.strip().split("\r")[-1]
         assert actual[:15] == expected
 
     def test_disable(self, capsys):
         for i in ProgressBar(list(range(10)), disable=True):
             pass
         out, err = capsys.readouterr()
-        expected = ''
-        actual = err.strip().split('\r')[-1]
+        expected = ""
+        actual = err.strip().split("\r")[-1]
         assert actual == expected
 
     def test_verbose_disable(self, capsys):
         for i in ProgressBar(list(range(10)), verbose=False):
             pass
         out, err = capsys.readouterr()
-        expected = ''
-        actual = err.strip().split('\r')[-1]
+        expected = ""
+        actual = err.strip().split("\r")[-1]
         assert actual == expected
diff --git a/testsuite/MDAnalysisTests/lib/test_neighborsearch.py b/testsuite/MDAnalysisTests/lib/test_neighborsearch.py
index 7ae209485b..29a179350d 100644
--- a/testsuite/MDAnalysisTests/lib/test_neighborsearch.py
+++ b/testsuite/MDAnalysisTests/lib/test_neighborsearch.py
@@ -30,7 +30,6 @@
 from MDAnalysisTests.datafiles import PSF, DCD
 
 
-
 @pytest.fixture
 def universe():
     u = mda.Universe(PSF, DCD)
@@ -43,8 +42,9 @@ def test_search(universe):
     """simply check that for a centered protein in a large box periodic
     and non-periodic return the same result"""
     ns = NeighborSearch.AtomNeighborSearch(universe.atoms)
-    pns = NeighborSearch.AtomNeighborSearch(universe.atoms,
-                                            universe.atoms.dimensions)
+    pns = NeighborSearch.AtomNeighborSearch(
+        universe.atoms, universe.atoms.dimensions
+    )
 
     ns_res = ns.search(universe.atoms[20], 20)
     pns_res = pns.search(universe.atoms[20], 20)
@@ -54,9 +54,9 @@ def test_search(universe):
 def test_zero(universe):
     """Check if empty atomgroup, residue, segments are returned"""
     ns = NeighborSearch.AtomNeighborSearch(universe.atoms[:10])
-    ns_res = ns.search(universe.atoms[20], 0.1, level='A')
+    ns_res = ns.search(universe.atoms[20], 0.1, level="A")
     assert ns_res == universe.atoms[[]]
-    ns_res = ns.search(universe.atoms[20], 0.1, level='R')
+    ns_res = ns.search(universe.atoms[20], 0.1, level="R")
     assert ns_res == universe.atoms[[]].residues
-    ns_res = ns.search(universe.atoms[20], 0.1, level='S')
+    ns_res = ns.search(universe.atoms[20], 0.1, level="S")
     assert ns_res == universe.atoms[[]].segments
diff --git a/testsuite/MDAnalysisTests/lib/test_nsgrid.py b/testsuite/MDAnalysisTests/lib/test_nsgrid.py
index 69e7fa1f89..582e780172 100644
--- a/testsuite/MDAnalysisTests/lib/test_nsgrid.py
+++ b/testsuite/MDAnalysisTests/lib/test_nsgrid.py
@@ -31,7 +31,12 @@
 
 import MDAnalysis as mda
 from MDAnalysisTests.datafiles import (
-    GRO, Martini_membrane_gro, PDB, PDB_xvf, SURFACE_PDB, SURFACE_TRR
+    GRO,
+    Martini_membrane_gro,
+    PDB,
+    PDB_xvf,
+    SURFACE_PDB,
+    SURFACE_TRR,
 )
 from MDAnalysis.lib import nsgrid
 from MDAnalysis.transformations.translate import center_in_box
@@ -42,23 +47,32 @@ def universe():
     u = mda.Universe(GRO)
     return u
 
+
 def run_grid_search(u, ref_id, cutoff=3):
     coords = u.atoms.positions
     searchcoords = u.atoms.positions[ref_id]
-    if searchcoords.shape == (3, ):
+    if searchcoords.shape == (3,):
         searchcoords = searchcoords[None, :]
     # Run grid search
     searcher = nsgrid.FastNS(cutoff, coords, box=u.dimensions)
 
     return searcher.search(searchcoords)
 
-@pytest.mark.parametrize('box', [
-    np.zeros(3),  # Bad shape
-    np.zeros((3, 3)),  # Collapsed box
-    np.array([[0, 0, 0], [0, 1, 0], [0, 0, 1]]),  # 2D box
-    np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),  # Box provided as array of integers
-    np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float64),  # Box provided as array of double
-])
+
+@pytest.mark.parametrize(
+    "box",
+    [
+        np.zeros(3),  # Bad shape
+        np.zeros((3, 3)),  # Collapsed box
+        np.array([[0, 0, 0], [0, 1, 0], [0, 0, 1]]),  # 2D box
+        np.array(
+            [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
+        ),  # Box provided as array of integers
+        np.array(
+            [[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float64
+        ),  # Box provided as array of double
+    ],
+)
 def test_pbc_box(box):
     """Check that PBC box accepts only well-formated boxes"""
     coords = np.array([[1.0, 1.0, 1.0]], dtype=np.float32)
@@ -67,9 +81,13 @@ def test_pbc_box(box):
         nsgrid.FastNS(4.0, coords, box=box)
 
 
-@pytest.mark.parametrize('cutoff, match', ((-4, "Cutoff must be positive"),
-                                           (100000,
-                                            "Cutoff 100000 too large for box")))
+@pytest.mark.parametrize(
+    "cutoff, match",
+    (
+        (-4, "Cutoff must be positive"),
+        (100000, "Cutoff 100000 too large for box"),
+    ),
+)
 def test_nsgrid_badcutoff(universe, cutoff, match):
     with pytest.raises(ValueError, match=match):
         run_grid_search(universe, 0, cutoff)
@@ -91,16 +109,38 @@ def test_nsgrid_PBC_rect():
     """Check that nsgrid works with rect boxes and PBC"""
     ref_id = 191
     # Atom ids are from gmx select, so they start from 1 and not 0, hence the -1!
-    results = np.array([191, 192, 672, 682, 683, 684, 995, 996, 2060, 2808, 3300, 3791,
-                        3792]) - 1
+    results = (
+        np.array(
+            [
+                191,
+                192,
+                672,
+                682,
+                683,
+                684,
+                995,
+                996,
+                2060,
+                2808,
+                3300,
+                3791,
+                3792,
+            ]
+        )
+        - 1
+    )
 
     universe = mda.Universe(Martini_membrane_gro)
     cutoff = 7
 
     # FastNS is called differently here to maximise coverage
-    searcher = nsgrid.FastNS(cutoff, universe.atoms.positions, box=universe.dimensions)
+    searcher = nsgrid.FastNS(
+        cutoff, universe.atoms.positions, box=universe.dimensions
+    )
 
-    results_grid = searcher.search(universe.atoms.positions[ref_id][None, :]).get_pairs()
+    results_grid = searcher.search(
+        universe.atoms.positions[ref_id][None, :]
+    ).get_pairs()
     other_ix = sorted(i for (_, i) in results_grid)
 
     assert len(results) == len(results_grid)
@@ -111,8 +151,25 @@ def test_nsgrid_PBC(universe):
     """Check that grid search works when PBC is needed"""
     # Atom ids are from gmx select, so they start from 1 and not 0, hence the -1!
     ref_id = 13937
-    results = np.array([4398, 4401, 13938, 13939, 13940, 13941, 17987, 23518, 23519, 23521, 23734,
-                        47451]) - 1
+    results = (
+        np.array(
+            [
+                4398,
+                4401,
+                13938,
+                13939,
+                13940,
+                13941,
+                17987,
+                23518,
+                23519,
+                23521,
+                23734,
+                47451,
+            ]
+        )
+        - 1
+    )
 
     results_grid = run_grid_search(universe, ref_id).get_pairs()
 
@@ -126,23 +183,59 @@ def test_nsgrid_pairs(universe):
     """Check that grid search returns the proper pairs"""
 
     ref_id = 13937
-    neighbors = np.array([4398, 4401, 13938, 13939, 13940, 13941, 17987, 23518, 23519, 23521, 23734,
-                          47451]) - 1  # Atomid are from gmx select so there start from 1 and not 0. hence -1!
+    neighbors = (
+        np.array(
+            [
+                4398,
+                4401,
+                13938,
+                13939,
+                13940,
+                13941,
+                17987,
+                23518,
+                23519,
+                23521,
+                23734,
+                47451,
+            ]
+        )
+        - 1
+    )  # Atom ids are from gmx select, so they start from 1 and not 0, hence the -1!
     results = []
 
     results = np.array(results)
 
     results_grid = run_grid_search(universe, ref_id).get_pairs()
 
-    assert_equal(np.sort(neighbors, axis=0), np.sort(results_grid[:, 1], axis=0))
+    assert_equal(
+        np.sort(neighbors, axis=0), np.sort(results_grid[:, 1], axis=0)
+    )
 
 
 def test_nsgrid_pair_distances(universe):
     """Check that grid search returns the proper pair distances"""
 
     ref_id = 13937
-    results = np.array([0.0, 0.270, 0.285, 0.096, 0.096, 0.015, 0.278, 0.268, 0.179, 0.259, 0.290,
-                        0.270]) * 10  # These distances where obtained by gmx distance so they are in nm
+    results = (
+        np.array(
+            [
+                0.0,
+                0.270,
+                0.285,
+                0.096,
+                0.096,
+                0.015,
+                0.278,
+                0.268,
+                0.179,
+                0.259,
+                0.290,
+                0.270,
+            ]
+        )
+        * 10
+    )  # These distances were obtained by gmx distance so they are in nm
 
     results_grid = run_grid_search(universe, ref_id).get_pair_distances()
 
@@ -153,32 +246,57 @@ def test_nsgrid_distances(universe):
     """Check that grid search returns the proper distances"""
     # These distances were obtained by gmx distance so they are in nm
     ref_id = 13937
-    results = np.array([0.0, 0.270, 0.285, 0.096, 0.096, 0.015, 0.278, 0.268, 0.179, 0.259, 0.290,
-                        0.270]) * 10
+    results = (
+        np.array(
+            [
+                0.0,
+                0.270,
+                0.285,
+                0.096,
+                0.096,
+                0.015,
+                0.278,
+                0.268,
+                0.179,
+                0.259,
+                0.290,
+                0.270,
+            ]
+        )
+        * 10
+    )
 
     results_grid = run_grid_search(universe, ref_id).get_pair_distances()
 
     assert_allclose(np.sort(results), np.sort(results_grid), atol=1e-2)
 
 
-@pytest.mark.parametrize('box, results',
-                         ((None, [3, 13, 24]),
-                          (np.array([10., 10., 10., 90., 90., 90.]), [3, 13, 24, 39, 67]),
-                          (np.array([10., 10., 10., 60., 75., 90.]), [3, 13, 24, 39, 60, 79])))
+@pytest.mark.parametrize(
+    "box, results",
+    (
+        (None, [3, 13, 24]),
+        (np.array([10.0, 10.0, 10.0, 90.0, 90.0, 90.0]), [3, 13, 24, 39, 67]),
+        (
+            np.array([10.0, 10.0, 10.0, 60.0, 75.0, 90.0]),
+            [3, 13, 24, 39, 60, 79],
+        ),
+    ),
+)
 def test_nsgrid_search(box, results):
     np.random.seed(90003)
-    points = (np.random.uniform(low=0, high=1.0,
-                        size=(100, 3))*(10.)).astype(np.float32)
+    points = (
+        np.random.uniform(low=0, high=1.0, size=(100, 3)) * (10.0)
+    ).astype(np.float32)
     cutoff = 2.0
-    query = np.array([1., 1., 1.], dtype=np.float32).reshape((1, 3))
+    query = np.array([1.0, 1.0, 1.0], dtype=np.float32).reshape((1, 3))
 
     if box is None:
         pseudobox = np.zeros(6, dtype=np.float32)
         all_coords = np.concatenate([points, query])
         lmax = all_coords.max(axis=0)
         lmin = all_coords.min(axis=0)
-        pseudobox[:3] = 1.1*(lmax - lmin)
-        pseudobox[3:] = 90.
+        pseudobox[:3] = 1.1 * (lmax - lmin)
+        pseudobox[3:] = 90.0
         shiftpoints, shiftquery = points.copy(), query.copy()
         shiftpoints -= lmin
         shiftquery -= lmin
@@ -191,15 +309,20 @@ def test_nsgrid_search(box, results):
     assert_equal(np.sort(indices), results)
 
 
-@pytest.mark.parametrize('box, result',
-                         ((None, 21),
-                          (np.array([0., 0., 0., 90., 90., 90.]), 21),
-                          (np.array([10., 10., 10., 90., 90., 90.]), 26),
-                          (np.array([10., 10., 10., 60., 75., 90.]), 33)))
+@pytest.mark.parametrize(
+    "box, result",
+    (
+        (None, 21),
+        (np.array([0.0, 0.0, 0.0, 90.0, 90.0, 90.0]), 21),
+        (np.array([10.0, 10.0, 10.0, 90.0, 90.0, 90.0]), 26),
+        (np.array([10.0, 10.0, 10.0, 60.0, 75.0, 90.0]), 33),
+    ),
+)
 def test_nsgrid_selfsearch(box, result):
     np.random.seed(90003)
-    points = (np.random.uniform(low=0, high=1.0,
-                        size=(100, 3))*(10.)).astype(np.float32)
+    points = (
+        np.random.uniform(low=0, high=1.0, size=(100, 3)) * (10.0)
+    ).astype(np.float32)
     cutoff = 1.0
     if box is None or np.allclose(box[:3], 0):
         # create a pseudobox
@@ -209,8 +332,8 @@ def test_nsgrid_selfsearch(box, result):
         pseudobox = np.zeros(6, dtype=np.float32)
         lmax = points.max(axis=0)
         lmin = points.min(axis=0)
-        pseudobox[:3] = 1.1*(lmax - lmin)
-        pseudobox[3:] = 90.
+        pseudobox[:3] = 1.1 * (lmax - lmin)
+        pseudobox[3:] = 90.0
         shiftref = points.copy()
         shiftref -= lmin
         searcher = nsgrid.FastNS(cutoff, shiftref, box=pseudobox, pbc=False)
@@ -221,12 +344,15 @@ def test_nsgrid_selfsearch(box, result):
     pairs = searchresults.get_pairs()
     assert_equal(len(pairs), result)
 
+
 def test_nsgrid_probe_close_to_box_boundary():
     # FastNS.search used to segfault with this box, cutoff and reference
     # coordinate prior to PR #2136, so we ensure that this remains fixed.
     # See Issue #2132 for further information.
     ref = np.array([[55.783722, 44.190044, -54.16671]], dtype=np.float32)
-    box = np.array([53.785854, 43.951054, 57.17597, 90., 90., 90.], dtype=np.float32)
+    box = np.array(
+        [53.785854, 43.951054, 57.17597, 90.0, 90.0, 90.0], dtype=np.float32
+    )
     cutoff = 3.0
     # search within a configuration where we know the expected outcome:
     conf = np.ones((1, 3), dtype=np.float32)
@@ -236,7 +362,7 @@ def test_nsgrid_probe_close_to_box_boundary():
     expected_pairs = np.zeros((1, 2), dtype=np.int64)
     expected_dists = np.array([2.3689647], dtype=np.float64)
     assert_equal(results.get_pairs(), expected_pairs)
-    assert_allclose(results.get_pair_distances(), expected_dists, rtol=1.e-6)
+    assert_allclose(results.get_pair_distances(), expected_dists, rtol=1.0e-6)
 
 
 def test_zero_max_dist():
@@ -245,7 +371,7 @@ def test_zero_max_dist():
     ref = np.array([1.0, 1.0, 1.0], dtype=np.float32)
     conf = np.array([2.0, 1.0, 1.0], dtype=np.float32)
 
-    box = np.array([10., 10., 10., 90., 90., 90.], dtype=np.float32)
+    box = np.array([10.0, 10.0, 10.0, 90.0, 90.0, 90.0], dtype=np.float32)
 
     res = mda.lib.distances._nsgrid_capped(ref, conf, box=box, max_cutoff=0.0)
 
@@ -259,7 +385,7 @@ def u_pbc_triclinic():
 
 def test_around_res(u_pbc_triclinic):
     # sanity check for issue 2656, shouldn't segfault (obviously)
-    ag = u_pbc_triclinic.select_atoms('around 0.0 resid 3')
+    ag = u_pbc_triclinic.select_atoms("around 0.0 resid 3")
     assert len(ag) == 0
 
 
@@ -267,7 +393,7 @@ def test_around_overlapping():
     # check that around 0.0 catches when atoms *are* superimposed
     u = mda.Universe.empty(60, trajectory=True)
     xyz = np.zeros((60, 3))
-    x = np.tile(np.arange(12), (5,))+np.repeat(np.arange(5)*100, 12)
+    x = np.tile(np.arange(12), (5,)) + np.repeat(np.arange(5) * 100, 12)
     # x is 5 images of 12 atoms
 
     xyz[:, 0] = x  # y and z are 0
@@ -279,7 +405,7 @@ def test_around_overlapping():
     #                                         u.atoms[12:].positions,
     #                                         box=u.dimensions)
     # assert np.count_nonzero(np.any(dist <= 0.0, axis=0)) == 48
-    assert u.select_atoms('around 0.0 index 0:11').n_atoms == 48
+    assert u.select_atoms("around 0.0 index 0:11").n_atoms == 48
 
 
 def test_issue_2229_part1():
@@ -306,7 +432,9 @@ def test_issue_2229_part2():
     u.atoms[0].position = [0, 0, 29.29]
     u.atoms[1].position = [0, 0, 28.23]
 
-    g = mda.lib.nsgrid.FastNS(3.0, u.atoms[[0]].positions, box=u.dimensions, pbc=False)
+    g = mda.lib.nsgrid.FastNS(
+        3.0, u.atoms[[0]].positions, box=u.dimensions, pbc=False
+    )
     assert len(g.search(u.atoms[[1]].positions).get_pairs()) == 1
 
     g = mda.lib.nsgrid.FastNS(3.0, u.atoms[[1]].positions, box=u.dimensions)
@@ -317,12 +445,12 @@ def test_issue_2919():
     # regression test reported in issue 2919
     # other methods will also give 1115 or 2479 results
     u = mda.Universe(PDB_xvf)
-    ag = u.select_atoms('index 0')
+    ag = u.select_atoms("index 0")
     u.trajectory.ts = center_in_box(ag)(u.trajectory.ts)
 
     box = u.dimensions
-    reference = u.select_atoms('protein')
-    configuration = u.select_atoms('not protein')
+    reference = u.select_atoms("protein")
+    configuration = u.select_atoms("not protein")
 
     for cutoff, expected in [(2.8, 1115), (3.2, 2497)]:
         pairs, distances = mda.lib.distances.capped_distance(
@@ -330,7 +458,7 @@ def test_issue_2919():
             configuration.positions,
             max_cutoff=cutoff,
             box=box,
-            method='nsgrid',
+            method="nsgrid",
             return_distances=True,
         )
         assert len(pairs) == expected
@@ -348,7 +476,7 @@ def test_issue_2345():
 
     idx = g.self_search().get_pairs()
     # count number of contacts for each atom
-    for (i, j) in idx:
+    for i, j in idx:
         cn[i].append(j)
         cn[j].append(i)
     c = Counter(len(v) for v in cn.values())
@@ -365,29 +493,31 @@ def test_issue_2670():
     # the coordinates for this test to make any sense:
     u.atoms.positions = u.atoms.positions * 1.0e-3
 
-    ag1 = u.select_atoms('resid 2 3')
+    ag1 = u.select_atoms("resid 2 3")
     # should return nothing as nothing except resid 3 is within 0.0 of resid 3
-    assert len(ag1.select_atoms('around 0.0 resid 3')) == 0
+    assert len(ag1.select_atoms("around 0.0 resid 3")) == 0
 
     # force atom 0 of resid 1 to overlap with atom 0 of resid 3
     u.residues[0].atoms[0].position = u.residues[2].atoms[0].position
-    ag2 = u.select_atoms('resid 1 3')
+    ag2 = u.select_atoms("resid 1 3")
 
     # should return the one atom overlap
-    assert len(ag2.select_atoms('around 0.0 resid 3')) == 1
+    assert len(ag2.select_atoms("around 0.0 resid 3")) == 1
 
 
 def high_mem_tests_enabled():
-    """ Returns true if ENABLE_HIGH_MEM_UNIT_TESTS is set to true."""
+    """Returns true if ENABLE_HIGH_MEM_UNIT_TESTS is set to true."""
     env = os.getenv("ENABLE_HIGH_MEM_UNIT_TESTS", default="false").lower()
-    if env == 'true':
+    if env == "true":
         return True
     return False
 
 
-reason = ("Turned off by default. The test can be enabled by setting "
-          "the ENABLE_HIGH_MEM_UNIT_TESTS "
-          "environment variable. Make sure you have at least 10GB of RAM.")
+reason = (
+    "Turned off by default. The test can be enabled by setting "
+    "the ENABLE_HIGH_MEM_UNIT_TESTS "
+    "environment variable. Make sure you have at least 10GB of RAM."
+)
 
 
 # Tests that with a tiny cutoff to box ratio, the number of grids is capped
@@ -396,11 +526,12 @@ def high_mem_tests_enabled():
 @pytest.mark.skipif(not high_mem_tests_enabled(), reason=reason)
 def test_issue_3183():
     np.random.seed(90003)
-    points = (np.random.uniform(low=0, high=1.0,
-                                size=(100, 3)) * (10.)).astype(np.float32)
+    points = (
+        np.random.uniform(low=0, high=1.0, size=(100, 3)) * (10.0)
+    ).astype(np.float32)
     cutoff = 2.0
-    query = np.array([1., 1., 1.], dtype=np.float32).reshape((1, 3))
-    box = np.array([10000., 10000., 10000., 90., 90., 90.])
+    query = np.array([1.0, 1.0, 1.0], dtype=np.float32).reshape((1, 3))
+    box = np.array([10000.0, 10000.0, 10000.0, 90.0, 90.0, 90.0])
 
     searcher = nsgrid.FastNS(cutoff, points, box)
     searchresults = searcher.search(query)
diff --git a/testsuite/MDAnalysisTests/lib/test_pkdtree.py b/testsuite/MDAnalysisTests/lib/test_pkdtree.py
index f92a87e73e..ec4e586b38 100644
--- a/testsuite/MDAnalysisTests/lib/test_pkdtree.py
+++ b/testsuite/MDAnalysisTests/lib/test_pkdtree.py
@@ -31,21 +31,33 @@
 
 
 # fractional coordinates for data points
-f_dataset = np.array([[0.2, 0.2, 0.2],  # center of the box
-                      [0.5, 0.5, 0.5],
-                      [0.11, 0.11, 0.11],
-                      [1.1, -1.1, 1.1],  # wrapped to [1, 9, 1]
-                      [2.1, 2.1, 0.3]],  # wrapped to [1, 1, 3]
-                     dtype=np.float32)
-
-
-@pytest.mark.parametrize('b, cut, result', (
-                         (None, 1.0,
-                          'Donot provide cutoff distance'
-                          ' for non PBC aware calculations'),
-                         ([10, 10, 10, 90, 90, 90], None,
-                          'Provide a cutoff distance with'
-                          ' tree.set_coords(...)')))
+f_dataset = np.array(
+    [
+        [0.2, 0.2, 0.2],  # center of the box
+        [0.5, 0.5, 0.5],
+        [0.11, 0.11, 0.11],
+        [1.1, -1.1, 1.1],  # wrapped to [1, 9, 1]
+        [2.1, 2.1, 0.3],  # wrapped to [1, 1, 3]
+    ],
+    dtype=np.float32,
+)
+
+
+@pytest.mark.parametrize(
+    "b, cut, result",
+    (
+        (
+            None,
+            1.0,
+            "Donot provide cutoff distance" " for non PBC aware calculations",
+        ),
+        (
+            [10, 10, 10, 90, 90, 90],
+            None,
+            "Provide a cutoff distance with" " tree.set_coords(...)",
+        ),
+    ),
+)
 def test_setcoords(b, cut, result):
     coords = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.float32)
     if b is not None:
@@ -64,16 +76,19 @@ def test_searchfail():
     query = np.array([1, 1, 1], dtype=np.float32)
     tree = PeriodicKDTree(box=b)
     tree.set_coords(coords, cutoff=cutoff)
-    match = 'Set cutoff greater or equal to the radius.'
+    match = "Set cutoff greater or equal to the radius."
     with pytest.raises(RuntimeError, match=match):
         tree.search(query, search_radius)
 
 
-@pytest.mark.parametrize('b, q, result', (
-                         ([10, 10, 10, 90, 90, 90], [0.5, -0.1, 1.1], []),
-                         ([10, 10, 10, 90, 90, 90], [2.1, -3.1, 0.1], [2, 3, 4]),
-                         ([10, 10, 10, 45, 60, 90], [2.1, -3.1, 0.1], [2, 3])
-                         ))
+@pytest.mark.parametrize(
+    "b, q, result",
+    (
+        ([10, 10, 10, 90, 90, 90], [0.5, -0.1, 1.1], []),
+        ([10, 10, 10, 90, 90, 90], [2.1, -3.1, 0.1], [2, 3, 4]),
+        ([10, 10, 10, 45, 60, 90], [2.1, -3.1, 0.1], [2, 3]),
+    ),
+)
 def test_search(b, q, result):
     b = np.array(b, dtype=np.float32)
     q = transform_StoR(np.array(q, dtype=np.float32), b)
@@ -95,16 +110,19 @@ def test_nopbc():
     assert_equal(indices, [0, 2])
 
 
-@pytest.mark.parametrize('b, radius, result', (
-                         ([10, 10, 10, 90, 90, 90], 2.0,  [[0, 2],
-                                                           [0, 4],
-                                                           [2, 4]]),
-                         ([10, 10, 10, 45, 60, 90], 2.0,  [[0, 4],
-                                                           [2, 4]]),
-                         ([10, 10, 10, 45, 60, 90], 4.5,
-                          'Set cutoff greater or equal to the radius.'),
-                         ([10, 10, 10, 45, 60, 90], 0.1, [])
-                         ))
+@pytest.mark.parametrize(
+    "b, radius, result",
+    (
+        ([10, 10, 10, 90, 90, 90], 2.0, [[0, 2], [0, 4], [2, 4]]),
+        ([10, 10, 10, 45, 60, 90], 2.0, [[0, 4], [2, 4]]),
+        (
+            [10, 10, 10, 45, 60, 90],
+            4.5,
+            "Set cutoff greater or equal to the radius.",
+        ),
+        ([10, 10, 10, 45, 60, 90], 0.1, []),
+    ),
+)
 def test_searchpairs(b, radius, result):
     b = np.array(b, dtype=np.float32)
     cutoff = 2.0
@@ -119,8 +137,7 @@ def test_searchpairs(b, radius, result):
         assert_equal(len(indices), len(result))
 
 
-@pytest.mark.parametrize('radius, result', ((0.1, []),
-                                            (0.3, [[0, 2]])))
+@pytest.mark.parametrize("radius, result", ((0.1, []), (0.3, [[0, 2]])))
 def test_ckd_searchpairs_nopbc(radius, result):
     coords = f_dataset.copy()
     tree = PeriodicKDTree()
@@ -129,6 +146,7 @@ def test_ckd_searchpairs_nopbc(radius, result):
     assert_equal(indices, result)
 
 
+# fmt: off
 @pytest.mark.parametrize('b, q, result', (
                          ([10, 10, 10, 90, 90, 90], [0.5, -0.1, 1.1], []),
                          ([10, 10, 10, 90, 90, 90], [2.1, -3.1, 0.1], [[0, 2],
@@ -142,6 +160,7 @@ def test_ckd_searchpairs_nopbc(radius, result):
                          ([10, 10, 10, 45, 60, 90], [2.1, -3.1, 0.1], [[0, 2],
                                                                        [0, 3]])
                          ))
+# fmt: on
 def test_searchtree(b, q, result):
     b = np.array(b, dtype=np.float32)
     cutoff = 3.0
diff --git a/testsuite/MDAnalysisTests/lib/test_qcprot.py b/testsuite/MDAnalysisTests/lib/test_qcprot.py
index a62ae73f97..5750155c49 100644
--- a/testsuite/MDAnalysisTests/lib/test_qcprot.py
+++ b/testsuite/MDAnalysisTests/lib/test_qcprot.py
@@ -30,7 +30,7 @@
 from MDAnalysisTests.datafiles import PSF, DCD
 
 
-@pytest.mark.parametrize('dtype', [np.float64, np.float32])
+@pytest.mark.parametrize("dtype", [np.float64, np.float32])
 class TestQCProt:
     def test_dummy(self, dtype):
         a = np.array([[1.0, 1.0, 2.0]], dtype=dtype)
@@ -47,7 +47,7 @@ def test_dummy(self, dtype):
 
     def test_regression(self, dtype):
         u = mda.Universe(PSF, DCD)
-        prot = u.select_atoms('protein')
+        prot = u.select_atoms("protein")
         weights = prot.masses.astype(dtype)
         weights /= np.mean(weights)
         p1 = prot.positions.astype(dtype)
@@ -57,10 +57,20 @@ def test_regression(self, dtype):
 
         r = qcprot.CalcRMSDRotationalMatrix(p1, p2, len(prot), rot, weights)
 
-        rot_ref = np.array([0.999998, 0.001696, 0.001004,
-                            -0.001698,  0.999998,  0.001373,
-                            -0.001002, -0.001375,  0.999999],
-                           dtype=dtype)
+        rot_ref = np.array(
+            [
+                0.999998,
+                0.001696,
+                0.001004,
+                -0.001698,
+                0.999998,
+                0.001373,
+                -0.001002,
+                -0.001375,
+                0.999999,
+            ],
+            dtype=dtype,
+        )
 
         err = 0.001 if dtype is np.float32 else 0.000001
         assert r == pytest.approx(0.6057544485785074, abs=err)
diff --git a/testsuite/MDAnalysisTests/lib/test_util.py b/testsuite/MDAnalysisTests/lib/test_util.py
index 839b0ef61e..5db9b9afd4 100644
--- a/testsuite/MDAnalysisTests/lib/test_util.py
+++ b/testsuite/MDAnalysisTests/lib/test_util.py
@@ -32,50 +32,83 @@
 import shutil
 
 import numpy as np
-from numpy.testing import (assert_equal, assert_almost_equal,
-                           assert_array_almost_equal, assert_array_equal,
-                           assert_allclose)
+from numpy.testing import (
+    assert_equal,
+    assert_almost_equal,
+    assert_array_almost_equal,
+    assert_array_equal,
+    assert_allclose,
+)
 from itertools import combinations_with_replacement as comb_wr
 
 import MDAnalysis as mda
 import MDAnalysis.lib.util as util
 import MDAnalysis.lib.mdamath as mdamath
-from MDAnalysis.lib.util import (cached, static_variables, warn_if_not_unique,
-                                 check_coords, store_init_arguments, 
-                                 check_atomgroup_not_empty,)
+from MDAnalysis.lib.util import (
+    cached,
+    static_variables,
+    warn_if_not_unique,
+    check_coords,
+    store_init_arguments,
+    check_atomgroup_not_empty,
+)
 from MDAnalysis.core.topologyattrs import Bonds
 from MDAnalysis.exceptions import NoDataError, DuplicateWarning
 from MDAnalysis.core.groups import AtomGroup
-from MDAnalysisTests.datafiles import (PSF, DCD,
-    Make_Whole, TPR, GRO, fullerene, two_water_gro,
+from MDAnalysisTests.datafiles import (
+    PSF,
+    DCD,
+    Make_Whole,
+    TPR,
+    GRO,
+    fullerene,
+    two_water_gro,
 )
 
+
 def test_absence_cutil():
-    with patch.dict('sys.modules', {'MDAnalysis.lib._cutil':None}):
+    with patch.dict("sys.modules", {"MDAnalysis.lib._cutil": None}):
         import importlib
+
         with pytest.raises(ImportError):
-            importlib.reload(sys.modules['MDAnalysis.lib.util'])
+            importlib.reload(sys.modules["MDAnalysis.lib.util"])
+
 
 def test_presence_cutil():
     mock = Mock()
-    with patch.dict('sys.modules', {'MDAnalysis.lib._cutil':mock}):
+    with patch.dict("sys.modules", {"MDAnalysis.lib._cutil": mock}):
         try:
             import MDAnalysis.lib._cutil
         except ImportError:
-            pytest.fail(msg='''MDAnalysis.lib._cutil should not raise
-                         an ImportError if cutil is available.''')
+            pytest.fail(
+                msg="""MDAnalysis.lib._cutil should not raise
+                         an ImportError if cutil is available."""
+            )
+
 
 def convert_aa_code_long_data():
     aa = [
-        ('H',
-         ('HIS', 'HISA', 'HISB', 'HSE', 'HSD', 'HIS1', 'HIS2', 'HIE', 'HID')),
-        ('K', ('LYS', 'LYSH', 'LYN')),
-        ('A', ('ALA',)),
-        ('D', ('ASP', 'ASPH', 'ASH')),
-        ('E', ('GLU', 'GLUH', 'GLH')),
-        ('N', ('ASN',)),
-        ('Q', ('GLN',)),
-        ('C', ('CYS', 'CYSH', 'CYS1', 'CYS2')),
+        (
+            "H",
+            (
+                "HIS",
+                "HISA",
+                "HISB",
+                "HSE",
+                "HSD",
+                "HIS1",
+                "HIS2",
+                "HIE",
+                "HID",
+            ),
+        ),
+        ("K", ("LYS", "LYSH", "LYN")),
+        ("A", ("ALA",)),
+        ("D", ("ASP", "ASPH", "ASH")),
+        ("E", ("GLU", "GLUH", "GLH")),
+        ("N", ("ASN",)),
+        ("Q", ("GLN",)),
+        ("C", ("CYS", "CYSH", "CYS1", "CYS2")),
     ]
     for resname1, strings in aa:
         for resname3 in strings:
@@ -85,15 +118,27 @@ def convert_aa_code_long_data():
 class TestStringFunctions(object):
     # (1-letter, (canonical 3 letter, other 3/4 letter, ....))
     aa = [
-        ('H',
-         ('HIS', 'HISA', 'HISB', 'HSE', 'HSD', 'HIS1', 'HIS2', 'HIE', 'HID')),
-        ('K', ('LYS', 'LYSH', 'LYN')),
-        ('A', ('ALA',)),
-        ('D', ('ASP', 'ASPH', 'ASH')),
-        ('E', ('GLU', 'GLUH', 'GLH')),
-        ('N', ('ASN',)),
-        ('Q', ('GLN',)),
-        ('C', ('CYS', 'CYSH', 'CYS1', 'CYS2')),
+        (
+            "H",
+            (
+                "HIS",
+                "HISA",
+                "HISB",
+                "HSE",
+                "HSD",
+                "HIS1",
+                "HIS2",
+                "HIE",
+                "HID",
+            ),
+        ),
+        ("K", ("LYS", "LYSH", "LYN")),
+        ("A", ("ALA",)),
+        ("D", ("ASP", "ASPH", "ASH")),
+        ("E", ("GLU", "GLUH", "GLH")),
+        ("N", ("ASN",)),
+        ("Q", ("GLN",)),
+        ("C", ("CYS", "CYSH", "CYS1", "CYS2")),
     ]
 
     residues = [
@@ -104,33 +149,31 @@ class TestStringFunctions(object):
         ("M1:CA", ("MET", 1, "CA")),
     ]
 
-    @pytest.mark.parametrize('rstring, residue', residues)
+    @pytest.mark.parametrize("rstring, residue", residues)
     def test_parse_residue(self, rstring, residue):
         assert util.parse_residue(rstring) == residue
 
     def test_parse_residue_ValueError(self):
         with pytest.raises(ValueError):
-            util.parse_residue('ZZZ')
+            util.parse_residue("ZZZ")
 
-    @pytest.mark.parametrize('resname3, resname1', convert_aa_code_long_data())
+    @pytest.mark.parametrize("resname3, resname1", convert_aa_code_long_data())
     def test_convert_aa_3to1(self, resname3, resname1):
         assert util.convert_aa_code(resname3) == resname1
 
-    @pytest.mark.parametrize('resname1, strings', aa)
+    @pytest.mark.parametrize("resname1, strings", aa)
     def test_convert_aa_1to3(self, resname1, strings):
         assert util.convert_aa_code(resname1) == strings[0]
 
-    @pytest.mark.parametrize('x', (
-        'XYZXYZ',
-        '£'
-    ))
+    @pytest.mark.parametrize("x", ("XYZXYZ", "£"))
     def test_ValueError(self, x):
         with pytest.raises(ValueError):
             util.convert_aa_code(x)
 
 
-def test_greedy_splitext(inp="foo/bar/boing.2.pdb.bz2",
-                         ref=["foo/bar/boing", ".2.pdb.bz2"]):
+def test_greedy_splitext(
+    inp="foo/bar/boing.2.pdb.bz2", ref=["foo/bar/boing", ".2.pdb.bz2"]
+):
     inp = os.path.normpath(inp)
     ref[0] = os.path.normpath(ref[0])
     ref[1] = os.path.normpath(ref[1])
@@ -139,17 +182,20 @@ def test_greedy_splitext(inp="foo/bar/boing.2.pdb.bz2",
     assert ext == ref[1], "extension incorrect"
 
 
-@pytest.mark.parametrize('iterable, value', [
-    ([1, 2, 3], True),
-    ([], True),
-    ((1, 2, 3), True),
-    ((), True),
-    (range(3), True),
-    (np.array([1, 2, 3]), True),
-    (123, False),
-    ("byte string", False),
-    (u"unicode string", False)
-])
+@pytest.mark.parametrize(
+    "iterable, value",
+    [
+        ([1, 2, 3], True),
+        ([], True),
+        ((1, 2, 3), True),
+        ((), True),
+        (range(3), True),
+        (np.array([1, 2, 3]), True),
+        (123, False),
+        ("byte string", False),
+        ("unicode string", False),
+    ],
+)
 def test_iterable(iterable, value):
     assert util.iterable(iterable) == value
 
@@ -160,13 +206,16 @@ class TestFilename(object):
     ext = "pdb"
     filename2 = "foo.pdb"
 
-    @pytest.mark.parametrize('name, ext, keep, actual_name', [
-        (filename, None, False, filename),
-        (filename, ext, False, filename2),
-        (filename, ext, True, filename),
-        (root, ext, False, filename2),
-        (root, ext, True, filename2)
-    ])
+    @pytest.mark.parametrize(
+        "name, ext, keep, actual_name",
+        [
+            (filename, None, False, filename),
+            (filename, ext, False, filename2),
+            (filename, ext, True, filename),
+            (root, ext, False, filename2),
+            (root, ext, True, filename2),
+        ],
+    )
     def test_string(self, name, ext, keep, actual_name):
         file_name = util.filename(name, ext, keep)
         assert file_name == actual_name
@@ -186,61 +235,65 @@ class TestGeometryFunctions(object):
     a = np.array([np.cos(np.pi / 3), np.sin(np.pi / 3), 0])
     null = np.zeros(3)
 
-    @pytest.mark.parametrize('x_axis, y_axis, value', [
-        # Unit vectors
-        (e1, e2, np.pi / 2),
-        (e1, a, np.pi / 3),
-        # Angle vectors
-        (2 * e1, e2, np.pi / 2),
-        (-2 * e1, e2, np.pi - np.pi / 2),
-        (23.3 * e1, a, np.pi / 3),
-        # Null vector
-        (e1, null, np.nan),
-        # Coleniar
-        (a, a, 0.0)
-    ])
+    @pytest.mark.parametrize(
+        "x_axis, y_axis, value",
+        [
+            # Unit vectors
+            (e1, e2, np.pi / 2),
+            (e1, a, np.pi / 3),
+            # Angle vectors
+            (2 * e1, e2, np.pi / 2),
+            (-2 * e1, e2, np.pi - np.pi / 2),
+            (23.3 * e1, a, np.pi / 3),
+            # Null vector
+            (e1, null, np.nan),
+            # Collinear
+            (a, a, 0.0),
+        ],
+    )
     def test_vectors(self, x_axis, y_axis, value):
         assert_allclose(mdamath.angle(x_axis, y_axis), value)
 
-    @pytest.mark.parametrize('x_axis, y_axis, value', [
-        (-2.3456e7 * e1, 3.4567e-6 * e1, np.pi),
-        (2.3456e7 * e1, 3.4567e-6 * e1, 0.0)
-    ])
+    @pytest.mark.parametrize(
+        "x_axis, y_axis, value",
+        [
+            (-2.3456e7 * e1, 3.4567e-6 * e1, np.pi),
+            (2.3456e7 * e1, 3.4567e-6 * e1, 0.0),
+        ],
+    )
     def test_angle_pi(self, x_axis, y_axis, value):
         assert_almost_equal(mdamath.angle(x_axis, y_axis), value)
 
-    @pytest.mark.parametrize('x', np.linspace(0, np.pi, 20))
+    @pytest.mark.parametrize("x", np.linspace(0, np.pi, 20))
     def test_angle_range(self, x):
-        r = 1000.
+        r = 1000.0
         v = r * np.array([np.cos(x), np.sin(x), 0])
         assert_almost_equal(mdamath.angle(self.e1, v), x, 6)
 
-    @pytest.mark.parametrize('vector, value', [
-        (e3, 1),
-        (a, np.linalg.norm(a)),
-        (null, 0.0)
-    ])
+    @pytest.mark.parametrize(
+        "vector, value", [(e3, 1), (a, np.linalg.norm(a)), (null, 0.0)]
+    )
     def test_norm(self, vector, value):
         assert mdamath.norm(vector) == value
 
-    @pytest.mark.parametrize('x', np.linspace(0, np.pi, 20))
+    @pytest.mark.parametrize("x", np.linspace(0, np.pi, 20))
     def test_norm_range(self, x):
-        r = 1000.
+        r = 1000.0
         v = r * np.array([np.cos(x), np.sin(x), 0])
         assert_almost_equal(mdamath.norm(v), r, 6)
 
-    @pytest.mark.parametrize('vec1, vec2, value', [
-        (e1, e2, e3),
-        (e1, null, 0.0)
-    ])
+    @pytest.mark.parametrize(
+        "vec1, vec2, value", [(e1, e2, e3), (e1, null, 0.0)]
+    )
     def test_normal(self, vec1, vec2, value):
         assert_allclose(mdamath.normal(vec1, vec2), value)
         # add more non-trivial tests
 
     def test_angle_lower_clip(self):
         a = np.array([0.1, 0, 0.2])
-        x = np.dot(a**0.5, -(a**0.5)) / \
-            (mdamath.norm(a**0.5) * mdamath.norm(-(a**0.5)))
+        x = np.dot(a**0.5, -(a**0.5)) / (
+            mdamath.norm(a**0.5) * mdamath.norm(-(a**0.5))
+        )
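+        # the exact cosine between v and -v is -1, but floating-point rounding can
+        # push the computed value slightly below -1; angle() must clip it back to pi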
         assert x < -1.0
         assert mdamath.angle(a, -(a)) == np.pi
         assert mdamath.angle(a**0.5, -(a**0.5)) == np.pi
@@ -329,9 +382,10 @@ def ref_tribox(self, tri_vecs):
             box = np.zeros(6, dtype=np.float32)
         return box
 
-    @pytest.mark.parametrize('lengths', comb_wr([-1, 0, 1, 2], 3))
-    @pytest.mark.parametrize('angles',
-                             comb_wr([-10, 0, 20, 70, 90, 120, 180], 3))
+    @pytest.mark.parametrize("lengths", comb_wr([-1, 0, 1, 2], 3))
+    @pytest.mark.parametrize(
+        "angles", comb_wr([-10, 0, 20, 70, 90, 120, 180], 3)
+    )
     def test_triclinic_vectors(self, lengths, angles):
         box = lengths + angles
         ref = self.ref_trivecs(box)
@@ -340,11 +394,11 @@ def test_triclinic_vectors(self, lengths, angles):
         # check for default dtype:
         assert res.dtype == np.float32
         # belts and braces, make sure upper triangle is always zero:
-        assert not(res[0, 1] or res[0, 2] or res[1, 2])
+        assert not (res[0, 1] or res[0, 2] or res[1, 2])
 
-    @pytest.mark.parametrize('alpha', (60, 90))
-    @pytest.mark.parametrize('beta', (60, 90))
-    @pytest.mark.parametrize('gamma', (60, 90))
+    @pytest.mark.parametrize("alpha", (60, 90))
+    @pytest.mark.parametrize("beta", (60, 90))
+    @pytest.mark.parametrize("gamma", (60, 90))
     def test_triclinic_vectors_right_angle_zeros(self, alpha, beta, gamma):
         angles = [alpha, beta, gamma]
         box = [10, 20, 30] + angles
@@ -375,7 +429,7 @@ def test_triclinic_vectors_right_angle_zeros(self, alpha, beta, gamma):
         else:
             assert mat[1, 0] and mat[2, 0] and mat[2, 1]
 
-    @pytest.mark.parametrize('dtype', (int, float, np.float32, np.float64))
+    @pytest.mark.parametrize("dtype", (int, float, np.float32, np.float64))
     def test_triclinic_vectors_retval(self, dtype):
         # valid box
         box = [1, 1, 1, 70, 80, 90]
@@ -408,26 +462,33 @@ def test_triclinic_vectors_box_cycle(self):
                 for g in range(10, 91, 10):
                     ref = np.array([1, 1, 1, a, b, g], dtype=np.float32)
                     res = mdamath.triclinic_box(
-                        *mdamath.triclinic_vectors(ref))
+                        *mdamath.triclinic_vectors(ref)
+                    )
                     if not np.all(res == 0.0):
                         assert_almost_equal(res, ref, 5)
 
-    @pytest.mark.parametrize('angles', ([70, 70, 70],
-                                        [70, 70, 90],
-                                        [70, 90, 70],
-                                        [90, 70, 70],
-                                        [70, 90, 90],
-                                        [90, 70, 90],
-                                        [90, 90, 70]))
+    @pytest.mark.parametrize(
+        "angles",
+        (
+            [70, 70, 70],
+            [70, 70, 90],
+            [70, 90, 70],
+            [90, 70, 70],
+            [70, 90, 90],
+            [90, 70, 90],
+            [90, 90, 70],
+        ),
+    )
     def test_triclinic_vectors_box_cycle_exact(self, angles):
         # These cycles were inexact prior to PR #2201
         ref = np.array([10.1, 10.1, 10.1] + angles, dtype=np.float32)
         res = mdamath.triclinic_box(*mdamath.triclinic_vectors(ref))
         assert_allclose(res, ref)
 
-    @pytest.mark.parametrize('lengths', comb_wr([-1, 0, 1, 2], 3))
-    @pytest.mark.parametrize('angles',
-                             comb_wr([-10, 0, 20, 70, 90, 120, 180], 3))
+    @pytest.mark.parametrize("lengths", comb_wr([-1, 0, 1, 2], 3))
+    @pytest.mark.parametrize(
+        "angles", comb_wr([-10, 0, 20, 70, 90, 120, 180], 3)
+    )
     def test_triclinic_box(self, lengths, angles):
         tri_vecs = self.ref_trivecs_unsafe(lengths + angles)
         ref = self.ref_tribox(tri_vecs)
@@ -435,14 +496,17 @@ def test_triclinic_box(self, lengths, angles):
         assert_array_equal(res, ref)
         assert res.dtype == ref.dtype
 
-    @pytest.mark.parametrize('lengths', comb_wr([-1, 0, 1, 2], 3))
-    @pytest.mark.parametrize('angles',
-                             comb_wr([-10, 0, 20, 70, 90, 120, 180], 3))
+    @pytest.mark.parametrize("lengths", comb_wr([-1, 0, 1, 2], 3))
+    @pytest.mark.parametrize(
+        "angles", comb_wr([-10, 0, 20, 70, 90, 120, 180], 3)
+    )
     def test_box_volume(self, lengths, angles):
         box = np.array(lengths + angles, dtype=np.float32)
-        assert_almost_equal(mdamath.box_volume(box),
-                            np.linalg.det(self.ref_trivecs(box)),
-                            decimal=5)
+        assert_almost_equal(
+            mdamath.box_volume(box),
+            np.linalg.det(self.ref_trivecs(box)),
+            decimal=5,
+        )
 
     def test_sarrus_det(self):
         comb = comb_wr(np.linspace(-133.7, 133.7, num=5), 9)
@@ -459,7 +523,7 @@ def test_sarrus_det(self):
         assert_almost_equal(res, ref, 7)
         assert ref.dtype == res.dtype == np.float64
 
-    @pytest.mark.parametrize('shape', ((0,), (3, 2), (2, 3), (1, 1, 3, 1)))
+    @pytest.mark.parametrize("shape", ((0,), (3, 2), (2, 3), (1, 1, 3, 1)))
     def test_sarrus_det_wrong_shape(self, shape):
         matrix = np.zeros(shape)
         with pytest.raises(ValueError):
@@ -545,18 +609,32 @@ def test_double_precision_box(self):
             residue_segindex=[0],
             trajectory=True,
             velocities=False,
-            forces=False)
+            forces=False,
+        )
         ts = u.trajectory.ts
         ts.frame = 0
         ts.dimensions = [10, 10, 10, 90, 90, 90]
         # assert ts.dimensions.dtype == np.float64
         # not applicable since #2213
-        ts.positions = np.array([[1, 1, 1, ], [9, 9, 9]], dtype=np.float32)
+        ts.positions = np.array(
+            [
+                [1, 1, 1],
+                [9, 9, 9],
+            ],
+            dtype=np.float32,
+        )
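+        # the two bonded atoms sit near opposite faces of the 10x10x10 box, so the
+        # bond crosses the periodic boundary; make_whole should shift the second
+        # atom to (-1, -1, -1), next to the first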
         u.add_TopologyAttr(Bonds([(0, 1)]))
         mdamath.make_whole(u.atoms)
-        assert_array_almost_equal(u.atoms.positions,
-                                  np.array([[1, 1, 1, ], [-1, -1, -1]],
-                                           dtype=np.float32))
+        assert_array_almost_equal(
+            u.atoms.positions,
+            np.array(
+                [
+                    [1, 1, 1],
+                    [-1, -1, -1],
+                ],
+                dtype=np.float32,
+            ),
+        )
 
     @staticmethod
     @pytest.fixture()
@@ -571,7 +649,7 @@ def test_no_bonds(self):
             mdamath.make_whole(ag)
 
     def test_zero_box_size(self, universe, ag):
-        universe.dimensions = [0., 0., 0., 90., 90., 90.]
+        universe.dimensions = [0.0, 0.0, 0.0, 90.0, 90.0, 90.0]
         with pytest.raises(ValueError):
             mdamath.make_whole(ag)
 
@@ -593,14 +671,26 @@ def test_solve_1(self, universe, ag):
         mdamath.make_whole(ag)
 
         assert_array_almost_equal(universe.atoms[:4].positions, refpos)
-        assert_array_almost_equal(universe.atoms[4].position,
-                                  np.array([110.0, 50.0, 0.0]), decimal=self.prec)
-        assert_array_almost_equal(universe.atoms[5].position,
-                                  np.array([110.0, 60.0, 0.0]), decimal=self.prec)
-        assert_array_almost_equal(universe.atoms[6].position,
-                                  np.array([110.0, 40.0, 0.0]), decimal=self.prec)
-        assert_array_almost_equal(universe.atoms[7].position,
-                                  np.array([120.0, 50.0, 0.0]), decimal=self.prec)
+        assert_array_almost_equal(
+            universe.atoms[4].position,
+            np.array([110.0, 50.0, 0.0]),
+            decimal=self.prec,
+        )
+        assert_array_almost_equal(
+            universe.atoms[5].position,
+            np.array([110.0, 60.0, 0.0]),
+            decimal=self.prec,
+        )
+        assert_array_almost_equal(
+            universe.atoms[6].position,
+            np.array([110.0, 40.0, 0.0]),
+            decimal=self.prec,
+        )
+        assert_array_almost_equal(
+            universe.atoms[7].position,
+            np.array([120.0, 50.0, 0.0]),
+            decimal=self.prec,
+        )
 
     def test_solve_2(self, universe, ag):
         # use but specify the center atom
@@ -610,14 +700,26 @@ def test_solve_2(self, universe, ag):
         mdamath.make_whole(ag, reference_atom=universe.residues[0].atoms[4])
 
         assert_array_almost_equal(universe.atoms[4:8].positions, refpos)
-        assert_array_almost_equal(universe.atoms[0].position,
-                                  np.array([-20.0, 50.0, 0.0]), decimal=self.prec)
-        assert_array_almost_equal(universe.atoms[1].position,
-                                  np.array([-10.0, 50.0, 0.0]), decimal=self.prec)
-        assert_array_almost_equal(universe.atoms[2].position,
-                                  np.array([-10.0, 60.0, 0.0]), decimal=self.prec)
-        assert_array_almost_equal(universe.atoms[3].position,
-                                  np.array([-10.0, 40.0, 0.0]), decimal=self.prec)
+        assert_array_almost_equal(
+            universe.atoms[0].position,
+            np.array([-20.0, 50.0, 0.0]),
+            decimal=self.prec,
+        )
+        assert_array_almost_equal(
+            universe.atoms[1].position,
+            np.array([-10.0, 50.0, 0.0]),
+            decimal=self.prec,
+        )
+        assert_array_almost_equal(
+            universe.atoms[2].position,
+            np.array([-10.0, 60.0, 0.0]),
+            decimal=self.prec,
+        )
+        assert_array_almost_equal(
+            universe.atoms[3].position,
+            np.array([-10.0, 40.0, 0.0]),
+            decimal=self.prec,
+        )
 
     def test_solve_3(self, universe):
         # put in a chunk that doesn't need any work
@@ -638,12 +740,15 @@ def test_solve_4(self, universe):
         mdamath.make_whole(chunk)
 
         assert_array_almost_equal(universe.atoms[7].position, refpos)
-        assert_array_almost_equal(universe.atoms[4].position,
-                                  np.array([110.0, 50.0, 0.0]))
-        assert_array_almost_equal(universe.atoms[5].position,
-                                  np.array([110.0, 60.0, 0.0]))
-        assert_array_almost_equal(universe.atoms[6].position,
-                                  np.array([110.0, 40.0, 0.0]))
+        assert_array_almost_equal(
+            universe.atoms[4].position, np.array([110.0, 50.0, 0.0])
+        )
+        assert_array_almost_equal(
+            universe.atoms[5].position, np.array([110.0, 60.0, 0.0])
+        )
+        assert_array_almost_equal(
+            universe.atoms[6].position, np.array([110.0, 40.0, 0.0])
+        )
 
     def test_double_frag_short_bonds(self, universe, ag):
         # previous bug where if two fragments are given
@@ -655,7 +760,7 @@ def test_double_frag_short_bonds(self, universe, ag):
 
     def test_make_whole_triclinic(self):
         u = mda.Universe(TPR, GRO)
-        thing = u.select_atoms('not resname SOL NA+')
+        thing = u.select_atoms("not resname SOL NA+")
         mdamath.make_whole(thing)
 
         blengths = thing.bonds.values()
@@ -667,18 +772,20 @@ def test_make_whole_fullerene(self):
         u = mda.Universe(fullerene)
 
         bbox = u.atoms.bbox()
-        u.dimensions = np.r_[bbox[1] - bbox[0], [90]*3]
+        u.dimensions = np.r_[bbox[1] - bbox[0], [90] * 3]
 
         blengths = u.atoms.bonds.values()
         # kaboom
         u.atoms[::2].translate([u.dimensions[0], -2 * u.dimensions[1], 0.0])
         u.atoms[1::2].translate(
-            [0.0, 7 * u.dimensions[1], -5 * u.dimensions[2]])
+            [0.0, 7 * u.dimensions[1], -5 * u.dimensions[2]]
+        )
 
         mdamath.make_whole(u.atoms)
 
         assert_array_almost_equal(
-            u.atoms.bonds.values(), blengths, decimal=self.prec)
+            u.atoms.bonds.values(), blengths, decimal=self.prec
+        )
 
     def test_make_whole_multiple_molecules(self):
         u = mda.Universe(two_water_gro, guess_bonds=True)
@@ -700,36 +807,36 @@ def __init__(self):
         self.ref6 = 6.0
         # For universe-validated caches
         # One-line lambda-like class
-        self.universe = type('Universe', (), dict())()
-        self.universe._cache = {'_valid': {}}
+        self.universe = type("Universe", (), dict())()
+        self.universe._cache = {"_valid": {}}
 
-    @cached('val1')
+    @cached("val1")
     def val1(self):
         return self.ref1
 
     # Do one with property decorator as these are used together often
     @property
-    @cached('val2')
+    @cached("val2")
     def val2(self):
         return self.ref2
 
     # Check use of property setters
     @property
-    @cached('val3')
+    @cached("val3")
     def val3(self):
         return self.ref3
 
     @val3.setter
     def val3(self, new):
-        self._clear_caches('val3')
-        self._fill_cache('val3', new)
+        self._clear_caches("val3")
+        self._fill_cache("val3", new)
 
     @val3.deleter
     def val3(self):
-        self._clear_caches('val3')
+        self._clear_caches("val3")
 
     # Check that args are passed through to underlying functions
-    @cached('val4')
+    @cached("val4")
     def val4(self, n1, n2):
         return self._init_val_4(n1, n2)
 
@@ -737,7 +844,7 @@ def _init_val_4(self, m1, m2):
         return self.ref4 + m1 + m2
 
     # Args and Kwargs
-    @cached('val5')
+    @cached("val5")
     def val5(self, n, s=None):
         return self._init_val_5(n, s=s)
 
@@ -746,7 +853,7 @@ def _init_val_5(self, n, s=None):
 
     # Property decorator and universally-validated cache
     @property
-    @cached('val6', universe_validation=True)
+    @cached("val6", universe_validation=True)
     def val6(self):
         return self.ref5 + 1.0
 
@@ -772,40 +879,40 @@ def obj(self):
 
     def test_val1_lookup(self, obj):
         obj._clear_caches()
-        assert 'val1' not in obj._cache
+        assert "val1" not in obj._cache
         assert obj.val1() == obj.ref1
         ret = obj.val1()
-        assert 'val1' in obj._cache
-        assert obj._cache['val1'] == ret
-        assert obj.val1() is obj._cache['val1']
+        assert "val1" in obj._cache
+        assert obj._cache["val1"] == ret
+        assert obj.val1() is obj._cache["val1"]
 
     def test_val1_inject(self, obj):
         # Put something else into the cache and check it gets returned
         # this tests that the cache is blindly being used
         obj._clear_caches()
         ret = obj.val1()
-        assert 'val1' in obj._cache
+        assert "val1" in obj._cache
         assert ret == obj.ref1
         new = 77.0
-        obj._fill_cache('val1', new)
+        obj._fill_cache("val1", new)
         assert obj.val1() == new
 
     # Managed property
     def test_val2_lookup(self, obj):
         obj._clear_caches()
-        assert 'val2' not in obj._cache
+        assert "val2" not in obj._cache
         assert obj.val2 == obj.ref2
         ret = obj.val2
-        assert 'val2' in obj._cache
-        assert obj._cache['val2'] == ret
+        assert "val2" in obj._cache
+        assert obj._cache["val2"] == ret
 
     def test_val2_inject(self, obj):
         obj._clear_caches()
         ret = obj.val2
-        assert 'val2' in obj._cache
+        assert "val2" in obj._cache
         assert ret == obj.ref2
         new = 77.0
-        obj._fill_cache('val2', new)
+        obj._fill_cache("val2", new)
         assert obj.val2 == new
 
         # Setter on cached attribute
@@ -816,18 +923,18 @@ def test_val3_set(self, obj):
         new = 99.0
         obj.val3 = new
         assert obj.val3 == new
-        assert obj._cache['val3'] == new
+        assert obj._cache["val3"] == new
 
     def test_val3_del(self, obj):
         # Check that deleting the property removes it from cache,
         obj._clear_caches()
         assert obj.val3 == obj.ref3
-        assert 'val3' in obj._cache
+        assert "val3" in obj._cache
         del obj.val3
-        assert 'val3' not in obj._cache
+        assert "val3" not in obj._cache
         # But allows it to work as usual afterwards
         assert obj.val3 == obj.ref3
-        assert 'val3' in obj._cache
+        assert "val3" in obj._cache
 
     # Pass args
     def test_val4_args(self, obj):
@@ -840,27 +947,27 @@ def test_val4_args(self, obj):
     # Pass args and kwargs
     def test_val5_kwargs(self, obj):
         obj._clear_caches()
-        assert obj.val5(5, s='abc') == 5 * 'abc'
+        assert obj.val5(5, s="abc") == 5 * "abc"
 
-        assert obj.val5(5, s='!!!') == 5 * 'abc'
+        assert obj.val5(5, s="!!!") == 5 * "abc"
 
     # property decorator, with universe validation
     def test_val6_universe_validation(self, obj):
         obj._clear_caches()
-        assert not hasattr(obj, '_cache_key')
-        assert 'val6' not in obj._cache
-        assert 'val6' not in obj.universe._cache['_valid']
+        assert not hasattr(obj, "_cache_key")
+        assert "val6" not in obj._cache
+        assert "val6" not in obj.universe._cache["_valid"]
 
         ret = obj.val6  # Trigger caching
         assert obj.val6 == obj.ref6
         assert ret is obj.val6
-        assert 'val6' in obj._cache
-        assert 'val6' in obj.universe._cache['_valid']
-        assert obj._cache_key in obj.universe._cache['_valid']['val6']
-        assert obj._cache['val6'] is ret
+        assert "val6" in obj._cache
+        assert "val6" in obj.universe._cache["_valid"]
+        assert obj._cache_key in obj.universe._cache["_valid"]["val6"]
+        assert obj._cache["val6"] is ret
 
         # Invalidate cache at universe level
-        obj.universe._cache['_valid']['val6'].clear()
+        obj.universe._cache["_valid"]["val6"].clear()
         ret2 = obj.val6
         assert ret2 is obj.val6
         assert ret2 is not ret
@@ -874,18 +981,19 @@ def test_val6_universe_validation(self, obj):
 
 
 class TestConvFloat(object):
-    @pytest.mark.parametrize('s, output', [
-        ('0.45', 0.45),
-        ('.45', 0.45),
-        ('a.b', 'a.b')
-    ])
+    @pytest.mark.parametrize(
+        "s, output", [("0.45", 0.45), (".45", 0.45), ("a.b", "a.b")]
+    )
     def test_float(self, s, output):
         assert util.conv_float(s) == output
 
-    @pytest.mark.parametrize('input, output', [
-        (('0.45', '0.56', '6.7'), [0.45, 0.56, 6.7]),
-        (('0.45', 'a.b', '!!'), [0.45, 'a.b', '!!'])
-    ])
+    @pytest.mark.parametrize(
+        "input, output",
+        [
+            (("0.45", "0.56", "6.7"), [0.45, 0.56, 6.7]),
+            (("0.45", "a.b", "!!"), [0.45, "a.b", "!!"]),
+        ],
+    )
     def test_map(self, input, output):
         ret = [util.conv_float(el) for el in input]
         assert ret == output
@@ -894,7 +1002,7 @@ def test_map(self, input, output):
 class TestFixedwidthBins(object):
     def test_keys(self):
         ret = util.fixedwidth_bins(0.5, 1.0, 2.0)
-        for k in ['Nbins', 'delta', 'min', 'max']:
+        for k in ["Nbins", "delta", "min", "max"]:
             assert k in ret
 
     def test_ValueError(self):
@@ -902,49 +1010,63 @@ def test_ValueError(self):
             util.fixedwidth_bins(0.1, 5.0, 4.0)
 
     @pytest.mark.parametrize(
-        'delta, xmin, xmax, output_Nbins, output_delta, output_min, output_max',
+        "delta, xmin, xmax, output_Nbins, output_delta, output_min, output_max",
         [
             (0.1, 4.0, 5.0, 10, 0.1, 4.0, 5.0),
-            (0.4, 4.0, 5.0, 3, 0.4, 3.9, 5.1)
-        ])
-    def test_usage(self, delta, xmin, xmax, output_Nbins, output_delta,
-                   output_min, output_max):
+            (0.4, 4.0, 5.0, 3, 0.4, 3.9, 5.1),
+        ],
+    )
+    def test_usage(
+        self,
+        delta,
+        xmin,
+        xmax,
+        output_Nbins,
+        output_delta,
+        output_min,
+        output_max,
+    ):
         ret = util.fixedwidth_bins(delta, xmin, xmax)
-        assert ret['Nbins'] == output_Nbins
-        assert ret['delta'] == output_delta
-        assert ret['min'], output_min
-        assert ret['max'], output_max
+        assert ret["Nbins"] == output_Nbins
+        assert ret["delta"] == output_delta
+        assert ret["min"], output_min
+        assert ret["max"], output_max
 
 
 @pytest.fixture
 def atoms():
     from MDAnalysisTests import make_Universe
+
     u = make_Universe(extras=("masses",), size=(3, 1, 1))
     return u.atoms
 
 
-@pytest.mark.parametrize('weights,result',
-                         [
-                             (None, None),
-                             ("mass", np.array([5.1, 4.2, 3.3])),
-                             (np.array([12.0, 1.0, 12.0]),
-                              np.array([12.0, 1.0, 12.0])),
-                             ([12.0, 1.0, 12.0], np.array([12.0, 1.0, 12.0])),
-                             (range(3), np.arange(3, dtype=int)),
-                         ])
+@pytest.mark.parametrize(
+    "weights,result",
+    [
+        (None, None),
+        ("mass", np.array([5.1, 4.2, 3.3])),
+        (np.array([12.0, 1.0, 12.0]), np.array([12.0, 1.0, 12.0])),
+        ([12.0, 1.0, 12.0], np.array([12.0, 1.0, 12.0])),
+        (range(3), np.arange(3, dtype=int)),
+    ],
+)
 def test_check_weights_ok(atoms, weights, result):
     assert_array_equal(util.get_weights(atoms, weights), result)
 
 
-@pytest.mark.parametrize('weights',
-                         [42,
-                          "geometry",
-                          np.array(1.0),
-                          np.array([12.0, 1.0, 12.0, 1.0]),
-                          [12.0, 1.0],
-                          np.array([[12.0, 1.0, 12.0]]),
-                          np.array([[12.0, 1.0, 12.0], [12.0, 1.0, 12.0]]),
-                          ])
+@pytest.mark.parametrize(
+    "weights",
+    [
+        42,
+        "geometry",
+        np.array(1.0),
+        np.array([12.0, 1.0, 12.0, 1.0]),
+        [12.0, 1.0],
+        np.array([[12.0, 1.0, 12.0]]),
+        np.array([[12.0, 1.0, 12.0], [12.0, 1.0, 12.0]]),
+    ],
+)
 def test_check_weights_raises_ValueError(atoms, weights):
     with pytest.raises(ValueError):
         util.get_weights(atoms, weights)
@@ -956,195 +1078,303 @@ class TestGuessFormat(object):
     Tests also getting the appropriate Parser and Reader from a
     given filename
     """
+
     # list of known formats, followed by the desired Parser and Reader
     # None indicates that there isn't a Reader for this format
     # All formats call fallback to the MinimalParser
     formats = [
-        ('CHAIN', mda.topology.MinimalParser.MinimalParser,
-         mda.coordinates.chain.ChainReader),
-        ('CONFIG', mda.topology.DLPolyParser.ConfigParser,
-         mda.coordinates.DLPoly.ConfigReader),
-        ('CRD', mda.topology.CRDParser.CRDParser, mda.coordinates.CRD.CRDReader),
-        ('DATA', mda.topology.LAMMPSParser.DATAParser,
-         mda.coordinates.LAMMPS.DATAReader),
-        ('DCD', mda.topology.MinimalParser.MinimalParser,
-         mda.coordinates.DCD.DCDReader),
-        ('DMS', mda.topology.DMSParser.DMSParser, mda.coordinates.DMS.DMSReader),
-        ('GMS', mda.topology.GMSParser.GMSParser, mda.coordinates.GMS.GMSReader),
-        ('GRO', mda.topology.GROParser.GROParser, mda.coordinates.GRO.GROReader),
-        ('HISTORY', mda.topology.DLPolyParser.HistoryParser,
-         mda.coordinates.DLPoly.HistoryReader),
-        ('INPCRD', mda.topology.MinimalParser.MinimalParser,
-         mda.coordinates.INPCRD.INPReader),
-        ('LAMMPS', mda.topology.MinimalParser.MinimalParser,
-         mda.coordinates.LAMMPS.DCDReader),
-        ('MDCRD', mda.topology.MinimalParser.MinimalParser,
-         mda.coordinates.TRJ.TRJReader),
-        ('MMTF', mda.topology.MMTFParser.MMTFParser,
-         mda.coordinates.MMTF.MMTFReader),
-        ('MOL2', mda.topology.MOL2Parser.MOL2Parser,
-         mda.coordinates.MOL2.MOL2Reader),
-        ('NC', mda.topology.MinimalParser.MinimalParser,
-         mda.coordinates.TRJ.NCDFReader),
-        ('NCDF', mda.topology.MinimalParser.MinimalParser,
-         mda.coordinates.TRJ.NCDFReader),
-        ('PDB', mda.topology.PDBParser.PDBParser, mda.coordinates.PDB.PDBReader),
-        ('PDBQT', mda.topology.PDBQTParser.PDBQTParser,
-         mda.coordinates.PDBQT.PDBQTReader),
-        ('PRMTOP', mda.topology.TOPParser.TOPParser, None),
-        ('PQR', mda.topology.PQRParser.PQRParser, mda.coordinates.PQR.PQRReader),
-        ('PSF', mda.topology.PSFParser.PSFParser, None),
-        ('RESTRT', mda.topology.MinimalParser.MinimalParser,
-         mda.coordinates.INPCRD.INPReader),
-        ('TOP', mda.topology.TOPParser.TOPParser, None),
-        ('TPR', mda.topology.TPRParser.TPRParser, None),
-        ('TRJ', mda.topology.MinimalParser.MinimalParser,
-         mda.coordinates.TRJ.TRJReader),
-        ('TRR', mda.topology.MinimalParser.MinimalParser,
-         mda.coordinates.TRR.TRRReader),
-        ('XML', mda.topology.HoomdXMLParser.HoomdXMLParser, None),
-        ('XPDB', mda.topology.ExtendedPDBParser.ExtendedPDBParser,
-         mda.coordinates.PDB.ExtendedPDBReader),
-        ('XTC', mda.topology.MinimalParser.MinimalParser,
-         mda.coordinates.XTC.XTCReader),
-        ('XYZ', mda.topology.XYZParser.XYZParser, mda.coordinates.XYZ.XYZReader),
-        ('TRZ', mda.topology.MinimalParser.MinimalParser,
-         mda.coordinates.TRZ.TRZReader),
+        (
+            "CHAIN",
+            mda.topology.MinimalParser.MinimalParser,
+            mda.coordinates.chain.ChainReader,
+        ),
+        (
+            "CONFIG",
+            mda.topology.DLPolyParser.ConfigParser,
+            mda.coordinates.DLPoly.ConfigReader,
+        ),
+        (
+            "CRD",
+            mda.topology.CRDParser.CRDParser,
+            mda.coordinates.CRD.CRDReader,
+        ),
+        (
+            "DATA",
+            mda.topology.LAMMPSParser.DATAParser,
+            mda.coordinates.LAMMPS.DATAReader,
+        ),
+        (
+            "DCD",
+            mda.topology.MinimalParser.MinimalParser,
+            mda.coordinates.DCD.DCDReader,
+        ),
+        (
+            "DMS",
+            mda.topology.DMSParser.DMSParser,
+            mda.coordinates.DMS.DMSReader,
+        ),
+        (
+            "GMS",
+            mda.topology.GMSParser.GMSParser,
+            mda.coordinates.GMS.GMSReader,
+        ),
+        (
+            "GRO",
+            mda.topology.GROParser.GROParser,
+            mda.coordinates.GRO.GROReader,
+        ),
+        (
+            "HISTORY",
+            mda.topology.DLPolyParser.HistoryParser,
+            mda.coordinates.DLPoly.HistoryReader,
+        ),
+        (
+            "INPCRD",
+            mda.topology.MinimalParser.MinimalParser,
+            mda.coordinates.INPCRD.INPReader,
+        ),
+        (
+            "LAMMPS",
+            mda.topology.MinimalParser.MinimalParser,
+            mda.coordinates.LAMMPS.DCDReader,
+        ),
+        (
+            "MDCRD",
+            mda.topology.MinimalParser.MinimalParser,
+            mda.coordinates.TRJ.TRJReader,
+        ),
+        (
+            "MMTF",
+            mda.topology.MMTFParser.MMTFParser,
+            mda.coordinates.MMTF.MMTFReader,
+        ),
+        (
+            "MOL2",
+            mda.topology.MOL2Parser.MOL2Parser,
+            mda.coordinates.MOL2.MOL2Reader,
+        ),
+        (
+            "NC",
+            mda.topology.MinimalParser.MinimalParser,
+            mda.coordinates.TRJ.NCDFReader,
+        ),
+        (
+            "NCDF",
+            mda.topology.MinimalParser.MinimalParser,
+            mda.coordinates.TRJ.NCDFReader,
+        ),
+        (
+            "PDB",
+            mda.topology.PDBParser.PDBParser,
+            mda.coordinates.PDB.PDBReader,
+        ),
+        (
+            "PDBQT",
+            mda.topology.PDBQTParser.PDBQTParser,
+            mda.coordinates.PDBQT.PDBQTReader,
+        ),
+        ("PRMTOP", mda.topology.TOPParser.TOPParser, None),
+        (
+            "PQR",
+            mda.topology.PQRParser.PQRParser,
+            mda.coordinates.PQR.PQRReader,
+        ),
+        ("PSF", mda.topology.PSFParser.PSFParser, None),
+        (
+            "RESTRT",
+            mda.topology.MinimalParser.MinimalParser,
+            mda.coordinates.INPCRD.INPReader,
+        ),
+        ("TOP", mda.topology.TOPParser.TOPParser, None),
+        ("TPR", mda.topology.TPRParser.TPRParser, None),
+        (
+            "TRJ",
+            mda.topology.MinimalParser.MinimalParser,
+            mda.coordinates.TRJ.TRJReader,
+        ),
+        (
+            "TRR",
+            mda.topology.MinimalParser.MinimalParser,
+            mda.coordinates.TRR.TRRReader,
+        ),
+        ("XML", mda.topology.HoomdXMLParser.HoomdXMLParser, None),
+        (
+            "XPDB",
+            mda.topology.ExtendedPDBParser.ExtendedPDBParser,
+            mda.coordinates.PDB.ExtendedPDBReader,
+        ),
+        (
+            "XTC",
+            mda.topology.MinimalParser.MinimalParser,
+            mda.coordinates.XTC.XTCReader,
+        ),
+        (
+            "XYZ",
+            mda.topology.XYZParser.XYZParser,
+            mda.coordinates.XYZ.XYZReader,
+        ),
+        (
+            "TRZ",
+            mda.topology.MinimalParser.MinimalParser,
+            mda.coordinates.TRZ.TRZReader,
+        ),
     ]
     # list of possible compressed extensions
     # include no extension too!
-    compressed_extensions = ['.bz2', '.gz']
+    compressed_extensions = [".bz2", ".gz"]
 
-    @pytest.mark.parametrize('extention',
-                             [format_tuple[0].upper() for format_tuple in
-                              formats] +
-                             [format_tuple[0].lower() for format_tuple in
-                              formats])
+    @pytest.mark.parametrize(
+        "extention",
+        [format_tuple[0].upper() for format_tuple in formats]
+        + [format_tuple[0].lower() for format_tuple in formats],
+    )
     def test_get_extention(self, extention):
         """Check that get_ext works"""
-        file_name = 'file.{0}'.format(extention)
+        file_name = "file.{0}".format(extention)
         a, b = util.get_ext(file_name)
 
-        assert a == 'file'
+        assert a == "file"
         assert b == extention.lower()
 
-    @pytest.mark.parametrize('extention',
-                             [format_tuple[0].upper() for format_tuple in
-                              formats] +
-                             [format_tuple[0].lower() for format_tuple in
-                              formats])
+    @pytest.mark.parametrize(
+        "extention",
+        [format_tuple[0].upper() for format_tuple in formats]
+        + [format_tuple[0].lower() for format_tuple in formats],
+    )
     def test_compressed_without_compression_extention(self, extention):
         """Check that format suffixed by compressed extension works"""
-        file_name = 'file.{0}'.format(extention)
+        file_name = "file.{0}".format(extention)
         a = util.format_from_filename_extension(file_name)
         # expect answer to always be uppercase
         assert a == extention.upper()
 
-    @pytest.mark.parametrize('extention',
-                             [format_tuple[0].upper() for format_tuple in
-                              formats] +
-                             [format_tuple[0].lower() for format_tuple in
-                              formats])
-    @pytest.mark.parametrize('compression_extention', compressed_extensions)
+    @pytest.mark.parametrize(
+        "extention",
+        [format_tuple[0].upper() for format_tuple in formats]
+        + [format_tuple[0].lower() for format_tuple in formats],
+    )
+    @pytest.mark.parametrize("compression_extention", compressed_extensions)
     def test_compressed(self, extention, compression_extention):
         """Check that format suffixed by compressed extension works"""
-        file_name = 'file.{0}{1}'.format(extention, compression_extention)
+        file_name = "file.{0}{1}".format(extention, compression_extention)
         a = util.format_from_filename_extension(file_name)
         # expect answer to always be uppercase
         assert a == extention.upper()
 
-    @pytest.mark.parametrize('extention',
-                             [format_tuple[0].upper() for format_tuple in
-                              formats] + [format_tuple[0].lower() for
-                                          format_tuple in formats])
+    @pytest.mark.parametrize(
+        "extention",
+        [format_tuple[0].upper() for format_tuple in formats]
+        + [format_tuple[0].lower() for format_tuple in formats],
+    )
     def test_guess_format(self, extention):
-        file_name = 'file.{0}'.format(extention)
+        file_name = "file.{0}".format(extention)
         a = util.guess_format(file_name)
         # expect answer to always be uppercase
         assert a == extention.upper()
 
-    @pytest.mark.parametrize('extention',
-                             [format_tuple[0].upper() for format_tuple in
-                              formats] + [format_tuple[0].lower() for
-                                          format_tuple in formats])
-    @pytest.mark.parametrize('compression_extention', compressed_extensions)
+    @pytest.mark.parametrize(
+        "extention",
+        [format_tuple[0].upper() for format_tuple in formats]
+        + [format_tuple[0].lower() for format_tuple in formats],
+    )
+    @pytest.mark.parametrize("compression_extention", compressed_extensions)
     def test_guess_format_compressed(self, extention, compression_extention):
-        file_name = 'file.{0}{1}'.format(extention, compression_extention)
+        file_name = "file.{0}{1}".format(extention, compression_extention)
         a = util.guess_format(file_name)
         # expect answer to always be uppercase
         assert a == extention.upper()
 
-    @pytest.mark.parametrize('extention, parser',
-                             [(format_tuple[0], format_tuple[1]) for
-                              format_tuple in formats if
-                              format_tuple[1] is not None]
-                             )
+    @pytest.mark.parametrize(
+        "extention, parser",
+        [
+            (format_tuple[0], format_tuple[1])
+            for format_tuple in formats
+            if format_tuple[1] is not None
+        ],
+    )
     def test_get_parser(self, extention, parser):
-        file_name = 'file.{0}'.format(extention)
+        file_name = "file.{0}".format(extention)
         a = mda.topology.core.get_parser_for(file_name)
 
         assert a == parser
 
-    @pytest.mark.parametrize('extention, parser',
-                             [(format_tuple[0], format_tuple[1]) for
-                              format_tuple in formats if
-                              format_tuple[1] is not None]
-                             )
-    @pytest.mark.parametrize('compression_extention', compressed_extensions)
-    def test_get_parser_compressed(self, extention, parser,
-                                   compression_extention):
-        file_name = 'file.{0}{1}'.format(extention, compression_extention)
+    @pytest.mark.parametrize(
+        "extention, parser",
+        [
+            (format_tuple[0], format_tuple[1])
+            for format_tuple in formats
+            if format_tuple[1] is not None
+        ],
+    )
+    @pytest.mark.parametrize("compression_extention", compressed_extensions)
+    def test_get_parser_compressed(
+        self, extention, parser, compression_extention
+    ):
+        file_name = "file.{0}{1}".format(extention, compression_extention)
         a = mda.topology.core.get_parser_for(file_name)
 
         assert a == parser
 
-    @pytest.mark.parametrize('extention',
-                             [(format_tuple[0], format_tuple[1]) for
-                              format_tuple in formats if
-                              format_tuple[1] is None]
-                             )
+    @pytest.mark.parametrize(
+        "extention",
+        [
+            (format_tuple[0], format_tuple[1])
+            for format_tuple in formats
+            if format_tuple[1] is None
+        ],
+    )
     def test_get_parser_invalid(self, extention):
-        file_name = 'file.{0}'.format(extention)
+        file_name = "file.{0}".format(extention)
         with pytest.raises(ValueError):
             mda.topology.core.get_parser_for(file_name)
 
-    @pytest.mark.parametrize('extention, reader',
-                             [(format_tuple[0], format_tuple[2]) for
-                              format_tuple in formats if
-                              format_tuple[2] is not None]
-                             )
+    @pytest.mark.parametrize(
+        "extention, reader",
+        [
+            (format_tuple[0], format_tuple[2])
+            for format_tuple in formats
+            if format_tuple[2] is not None
+        ],
+    )
     def test_get_reader(self, extention, reader):
-        file_name = 'file.{0}'.format(extention)
+        file_name = "file.{0}".format(extention)
         a = mda.coordinates.core.get_reader_for(file_name)
 
         assert a == reader
 
-    @pytest.mark.parametrize('extention, reader',
-                             [(format_tuple[0], format_tuple[2]) for
-                              format_tuple in formats if
-                              format_tuple[2] is not None]
-                             )
-    @pytest.mark.parametrize('compression_extention', compressed_extensions)
-    def test_get_reader_compressed(self, extention, reader,
-                                   compression_extention):
-        file_name = 'file.{0}{1}'.format(extention, compression_extention)
+    @pytest.mark.parametrize(
+        "extention, reader",
+        [
+            (format_tuple[0], format_tuple[2])
+            for format_tuple in formats
+            if format_tuple[2] is not None
+        ],
+    )
+    @pytest.mark.parametrize("compression_extention", compressed_extensions)
+    def test_get_reader_compressed(
+        self, extention, reader, compression_extention
+    ):
+        file_name = "file.{0}{1}".format(extention, compression_extention)
         a = mda.coordinates.core.get_reader_for(file_name)
 
         assert a == reader
 
-    @pytest.mark.parametrize('extention',
-                             [(format_tuple[0], format_tuple[2]) for
-                              format_tuple in formats if
-                              format_tuple[2] is None]
-                             )
+    @pytest.mark.parametrize(
+        "extention",
+        [
+            (format_tuple[0], format_tuple[2])
+            for format_tuple in formats
+            if format_tuple[2] is None
+        ],
+    )
     def test_get_reader_invalid(self, extention):
-        file_name = 'file.{0}'.format(extention)
+        file_name = "file.{0}".format(extention)
         with pytest.raises(ValueError):
             mda.coordinates.core.get_reader_for(file_name)
 
     def test_check_compressed_format_TypeError(self):
         with pytest.raises(TypeError):
-            util.check_compressed_format(1234, 'bz2')
+            util.check_compressed_format(1234, "bz2")
 
     def test_format_from_filename_TypeError(self):
         with pytest.raises(TypeError):
@@ -1152,7 +1382,7 @@ def test_format_from_filename_TypeError(self):
 
     def test_guess_format_stream_ValueError(self):
         # This stream has no name, so can't guess format
-        s = StringIO('this is a very fun file')
+        s = StringIO("this is a very fun file")
         with pytest.raises(ValueError):
             util.guess_format(s)
 
@@ -1166,22 +1396,23 @@ class TestUniqueRows(object):
     def test_unique_rows_2(self):
         a = np.array([[0, 1], [1, 2], [2, 1], [0, 1], [0, 1], [2, 1]])
 
-        assert_array_equal(util.unique_rows(a),
-                           np.array([[0, 1], [1, 2], [2, 1]]))
+        assert_array_equal(
+            util.unique_rows(a), np.array([[0, 1], [1, 2], [2, 1]])
+        )
 
     def test_unique_rows_3(self):
         a = np.array([[0, 1, 2], [0, 1, 2], [2, 3, 4], [0, 1, 2]])
 
-        assert_array_equal(util.unique_rows(a),
-                           np.array([[0, 1, 2], [2, 3, 4]]))
+        assert_array_equal(
+            util.unique_rows(a), np.array([[0, 1, 2], [2, 3, 4]])
+        )
 
     def test_unique_rows_with_view(self):
         # unique_rows doesn't work when flags['OWNDATA'] is False,
         # happens when second dimension is created through broadcast
         a = np.array([1, 2])
 
-        assert_array_equal(util.unique_rows(a[None, :]),
-                           np.array([[1, 2]]))
+        assert_array_equal(util.unique_rows(a[None, :]), np.array([[1, 2]]))
 
 
 class TestGetWriterFor(object):
@@ -1192,7 +1423,7 @@ def test_no_filename_argument(self):
             mda.coordinates.core.get_writer_for()
 
     def test_precedence(self):
-        writer = mda.coordinates.core.get_writer_for('test.pdb', 'GRO')
+        writer = mda.coordinates.core.get_writer_for("test.pdb", "GRO")
         assert writer == mda.coordinates.GRO.GROWriter
         # Make sure ``get_writer_for`` uses *format* if provided
 
@@ -1200,7 +1431,7 @@ def test_missing_extension(self):
         # Make sure ``get_writer_for`` behave as expected if *filename*
         # has no extension
         with pytest.raises(ValueError):
-            mda.coordinates.core.get_writer_for(filename='test', format=None)
+            mda.coordinates.core.get_writer_for(filename="test", format=None)
 
     def test_extension_empty_string(self):
         """
@@ -1210,29 +1441,30 @@ def test_extension_empty_string(self):
         valid formats.
         """
         with pytest.raises(ValueError):
-            mda.coordinates.core.get_writer_for(filename='test', format='')
+            mda.coordinates.core.get_writer_for(filename="test", format="")
 
     def test_file_no_extension(self):
         """No format given"""
         with pytest.raises(ValueError):
-            mda.coordinates.core.get_writer_for('outtraj')
+            mda.coordinates.core.get_writer_for("outtraj")
 
     def test_wrong_format(self):
         # Make sure ``get_writer_for`` fails if the format is unknown
         with pytest.raises(TypeError):
-            mda.coordinates.core.get_writer_for(filename="fail_me",
-                                                format='UNK')
+            mda.coordinates.core.get_writer_for(
+                filename="fail_me", format="UNK"
+            )
 
     def test_compressed_extension(self):
-        for ext in ('.gz', '.bz2'):
-            fn = 'test.gro' + ext
+        for ext in (".gz", ".bz2"):
+            fn = "test.gro" + ext
             writer = mda.coordinates.core.get_writer_for(filename=fn)
             assert writer == mda.coordinates.GRO.GROWriter
             # Make sure ``get_writer_for`` works with compressed file file names
 
     def test_compressed_extension_fail(self):
-        for ext in ('.gz', '.bz2'):
-            fn = 'test.unk' + ext
+        for ext in (".gz", ".bz2"):
+            fn = "test.unk" + ext
             # Make sure ``get_writer_for`` fails if an unknown format is compressed
             with pytest.raises(TypeError):
                 mda.coordinates.core.get_writer_for(filename=fn)
@@ -1240,83 +1472,131 @@ def test_compressed_extension_fail(self):
     def test_non_string_filename(self):
         # Does ``get_writer_for`` fails with non string filename, no format
         with pytest.raises(ValueError):
-            mda.coordinates.core.get_writer_for(filename=StringIO(),
-                                                format=None)
+            mda.coordinates.core.get_writer_for(
+                filename=StringIO(), format=None
+            )
 
     def test_multiframe_failure(self):
         # does ``get_writer_for`` fail with invalid format and multiframe not None
         with pytest.raises(TypeError):
-            mda.coordinates.core.get_writer_for(filename="fail_me",
-                                                format='UNK', multiframe=True)
-            mda.coordinates.core.get_writer_for(filename="fail_me",
-                                                format='UNK', multiframe=False)
+            mda.coordinates.core.get_writer_for(
+                filename="fail_me", format="UNK", multiframe=True
+            )
+            mda.coordinates.core.get_writer_for(
+                filename="fail_me", format="UNK", multiframe=False
+            )
 
     def test_multiframe_nonsense(self):
         with pytest.raises(ValueError):
-            mda.coordinates.core.get_writer_for(filename='this.gro',
-                                                multiframe='sandwich')
+            mda.coordinates.core.get_writer_for(
+                filename="this.gro", multiframe="sandwich"
+            )
 
     formats = [
         # format name, related class, singleframe, multiframe
-        ('CRD', mda.coordinates.CRD.CRDWriter, True, False),
-        ('DATA', mda.coordinates.LAMMPS.DATAWriter, True, False),
-        ('DCD', mda.coordinates.DCD.DCDWriter, True, True),
+        ("CRD", mda.coordinates.CRD.CRDWriter, True, False),
+        ("DATA", mda.coordinates.LAMMPS.DATAWriter, True, False),
+        ("DCD", mda.coordinates.DCD.DCDWriter, True, True),
         # ('ENT', mda.coordinates.PDB.PDBWriter, True, False),
-        ('GRO', mda.coordinates.GRO.GROWriter, True, False),
-        ('LAMMPS', mda.coordinates.LAMMPS.DCDWriter, True, True),
-        ('MOL2', mda.coordinates.MOL2.MOL2Writer, True, True),
-        ('NCDF', mda.coordinates.TRJ.NCDFWriter, True, True),
-        ('NULL', mda.coordinates.null.NullWriter, True, True),
+        ("GRO", mda.coordinates.GRO.GROWriter, True, False),
+        ("LAMMPS", mda.coordinates.LAMMPS.DCDWriter, True, True),
+        ("MOL2", mda.coordinates.MOL2.MOL2Writer, True, True),
+        ("NCDF", mda.coordinates.TRJ.NCDFWriter, True, True),
+        ("NULL", mda.coordinates.null.NullWriter, True, True),
         # ('PDB', mda.coordinates.PDB.PDBWriter, True, True), special case, done separately
-        ('PDBQT', mda.coordinates.PDBQT.PDBQTWriter, True, False),
-        ('PQR', mda.coordinates.PQR.PQRWriter, True, False),
-        ('TRR', mda.coordinates.TRR.TRRWriter, True, True),
-        ('XTC', mda.coordinates.XTC.XTCWriter, True, True),
-        ('XYZ', mda.coordinates.XYZ.XYZWriter, True, True),
-        ('TRZ', mda.coordinates.TRZ.TRZWriter, True, True),
+        ("PDBQT", mda.coordinates.PDBQT.PDBQTWriter, True, False),
+        ("PQR", mda.coordinates.PQR.PQRWriter, True, False),
+        ("TRR", mda.coordinates.TRR.TRRWriter, True, True),
+        ("XTC", mda.coordinates.XTC.XTCWriter, True, True),
+        ("XYZ", mda.coordinates.XYZ.XYZWriter, True, True),
+        ("TRZ", mda.coordinates.TRZ.TRZWriter, True, True),
     ]
 
-    @pytest.mark.parametrize('format, writer',
-                             [(format_tuple[0], format_tuple[1]) for
-                              format_tuple in formats if
-                              format_tuple[2] is True])
+    @pytest.mark.parametrize(
+        "format, writer",
+        [
+            (format_tuple[0], format_tuple[1])
+            for format_tuple in formats
+            if format_tuple[2] is True
+        ],
+    )
     def test_singleframe(self, format, writer):
-        assert mda.coordinates.core.get_writer_for('this', format=format,
-                                                   multiframe=False) == writer
+        assert (
+            mda.coordinates.core.get_writer_for(
+                "this", format=format, multiframe=False
+            )
+            == writer
+        )
 
-    @pytest.mark.parametrize('format', [(format_tuple[0], format_tuple[1]) for
-                                        format_tuple in formats if
-                                        format_tuple[2] is False])
+    @pytest.mark.parametrize(
+        "format",
+        [
+            (format_tuple[0], format_tuple[1])
+            for format_tuple in formats
+            if format_tuple[2] is False
+        ],
+    )
     def test_singleframe_fails(self, format):
         with pytest.raises(TypeError):
-            mda.coordinates.core.get_writer_for('this', format=format,
-                                                multiframe=False)
+            mda.coordinates.core.get_writer_for(
+                "this", format=format, multiframe=False
+            )
 
-    @pytest.mark.parametrize('format, writer',
-                             [(format_tuple[0], format_tuple[1]) for
-                              format_tuple in formats if
-                              format_tuple[3] is True])
+    @pytest.mark.parametrize(
+        "format, writer",
+        [
+            (format_tuple[0], format_tuple[1])
+            for format_tuple in formats
+            if format_tuple[3] is True
+        ],
+    )
     def test_multiframe(self, format, writer):
-        assert mda.coordinates.core.get_writer_for('this', format=format,
-                                                   multiframe=True) == writer
+        assert (
+            mda.coordinates.core.get_writer_for(
+                "this", format=format, multiframe=True
+            )
+            == writer
+        )
 
-    @pytest.mark.parametrize('format',
-                             [format_tuple[0] for format_tuple in formats if
-                              format_tuple[3] is False])
+    @pytest.mark.parametrize(
+        "format",
+        [
+            format_tuple[0]
+            for format_tuple in formats
+            if format_tuple[3] is False
+        ],
+    )
     def test_multiframe_fails(self, format):
         with pytest.raises(TypeError):
-            mda.coordinates.core.get_writer_for('this', format=format,
-                                                multiframe=True)
+            mda.coordinates.core.get_writer_for(
+                "this", format=format, multiframe=True
+            )
 
     def test_get_writer_for_pdb(self):
-        assert mda.coordinates.core.get_writer_for('this', format='PDB',
-                                                   multiframe=False) == mda.coordinates.PDB.PDBWriter
-        assert mda.coordinates.core.get_writer_for('this', format='PDB',
-                                                   multiframe=True) == mda.coordinates.PDB.MultiPDBWriter
-        assert mda.coordinates.core.get_writer_for('this', format='ENT',
-                                                   multiframe=False) == mda.coordinates.PDB.PDBWriter
-        assert mda.coordinates.core.get_writer_for('this', format='ENT',
-                                                   multiframe=True) == mda.coordinates.PDB.MultiPDBWriter
+        assert (
+            mda.coordinates.core.get_writer_for(
+                "this", format="PDB", multiframe=False
+            )
+            == mda.coordinates.PDB.PDBWriter
+        )
+        assert (
+            mda.coordinates.core.get_writer_for(
+                "this", format="PDB", multiframe=True
+            )
+            == mda.coordinates.PDB.MultiPDBWriter
+        )
+        assert (
+            mda.coordinates.core.get_writer_for(
+                "this", format="ENT", multiframe=False
+            )
+            == mda.coordinates.PDB.PDBWriter
+        )
+        assert (
+            mda.coordinates.core.get_writer_for(
+                "this", format="ENT", multiframe=True
+            )
+            == mda.coordinates.PDB.MultiPDBWriter
+        )
 
 
 class TestBlocksOf(object):
@@ -1326,17 +1606,24 @@ def test_blocks_of_1(self):
         view = util.blocks_of(arr, 1, 1)
 
         assert view.shape == (4, 1, 1)
-        assert_array_almost_equal(view,
-                                  np.array([[[0]], [[5]], [[10]], [[15]]]))
+        assert_array_almost_equal(
+            view, np.array([[[0]], [[5]], [[10]], [[15]]])
+        )
 
         # Change my view, check changes are reflected in arr
         view[:] = 1001
 
-        assert_array_almost_equal(arr,
-                                  np.array([[1001, 1, 2, 3],
-                                            [4, 1001, 6, 7],
-                                            [8, 9, 1001, 11],
-                                            [12, 13, 14, 1001]]))
+        assert_array_almost_equal(
+            arr,
+            np.array(
+                [
+                    [1001, 1, 2, 3],
+                    [4, 1001, 6, 7],
+                    [8, 9, 1001, 11],
+                    [12, 13, 14, 1001],
+                ]
+            ),
+        )
 
     def test_blocks_of_2(self):
         arr = np.arange(16).reshape(4, 4)
@@ -1344,17 +1631,24 @@ def test_blocks_of_2(self):
         view = util.blocks_of(arr, 2, 2)
 
         assert view.shape == (2, 2, 2)
-        assert_array_almost_equal(view, np.array([[[0, 1], [4, 5]],
-                                                  [[10, 11], [14, 15]]]))
+        assert_array_almost_equal(
+            view, np.array([[[0, 1], [4, 5]], [[10, 11], [14, 15]]])
+        )
 
         view[0] = 100
         view[1] = 200
 
-        assert_array_almost_equal(arr,
-                                  np.array([[100, 100, 2, 3],
-                                            [100, 100, 6, 7],
-                                            [8, 9, 200, 200],
-                                            [12, 13, 200, 200]]))
+        assert_array_almost_equal(
+            arr,
+            np.array(
+                [
+                    [100, 100, 2, 3],
+                    [100, 100, 6, 7],
+                    [8, 9, 200, 200],
+                    [12, 13, 200, 200],
+                ]
+            ),
+        )
 
     def test_blocks_of_3(self):
         # testing non square array
@@ -1380,11 +1674,14 @@ def test_blocks_of_ValueError(self):
             util.blocks_of(arr[:, ::2], 2, 1)  # non-contiguous input
 
 
-@pytest.mark.parametrize('arr,answer', [
-    ([2, 3, 4, 7, 8, 9, 10, 15, 16], [[2, 3, 4], [7, 8, 9, 10], [15, 16]]),
-    ([11, 12, 13, 14, 15, 16], [[11, 12, 13, 14, 15, 16]]),
-    ([1, 2, 2, 2, 3, 6], [[1, 2, 2, 2, 3], [6]])
-])
+@pytest.mark.parametrize(
+    "arr,answer",
+    [
+        ([2, 3, 4, 7, 8, 9, 10, 15, 16], [[2, 3, 4], [7, 8, 9, 10], [15, 16]]),
+        ([11, 12, 13, 14, 15, 16], [[11, 12, 13, 14, 15, 16]]),
+        ([1, 2, 2, 2, 3, 6], [[1, 2, 2, 2, 3], [6]]),
+    ],
+)
 def test_group_same_or_consecutive_integers(arr, answer):
     assert_equal(util.group_same_or_consecutive_integers(arr), answer)
 
@@ -1397,22 +1694,22 @@ def ns():
 
     def test_getitem(self, ns):
         ns.this = 42
-        assert ns['this'] == 42
+        assert ns["this"] == 42
 
     def test_getitem_KeyError(self, ns):
         with pytest.raises(KeyError):
-            dict.__getitem__(ns, 'this')
+            dict.__getitem__(ns, "this")
 
     def test_setitem(self, ns):
-        ns['this'] = 42
+        ns["this"] = 42
 
-        assert ns['this'] == 42
+        assert ns["this"] == 42
 
     def test_delitem(self, ns):
-        ns['this'] = 42
-        assert 'this' in ns
-        del ns['this']
-        assert 'this' not in ns
+        ns["this"] = 42
+        assert "this" in ns
+        del ns["this"]
+        assert "this" not in ns
 
     def test_delitem_AttributeError(self, ns):
         with pytest.raises(AttributeError):
@@ -1424,55 +1721,58 @@ def test_setattr(self, ns):
         assert ns.this == 42
 
     def test_getattr(self, ns):
-        ns['this'] = 42
+        ns["this"] = 42
 
         assert ns.this == 42
 
     def test_getattr_AttributeError(self, ns):
         with pytest.raises(AttributeError):
-            getattr(ns, 'this')
+            getattr(ns, "this")
 
     def test_delattr(self, ns):
-        ns['this'] = 42
+        ns["this"] = 42
 
-        assert 'this' in ns
+        assert "this" in ns
         del ns.this
-        assert 'this' not in ns
+        assert "this" not in ns
 
     def test_eq(self, ns):
-        ns['this'] = 42
+        ns["this"] = 42
 
         ns2 = util.Namespace()
-        ns2['this'] = 42
+        ns2["this"] = 42
 
         assert ns == ns2
 
     def test_len(self, ns):
         assert len(ns) == 0
-        ns['this'] = 1
-        ns['that'] = 2
+        ns["this"] = 1
+        ns["that"] = 2
         assert len(ns) == 2
 
     def test_iter(self, ns):
-        ns['this'] = 12
-        ns['that'] = 24
-        ns['other'] = 48
+        ns["this"] = 12
+        ns["that"] = 24
+        ns["other"] = 48
 
         seen = []
         for val in ns:
             seen.append(val)
-        for val in ['this', 'that', 'other']:
+        for val in ["this", "that", "other"]:
             assert val in seen
 
 
 class TestTruncateInteger(object):
-    @pytest.mark.parametrize('a, b', [
-        ((1234, 1), 4),
-        ((1234, 2), 34),
-        ((1234, 3), 234),
-        ((1234, 4), 1234),
-        ((1234, 5), 1234),
-    ])
+    @pytest.mark.parametrize(
+        "a, b",
+        [
+            ((1234, 1), 4),
+            ((1234, 2), 34),
+            ((1234, 3), 234),
+            ((1234, 4), 1234),
+            ((1234, 5), 1234),
+        ],
+    )
     def test_ltruncate_int(self, a, b):
         assert util.ltruncate_int(*a) == b
 
@@ -1480,9 +1780,9 @@ def test_ltruncate_int(self, a, b):
 class TestFlattenDict(object):
     def test_flatten_dict(self):
         d = {
-            'A': {1: ('a', 'b', 'c')},
-            'B': {2: ('c', 'd', 'e')},
-            'C': {3: ('f', 'g', 'h')}
+            "A": {1: ("a", "b", "c")},
+            "B": {2: ("c", "d", "e")},
+            "C": {3: ("f", "g", "h")},
         }
         result = util.flatten_dict(d)
 
@@ -1495,53 +1795,57 @@ def test_flatten_dict(self):
 
 
 class TestStaticVariables(object):
-    """Tests concerning the decorator @static_variables
-    """
+    """Tests concerning the decorator @static_variables"""
 
     def test_static_variables(self):
         x = [0]
 
-        @static_variables(foo=0, bar={'test': x})
+        @static_variables(foo=0, bar={"test": x})
         def myfunc():
             assert myfunc.foo == 0
             assert type(myfunc.bar) is type(dict())
-            if 'test2' not in myfunc.bar:
-                myfunc.bar['test2'] = "a"
+            if "test2" not in myfunc.bar:
+                myfunc.bar["test2"] = "a"
             else:
-                myfunc.bar['test2'] += "a"
-            myfunc.bar['test'][0] += 1
-            return myfunc.bar['test']
+                myfunc.bar["test2"] += "a"
+            myfunc.bar["test"][0] += 1
+            return myfunc.bar["test"]
 
-        assert hasattr(myfunc, 'foo')
-        assert hasattr(myfunc, 'bar')
+        assert hasattr(myfunc, "foo")
+        assert hasattr(myfunc, "bar")
 
         y = myfunc()
         assert y is x
         assert x[0] == 1
-        assert myfunc.bar['test'][0] == 1
-        assert myfunc.bar['test2'] == "a"
+        assert myfunc.bar["test"][0] == 1
+        assert myfunc.bar["test2"] == "a"
 
         x = [0]
         y = myfunc()
         assert y is not x
-        assert myfunc.bar['test'][0] == 2
-        assert myfunc.bar['test2'] == "aa"
+        assert myfunc.bar["test"][0] == 2
+        assert myfunc.bar["test2"] == "aa"
 
 
 class TestWarnIfNotUnique(object):
-    """Tests concerning the decorator @warn_if_not_unique
-    """
+    """Tests concerning the decorator @warn_if_not_unique"""
 
     def warn_msg(self, func, group, group_name):
-        msg = ("{}.{}(): {} {} contains duplicates. Results might be "
-               "biased!".format(group.__class__.__name__, func.__name__,
-                                group_name, group.__repr__()))
+        msg = (
+            "{}.{}(): {} {} contains duplicates. Results might be "
+            "biased!".format(
+                group.__class__.__name__,
+                func.__name__,
+                group_name,
+                group.__repr__(),
+            )
+        )
         return msg
 
     def test_warn_if_not_unique(self, atoms):
         # Check that the warn_if_not_unique decorator has a "static variable"
         # warn_if_not_unique.warned:
-        assert hasattr(warn_if_not_unique, 'warned')
+        assert hasattr(warn_if_not_unique, "warned")
         assert warn_if_not_unique.warned is False
 
     def test_warn_if_not_unique_once_outer(self, atoms):
@@ -1667,8 +1971,11 @@ def test_warn_if_not_unique_unnamed(self, atoms):
         def func(group):
             pass
 
-        msg = self.warn_msg(func, atoms + atoms[0],
-                            "'unnamed {}'".format(atoms.__class__.__name__))
+        msg = self.warn_msg(
+            func,
+            atoms + atoms[0],
+            "'unnamed {}'".format(atoms.__class__.__name__),
+        )
         with pytest.warns(DuplicateWarning) as w:
             func(atoms + atoms[0])
             # Check warning message:
@@ -1702,8 +2009,7 @@ def func(group):
 
 
 class TestCheckCoords(object):
-    """Tests concerning the decorator @check_coords
-    """
+    """Tests concerning the decorator @check_coords"""
 
     prec = 6
 
@@ -1712,7 +2018,7 @@ def test_default_options(self):
         b_in = np.ones(3, dtype=np.float32)
         b_in2 = np.ones((2, 3), dtype=np.float32)
 
-        @check_coords('a', 'b')
+        @check_coords("a", "b")
         def func(a, b):
             # check that enforce_copy is True by default:
             assert a is not a_in
@@ -1739,24 +2045,36 @@ def atomgroup(self):
         return u.atoms
 
     # check atomgroup handling with every option except allow_atomgroup
-    @pytest.mark.parametrize('enforce_copy', [True, False])
-    @pytest.mark.parametrize('enforce_dtype', [True, False])
-    @pytest.mark.parametrize('allow_single', [True, False])
-    @pytest.mark.parametrize('convert_single', [True, False])
-    @pytest.mark.parametrize('reduce_result_if_single', [True, False])
-    @pytest.mark.parametrize('check_lengths_match', [True, False])
-    def test_atomgroup(self, atomgroup, enforce_copy, enforce_dtype,
-                       allow_single, convert_single, reduce_result_if_single,
-                       check_lengths_match):
+    @pytest.mark.parametrize("enforce_copy", [True, False])
+    @pytest.mark.parametrize("enforce_dtype", [True, False])
+    @pytest.mark.parametrize("allow_single", [True, False])
+    @pytest.mark.parametrize("convert_single", [True, False])
+    @pytest.mark.parametrize("reduce_result_if_single", [True, False])
+    @pytest.mark.parametrize("check_lengths_match", [True, False])
+    def test_atomgroup(
+        self,
+        atomgroup,
+        enforce_copy,
+        enforce_dtype,
+        allow_single,
+        convert_single,
+        reduce_result_if_single,
+        check_lengths_match,
+    ):
         ag1 = atomgroup
         ag2 = atomgroup
 
-        @check_coords('ag1', 'ag2', enforce_copy=enforce_copy,
-                      enforce_dtype=enforce_dtype, allow_single=allow_single,
-                      convert_single=convert_single,
-                      reduce_result_if_single=reduce_result_if_single,
-                      check_lengths_match=check_lengths_match,
-                      allow_atomgroup=True)
+        @check_coords(
+            "ag1",
+            "ag2",
+            enforce_copy=enforce_copy,
+            enforce_dtype=enforce_dtype,
+            allow_single=allow_single,
+            convert_single=convert_single,
+            reduce_result_if_single=reduce_result_if_single,
+            check_lengths_match=check_lengths_match,
+            allow_atomgroup=True,
+        )
         def func(ag1, ag2):
             assert_allclose(ag1, ag2)
             assert isinstance(ag1, np.ndarray)
@@ -1766,11 +2084,11 @@ def func(ag1, ag2):
 
         res = func(ag1, ag2)
 
-        assert_allclose(res, atomgroup.positions*2)
+        assert_allclose(res, atomgroup.positions * 2)
 
     def test_atomgroup_not_allowed(self, atomgroup):
 
-        @check_coords('ag1', allow_atomgroup=False)
+        @check_coords("ag1", allow_atomgroup=False)
         def func(ag1):
             return ag1
 
@@ -1779,7 +2097,7 @@ def func(ag1):
 
     def test_atomgroup_not_allowed_default(self, atomgroup):
 
-        @check_coords('ag1')
+        @check_coords("ag1")
         def func_default(ag1):
             return ag1
 
@@ -1793,7 +2111,7 @@ def test_enforce_copy(self):
         c_2d = np.zeros((1, 6), dtype=np.float32)[:, ::2]
         d_2d = np.zeros((1, 3), dtype=np.int64)
 
-        @check_coords('a', 'b', 'c', 'd', enforce_copy=False)
+        @check_coords("a", "b", "c", "d", enforce_copy=False)
         def func(a, b, c, d):
             # Assert that if enforce_copy is False:
             # no copy is made if input shape, order, and dtype are correct:
@@ -1824,7 +2142,7 @@ def func(a, b, c, d):
 
     def test_no_allow_single(self):
 
-        @check_coords('a', allow_single=False)
+        @check_coords("a", allow_single=False)
         def func(a):
             pass
 
@@ -1836,7 +2154,7 @@ def test_no_convert_single(self):
 
         a_1d = np.arange(-3, 0, dtype=np.float32)
 
-        @check_coords('a', enforce_copy=False, convert_single=False)
+        @check_coords("a", enforce_copy=False, convert_single=False)
         def func(a):
             # assert no conversion and no copy were performed:
             assert a is a_1d
@@ -1852,8 +2170,12 @@ def test_no_reduce_result_if_single(self):
         a_1d = np.zeros(3, dtype=np.float32)
 
         # Test without shape conversion:
-        @check_coords('a', enforce_copy=False, convert_single=False,
-                      reduce_result_if_single=False)
+        @check_coords(
+            "a",
+            enforce_copy=False,
+            convert_single=False,
+            reduce_result_if_single=False,
+        )
         def func(a):
             return a
 
@@ -1862,7 +2184,7 @@ def func(a):
         assert res is a_1d
 
         # Test with shape conversion:
-        @check_coords('a', enforce_copy=False, reduce_result_if_single=False)
+        @check_coords("a", enforce_copy=False, reduce_result_if_single=False)
         def func(a):
             return a
 
@@ -1875,7 +2197,7 @@ def test_no_check_lengths_match(self):
         a_2d = np.zeros((1, 3), dtype=np.float32)
         b_2d = np.zeros((3, 3), dtype=np.float32)
 
-        @check_coords('a', 'b', enforce_copy=False, check_lengths_match=False)
+        @check_coords("a", "b", enforce_copy=False, check_lengths_match=False)
         def func(a, b):
             return a, b
 
@@ -1889,52 +2211,59 @@ def test_atomgroup_mismatched_lengths(self):
         ag1 = u.select_atoms("index 0 to 10")
         ag2 = u.atoms
 
-        @check_coords('ag1', 'ag2', check_lengths_match=True,
-                      allow_atomgroup=True)
+        @check_coords(
+            "ag1", "ag2", check_lengths_match=True, allow_atomgroup=True
+        )
         def func(ag1, ag2):
 
             return ag1, ag2
 
-        with pytest.raises(ValueError, match="must contain the same number of "
-                           "coordinates"):
+        with pytest.raises(
+            ValueError, match="must contain the same number of coordinates"
+        ):
             _, _ = func(ag1, ag2)
 
     def test_invalid_input(self):
 
-        a_inv_dtype = np.array([['hello', 'world', '!']])
-        a_inv_type = [[0., 0., 0.]]
+        a_inv_dtype = np.array([["hello", "world", "!"]])
+        a_inv_type = [[0.0, 0.0, 0.0]]
         a_inv_shape_1d = np.zeros(6, dtype=np.float32)
         a_inv_shape_2d = np.zeros((3, 2), dtype=np.float32)
 
-        @check_coords('a')
+        @check_coords("a")
         def func(a):
             pass
 
         with pytest.raises(TypeError) as err:
             func(a_inv_dtype)
-            assert err.msg.startswith("func(): a.dtype must be convertible to "
-                                      "float32, got ")
+            assert err.msg.startswith(
+                "func(): a.dtype must be convertible to " "float32, got "
+            )
 
         with pytest.raises(TypeError) as err:
             func(a_inv_type)
-            assert err.msg == ("func(): Parameter 'a' must be a numpy.ndarray, "
-                               "got <class 'list'>.")
+            assert err.msg == (
+                "func(): Parameter 'a' must be a numpy.ndarray, "
+                "got <class 'list'>."
+            )
 
         with pytest.raises(ValueError) as err:
             func(a_inv_shape_1d)
-            assert err.msg == ("func(): a.shape must be (3,) or (n, 3), got "
-                               "(6,).")
+            assert err.msg == (
+                "func(): a.shape must be (3,) or (n, 3), got " "(6,)."
+            )
 
         with pytest.raises(ValueError) as err:
             func(a_inv_shape_2d)
-            assert err.msg == ("func(): a.shape must be (3,) or (n, 3), got "
-                               "(3, 2).")
+            assert err.msg == (
+                "func(): a.shape must be (3,) or (n, 3), got " "(3, 2)."
+            )
 
     def test_usage_with_kwargs(self):
 
         a_2d = np.zeros((1, 3), dtype=np.float32)
 
-        @check_coords('a', enforce_copy=False)
+        @check_coords("a", enforce_copy=False)
         def func(a, b, c=0):
             return a, b, c
 
@@ -1946,7 +2275,7 @@ def func(a, b, c=0):
 
     def test_wrong_func_call(self):
 
-        @check_coords('a', enforce_copy=False)
+        @check_coords("a", enforce_copy=False)
         def func(a, b, c=0):
             pass
 
@@ -2000,32 +2329,41 @@ def func():
 
         # usage without arguments:
         with pytest.raises(ValueError) as err:
+
             @check_coords()
             def func():
                 pass
 
-            assert err.msg == ("Decorator check_coords() cannot be used "
-                               "without positional arguments.")
+            assert err.msg == (
+                "Decorator check_coords() cannot be used "
+                "without positional arguments."
+            )
 
         # usage with defaultarg:
         with pytest.raises(ValueError) as err:
-            @check_coords('a')
+
+            @check_coords("a")
             def func(a=1):
                 pass
 
-            assert err.msg == ("In decorator check_coords(): Name 'a' doesn't "
-                               "correspond to any positional argument of the "
-                               "decorated function func().")
+            assert err.msg == (
+                "In decorator check_coords(): Name 'a' doesn't "
+                "correspond to any positional argument of the "
+                "decorated function func()."
+            )
 
         # usage with invalid parameter name:
         with pytest.raises(ValueError) as err:
-            @check_coords('b')
+
+            @check_coords("b")
             def func(a):
                 pass
 
-            assert err.msg == ("In decorator check_coords(): Name 'b' doesn't "
-                               "correspond to any positional argument of the "
-                               "decorated function func().")
+            assert err.msg == (
+                "In decorator check_coords(): Name 'b' doesn't "
+                "correspond to any positional argument of the "
+                "decorated function func()."
+            )
 
 
 @pytest.mark.parametrize("old_name", (None, "MDAnalysis.Universe"))
@@ -2050,10 +2388,14 @@ def AlternateUniverse(anything):
         """
         return True
 
-    oldfunc = util.deprecate(AlternateUniverse, old_name=old_name,
-                             new_name=new_name,
-                             release=release, remove=remove,
-                             message=message)
+    oldfunc = util.deprecate(
+        AlternateUniverse,
+        old_name=old_name,
+        new_name=new_name,
+        release=release,
+        remove=remove,
+        message=message,
+    )
     # match_expr changed to match (Issue 2329)
     with pytest.warns(DeprecationWarning, match="`.+` is deprecated"):
         oldfunc(42)
@@ -2071,13 +2413,15 @@ def AlternateUniverse(anything):
             default_message = "`{0}` is deprecated!".format(name)
         else:
             default_message = "`{0}` is deprecated, use `{1}` instead!".format(
-                name, new_name)
+                name, new_name
+            )
         deprecation_line_2 = default_message
     assert re.search(deprecation_line_2, doc)
 
     if remove:
         deprecation_line_3 = "`{0}` will be removed in release {1}".format(
-            name,  remove)
+            name, remove
+        )
         assert re.search(deprecation_line_3, doc)
 
     # check that the old docs are still present
@@ -2092,16 +2436,21 @@ def test_deprecate_missing_release_ValueError():
 def test_set_function_name(name="bar"):
     def foo():
         pass
+
     util._set_function_name(foo, name)
     assert foo.__name__ == name
 
 
-@pytest.mark.parametrize("text",
-                         ("",
-                          "one line text",
-                          "  one line with leading space",
-                          "multiline\n\n   with some\n   leading space",
-                          "   multiline\n\n   with all\n   leading space"))
+@pytest.mark.parametrize(
+    "text",
+    (
+        "",
+        "one line text",
+        "  one line with leading space",
+        "multiline\n\n   with some\n   leading space",
+        "   multiline\n\n   with all\n   leading space",
+    ),
+)
 def test_dedent_docstring(text):
     doc = util.dedent_docstring(text)
     for line in doc.splitlines():
@@ -2112,56 +2461,61 @@ class TestCheckBox(object):
 
     prec = 6
     ref_ortho = np.ones(3, dtype=np.float32)
-    ref_tri_vecs = np.array([[1, 0, 0], [0, 1, 0], [0, 2 ** 0.5, 2 ** 0.5]],
-                            dtype=np.float32)
-
-    @pytest.mark.parametrize('box',
-                             ([1, 1, 1, 90, 90, 90],
-                              (1, 1, 1, 90, 90, 90),
-                                 ['1', '1', 1, 90, '90', '90'],
-                                 ('1', '1', 1, 90, '90', '90'),
-                                 np.array(['1', '1', 1, 90, '90', '90']),
-                                 np.array([1, 1, 1, 90, 90, 90],
-                                          dtype=np.float32),
-                                 np.array([1, 1, 1, 90, 90, 90],
-                                          dtype=np.float64),
-                                 np.array([1, 1, 1, 1, 1, 1,
-                                           90, 90, 90, 90, 90, 90],
-                                          dtype=np.float32)[::2]))
+    ref_tri_vecs = np.array(
+        [[1, 0, 0], [0, 1, 0], [0, 2**0.5, 2**0.5]], dtype=np.float32
+    )
+
+    @pytest.mark.parametrize(
+        "box",
+        (
+            [1, 1, 1, 90, 90, 90],
+            (1, 1, 1, 90, 90, 90),
+            ["1", "1", 1, 90, "90", "90"],
+            ("1", "1", 1, 90, "90", "90"),
+            np.array(["1", "1", 1, 90, "90", "90"]),
+            np.array([1, 1, 1, 90, 90, 90], dtype=np.float32),
+            np.array([1, 1, 1, 90, 90, 90], dtype=np.float64),
+            np.array(
+                [1, 1, 1, 1, 1, 1, 90, 90, 90, 90, 90, 90], dtype=np.float32
+            )[::2],
+        ),
+    )
     def test_check_box_ortho(self, box):
         boxtype, checked_box = util.check_box(box)
-        assert boxtype == 'ortho'
+        assert boxtype == "ortho"
         assert_allclose(checked_box, self.ref_ortho)
         assert checked_box.dtype == np.float32
-        assert checked_box.flags['C_CONTIGUOUS']
+        assert checked_box.flags["C_CONTIGUOUS"]
 
     def test_check_box_None(self):
         with pytest.raises(ValueError, match="Box is None"):
             util.check_box(None)
 
-    @pytest.mark.parametrize('box',
-                             ([1, 1, 2, 45, 90, 90],
-                              (1, 1, 2, 45, 90, 90),
-                                 ['1', '1', 2, 45, '90', '90'],
-                                 ('1', '1', 2, 45, '90', '90'),
-                                 np.array(['1', '1', 2, 45, '90', '90']),
-                                 np.array([1, 1, 2, 45, 90, 90],
-                                          dtype=np.float32),
-                                 np.array([1, 1, 2, 45, 90, 90],
-                                          dtype=np.float64),
-                                 np.array([1, 1, 1, 1, 2, 2,
-                                           45, 45, 90, 90, 90, 90],
-                                          dtype=np.float32)[::2]))
+    @pytest.mark.parametrize(
+        "box",
+        (
+            [1, 1, 2, 45, 90, 90],
+            (1, 1, 2, 45, 90, 90),
+            ["1", "1", 2, 45, "90", "90"],
+            ("1", "1", 2, 45, "90", "90"),
+            np.array(["1", "1", 2, 45, "90", "90"]),
+            np.array([1, 1, 2, 45, 90, 90], dtype=np.float32),
+            np.array([1, 1, 2, 45, 90, 90], dtype=np.float64),
+            np.array(
+                [1, 1, 1, 1, 2, 2, 45, 45, 90, 90, 90, 90], dtype=np.float32
+            )[::2],
+        ),
+    )
     def test_check_box_tri_vecs(self, box):
         boxtype, checked_box = util.check_box(box)
-        assert boxtype == 'tri_vecs'
+        assert boxtype == "tri_vecs"
         assert_almost_equal(checked_box, self.ref_tri_vecs, self.prec)
         assert checked_box.dtype == np.float32
-        assert checked_box.flags['C_CONTIGUOUS']
+        assert checked_box.flags["C_CONTIGUOUS"]
 
     def test_check_box_wrong_data(self):
         with pytest.raises(ValueError):
-            wrongbox = ['invalid', 1, 1, 90, 90, 90]
+            wrongbox = ["invalid", 1, 1, 90, 90, 90]
             boxtype, checked_box = util.check_box(wrongbox)
 
     def test_check_box_wrong_shape(self):
@@ -2174,6 +2528,7 @@ class StoredClass:
     """
     A simple class that takes positional and keyword arguments of various types
     """
+
     @store_init_arguments
     def __init__(self, a, b, /, *args, c="foo", d="bar", e="foobar", **kwargs):
         self.a = a
@@ -2186,22 +2541,21 @@ def __init__(self, a, b, /, *args, c="foo", d="bar", e="foobar", **kwargs):
 
     def copy(self):
         kwargs = copy.deepcopy(self._kwargs)
-        args = kwargs.pop('args', tuple())
-        new = self.__class__(kwargs.pop('a'), kwargs.pop('b'),
-                             *args, **kwargs)
+        args = kwargs.pop("args", tuple())
+        new = self.__class__(kwargs.pop("a"), kwargs.pop("b"), *args, **kwargs)
         return new
 
 
 class TestStoreInitArguments:
     def test_store_arguments_default(self):
-        store = StoredClass('parsnips', ['roast'])
-        assert store.a == store._kwargs['a'] == 'parsnips'
-        assert store.b is store._kwargs['b'] == ['roast']
-        assert store._kwargs['c'] == 'foo'
-        assert store._kwargs['d'] == 'bar'
-        assert store._kwargs['e'] == 'foobar'
-        assert 'args' not in store._kwargs.keys()
-        assert 'kwargs' not in store._kwargs.keys()
+        store = StoredClass("parsnips", ["roast"])
+        assert store.a == store._kwargs["a"] == "parsnips"
+        assert store.b is store._kwargs["b"] == ["roast"]
+        assert store._kwargs["c"] == "foo"
+        assert store._kwargs["d"] == "bar"
+        assert store._kwargs["e"] == "foobar"
+        assert "args" not in store._kwargs.keys()
+        assert "kwargs" not in store._kwargs.keys()
         assert store.args is ()
 
         store2 = store.copy()
@@ -2209,29 +2563,39 @@ def test_store_arguments_default(self):
         assert store2.__dict__["b"] is not store.__dict__["b"]
 
     def test_store_arguments_withkwargs(self):
-        store = StoredClass('parsnips', 'roast', 'honey', 'glaze', c='richard',
-                            d='has', e='a', f='recipe', g='allegedly')
-        assert store.a == store._kwargs['a'] == "parsnips"
-        assert store.b == store._kwargs['b'] == "roast"
-        assert store.c == store._kwargs['c'] == "richard"
-        assert store.d == store._kwargs['d'] == "has"
-        assert store.e == store._kwargs['e'] == "a"
-        assert store.kwargs['f'] == store._kwargs['f'] == "recipe"
-        assert store.kwargs['g'] == store._kwargs['g'] == "allegedly"
-        assert store.args[0] == store._kwargs['args'][0] == "honey"
-        assert store.args[1] == store._kwargs['args'][1] == "glaze"
+        store = StoredClass(
+            "parsnips",
+            "roast",
+            "honey",
+            "glaze",
+            c="richard",
+            d="has",
+            e="a",
+            f="recipe",
+            g="allegedly",
+        )
+        assert store.a == store._kwargs["a"] == "parsnips"
+        assert store.b == store._kwargs["b"] == "roast"
+        assert store.c == store._kwargs["c"] == "richard"
+        assert store.d == store._kwargs["d"] == "has"
+        assert store.e == store._kwargs["e"] == "a"
+        assert store.kwargs["f"] == store._kwargs["f"] == "recipe"
+        assert store.kwargs["g"] == store._kwargs["g"] == "allegedly"
+        assert store.args[0] == store._kwargs["args"][0] == "honey"
+        assert store.args[1] == store._kwargs["args"][1] == "glaze"
 
         store2 = store.copy()
         assert store2.__dict__ == store.__dict__
 
 
-@pytest.mark.xfail(os.name == 'nt',
-                   reason="util.which does not get right binary on Windows")
+@pytest.mark.xfail(
+    os.name == "nt", reason="util.which does not get right binary on Windows"
+)
 def test_which():
     wmsg = "This method is deprecated"
 
     with pytest.warns(DeprecationWarning, match=wmsg):
-        assert util.which('python') == shutil.which('python')
+        assert util.which("python") == shutil.which("python")
 
 
 @pytest.mark.parametrize(
diff --git a/testsuite/pyproject.toml b/testsuite/pyproject.toml
index b53e8782e1..359430e131 100644
--- a/testsuite/pyproject.toml
+++ b/testsuite/pyproject.toml
@@ -160,6 +160,7 @@ include = '''
 (
 setup\.py
 | MDAnalysisTests/auxiliary/.*\.py
+| MDAnalysisTests/lib/.*\.py
 | MDAnalysisTests/transformations/.*\.py
 )
 '''