diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 000000000..a8be244a7
Binary files /dev/null and b/.DS_Store differ
diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md
index fd1229d0b..dfb448326 100644
--- a/.github/CHANGELOG.md
+++ b/.github/CHANGELOG.md
@@ -2,6 +2,7 @@
 
 ### New features
 
+* Adds the function `hafnian_sparse` to compute sparse loop hafnians (pure Python implementation). [#245](https://github.com/XanaduAI/thewalrus/pull/245)
 * ``symplectic.squeezing`` function is now generalized to multiple modes of single mode squeezing [#249](https://github.com/XanaduAI/thewalrus/pull/249)
@@ -11,10 +12,9 @@
 * Adds the function `hafnian_banded` to calculate the hafnian of a banded matrix [#246](https://github.com/XanaduAI/thewalrus/pull/246)
-
 ### Improvements
 
-* Speeds up the calculation of photon number variances/covariances [#224](https://github.com/XanaduAI/thewalrus/pull/224)
+* Speeds up the calculation of photon number variances/covariances [#244](https://github.com/XanaduAI/thewalrus/pull/244)
 
 ### Bug fixes
@@ -24,8 +24,7 @@
 
 This release contains contributions from (in alphabetical order):
 
-Jake Bulmer, Timjan Kalajdzievski, Nicolas Quesada
-
+Jake Bulmer, Timjan Kalajdzievski, Filippo Miatto, Nicolas Quesada
 
 ---
diff --git a/docs/algorithms.rst b/docs/algorithms.rst
index e2bb3d486..8c57ac483 100644
--- a/docs/algorithms.rst
+++ b/docs/algorithms.rst
@@ -202,4 +202,17 @@ Consider now the :math:`r`-partitions of the integer :math:`n`, there are :math:
 
 .. tip::
 
-   *Implemented as* :func:`thewalrus.low_rank_hafnian`. This function takes as argument the matrix :math:`\bm{G} \in \mathbb{C}^{n \times r}` and returns the value of the hafnian of the matrix :math:`\bm{A} = \bm{G} \bm{G}^T`
\ No newline at end of file
+   *Implemented as* :func:`thewalrus.low_rank_hafnian`. This function takes as argument the matrix :math:`\bm{G} \in \mathbb{C}^{n \times r}` and returns the value of the hafnian of the matrix :math:`\bm{A} = \bm{G} \bm{G}^T`.
+
+Sparse and banded algorithms
+----------------------------
+These algorithms take advantage of the Laplace expansion of the hafnian to calculate hafnians of matrices with many zeros. These matrices can be either banded or sparse. The Laplace expansion of the hafnian is given by
+
+.. math::
+   \text{haf}(\bm{B}) = \sum_{j \neq i} B_{i,j} \text{haf}(\bm{B}_{-i-j}),
+
+where :math:`i` is a fixed index and :math:`\bm{B}_{-i-j}` is the matrix obtained from :math:`\bm{B}` by removing rows and columns :math:`i` and :math:`j`. The banded algorithm was introduced in Sec. V of Qi *et al.* :cite:`qi2020efficient`.
+
+.. tip::
+
+   *Implemented as* :func:`thewalrus.hafnian_sparse` and :func:`thewalrus.hafnian_banded`. The loop hafnian calculation can be done by setting the option ``loop=True``.
\ No newline at end of file
diff --git a/thewalrus/__init__.py b/thewalrus/__init__.py
index 380e9cb23..13afb8f36 100644
--- a/thewalrus/__init__.py
+++ b/thewalrus/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2019 Xanadu Quantum Technologies Inc.
+# Copyright 2021 Xanadu Quantum Technologies Inc.
 
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -36,9 +36,9 @@
     This algorithm scales like :math:`\mathcal{O}(n^4 2^{n/2})`. This algorithm does not currently support the loop hafnian.
 
-Repeating hafnian algorithm
+Repeated hafnian algorithm
     The algorithm described in *From moments of sum to moments of product*, :cite:`kan2008moments`.
-    This method is more efficient for matrices with repeated rows and columns, and supports caclulation of
+    This method is more efficient for matrices with repeated rows and columns, and supports calculation of
     the loop hafnian.
 
 Approximate hafnian algorithm
@@ -69,6 +69,11 @@
     *Efficient sampling from shallow Gaussian quantum-optical circuits with local interactions*, :cite:`qi2020efficient`.
 
+Sparse hafnian algorithm
+    An algorithm that calculates the hafnian of a sparse matrix by taking advantage of the Laplace expansion and memoization, storing
+    only the relevant paths that contribute non-zero values to the final calculation.
+
+
 Python wrappers
 ---------------
@@ -108,8 +113,10 @@
     hafnian,
     hafnian_repeated,
     reduction,
+    hafnian_sparse,
     hafnian_banded,
 )
+
 from ._low_rank_haf import low_rank_hafnian
 from ._hermite_multidimensional import hafnian_batched, hermite_multidimensional
 from ._permanent import perm, perm_complex, perm_real, permanent_repeated
@@ -121,6 +128,7 @@
     "hafnian",
     "hafnian_repeated",
     "hafnian_batched",
+    "hafnian_sparse",
     "hafnian_banded",
     "tor",
     "perm",
diff --git a/thewalrus/_hafnian.py b/thewalrus/_hafnian.py
index ccf5f3c65..a1fbb0584 100644
--- a/thewalrus/_hafnian.py
+++ b/thewalrus/_hafnian.py
@@ -1,4 +1,4 @@
-# Copyright 2019 Xanadu Quantum Technologies Inc.
+# Copyright 2021 Xanadu Quantum Technologies Inc.
 
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,8 +14,10 @@
 """
 Hafnian Python interface
 """
-from itertools import chain, combinations
+from functools import lru_cache
+from collections import Counter
+from itertools import chain, combinations
 
 import numpy as np
 
 from .libwalrus import haf_complex, haf_int, haf_real, haf_rpt_complex, haf_rpt_real
@@ -210,6 +212,50 @@ def hafnian(
     )
 
 
+def hafnian_sparse(A, D=None, loop=False):
+    r"""Returns the hafnian of a sparse symmetric matrix.
+
+    This pure Python implementation is very slow on full matrices, but becomes faster the sparser the matrix is.
+    As a rule of thumb, the crossover in runtime with respect to :func:`~.hafnian` happens around 50% sparsity.
+
+    Args:
+        A (array): the symmetric matrix of which we want to compute the hafnian
+        D (set): set of indices that identify a submatrix. If ``None`` (default) it computes
+            the hafnian of the whole matrix.
+        loop (bool): If ``True``, the loop hafnian is returned. Default is ``False``.
+
+    Returns:
+        (float) hafnian of ``A`` or of the submatrix of ``A`` defined by the set of indices ``D``.
+    """
+    if D is None:
+        D = frozenset(range(len(A)))
+    else:
+        D = frozenset(D)
+
+    if not loop:
+        A = A - np.diag(np.diag(A))
+
+    if np.allclose(A, 0):
+        return 0.0
+
+    r, _ = np.nonzero(A)
+    m = max(Counter(r).values())  # max number of nonzero entries per row/column
+
+    @lru_cache(maxsize=2 ** m)
+    def indices(d, k):
+        return d.intersection(set(np.nonzero(A[k])[0]))
+
+    @lru_cache(maxsize=2 ** m)
+    def lhaf(d: frozenset) -> float:
+        if not d:
+            return 1
+        d_ = set(d)
+        k = d_.pop()
+        return sum(A[i, k] * lhaf(frozenset(d_).difference({i})) for i in indices(d, k))
+
+    return lhaf(D)
+
+
 def hafnian_repeated(A, rpt, mu=None, loop=False, rtol=1e-05, atol=1e-08):
     r"""Returns the hafnian of matrix with repeated rows/columns.
diff --git a/thewalrus/tests/conftest.py b/thewalrus/tests/conftest.py
index 4af8feffa..fd0c3928d 100644
--- a/thewalrus/tests/conftest.py
+++ b/thewalrus/tests/conftest.py
@@ -41,10 +41,12 @@ def random_matrix(dtype):
     """Returns a random symmetric matrix of type dtype and of size n x n"""
 
-    def _wrapper(n):
+    def _wrapper(n, fill_factor=1.0):
         """wrapper function"""
         A = np.complex128(np.random.random([n, n]))
         A += 1j * np.random.random([n, n])
+        mask = np.random.binomial(1, p=fill_factor, size=(n, n))
+        A *= mask
         A += A.T
 
         if not np.issubdtype(dtype, np.complexfloating):
diff --git a/thewalrus/tests/test_hafnian.py b/thewalrus/tests/test_hafnian.py
index 7ede30198..15b28d64b 100644
--- a/thewalrus/tests/test_hafnian.py
+++ b/thewalrus/tests/test_hafnian.py
@@ -19,7 +19,8 @@
 from scipy.special import factorial as fac
 
 import thewalrus as hf
-from thewalrus import hafnian, reduction, hafnian_banded
+from thewalrus import hafnian, reduction, hafnian_sparse, hafnian_banded
+
 from thewalrus.libwalrus import haf_complex, haf_real, haf_int
 from thewalrus._hafnian import bandwidth
@@ -298,6 +299,17 @@ def test_diag(self, n):
         assert np.allclose(haf, expected)
 
+    @pytest.mark.parametrize("n", [2, 3, 5, 10, 15, 20])
+    @pytest.mark.parametrize("fill", [0.5, 0.2, 0.1, 0.05])
+    def test_valid_output(self, random_matrix, n, fill):
+        """Tests that sparse loop hafnian matches full implementation"""
+        A = random_matrix(n, fill_factor=fill)
+        assert np.allclose(hafnian_sparse(A, loop=True), hafnian_sparse(A, D=set(range(len(A))), loop=True))
+        assert np.allclose(hafnian_sparse(A, loop=False), hafnian_sparse(A, D=set(range(len(A))), loop=False))
+        assert np.allclose(hafnian(A, loop=True), hafnian_sparse(A, loop=True))
+        assert np.allclose(hafnian(A, loop=False), hafnian_sparse(A, loop=False))
+
+
     @pytest.mark.parametrize("n", [7, 8, 9, 10, 11, 12])
     @pytest.mark.parametrize("w", [1, 2, 3, 4, 5, 6])
     @pytest.mark.parametrize("loop", [True, False])
@@ -328,4 +340,3 @@ def test_bandwidth_zero(N):
     result = bandwidth(A)
     expected = 0
     assert np.allclose(result, expected)
-
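For reference, a minimal usage sketch of the `hafnian_sparse` interface introduced in this diff. It is not part of the changeset; it assumes thewalrus is installed with these changes, and the sparse test matrix built below is illustrative only (it mimics the `fill_factor` masking used in the `random_matrix` fixture).

```python
# Sketch: compare hafnian_sparse against the compiled hafnian on a small,
# randomly sparsified symmetric matrix.
import numpy as np
from thewalrus import hafnian, hafnian_sparse

n = 8        # even size, so the (loopless) hafnian is generically nonzero
fill = 0.2   # fraction of entries kept nonzero

rng = np.random.default_rng(42)
A = rng.random((n, n)) + 1j * rng.random((n, n))
A *= rng.binomial(1, fill, size=(n, n))  # randomly zero out entries
A += A.T                                 # symmetrize

# Sparse implementation agrees with the compiled hafnian, with and without loops
print(hafnian(A), hafnian_sparse(A))
print(hafnian(A, loop=True), hafnian_sparse(A, loop=True))

# Hafnian of the 4x4 submatrix selected by the index set D
print(hafnian_sparse(A, D={0, 1, 2, 3}))
```

As the docstring notes, the pure Python implementation only pays off for sparse inputs; for dense matrices the compiled `hafnian` remains the faster choice.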