Brute force samplers #152

Merged · 24 commits · Mar 18, 2020
6 changes: 6 additions & 0 deletions .github/CHANGELOG.md
@@ -4,6 +4,12 @@

* Adds the ability to calculate the mean number of photons in a given mode of a Gaussian state. [#148](https://github.com/XanaduAI/thewalrus/pull/148)

* Adds the ability to calculate the photon number distribution of a pure or mixed Gaussian state using `probabilities`. [#152](https://github.com/XanaduAI/thewalrus/pull/152)

* Adds the ability to update the photon number distribution of a state undergoing loss using `update_probabilities_with_loss`. [#152](https://github.com/XanaduAI/thewalrus/pull/152)

* Adds a brute force sampler, `photon_number_sampler`, that generates photon number samples from a given (multi-)mode photon number distribution (see the combined usage sketch below). [#152](https://github.com/XanaduAI/thewalrus/pull/152)

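Taken together these entries expose a small photon-number workflow. The following is an illustrative sketch (not part of the changelog or the diff) combining the three new functions on a two-mode squeezed vacuum, built exactly as in the tests further down:

import numpy as np
from thewalrus.quantum import probabilities, update_probabilities_with_loss
from thewalrus.samples import photon_number_sampler
from thewalrus.symplectic import two_mode_squeezing

hbar = 2.0
S = two_mode_squeezing(np.arcsinh(1.0), 0.0)
cov = hbar * S @ S.T / 2.0                                  # two-mode squeezed vacuum covariance
mu = np.zeros(4)

probs = probabilities(mu, cov, 10, hbar=hbar)               # joint photon-number PMF up to cutoff 10
lossy = update_probabilities_with_loss([0.9, 0.7], probs)   # 10% loss on mode 0, 30% on mode 1
samples = photon_number_sampler(lossy, 100)                 # 100 brute-force samples
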
### Improvements


91 changes: 91 additions & 0 deletions thewalrus/quantum.py
@@ -50,6 +50,8 @@
density_matrix_element
density_matrix
fock_tensor
probabilities
update_probabilities_with_loss

Details
^^^^^^^
@@ -69,6 +71,11 @@
.. autofunction::
fock_tensor

.. autofunction::
probabilities

.. autofunction::
update_probabilities_with_loss

Utility functions
-----------------
@@ -109,6 +116,7 @@
from scipy.optimize import root_scalar
from scipy.special import factorial as fac
from scipy.stats import nbinom
from numba import jit

from thewalrus.symplectic import expand, sympmat, is_symplectic

@@ -587,6 +595,7 @@ def mean_number_of_clicks(A):

Args:
A (array): rescaled adjacency matrix

Returns:
float: mean number of clicks
"""
@@ -963,6 +972,7 @@ def gen_multi_mode_dist(s, cutoff=50, padding_factor=2):
Args:
s (array): array of squeezing parameters
cutoff (int): Fock cutoff

Returns:
(array[int]): total photon number distribution
"""
@@ -1011,6 +1021,7 @@ def fock_tensor(S, alpha, cutoff, choi_r=np.arcsinh(1.0), check_symplectic=True,
choi_r (float): squeezing parameter used for the Choi expansion
check_symplectic (boolean): checks whether the input matrix is symplectic
sf_order (boolean): reshapes the tensor so that it follows the sf ordering of indices

    Returns:
(array): Tensor containing the Fock representation of the Gaussian unitary
"""
@@ -1048,3 +1059,83 @@ def fock_tensor(S, alpha, cutoff, choi_r=np.arcsinh(1.0), check_symplectic=True,
return tensor.transpose(sf_indexing)

return tensor


def probabilities(mu, cov, cutoff, hbar=2.0):
r"""Generate the Fock space probabilities of a Gaussian state up to a Fock space cutoff.

Args:
mu (array): vector of means of length ``2*n_modes``
cov (array): covariance matrix of shape ``[2*n_modes, 2*n_modes]``
cutoff (int): cutoff in Fock space
        hbar (float): value of :math:`\hbar` in the commutation relation :math:`[\hat{x}, \hat{p}]=i\hbar`

Returns:
(array): Fock space probabilities up to cutoff. The shape of this tensor is ``[cutoff]*num_modes``.
"""
if is_pure_cov(cov, hbar=hbar): # Check if the covariance matrix cov is pure
return np.abs(state_vector(mu, cov, cutoff=cutoff, hbar=hbar, check_purity=False)) ** 2
num_modes = len(mu) // 2
probs = np.zeros([cutoff] * num_modes)
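    # Mixed state: evaluate every diagonal density matrix element <i|rho|i> individually.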
for i in product(range(cutoff), repeat=num_modes):
probs[i] = np.maximum(
0.0, np.real_if_close(density_matrix_element(mu, cov, i, i, hbar=hbar))
)
# The maximum is needed because every now and then a probability is very close to zero from below.
return probs

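As a quick illustration (a sketch, not part of the module), the photon number distribution of a coherent state should be Poissonian. This assumes the usual thewalrus convention that a real displacement ``alpha`` corresponds to the mean vector ``[sqrt(2*hbar)*alpha, 0]``:

import numpy as np
from scipy.stats import poisson
from thewalrus.quantum import probabilities

hbar = 2.0
alpha = 0.8                                      # real displacement (assumed convention: mu = sqrt(2*hbar)*[Re(alpha), Im(alpha)])
mu = np.array([np.sqrt(2 * hbar) * alpha, 0.0])
cov = hbar * np.identity(2) / 2                  # vacuum covariance
probs = probabilities(mu, cov, 20, hbar=hbar)
assert np.allclose(probs, poisson.pmf(np.arange(20), alpha ** 2), atol=1e-6)
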

@jit(nopython=True)
def loss_mat(eta, cutoff): # pragma: no cover
r"""Constructs a binomial loss matrix with transmission eta up to n photons.

Args:
eta (float): Transmission coefficient. ``eta=0.0`` corresponds to complete loss and ``eta=1.0`` corresponds to no loss.
cutoff (int): cutoff in Fock space.

Returns:
        array: matrix of shape ``[cutoff, cutoff]`` representing the loss.
"""
    if eta < 0.0 or eta > 1.0:
        raise ValueError("The transmission parameter eta should be a number between 0 and 1.")

    # If there is full transmission, return the identity
if eta == 1.0:
return np.identity(cutoff)

# Otherwise construct the matrix elements recursively
lm = np.zeros((cutoff, cutoff))
mu = 1.0 - eta
lm[:, 0] = mu ** (np.arange(cutoff))
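    # Remaining entries follow from the ratio of consecutive binomial terms, so that lm[n, m] = binom(n, m) * eta**m * (1 - eta)**(n - m).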
for i in range(cutoff):
for j in range(1, i + 1):
lm[i, j] = lm[i, j - 1] * (eta / mu) * (i - j + 1) / (j)
return lm

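The recursion reproduces the binomial distribution: entry ``(n, m)`` is the probability that exactly ``m`` out of ``n`` photons survive a channel of transmission ``eta``. A small verification sketch against scipy (not part of the module):

import numpy as np
from scipy.special import binom
from thewalrus.quantum import loss_mat

eta, cutoff = 0.6, 5
n, m = np.indices((cutoff, cutoff))
expected = binom(n, m) * eta ** m * (1.0 - eta) ** (n - m)   # binom(n, m) vanishes for m > n
assert np.allclose(loss_mat(eta, cutoff), expected)
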

def update_probabilities_with_loss(etas, probs):
"""Given a list of transmissivities a tensor of probabilitites, calculate
an updated tensor of probabilities after loss is applied.

Args:
etas (list): List of transmissitivities describing the loss in each of the modes
probs (array): Array of probabilitites in the different modes

Returns:
        array: tensor of loss-updated probabilities with the same shape as ``probs``.
"""

probs_shape = probs.shape
if len(probs_shape) != len(etas):
raise ValueError("The list of transmission etas and the tensor of probabilities probs have incompatible dimensions.")

alphabet = "abcdefghijklmnopqrstuvwxyz"
cutoff = probs_shape[0]
for i, eta in enumerate(etas):
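        # Contract the loss matrix with the i-th axis of probs; e.g. for i = 2 the string is 'ij,abi...->abj...'.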
einstrings = "ij,{}i...->{}j...".format(alphabet[:i], alphabet[:i])

qein = np.zeros_like(probs)
qein = np.einsum(einstrings, loss_mat(eta, cutoff), probs)
probs = np.copy(qein)
return qein
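
As a concrete single-mode check (a sketch using only the functions above): a one-photon Fock state with distribution (0, 1, 0, 0) is mapped by a channel of transmission ``eta`` to (1 - eta, eta, 0, 0):

import numpy as np
from thewalrus.quantum import update_probabilities_with_loss

probs = np.array([0.0, 1.0, 0.0, 0.0])               # single mode prepared in Fock |1>
lossy = update_probabilities_with_loss([0.6], probs)
assert np.allclose(lossy, [0.4, 0.6, 0.0, 0.0])      # photon lost with probability 1 - eta = 0.4
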
43 changes: 43 additions & 0 deletions thewalrus/samples.py
@@ -40,6 +40,12 @@
torontonian_sample_graph
torontonian_sample_classical_state

Brute force sampling
--------------------

.. autosummary::
photon_number_sampler

Code details
------------
"""
@@ -530,6 +536,43 @@ def torontonian_sample_classical_state(cov, samples, mean=None, hbar=2, atol=1e-
)


def photon_number_sampler(probabilities, num_samples, out_of_bounds=False):
"""Given a photon-number probability mass function(PMF) it returns samples according to said PMF.

Args:
probabilities (array): probability tensor of the modes, has shape ``[cutoff]*num_modes``
num_samples (int): number of samples requested
        out_of_bounds (boolean): if ``False``, the probability distribution is renormalized. Otherwise, the value of
            ``out_of_bounds`` is used as a placeholder for samples that fall outside the truncated distribution (i.e. whenever the leftover probability mass is drawn).

Returns:
        (array): samples, with shape ``[num_samples, num_modes]``
"""
num_modes = len(probabilities.shape)
cutoff = probabilities.shape[0]
sum_p = np.sum(probabilities)

if out_of_bounds is False:
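        # Renormalize, draw a flat index from the joint PMF, and unravel it back into a tuple of per-mode photon numbers.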
probabilities = probabilities.flatten() / sum_p
vals = np.arange(cutoff**num_modes, dtype=int)
return [
np.unravel_index(np.random.choice(vals, p=probabilities), [cutoff] * num_modes)
for _ in range(num_samples)
]

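    # Otherwise, append an extra outcome carrying the leftover probability mass 1 - sum_p; drawing it returns the out_of_bounds placeholder.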
upper_limit = cutoff ** num_modes

def sorter(index):
if index == upper_limit:
return out_of_bounds

return np.unravel_index(index, [cutoff] * num_modes)

vals = np.arange(1 + cutoff**num_modes, dtype=int)
probabilities = np.append(probabilities.flatten(), 1.0 - sum_p)
return [sorter(np.random.choice(vals, p=probabilities)) for _ in range(num_samples)]

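A minimal usage sketch (not part of the module): sampling from a hand-built two-mode PMF whose only outcomes are (0, 0) and (1, 1), so the two modes are always perfectly correlated:

import numpy as np
from thewalrus.samples import photon_number_sampler

pmf = np.zeros((2, 2))
pmf[0, 0] = 0.5
pmf[1, 1] = 0.5
samples = np.array(photon_number_sampler(pmf, 1000))
assert np.all(samples[:, 0] == samples[:, 1])        # both modes always agree
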

def seed(seed_val=None):
r""" Seeds the random number generator used in the sampling algorithms.

46 changes: 45 additions & 1 deletion thewalrus/tests/test_integration.py
@@ -16,7 +16,10 @@

import numpy as np
import pytest
from thewalrus.quantum import density_matrix, state_vector
from scipy.linalg import block_diag
from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss
from thewalrus.symplectic import expand, interferometer, two_mode_squeezing, loss


@pytest.mark.parametrize("hbar", [0.1, 0.5, 1, 2, 1.0/137])
def test_cubic_phase(hbar):
@@ -47,3 +50,44 @@ def test_cubic_phase(hbar):
assert np.allclose(np.outer(psi, psi.conj()), rho)
assert np.allclose(np.outer(psi_c, psi_c.conj()), rho)
assert np.allclose(rho_c, rho)


@pytest.mark.parametrize("hbar", [2.0, 1.0/137])
def test_four_modes(hbar):
""" Test that probabilities are correctly updates for a four modes system under loss"""
    # All this block is to generate the correct covariance matrix.
    # It corresponds to num_modes=4 modes that undergo two-mode squeezing between modes i and i + (num_modes / 2).
    # Then they undergo displacement.
    # The signals and idlers pass through an interferometer with unitary matrix u2x2.
    # Finally, mode i undergoes loss by amount etas[i].
num_modes = 4
theta = 0.45
phi = 0.7
u2x2 = np.array([[np.cos(theta / 2), np.exp(1j * phi) * np.sin(theta / 2)],
[-np.exp(-1j * phi) * np.sin(theta / 2), np.cos(theta / 2)]])

u4x4 = block_diag(u2x2, u2x2)

cov = np.identity(2 * num_modes) * hbar / 2
means = 0.5 * np.random.rand(2 * num_modes) * np.sqrt(hbar / 2)
rs = [0.1, 0.9]
n_half = num_modes // 2

for i, r_val in enumerate(rs):
Sexpanded = expand(two_mode_squeezing(r_val, 0.0), [i, n_half + i], num_modes)
cov = Sexpanded @ cov @ (Sexpanded.T)

Su = expand(interferometer(u4x4), range(num_modes), num_modes)
cov = Su @ cov @ (Su.T)
cov_lossless = np.copy(cov)
means_lossless = np.copy(means)
etas = [0.9, 0.7, 0.9, 0.1]

for i, eta in enumerate(etas):
means, cov = loss(means, cov, eta, i, hbar=hbar)

cutoff = 3
probs_lossless = probabilities(means_lossless, cov_lossless, 4 * cutoff, hbar=hbar)
probs = probabilities(means, cov, cutoff, hbar=hbar)
probs_updated = update_probabilities_with_loss(etas, probs_lossless)
assert np.allclose(probs, probs_updated[:cutoff, :cutoff, :cutoff, :cutoff], atol=1e-6)
73 changes: 72 additions & 1 deletion thewalrus/tests/test_quantum.py
@@ -20,7 +20,7 @@
import numpy as np
from scipy.linalg import qr

from thewalrus.symplectic import rotation, squeezing, interferometer, two_mode_squeezing, beam_splitter
from thewalrus.symplectic import rotation, squeezing, interferometer, two_mode_squeezing, beam_splitter, loss

from thewalrus.quantum import (
reduced_gaussian,
@@ -48,6 +48,9 @@
fock_tensor,
photon_number_mean_vector,
photon_number_mean,
probabilities,
update_probabilities_with_loss,
loss_mat,
)


@@ -928,3 +931,71 @@ def test_pnd_squeeze_displace(tol, r, phi, alpha, hbar):
mean_analytic = np.abs(alpha) ** 2 + np.sinh(r) ** 2
assert np.isclose(float(pnd_cov), pnd_cov_analytic, atol=tol, rtol=0)
assert np.isclose(photon_number_mean(mu, cov, 0, hbar=hbar), mean_analytic, atol=tol, rtol=0)


@pytest.mark.parametrize("hbar", [0.1, 1, 2])
@pytest.mark.parametrize("etas", [0.1, 0.4, 0.9, 1.0])
@pytest.mark.parametrize("etai", [0.1, 0.4, 0.9, 1.0])
def test_update_with_loss_two_mode_squeezed(etas, etai, hbar):
"""Test the probabilities are updated correctly for a lossy two mode squeezed vacuum state"""
cov2 = two_mode_squeezing(np.arcsinh(1.0), 0.0)
cov2 = hbar * cov2 @ cov2.T / 2.0
mean2 = np.zeros([4])
eta2 = [etas, etai]
cov2l = np.copy(cov2)

for i, eta in enumerate(eta2):
mean2, cov2l = loss(mean2, cov2l, eta, i, hbar=hbar)

cutoff = 6
probs = probabilities(mean2, cov2l, cutoff, hbar=hbar)
probs_lossless = probabilities(mean2, cov2, 3 * cutoff, hbar=hbar)
probs_updated = update_probabilities_with_loss(eta2, probs_lossless)

assert np.allclose(probs, probs_updated[:cutoff, :cutoff], atol=1.0e-5)


@pytest.mark.parametrize("hbar", [0.1, 1, 2])
@pytest.mark.parametrize("etas", [0.1, 0.4, 0.9, 1.0])
@pytest.mark.parametrize("etai", [0.1, 0.4, 0.9, 1.0])
def test_update_with_loss_coherent_states(etas, etai, hbar):
"""Checks probabilities are updated correctly for coherent states"""
n_modes = 2
cov = hbar * np.identity(2 * n_modes) / 2
eta_vals = [etas, etai]
means = 2 * np.random.rand(2 * n_modes)
means_lossy = np.sqrt(np.array(eta_vals + eta_vals)) * means
cutoff = 6
probs_lossless = probabilities(means, cov, 10 * cutoff, hbar=hbar)

probs = probabilities(means_lossy, cov, cutoff, hbar=hbar)
probs_updated = update_probabilities_with_loss(eta_vals, probs_lossless)

assert np.allclose(probs, probs_updated[:cutoff, :cutoff], atol=1.0e-5)


@pytest.mark.parametrize("eta", [0.1, 0.5, 1.0])
def test_loss_is_stochastic_matrix(eta):
"""Test the loss matrix is an stochastic matrix, implying that the sum
of the entries a long the rows is 1"""
n = 50
M = loss_mat(eta, n)
assert np.allclose(np.sum(M, axis=1), np.ones([n]))


@pytest.mark.parametrize("eta", [0.1, 0.5, 1.0])
def test_loss_is_nonnegative_matrix(eta):
"""Test the loss matrix is a nonnegative matrix"""
n = 50
M = loss_mat(eta, n)
assert np.alltrue(M >= 0.0)


@pytest.mark.parametrize("eta", [-1.0, 2.0])
def test_loss_value_error(eta):
"""Tests the correct error is raised"""
n = 50
with pytest.raises(
ValueError, match="The transmission parameter eta should be a number between 0 and 1."
):
loss_mat(eta, n)
28 changes: 26 additions & 2 deletions thewalrus/tests/test_samples.py
@@ -26,9 +26,9 @@
hafnian_sample_classical_state,
torontonian_sample_classical_state,
seed,
photon_number_sampler,
)
from thewalrus.quantum import gen_Qmat_from_graph, density_matrix_element

from thewalrus.quantum import gen_Qmat_from_graph, density_matrix_element, probabilities
seed(137)

rel_tol = 3.0
@@ -443,6 +443,30 @@ def test_torontonian_sample_graph(self, parallel):
assert np.all(samples[:, 0] == samples[:, 1])


def test_photon_number_sampler_two_mode_squeezed():
"""Test the brute force sampler when one truncates the probability distribution """
hbar = 2.0
r = np.arcsinh(1.0)
cov = TMS_cov(r, 0.0)
cutoff = 10
probs = probabilities(np.zeros([4]), cov, cutoff, hbar=hbar)
samples = np.array(photon_number_sampler(probs, 1000))
assert np.allclose(samples[:, 0], samples[:, 1])


def test_photon_number_sampler_out_of_bounds():
"""Test the brute force sampler when one use 'Coo coo ca choo' as the string for out_of_bounds"""
hbar = 2.0
r = np.arcsinh(np.sqrt(100))
cov = TMS_cov(r, 0.0)
cutoff = 5
probs = probabilities(np.zeros([4]), cov, cutoff, hbar=hbar)
samples = photon_number_sampler(probs, 1000, out_of_bounds='Coo coo ca choo')
assert 'Coo coo ca choo' in samples
numerical_samples = np.array([x for x in samples if x != "Coo coo ca choo"])
assert np.allclose(numerical_samples[:, 0], numerical_samples[:, 1])


def test_seed():
"""Tests that seed method does reset the random number generators"""
n_samples = 10