Adding probability tests #368

Merged · 15 commits · Nov 4, 2023
2 changes: 1 addition & 1 deletion thewalrus/internal_modes/__init__.py
@@ -15,6 +15,6 @@
Functions to do internal modes/distinguishable GBS
"""

from .pnr_statistics import pnr_prob
from .pnr_statistics import pnr_prob, probabilities_single_mode
from .fock_density_matrices import density_matrix_single_mode
from .prepare_cov import *
15 changes: 2 additions & 13 deletions thewalrus/internal_modes/fock_density_matrices.py
@@ -21,10 +21,10 @@
from ..symplectic import passive_transformation
from .._hafnian import nb_binom, nb_ix, find_kept_edges, f_from_matrix
from .utils import (
nb_block,
nb_Qmat,
spatial_reps_to_schmidt_reps,
fact,
project_onto_local_oscillator,
)


@@ -52,18 +52,7 @@ def _density_matrix_single_mode(cov, pattern, normalize=False, LO_overlap=None,

# filter out all unwanted Schmidt modes in heralded spatial mode

# create passive transformation of filter
T = np.zeros((M * K, M * K), dtype=np.complex128)
if LO_overlap is not None:
T[0][:K] = LO_overlap
else:
T[0, 0] = 1
T[K:, K:] = np.eye((M - 1) * K, dtype=np.complex128)

# apply channel of filter
P = nb_block(((T.real, -T.imag), (T.imag, T.real)))
L = (hbar / 2) * (np.eye(P.shape[0]) - P @ P.T)
cov = P @ cov @ P.T + L
cov = project_onto_local_oscillator(cov, M, LO_overlap=LO_overlap, hbar=hbar)

Q = nb_Qmat(cov, hbar=hbar)
O = np.eye(2 * M * K) - np.linalg.inv(Q)
143 changes: 140 additions & 3 deletions thewalrus/internal_modes/pnr_statistics.py
@@ -15,17 +15,23 @@
Set of functions for calculating photon number resolved measurement probabilities on Gaussian states with multiple internal modes
"""


import numpy as np

import numba

from scipy.special import factorial as fac

from ..quantum import Qmat
from .._hafnian import find_kept_edges, nb_binom, f_from_powertrace, nb_ix
from ..quantum import Qmat, Amat
from thewalrus.symplectic import passive_transformation
Collaborator: Why not a relative import like the other imports?

Collaborator (Author): does that make a difference?

from .._hafnian import find_kept_edges, nb_binom, f_from_powertrace, nb_ix, f_from_matrix, get_AX_S
from ..charpoly import powertrace

from .utils import spatial_reps_to_schmidt_reps, spatial_modes_to_schmidt_modes
from .utils import (
spatial_reps_to_schmidt_reps,
spatial_modes_to_schmidt_modes,
project_onto_local_oscillator,
)


@numba.jit(nopython=True, parallel=True, cache=True)
@@ -120,3 +126,134 @@ def pnr_prob(covs, i, hbar=2):
prob = haf.real * vac_prob / fac_prod

return prob


@numba.jit(nopython=True, cache=True)
def finite_difference_operator_coeffs(der_order, m, u=None, v=None):
""" Returns the mth coefficient of the finite difference operator of given derivative order.
For details see: E. T. Bax, Finite-difference algorithms for counting problems. PhD thesis, Caltech, 1998.

Args:
der_order (int): derivative order.
m (int): index of the coefficient.
u (int): u value from Bax.
v (int): v value from Bax.

Returns:
tuple: prefactor and value when applied to the finite difference.

"""
if u is None:
u = 2 - der_order
if v is None:
v = -der_order
prefac = (-1) ** (der_order - m) * nb_binom(der_order, m) / (u - v) ** der_order
val = v + m * (u - v)
return prefac, val
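
As a quick illustration of what these coefficients encode (a standalone sketch, not part of the diff; fd_coeffs is a hypothetical plain-numpy mirror of the function above): for a monomial f(x) = x**n, the order-n finite difference built from these (prefactor, evaluation point) pairs recovers n! exactly, which is the identity Bax's counting algorithms exploit.

import math
from scipy.special import binom

def fd_coeffs(der_order, m, u=None, v=None):
    # same prefactor/value formula as finite_difference_operator_coeffs above
    if u is None:
        u = 2 - der_order
    if v is None:
        v = -der_order
    prefac = (-1) ** (der_order - m) * binom(der_order, m) / (u - v) ** der_order
    val = v + m * (u - v)
    return prefac, val

n = 4
total = sum(pref * val**n for pref, val in (fd_coeffs(n, m) for m in range(n + 1)))
assert math.isclose(total, math.factorial(n))  # the order-n difference of x**n is n!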


def haf_blocked(A, blocks, repeats):
"""Wrapper function for _haf_blocked_numba which calculate blocked hafnian associated with photon number probabilities.

Args:
A (array): input matrix
blocks (list): how to block (coarse graining) different outcomes
repeats (list): pattern for repetition but with one added in each repetition

Returns:
complex: value of the hafnian
"""
# Note that the two lines below cannot be done inside numba hence the need for this function
repeats = tuple(val + 1 for val in repeats)
blocks = [np.array(val, dtype=np.int32) for val in blocks]
return _haf_blocked_numba(A, blocks, repeats)


@numba.jit(nopython=True)
def _haf_blocked_numba(A, blocks, repeats_p):
"""Calculates the hafnian of the matrix with a given block and repetition pattern.

Args:
A (array): input matrix
blocks (list): how to block (coarse graining) different outcomes
repeats_p (list): pattern for repetition but with one added in each repetition

Returns:
complex: value of the hafnian
"""
n = sum(repeats_p) - len(repeats_p)
num_indices = 0
for block in blocks:
num_indices += len(block)
netsum = 0.0 + 0j
coeff_vect = np.zeros(num_indices, dtype=np.int32)
for index in np.ndindex(*repeats_p):
coeff_pref = 1.0 + 0j
for i, block in enumerate(blocks):
(coeff, val) = finite_difference_operator_coeffs(repeats_p[i] - 1, index[i])
coeff_pref *= coeff
for mode in block:
coeff_vect[mode] = val
AX_S = get_AX_S(coeff_vect, A)

netsum += coeff_pref * f_from_matrix(AX_S, 2 * n)[n]
return netsum
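
In equation form (a restatement of the loop above, writing $n_b$ for repeats_p[b] − 1, $n = \sum_b n_b$, and $(c_{b,m}, \lambda_{b,m})$ for the output of finite_difference_operator_coeffs$(n_b, m)$): every mode in block $b$ is assigned the scalar $\lambda_{b,m_b}$, and

$$\mathrm{haf}_{\mathrm{blocked}}(A)=\sum_{m_1=0}^{n_1}\cdots\sum_{m_B=0}^{n_B}\Big(\prod_{b=1}^{B} c_{b,m_b}\Big)\,\big[f\big(A X_S(\boldsymbol{\lambda})\big)\big]_{n},$$

where $A X_S(\boldsymbol{\lambda})$ is the matrix built by get_AX_S from the coefficient vector and $[\,\cdot\,]_{n}$ is the $n$-th entry returned by f_from_matrix.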


def probabilities_single_mode(cov, pattern, normalize=False, LO_overlap=None, cutoff=13, hbar=2):
Collaborator: What is this function supposed to be? In the docstring it says density matrix but should it say probability pattern? Also I don't really understand why there's an option to project onto LO for this, given we assume the PNRs are bucket detectors, it shouldn't affect how we split the internal modes up, no?

Collaborator (Author): I have rewritten the doc string, hopefully this clarifies the meaning.

Collaborator: I understand, it's essentially the pnr distribution of the 'principal mode' (or mode of our choosing). Still, I feel it might be more appropriate to call it something like dm_diag_single_mode (or something to that effect) to avoid confusion with pnr_prob, but ultimately I don't feel that strongly about it.

Collaborator (Author): well, PNR prob does something similar, but the signature of the function is different.

"""
Calculates the diagonal of the density matrix (hence the name "probabilities") of the first mode, heralded by a photon-number
pattern on the remaining spatial modes of a zero-displaced, M-mode Gaussian state in which each spatial mode contains K internal modes.

Args:
cov (array): 2MK x 2MK covariance matrix
pattern (dict): heralding pattern of total photon numbers in the spatial modes (int), indexed by spatial mode
normalize (bool): whether to normalise the output probabilities
LO_overlap (array): overlap between internal modes and local oscillator
cutoff (int): photon number cutoff
hbar (float): the value of hbar (default 2)
Returns:
array[float]: photon-number probabilities of the first mode, of length cutoff
"""

# The lines until A = Amat(...) are copies from density_matrix_single_mode
cov = np.array(cov).astype(np.float64)
M = len(pattern) + 1
K = cov.shape[0] // (2 * M)
if not set(list(pattern.keys())).issubset(set(list(np.arange(M)))):
raise ValueError("Keys of pattern must correspond to all but one spatial mode")
N_nums = np.array(list(pattern.values()))
HM = list(set(list(np.arange(M))).difference(list(pattern.keys())))[0]
if LO_overlap is not None:
if not K == LO_overlap.shape[0]:
raise ValueError("Number of overlaps with LO must match number of internal modes")
if not (np.linalg.norm(LO_overlap) < 1 or np.allclose(np.linalg.norm(LO_overlap), 1)):
raise ValueError("Norm of overlaps must not be greater than 1")

# swapping the spatial modes around such that we are heralding in spatial mode 0
Uswap = np.zeros((M, M))
swapV = np.concatenate((np.array([HM]), np.arange(HM), np.arange(HM + 1, M)))
for j, k in enumerate(swapV):
Uswap[j][k] = 1
U_K = np.zeros((M * K, M * K))
for i in range(K):
U_K[i::K, i::K] = Uswap
_, cov = passive_transformation(np.zeros(cov.shape[0]), cov, U_K, hbar=hbar)

cov = project_onto_local_oscillator(cov, M, LO_overlap=LO_overlap, hbar=hbar)

A = Amat(cov)
Q = Qmat(cov)
fact = 1 / np.sqrt(np.linalg.det(Q).real)
blocks = np.arange(K * M).reshape([M, K])
probs = np.zeros([cutoff])
patt = [pattern[i] for i in range(1, 1 + len(pattern))]
for i in range(cutoff):
patt_long = [i] + patt
probs[i] = fact * np.real(
haf_blocked(A, blocks=blocks, repeats=patt_long) / np.prod(fac(patt_long))
)

if normalize:
probs = probs / np.sum(probs)
return probs
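
A minimal usage sketch of the new function (not part of the diff; it assumes a single internal mode per spatial mode, K = 1, and builds a two-mode squeezed vacuum with thewalrus.symplectic.two_mode_squeezing): heralding one photon in spatial mode 1 and checking the result against the diagonal of density_matrix_single_mode, which is the same consistency the tests below verify.

import numpy as np
from thewalrus.symplectic import two_mode_squeezing
from thewalrus.internal_modes import density_matrix_single_mode, probabilities_single_mode

hbar = 2
S = two_mode_squeezing(0.8, 0.0)  # two spatial modes, one internal mode each (K = 1)
cov = (hbar / 2) * S @ S.T        # covariance matrix of a two-mode squeezed vacuum

# Heralding one photon in spatial mode 1 ideally leaves one photon in spatial mode 0,
# so the diagonal of the heralded density matrix and the probabilities should agree.
rho = density_matrix_single_mode(cov, {1: 1}, normalize=True, cutoff=3)
probs = probabilities_single_mode(cov, {1: 1}, normalize=True, cutoff=3)
assert np.allclose(np.diag(rho), probs)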
32 changes: 32 additions & 0 deletions thewalrus/internal_modes/utils.py
@@ -104,3 +104,35 @@ def nb_block(X): # pragma: no cover
xtmp1 = np.concatenate(X[0], axis=1)
xtmp2 = np.concatenate(X[1], axis=1)
return np.concatenate((xtmp1, xtmp2), axis=0)


@numba.jit(nopython=True, parallel=True, cache=True)
def project_onto_local_oscillator(cov, M, LO_overlap=None, hbar=2):
"""Projects a given covariance matrix into the relevant internal mode in the first external mode.

Args:
cov (array): 2MK x 2MK covariance matrix
LO_overlap (array): overlap between internal modes and local oscillator

Returns:
(array): projected covariance matrix
"""

K = cov.shape[0] // (2 * M)

# filter out all unwanted Schmidt modes in heralded spatial mode

# create passive transformation of filter
T = np.zeros((M * K, M * K), dtype=np.complex128)
if LO_overlap is not None:
T[0][:K] = LO_overlap
else:
T[0, 0] = 1
T[K:, K:] = np.eye((M - 1) * K, dtype=np.complex128)

# apply channel of filter
P = nb_block(((T.real, -T.imag), (T.imag, T.real)))
L = (hbar / 2) * (np.eye(P.shape[0]) - P @ P.T)
cov = P @ cov @ P.T + L

return cov
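
In symbols, this helper applies the standard Gaussian (CPTP) channel associated with the non-unitary linear-optical transformation $T$ built above: the covariance matrix is mapped as

$$\sigma \mapsto P\,\sigma\,P^{\mathsf{T}} + L,\qquad P=\begin{pmatrix}\operatorname{Re}T & -\operatorname{Im}T\\ \operatorname{Im}T & \operatorname{Re}T\end{pmatrix},\qquad L=\frac{\hbar}{2}\left(\mathbb{1}-PP^{\mathsf{T}}\right),$$

so the internal modes filtered out by $T$ are replaced by vacuum noise.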
56 changes: 41 additions & 15 deletions thewalrus/tests/test_internal_modes.py
@@ -38,6 +38,7 @@
Amat,
Qmat,
state_vector,
photon_number_mean,
)
from thewalrus.symplectic import (
beam_splitter,
@@ -49,7 +50,7 @@
squeezing,
)

from thewalrus.internal_modes import pnr_prob, density_matrix_single_mode
from thewalrus.internal_modes import pnr_prob, density_matrix_single_mode, probabilities_single_mode
from thewalrus.internal_modes.prepare_cov import (
O_matrix,
orthonormal_basis,
Expand All @@ -58,6 +59,8 @@
LO_overlaps,
)

from thewalrus.internal_modes.utils import project_onto_local_oscillator

### auxiliary functions for testing ###
# if we want to have fewer auxiliary functions, we can remove a few tests and get rid of it all

@@ -1127,42 +1130,45 @@ def test_LO_overlaps(r, S, phi):
)


def test_mixed_heralded_photon():
"""test code for generating heralded single photon state from squeezed states with 2 internal modes"""
@pytest.mark.parametrize("nh", [1, 2, 3, 4])
def test_mixed_heralded_photon(nh):
"""test code for generating heralded fock states from squeezed states with 2 internal modes"""
na = 1
nb = 0.5
ns = np.array([na, nb])
rs = np.arcsinh(np.sqrt(ns))
gs = ns / (1 + ns)
cutoff = 5
cutoff = nh + 1
ps = np.array([g ** np.arange(cutoff) / (1 + n) for g, n in zip(gs, ns)])
herald_val = 1
herald_val = nh
dm_modea = np.array([ps[0, i] * ps[1, herald_val - i] for i in range(herald_val + 1)])
dm_modeb = dm_modea[::-1]
dm_modea = np.diag(dm_modea) / np.sum(dm_modea)
dm_modeb = np.diag(dm_modeb) / np.sum(dm_modeb)

F = [np.array([1, 0]), np.array([0, 1])]
theta = np.pi / 4
phi = -np.pi / 2
U_TMSV = np.array(
[
[np.cos(theta), np.exp(-1j * phi) * np.sin(theta)],
[-np.exp(1j * phi) * np.sin(theta), np.cos(theta)],
]
)
U_TMSV = np.sqrt(0.5) * np.array([[1, 1j], [1j, 1]])
cov, chis = prepare_cov([rs, rs], U_TMSV, F=[F, F])
LO_overlapa = LO_overlaps(chis, chis[0])
LO_overlapb = LO_overlaps(chis, chis[1])
rho_a = density_matrix_single_mode(
cov, {1: 1}, normalize=True, LO_overlap=LO_overlapa, cutoff=2
cov, {1: nh}, normalize=True, LO_overlap=LO_overlapa, cutoff=nh + 1
)
rho_b = density_matrix_single_mode(
cov, {1: 1}, normalize=True, LO_overlap=LO_overlapb, cutoff=2
cov, {1: nh}, normalize=True, LO_overlap=LO_overlapb, cutoff=nh + 1
)

p_a = probabilities_single_mode(
cov, {1: nh}, normalize=True, LO_overlap=LO_overlapa, cutoff=nh + 1
)
p_b = probabilities_single_mode(
cov, {1: nh}, normalize=True, LO_overlap=LO_overlapb, cutoff=nh + 1
)

assert np.allclose(dm_modea, rho_a)
assert np.allclose(dm_modeb, rho_b)
assert np.allclose(np.diag(dm_modea), p_a)
assert np.allclose(np.diag(dm_modeb), p_b)
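
For reference, the analytic target used in this test follows from the reduced photon-number distribution of a two-mode squeezed vacuum with mean photon number $\bar n$, namely $P_{\bar n}(k)=\bar n^{\,k}/(1+\bar n)^{k+1}$: heralding a total of $n_h$ photons in spatial mode $b$, with the two squeezers occupying orthogonal internal modes, gives (up to normalisation)

$$\rho^{(a)}_{ii} \propto P_{\bar n_a}(i)\,P_{\bar n_b}(n_h - i),\qquad i=0,\dots,n_h,$$

with $\bar n_a = 1$ and $\bar n_b = 0.5$, which is exactly what dm_modea encodes (dm_modeb has the roles reversed).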


def test_pure_gkp():
@@ -1223,6 +1229,9 @@ def test_pure_gkp():
assert np.allclose(rho1, rho2, atol=2.5e-4)
assert np.allclose(rho1, rho3, atol=4.7e-4)
assert np.allclose(rho2, rho3, atol=4.8e-4)
probs = probabilities_single_mode(cov, {1: m1, 2: m2}, cutoff=cutoff, normalize=True)
assert np.allclose(np.diag(rho1), probs)

#### Note that the tolerances are higher than they should be.


@@ -1280,6 +1289,8 @@ def test_lossy_gkp():
rho_loss2 = density_matrix_single_mode(cov_lossy, {1: m1, 2: m2}, cutoff=cutoff)
rho_loss2 /= np.trace(rho_loss2)
assert np.allclose(rho_loss1, rho_loss2, atol=2.7e-4)
probs = probabilities_single_mode(cov_lossy, {1: m1, 2: m2}, cutoff=cutoff, normalize=True)
assert np.allclose(np.diag(rho_loss1), probs)


def test_vac_schmidt_modes_gkp():
@@ -1339,6 +1350,8 @@ def test_vac_schmidt_modes_gkp():
rho_big /= np.trace(rho_big)

assert np.allclose(rho1, rho_big, atol=4e-4)
probs = probabilities_single_mode(big_cov, {1: m1, 2: m2}, cutoff=cutoff, normalize=True)
assert np.allclose(np.diag(rho1), probs)


def test_density_matrix_error():
@@ -1419,7 +1432,10 @@ def test_density_matrix(cutoff):
rho2 = density_matrix_single_mode(cov, N, cutoff=cutoff)
rho2_norm = rho2 / np.trace(rho2).real

# probs = probabilities_single_mode(cov, N, cutoff=cutoff, normalize=True)
Collaborator (Author): @DavidSPhillips this is a waaay too long test..

Collaborator: Does it take ages in the internal_modes branch?

Collaborator (Author): well, it takes ages on my computer, that is why I decided against adding the new functions in this test

Collaborator: What's too long? The whole test or the addition of the commented out probabilities bit? The new bit doesn't take much longer for my laptop at all (after correcting for the PNR on mode 0). The whole test takes about 2.5 minutes.

Collaborator (Author): The test. 2.5 minutes is too long.

Collaborator: But the test is still included so it will still take 2.5 minutes. Adding the commented out lines (but not commented out) wouldn't increase the time much.

assert np.allclose(rho_norm, rho2_norm, atol=1e-6, rtol=1e-6)
# assert np.allclose(np.diag(rho_norm), probs)


def test_density_matrix_LO():
@@ -1470,3 +1486,13 @@ def test_density_matrix_LO():
rho2_norm = rho2 / np.trace(rho2).real

assert np.allclose(rho_norm, rho2_norm, atol=1e-6, rtol=1e-6)


def test_project_onto_local_oscillator():
"""Test that the mode that is orthogonal to the modes that are orthogonal to the local oscillator end up with zero photons"""
M = 2
K = 3
cov_test = squeezing([1] * K + [-1] * K)
cov_filt = project_onto_local_oscillator(cov_test, M, np.array([1, 0, 0]))
assert np.allclose(photon_number_mean(np.zeros(len(cov_filt)), cov_filt, 1), 0)
assert np.allclose(photon_number_mean(np.zeros(len(cov_filt)), cov_filt, 2), 0)