From 2d6291da715602bf2ffa75a072a16512b67fae85 Mon Sep 17 00:00:00 2001 From: Anthony Date: Mon, 19 Aug 2024 14:47:16 -0400 Subject: [PATCH 1/3] inital --- mrmustard/lab/abstract/state.py | 38 +++--- mrmustard/lab/abstract/transformation.py | 42 +++--- mrmustard/lab/circuit.py | 12 +- mrmustard/lab/detectors.py | 58 +++++---- mrmustard/lab/gates.py | 122 +++++++++--------- mrmustard/lab/states.py | 78 +++++------ mrmustard/lab/utils.py | 7 +- mrmustard/lab_dev/circuit_components.py | 22 ++-- .../circuit_components_utils/b_to_q.py | 2 +- mrmustard/lab_dev/circuits.py | 7 +- mrmustard/lab_dev/states/base.py | 38 +++--- mrmustard/lab_dev/states/coherent.py | 10 +- .../lab_dev/states/displaced_squeezed.py | 18 +-- .../lab_dev/states/quadrature_eigenstate.py | 6 +- mrmustard/lab_dev/states/squeezed_vacuum.py | 10 +- mrmustard/lab_dev/states/thermal.py | 6 +- .../states/two_mode_squeezed_vacuum.py | 8 +- .../lab_dev/transformations/amplifier.py | 6 +- .../lab_dev/transformations/attenuator.py | 6 +- mrmustard/lab_dev/transformations/base.py | 14 +- mrmustard/lab_dev/transformations/bsgate.py | 8 +- mrmustard/lab_dev/transformations/dgate.py | 10 +- .../lab_dev/transformations/fockdamping.py | 8 +- mrmustard/lab_dev/transformations/rgate.py | 6 +- mrmustard/lab_dev/transformations/s2gate.py | 8 +- mrmustard/lab_dev/transformations/sgate.py | 10 +- mrmustard/lab_dev/utils.py | 4 +- mrmustard/math/backend_manager.py | 45 ++++--- mrmustard/math/backend_numpy.py | 52 ++++---- mrmustard/math/backend_tensorflow.py | 53 ++++---- mrmustard/math/lattice/strategies/binomial.py | 8 +- mrmustard/math/parameters.py | 18 +-- mrmustard/math/tensor_networks/networks.py | 3 +- mrmustard/math/tensor_networks/tensors.py | 41 +++--- mrmustard/math/tensor_wrappers/mmtensor.py | 8 +- mrmustard/math/tensor_wrappers/xptensor.py | 66 +++++----- mrmustard/physics/ansatze.py | 30 ++--- mrmustard/physics/fock.py | 20 +-- mrmustard/physics/gaussian.py | 42 +++--- mrmustard/physics/gaussian_integrals.py | 22 ++-- mrmustard/training/callbacks.py | 17 +-- mrmustard/training/parameter_update.py | 10 +- 42 files changed, 497 insertions(+), 502 deletions(-) diff --git a/mrmustard/lab/abstract/state.py b/mrmustard/lab/abstract/state.py index f5eeee067..e52a52d66 100644 --- a/mrmustard/lab/abstract/state.py +++ b/mrmustard/lab/abstract/state.py @@ -20,11 +20,7 @@ from typing import ( TYPE_CHECKING, Iterable, - List, - Optional, Sequence, - Tuple, - Union, ) import numpy as np import matplotlib.pyplot as plt @@ -114,7 +110,7 @@ def __init__( len(modes) == self.num_modes ), f"Number of modes supplied ({len(modes)}) must match the representation dimension {self.num_modes}" - def _add_parameter(self, parameter: Union[Constant, Variable]): + def _add_parameter(self, parameter: Constant | Variable): r""" Adds a parameter to a state. @@ -141,7 +137,7 @@ def modes(self): return list(range(self.num_modes)) return self._modes - def indices(self, modes) -> Union[Tuple[int], int]: + def indices(self, modes) -> int | tuple[int]: r"""Returns the indices of the given modes. 
Args: @@ -175,12 +171,12 @@ def is_pure(self): return np.isclose(self.purity, 1.0, atol=1e-6) @property - def means(self) -> Optional[RealVector]: + def means(self) -> RealVector | None: r"""Returns the means vector of the state.""" return self._means @property - def cov(self) -> Optional[RealMatrix]: + def cov(self) -> RealMatrix | None: r"""Returns the covariance matrix of the state.""" return self._cov @@ -195,7 +191,7 @@ def number_stdev(self) -> RealVector: ) @property - def cutoffs(self) -> List[int]: + def cutoffs(self) -> list[int]: r"""Returns the Hilbert space dimension of each mode.""" if self._cutoffs is None: if self._ket is None and self._dm is None: @@ -214,7 +210,7 @@ def cutoffs(self) -> List[int]: return self._cutoffs @property - def shape(self) -> List[int]: + def shape(self) -> list[int]: r"""Returns the shape of the state, accounting for ket/dm representation. If the state is in Gaussian representation, the shape is inferred from @@ -274,10 +270,10 @@ def probability(self) -> float: def ket( self, - cutoffs: List[int] = None, + cutoffs: list[int] | None = None, max_prob: float = 1.0, - max_photons: int = None, - ) -> Optional[ComplexTensor]: + max_photons: int | None = None, + ) -> ComplexTensor | None: r"""Returns the ket of the state in Fock representation or ``None`` if the state is mixed. Args: @@ -323,7 +319,7 @@ def ket( return padded[tuple(slice(s) for s in cutoffs)] return self._ket[tuple(slice(s) for s in cutoffs)] - def dm(self, cutoffs: Optional[List[int]] = None) -> ComplexTensor: + def dm(self, cutoffs: list[int] | None = None) -> ComplexTensor: r"""Returns the density matrix of the state in Fock representation. Args: @@ -376,7 +372,7 @@ def fock_probabilities(self, cutoffs: Sequence[int]) -> RealTensor: self._fock_probabilities = fock.ket_to_probs(ket) return self._fock_probabilities - def primal(self, other: Union[State, Transformation]) -> State: + def primal(self, other: State | Transformation) -> State: r"""Returns the post-measurement state after ``other`` is projected onto ``self``. ``other << self`` is other projected onto ``self``. @@ -399,7 +395,7 @@ def primal(self, other: Union[State, Transformation]) -> State: f"Cannot apply {other.__class__.__qualname__} to {self.__class__.__qualname__}" ) from e - def _project_onto_state(self, other: State) -> Union[State, float]: + def _project_onto_state(self, other: State) -> State | float: """If states are gaussian use generaldyne measurement, else use the states' Fock representation.""" @@ -410,7 +406,7 @@ def _project_onto_state(self, other: State) -> Union[State, float]: # either self or other is not gaussian return self._project_onto_fock(other) - def _project_onto_fock(self, other: State) -> Union[State, float]: + def _project_onto_fock(self, other: State) -> State | float: """Returns the post-measurement state of the projection between two non-Gaussian states on the remaining modes or the probability of the result. When doing homodyne sampling, returns the post-measurement state or the measument outcome if no modes remain. @@ -459,7 +455,7 @@ def _contract_with_other(self, other): return out_fock - def _project_onto_gaussian(self, other: State) -> Union[State, float]: + def _project_onto_gaussian(self, other: State) -> State | float: """Returns the result of a generaldyne measurement given that states ``self`` and ``other`` are gaussian. 
@@ -549,7 +545,7 @@ def __getitem__(self, item) -> State: self._modes = item return self - def bargmann(self, numpy=False) -> Optional[tuple[ComplexMatrix, ComplexVector, complex]]: + def bargmann(self, numpy=False) -> tuple[ComplexMatrix, ComplexVector, complex] | None: r"""Returns the Bargmann representation of the state. If numpy=True, returns the numpy arrays instead of the backend arrays. """ @@ -700,8 +696,8 @@ def _repr_markdown_(self): def mikkel_plot( rho: np.ndarray, - xbounds: Tuple[int] = (-6, 6), - ybounds: Tuple[int] = (-6, 6), + xbounds: tuple[int] = (-6, 6), + ybounds: tuple[int] = (-6, 6), **kwargs, ): # pylint: disable=too-many-statements """Plots the Wigner function of a state given its density matrix. diff --git a/mrmustard/lab/abstract/transformation.py b/mrmustard/lab/abstract/transformation.py index 057310a04..93044b4b5 100644 --- a/mrmustard/lab/abstract/transformation.py +++ b/mrmustard/lab/abstract/transformation.py @@ -19,7 +19,7 @@ from __future__ import annotations -from typing import Callable, Iterable, Optional, Sequence, Tuple, Union +from typing import Callable, Iterable, Sequence import numpy as np @@ -41,10 +41,10 @@ class Transformation(Tensor): def __init__( self, name: str, - modes_in_ket: Optional[list[int]] = None, - modes_out_ket: Optional[list[int]] = None, - modes_in_bra: Optional[list[int]] = None, - modes_out_bra: Optional[list[int]] = None, + modes_in_ket: list[int] | None = None, + modes_out_ket: list[int] | None = None, + modes_in_bra: list[int] | None = None, + modes_out_bra: list[int] | None = None, ): super().__init__( name=name, @@ -55,7 +55,7 @@ def __init__( ) self._parameter_set = ParameterSet() - def _add_parameter(self, parameter: Union[Constant, Variable]): + def _add_parameter(self, parameter: Constant | Variable): r""" Adds a parameter to a transformation. @@ -136,25 +136,25 @@ def _validate_modes(self, modes): pass @property - def X_matrix(self) -> Optional[RealMatrix]: + def X_matrix(self) -> RealMatrix | None: return None @property - def Y_matrix(self) -> Optional[RealMatrix]: + def Y_matrix(self) -> RealMatrix | None: return None @property - def d_vector(self) -> Optional[RealVector]: + def d_vector(self) -> RealVector | None: return None @property - def X_matrix_dual(self) -> Optional[RealMatrix]: + def X_matrix_dual(self) -> RealMatrix | None: if (X := self.X_matrix) is None: return None return gaussian.math.inv(X) @property - def Y_matrix_dual(self) -> Optional[RealMatrix]: + def Y_matrix_dual(self) -> RealMatrix | None: if (Y := self.Y_matrix) is None: return None if (Xdual := self.X_matrix_dual) is None: @@ -162,7 +162,7 @@ def Y_matrix_dual(self) -> Optional[RealMatrix]: return math.matmul(math.matmul(Xdual, Y), math.transpose(Xdual)) @property - def d_vector_dual(self) -> Optional[RealVector]: + def d_vector_dual(self) -> RealVector | None: if (d := self.d_vector) is None: return None if (Xdual := self.X_matrix_dual) is None: @@ -181,8 +181,8 @@ def bargmann(self, numpy=False): def choi( self, - cutoffs: Optional[Sequence[int]] = None, - shape: Optional[Sequence[int]] = None, + cutoffs: Sequence[int] | None = None, + shape: Sequence[int] | None = None, dual: bool = False, ): r"""Returns the Choi representation of the transformation. @@ -224,7 +224,7 @@ def choi( def XYd( self, allow_none: bool = True - ) -> Tuple[Optional[RealMatrix], Optional[RealMatrix], Optional[RealVector]]: + ) -> tuple[RealMatrix | None, RealMatrix | None, RealVector | None]: r"""Returns the ```(X, Y, d)``` triple. 
Override in subclasses if computing ``X``, ``Y`` and ``d`` together is more efficient. @@ -238,7 +238,7 @@ def XYd( def XYd_dual( self, allow_none: bool = True - ) -> tuple[Optional[RealMatrix], Optional[RealMatrix], Optional[RealVector]]: + ) -> tuple[RealMatrix | None, RealMatrix | None, RealVector | None]: r"""Returns the ```(X, Y, d)``` triple of the dual of the current transformation. Override in subclasses if computing ``Xdual``, ``Ydual`` and ``ddual`` together is more efficient. @@ -290,7 +290,7 @@ def __rshift__(self, other: Transformation): ops2 = other._ops if isinstance(other, Circuit) else [other] return Circuit(ops1 + ops2) - def __lshift__(self, other: Union[State, Transformation]): + def __lshift__(self, other: State | Transformation): r"""Applies the dual of self to other. If other is a state, the dual of self is applied to the state. @@ -375,7 +375,7 @@ def __init__(self, name: str, modes: list[int]): super().__init__(name=name, modes_in_ket=modes, modes_out_ket=modes) self.is_unitary = True - def value(self, shape: Tuple[int]): + def value(self, shape: tuple[int]): return self.U(shape=shape) def _transform_fock(self, state: State, dual=False) -> State: @@ -387,8 +387,8 @@ def _transform_fock(self, state: State, dual=False) -> State: def U( self, - cutoffs: Optional[Sequence[int]] = None, - shape: Optional[Sequence[int]] = None, + cutoffs: Sequence[int] | None = None, + shape: Sequence[int] | None = None, ): r"""Returns the unitary representation of the transformation. @@ -456,7 +456,7 @@ def _transform_fock(self, state: State, dual: bool = False) -> State: return State(dm=fock.apply_choi_to_ket(choi, state.ket(), op_idx), modes=state.modes) return State(dm=fock.apply_choi_to_dm(choi, state.dm(), op_idx), modes=state.modes) - def value(self, shape: Tuple[int]): + def value(self, shape: tuple[int]): return self.choi(shape=shape) def __eq__(self, other): diff --git a/mrmustard/lab/circuit.py b/mrmustard/lab/circuit.py index 3b337b2b4..a1f617366 100644 --- a/mrmustard/lab/circuit.py +++ b/mrmustard/lab/circuit.py @@ -20,8 +20,6 @@ __all__ = ["Circuit"] -from typing import List, Optional, Tuple - import numpy as np from mrmustard import settings @@ -38,13 +36,13 @@ class Circuit(Transformation): ops (list or none): A list of operations comprising the circuit. """ - def __init__(self, ops: Optional[List] = None): + def __init__(self, ops: list | None = None): self._ops = list(ops) if ops is not None else [] super().__init__(name="Circuit") self.reset() @property - def ops(self) -> Optional[List]: + def ops(self) -> list | None: r""" The list of operations comprising the circuit. 
""" @@ -53,7 +51,7 @@ def ops(self) -> Optional[List]: def reset(self): """Resets the state of the circuit clearing the list of modes and setting the compiled flag to false.""" self._compiled: bool = False - self._modes: List[int] = [] + self._modes: list[int] = [] @property def num_modes(self) -> int: @@ -73,7 +71,7 @@ def dual(self, state: State) -> State: def XYd( self, allow_none: bool = True, - ) -> Tuple[ + ) -> tuple[ RealMatrix, RealMatrix, RealVector ]: # NOTE: Overriding Transformation.XYd for efficiency X = XPMatrix(like_1=True) @@ -105,7 +103,7 @@ def is_unitary(self): """Returns `true` if all operations in the circuit are unitary.""" return all(op.is_unitary for op in self._ops) - def value(self, shape: Tuple[int]): + def value(self, shape: tuple[int]): raise NotImplementedError def __len__(self): diff --git a/mrmustard/lab/detectors.py b/mrmustard/lab/detectors.py index 9b2c97ce3..3d8869ab9 100644 --- a/mrmustard/lab/detectors.py +++ b/mrmustard/lab/detectors.py @@ -16,7 +16,9 @@ This module implements the set of detector classes that perform measurements on quantum circuits. """ -from typing import Iterable, List, Optional, Tuple, Union +from __future__ import annotations + +from typing import Iterable from mrmustard import settings from mrmustard.physics import fock, gaussian @@ -60,15 +62,15 @@ class PNRDetector(FockMeasurement): def __init__( self, - efficiency: Union[float, List[float]] = 1.0, - dark_counts: Union[float, List[float]] = 0.0, + efficiency: float | list[float] = 1.0, + dark_counts: float | list[float] = 0.0, efficiency_trainable: bool = False, dark_counts_trainable: bool = False, - efficiency_bounds: Tuple[Optional[float], Optional[float]] = (0.0, 1.0), - dark_counts_bounds: Tuple[Optional[float], Optional[float]] = (0.0, None), + efficiency_bounds: tuple[float | None, float | None] = (0.0, 1.0), + dark_counts_bounds: tuple[float | None, float | None] = (0.0, None), stochastic_channel: RealMatrix = None, - modes: List[int] = None, - cutoffs: Union[int, List[int]] = None, + modes: list[int] = None, + cutoffs: int | list[int] = None, ): self._stochastic_channel = stochastic_channel self._should_recompute_stochastic_channel = efficiency_trainable or dark_counts_trainable @@ -99,7 +101,7 @@ def __init__( def should_recompute_stochastic_channel(self): return self._should_recompute_stochastic_channel - def recompute_stochastic_channel(self, cutoffs: List[int] = None): + def recompute_stochastic_channel(self, cutoffs: list[int] | None = None): """recompute belief using the defined `stochastic channel`""" if cutoffs is None: cutoffs = [settings.PNR_INTERNAL_CUTOFF] * len(self._modes) @@ -156,14 +158,14 @@ class ThresholdDetector(FockMeasurement): def __init__( self, - efficiency: Union[float, List[float]] = 1.0, - dark_count_prob: Union[float, List[float]] = 0.0, + efficiency: float | list[float] = 1.0, + dark_count_prob: float | list[float] = 0.0, efficiency_trainable: bool = False, dark_count_prob_trainable: bool = False, - efficiency_bounds: Tuple[Optional[float], Optional[float]] = (0.0, 1.0), - dark_count_prob_bounds: Tuple[Optional[float], Optional[float]] = (0.0, None), + efficiency_bounds: tuple[float | None, float | None] = (0.0, 1.0), + dark_count_prob_bounds: tuple[float | None, float | None] = (0.0, None), stochastic_channel=None, - modes: List[int] = None, + modes: list[int] = None, ): if modes is not None: num_modes = len(modes) @@ -204,7 +206,7 @@ def __init__( def should_recompute_stochastic_channel(self): return 
self._should_recompute_stochastic_channel - def recompute_stochastic_channel(self, cutoffs: List[int] = None): + def recompute_stochastic_channel(self, cutoffs: list[int] | None = None): """recompute belief using the defined `stochastic channel`""" if cutoffs is None: cutoffs = [settings.PNR_INTERNAL_CUTOFF] * len(self._modes) @@ -238,8 +240,8 @@ class Generaldyne(Measurement): def __init__( self, state: State, - outcome: Optional[RealVector] = None, - modes: Optional[Iterable[int]] = None, + outcome: RealVector | None = None, + modes: Iterable[int] | None = None, ) -> None: if not state.is_gaussian: raise TypeError("Generaldyne measurement state must be Gaussian.") @@ -262,14 +264,14 @@ def __init__( def outcome(self) -> RealVector: return self.state.means - def primal(self, other: State) -> Union[State, float]: + def primal(self, other: State) -> State | float: if self.postselected: # return the projection of self.state onto other return self.state.primal(other) return super().primal(other) - def _measure_gaussian(self, other) -> Union[State, float]: + def _measure_gaussian(self, other) -> State | float: remaining_modes = list(set(other.modes) - set(self.modes)) outcome, prob, new_cov, new_means = gaussian.general_dyne( @@ -283,7 +285,7 @@ def _measure_gaussian(self, other) -> Union[State, float]: else State(cov=new_cov, means=new_means, modes=remaining_modes, _norm=prob) ) - def _measure_fock(self, other) -> Union[State, float]: + def _measure_fock(self, other) -> State | float: raise NotImplementedError(f"Fock sampling not implemented for {self.__class__.__name__}") @@ -301,9 +303,9 @@ class Heterodyne(Generaldyne): def __init__( self, - x: Union[float, List[float]] = 0.0, - y: Union[float, List[float]] = 0.0, - modes: List[int] = None, + x: float | list[float] = 0.0, + y: float | list[float] = 0.0, + modes: list[int] | None = None, ): if (x is None) ^ (y is None): # XOR raise ValueError("Both `x` and `y` arguments should be defined or set to `None`.") @@ -338,10 +340,10 @@ class Homodyne(Generaldyne): def __init__( self, - quadrature_angle: Union[float, List[float]], - result: Optional[Union[float, List[float]]] = None, - modes: Optional[List[int]] = None, - r: Optional[Union[float, List[float]]] = None, + quadrature_angle: float | list[float], + result: float | list[float] | None = None, + modes: list[int] | None = None, + r: float | list[float] | None = None, ): self.r = r or settings.HOMODYNE_SQUEEZING self.quadrature_angle = math.atleast_1d(quadrature_angle, dtype="float64") @@ -371,7 +373,7 @@ def __init__( ) super().__init__(state=state, outcome=outcome, modes=modes) - def _measure_gaussian(self, other) -> Union[State, float]: + def _measure_gaussian(self, other) -> State | float: # rotate modes to be measured to the Homodyne basis other >>= Rgate(-self.quadrature_angle, modes=self.modes) self.state >>= Rgate(-self.quadrature_angle, modes=self.modes) @@ -390,7 +392,7 @@ def _measure_gaussian(self, other) -> Union[State, float]: return out - def _measure_fock(self, other) -> Union[State, float]: + def _measure_fock(self, other) -> State | float: if len(self.modes) > 1: raise NotImplementedError( "Multimode Homodyne sampling for Fock representation is not yet implemented." diff --git a/mrmustard/lab/gates.py b/mrmustard/lab/gates.py index c4712c852..9360bdb77 100644 --- a/mrmustard/lab/gates.py +++ b/mrmustard/lab/gates.py @@ -18,7 +18,9 @@ This module defines gates and operations that can be applied to quantum modes to construct a quantum circuit. 
""" -from typing import List, Optional, Sequence, Tuple, Union +from __future__ import annotations + +from typing import Sequence import numpy as np from mrmustard import settings @@ -79,13 +81,13 @@ class Dgate(Unitary): def __init__( self, - x: Union[float, List[float]] = 0.0, - y: Union[float, List[float]] = 0.0, + x: float | list[float] = 0.0, + y: float | list[float] = 0.0, x_trainable: bool = False, y_trainable: bool = False, - x_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - y_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - modes: Optional[List[int]] = None, + x_bounds: tuple[float | None, float | None] = (None, None), + y_bounds: tuple[float | None, float | None] = (None, None), + modes: list[int] | None = None, ): m = max(len(math.atleast_1d(x)), len(math.atleast_1d(y))) super().__init__( @@ -101,8 +103,8 @@ def d_vector(self): def U( self, - cutoffs: Optional[Sequence[int]] = None, - shape: Optional[Sequence[int]] = None, + cutoffs: Sequence[int] | None = None, + shape: Sequence[int] | None = None, ): r"""Returns the unitary representation of the Displacement gate using the Laguerre polynomials. @@ -183,13 +185,13 @@ class Sgate(Unitary): def __init__( self, - r: Union[float, list[float]] = 0.0, - phi: Union[float, list[float]] = 0.0, + r: float | list[float] = 0.0, + phi: float | list[float] = 0.0, r_trainable: bool = False, phi_trainable: bool = False, - r_bounds: Tuple[Optional[float], Optional[float]] = (0.0, None), - phi_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - modes: Optional[list[int]] = None, + r_bounds: tuple[float | None, float | None] = (0.0, None), + phi_bounds: tuple[float | None, float | None] = (None, None), + modes: list[int] | None = None, ): super().__init__( modes=modes or list(range(len(math.atleast_1d(r)))), # type: ignore @@ -200,8 +202,8 @@ def __init__( def U( self, - cutoffs: Optional[Sequence[int]] = None, - shape: Optional[Sequence[int]] = None, + cutoffs: Sequence[int] | None = None, + shape: Sequence[int] | None = None, ): r"""Returns the unitary representation of the Squeezing gate. @@ -282,10 +284,10 @@ class Rgate(Unitary): def __init__( self, - angle: Union[float, list[float]] = 0.0, + angle: float | list[float] = 0.0, angle_trainable: bool = False, - angle_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - modes: Optional[list[int]] = None, + angle_bounds: tuple[float | None, float | None] = (None, None), + modes: list[int] | None = None, ): super().__init__( modes=modes or list(range(len(math.atleast_1d(angle)))), # type: ignore @@ -299,8 +301,8 @@ def X_matrix(self): def U( self, - cutoffs: Optional[Sequence[int]] = None, - shape: Optional[Sequence[int]] = None, + cutoffs: Sequence[int] | None = None, + shape: Sequence[int] | None = None, diag_only=False, ): r"""Returns the unitary representation of the Rotation gate. 
@@ -377,10 +379,10 @@ class Pgate(Unitary): def __init__( self, - shearing: Union[Optional[float], Optional[list[float]]] = 0.0, + shearing: float | list[float] | None = 0.0, shearing_trainable: bool = False, - shearing_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - modes: Optional[list[int]] = None, + shearing_bounds: tuple[float | None, float | None] = (None, None), + modes: list[int] | None = None, ): super().__init__( modes=modes or list(range(len(math.atleast_1d(shearing)))), @@ -414,10 +416,10 @@ class CXgate(Unitary): def __init__( self, - s: Optional[float] = 0.0, + s: float | None = 0.0, s_trainable: bool = False, - s_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - modes: Optional[List[int]] = None, + s_bounds: tuple[float | None, float | None] = (None, None), + modes: list[int] | None = None, ): super().__init__( modes=modes or [0, 1], @@ -449,10 +451,10 @@ class CZgate(Unitary): def __init__( self, - s: Optional[float] = 0.0, + s: float | None = 0.0, s_trainable: bool = False, - s_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - modes: Optional[List[int]] = None, + s_bounds: tuple[float | None, float | None] = (None, None), + modes: list[int] | None = None, ): super().__init__( modes=modes or [0, 1], @@ -491,9 +493,9 @@ def __init__( phi: float = 0.0, theta_trainable: bool = False, phi_trainable: bool = False, - theta_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - phi_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - modes: Optional[list[int]] = None, + theta_bounds: tuple[float | None, float | None] = (None, None), + phi_bounds: tuple[float | None, float | None] = (None, None), + modes: list[int] | None = None, ): super().__init__( modes=modes or [0, 1], # type: ignore @@ -504,8 +506,8 @@ def __init__( def U( self, - cutoffs: Optional[List[int]] = None, - shape: Optional[Sequence[int]] = None, + cutoffs: list[int] | None = None, + shape: Sequence[int] | None = None, method=None, ): r"""Returns the unitary representation of the beam splitter. 
@@ -587,10 +589,10 @@ def __init__( phi_b: float = 0.0, phi_a_trainable: bool = False, phi_b_trainable: bool = False, - phi_a_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - phi_b_bounds: Tuple[Optional[float], Optional[float]] = (None, None), + phi_a_bounds: tuple[float | None, float | None] = (None, None), + phi_b_bounds: tuple[float | None, float | None] = (None, None), internal: bool = False, - modes: Optional[List[int]] = None, + modes: list[int] | None = None, ): super().__init__( modes=modes or [0, 1], @@ -636,9 +638,9 @@ def __init__( phi: float = 0.0, r_trainable: bool = False, phi_trainable: bool = False, - r_bounds: Tuple[Optional[float], Optional[float]] = (0.0, None), - phi_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - modes: Optional[List[int]] = None, + r_bounds: tuple[float | None, float | None] = (0.0, None), + phi_bounds: tuple[float | None, float | None] = (None, None), + modes: list[int] | None = None, ): super().__init__( modes=modes or [0, 1], @@ -675,9 +677,9 @@ class Interferometer(Unitary): def __init__( self, num_modes: int, - unitary: Optional[ComplexMatrix] = None, + unitary: ComplexMatrix | None = None, unitary_trainable: bool = False, - modes: Optional[list[int]] = None, + modes: list[int] | None = None, ): if modes is not None and num_modes != len(modes): raise ValueError(f"Invalid number of modes: got {len(modes)}, should be {num_modes}") @@ -729,9 +731,9 @@ class RealInterferometer(Unitary): def __init__( self, num_modes: int, - orthogonal: Optional[RealMatrix] = None, + orthogonal: RealMatrix | None = None, orthogonal_trainable: bool = False, - modes: Optional[List[int]] = None, + modes: list[int] | None = None, ): if modes is not None and (num_modes != len(modes)): raise ValueError(f"Invalid number of modes: got {len(modes)}, should be {num_modes}") @@ -792,9 +794,9 @@ class Ggate(Unitary): def __init__( self, num_modes: int, - symplectic: Optional[RealMatrix] = None, + symplectic: RealMatrix | None = None, symplectic_trainable: bool = False, - modes: Optional[list[int]] = None, + modes: list[int] | None = None, ): if modes is not None and (num_modes != len(modes)): raise ValueError(f"Invalid number of modes: got {len(modes)}, should be {num_modes}") @@ -873,13 +875,13 @@ class Attenuator(Channel): def __init__( self, - transmissivity: Union[Optional[float], Optional[List[float]]] = 1.0, + transmissivity: float | list[float] | None = 1.0, nbar: float = 0.0, transmissivity_trainable: bool = False, nbar_trainable: bool = False, - transmissivity_bounds: Tuple[Optional[float], Optional[float]] = (0.0, 1.0), - nbar_bounds: Tuple[Optional[float], Optional[float]] = (0.0, None), - modes: Optional[List[int]] = None, + transmissivity_bounds: tuple[float | None, float | None] = (0.0, 1.0), + nbar_bounds: tuple[float | None, float | None] = (0.0, None), + modes: list[int] | None = None, ): super().__init__( modes=modes or list(range(len(math.atleast_1d(transmissivity)))), @@ -936,13 +938,13 @@ class Amplifier(Channel): def __init__( self, - gain: Union[Optional[float], Optional[List[float]]] = 1.0, + gain: float | list[float] | None = 1.0, nbar: float = 0.0, gain_trainable: bool = False, nbar_trainable: bool = False, - gain_bounds: Tuple[Optional[float], Optional[float]] = (1.0, None), - nbar_bounds: Tuple[Optional[float], Optional[float]] = (0.0, None), - modes: Optional[list[int]] = None, + gain_bounds: tuple[float | None, float | None] = (1.0, None), + nbar_bounds: tuple[float | None, float | None] = (0.0, None), + modes: 
list[int] | None = None, ): super().__init__( modes=modes or list(range(len(math.atleast_1d(gain)))), @@ -993,10 +995,10 @@ class AdditiveNoise(Channel): def __init__( self, - noise: Union[float, list[float]] = 0.0, + noise: float | list[float] = 0.0, noise_trainable: bool = False, - noise_bounds: Tuple[Optional[float], Optional[float]] = (0.0, None), - modes: Optional[list[int]] = None, + noise_bounds: tuple[float | None, float | None] = (0.0, None), + modes: list[int] | None = None, ): super().__init__( modes=modes or list(range(len(math.atleast_1d(noise)))), @@ -1023,10 +1025,10 @@ class PhaseNoise(Channel): def __init__( self, - phase_stdev: Union[Optional[float], Optional[List[float]]] = 0.0, + phase_stdev: float | list[float] | None = 0.0, phase_stdev_trainable: bool = False, - phase_stdev_bounds: Tuple[Optional[float], Optional[float]] = (0.0, None), - modes: Optional[List[int]] = None, + phase_stdev_bounds: tuple[float | None, float | None] = (0.0, None), + modes: list[int] | None = None, ): super().__init__( modes=modes or [0], diff --git a/mrmustard/lab/states.py b/mrmustard/lab/states.py index 43f29b0f6..60dfa493e 100644 --- a/mrmustard/lab/states.py +++ b/mrmustard/lab/states.py @@ -16,7 +16,9 @@ This module implements the quantum states upon which a quantum circuits acts on. """ -from typing import List, Optional, Sequence, Tuple, Union +from __future__ import annotations + +from typing import Sequence from mrmustard import math, settings from mrmustard.math.parameter_set import ParameterSet @@ -85,14 +87,14 @@ class Coherent(State): def __init__( self, - x: Union[Optional[float], Optional[List[float]]] = 0.0, - y: Union[Optional[float], Optional[List[float]]] = 0.0, + x: float | list[float] | None = 0.0, + y: float | list[float] | None = 0.0, x_trainable: bool = False, y_trainable: bool = False, - x_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - y_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - modes: Optional[Sequence[int]] = None, - cutoffs: Optional[Sequence[int]] = None, + x_bounds: tuple[float | None, float | None] = (None, None), + y_bounds: tuple[float | None, float | None] = (None, None), + modes: Sequence[int] | None = None, + cutoffs: Sequence[int] | None = None, normalize: bool = False, ): self._normalize = normalize @@ -146,14 +148,14 @@ class SqueezedVacuum(State): def __init__( self, - r: Union[Scalar, Vector] = 0.0, - phi: Union[Scalar, Vector] = 0.0, + r: Scalar | Vector = 0.0, + phi: Scalar | Vector = 0.0, r_trainable: bool = False, phi_trainable: bool = False, - r_bounds: Tuple[Optional[float], Optional[float]] = (0, None), - phi_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - modes: Optional[Sequence[int]] = None, - cutoffs: Optional[Sequence[int]] = None, + r_bounds: tuple[float | None, float | None] = (0, None), + phi_bounds: tuple[float | None, float | None] = (None, None), + modes: Sequence[int] | None = None, + cutoffs: Sequence[int] | None = None, normalize: bool = False, ): self._modes = modes @@ -198,14 +200,14 @@ class TMSV(State): def __init__( self, - r: Union[Scalar, Vector] = 0.0, - phi: Union[Scalar, Vector] = 0.0, + r: Scalar | Vector = 0.0, + phi: Scalar | Vector = 0.0, r_trainable: bool = False, phi_trainable: bool = False, - r_bounds: Tuple[Optional[float], Optional[float]] = (0, None), - phi_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - modes: Optional[Sequence[int]] = (0, 1), - cutoffs: Optional[Sequence[int]] = None, + r_bounds: tuple[float | None, float | None] 
= (0, None), + phi_bounds: tuple[float | None, float | None] = (None, None), + modes: Sequence[int] | None = (0, 1), + cutoffs: Sequence[int] | None = None, normalize: bool = False, ): self._normalize = normalize @@ -251,11 +253,11 @@ class Thermal(State): def __init__( self, - nbar: Union[Scalar, Vector] = 0.0, + nbar: Scalar | Vector = 0.0, nbar_trainable: bool = False, - nbar_bounds: Tuple[Optional[float], Optional[float]] = (0, None), - modes: Optional[Sequence[int]] = None, - cutoffs: Optional[Sequence[int]] = None, + nbar_bounds: tuple[float | None, float | None] = (0, None), + modes: Sequence[int] | None = None, + cutoffs: Sequence[int] | None = None, normalize: bool = False, ): self._modes = modes @@ -317,20 +319,20 @@ class DisplacedSqueezed(State): def __init__( self, - r: Union[Scalar, Vector] = 0.0, - phi: Union[Scalar, Vector] = 0.0, - x: Union[Scalar, Vector] = 0.0, - y: Union[Scalar, Vector] = 0.0, + r: Scalar | Vector = 0.0, + phi: Scalar | Vector = 0.0, + x: Scalar | Vector = 0.0, + y: Scalar | Vector = 0.0, r_trainable: bool = False, phi_trainable: bool = False, x_trainable: bool = False, y_trainable: bool = False, - r_bounds: Tuple[Optional[float], Optional[float]] = (0, None), - phi_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - x_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - y_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - modes: Optional[Sequence[int]] = None, - cutoffs: Optional[Sequence[int]] = None, + r_bounds: tuple[float | None, float | None] = (0, None), + phi_bounds: tuple[float | None, float | None] = (None, None), + x_bounds: tuple[float | None, float | None] = (None, None), + y_bounds: tuple[float | None, float | None] = (None, None), + modes: Sequence[int] | None = None, + cutoffs: Sequence[int] | None = None, normalize: bool = False, ): self._modes = modes @@ -392,9 +394,9 @@ def __init__( eigenvalues: Vector = None, symplectic_trainable: bool = False, eigenvalues_trainable: bool = False, - eigenvalues_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - modes: List[int] = None, - cutoffs: Optional[Sequence[int]] = None, + eigenvalues_bounds: tuple[float | None, float | None] = (None, None), + modes: list[int] = None, + cutoffs: Sequence[int] | None = None, normalize: bool = False, ): if symplectic is None: @@ -447,8 +449,8 @@ class Fock(State): def __init__( self, n: Sequence[int], - modes: Sequence[int] = None, - cutoffs: Sequence[int] = None, + modes: Sequence[int] | None = None, + cutoffs: Sequence[int] | None = None, normalize: bool = False, ): super().__init__(ket=fock.fock_state(n), cutoffs=cutoffs) diff --git a/mrmustard/lab/utils.py b/mrmustard/lab/utils.py index 9a7eb3f53..cf7f242dc 100644 --- a/mrmustard/lab/utils.py +++ b/mrmustard/lab/utils.py @@ -17,7 +17,10 @@ """ This module contains the utility functions used by the classes in ``mrmustard.lab``. 
""" -from typing import Callable, Optional, Tuple + +from __future__ import annotations + +from typing import Callable from mrmustard.math.parameters import update_euclidean, Constant, Variable @@ -26,7 +29,7 @@ def make_parameter( is_trainable: bool, value: any, name: str, - bounds: Tuple[Optional[float], Optional[float]], + bounds: tuple[float | None, float | None], update_fn: Callable = update_euclidean, ): r""" diff --git a/mrmustard/lab_dev/circuit_components.py b/mrmustard/lab_dev/circuit_components.py index 81ba571d1..4c53122c3 100644 --- a/mrmustard/lab_dev/circuit_components.py +++ b/mrmustard/lab_dev/circuit_components.py @@ -21,7 +21,7 @@ from inspect import signature from pydoc import locate -from typing import Optional, Sequence, Union, Any +from typing import Sequence import numbers from functools import cached_property @@ -66,9 +66,9 @@ class CircuitComponent: def __init__( self, - representation: Optional[Bargmann | Fock] = None, + representation: Bargmann | Fock | None = None, wires: Wires | Sequence[tuple[int]] | None = None, - name: Optional[str] = None, + name: str | None = None, ) -> None: self._name = name self._parameter_set = ParameterSet() @@ -107,7 +107,7 @@ def __init__( if self._representation: self._representation = self._representation.reorder(tuple(perm)) - def _serialize(self) -> tuple[dict[str, Any], dict[str, ArrayLike]]: + def _serialize(self) -> tuple[dict[str, any], dict[str, ArrayLike]]: """ Inner serialization to be used by Circuit.serialize(). @@ -185,7 +185,7 @@ def dual(self) -> CircuitComponent: return ret @cached_property - def manual_shape(self) -> list[Optional[int]]: + def manual_shape(self) -> list[int | None]: r""" The shape of this Component in the Fock representation. If not manually set, it is a list of M ``None``s where M is the number of wires of the component. @@ -257,7 +257,7 @@ def from_bargmann( modes_in_bra: Sequence[int] = (), modes_out_ket: Sequence[int] = (), modes_in_ket: Sequence[int] = (), - name: Optional[str] = None, + name: str | None = None, ) -> CircuitComponent: r""" Initializes a ``CircuitComponent`` object from its Bargmann (A,b,c) parametrization. @@ -286,7 +286,7 @@ def from_quadrature( modes_in_ket: Sequence[int], triple: tuple, phi: float = 0.0, - name: Optional[str] = None, + name: str | None = None, ) -> CircuitComponent: r""" Returns a circuit component from the given triple (A,b,c) that parametrizes the @@ -321,7 +321,7 @@ def _from_attributes( cls, representation: Representation, wires: Wires, - name: Optional[str] = None, + name: str | None = None, ) -> CircuitComponent: r""" Initializes a circuit component from a ``Representation``, a set of ``Wires``, a name. @@ -395,7 +395,7 @@ def bargmann_triple( except AttributeError as e: raise AttributeError("No Bargmann data for this component.") from e - def fock(self, shape: Optional[int | Sequence[int]] = None, batched=False) -> ComplexTensor: + def fock(self, shape: int | Sequence[int] | None = None, batched=False) -> ComplexTensor: r""" Returns an array representation of this component in the Fock basis with the given shape. 
If the shape is not given, it defaults to the ``auto_shape`` of the component if it is @@ -526,7 +526,7 @@ def to_fock(self, shape: int | Sequence[int] | None = None) -> CircuitComponent: ret = self._from_attributes(fock, self.wires, self.name) return ret - def _add_parameter(self, parameter: Union[Constant, Variable]): + def _add_parameter(self, parameter: Constant | Variable): r""" Adds a parameter to this circuit component and makes it accessible as an attribute. @@ -558,7 +558,7 @@ def _getitem_builtin(self, modes: set[int]): kwargs = self.parameter_set[items].to_dict() return self.__class__(modes=modes, **kwargs) - def _light_copy(self, wires: Optional[Wires] = None) -> CircuitComponent: + def _light_copy(self, wires: Wires | None = None) -> CircuitComponent: r""" Creates a "light" copy of this component by referencing its __dict__, except for the wires, which are a new object or the given one. diff --git a/mrmustard/lab_dev/circuit_components_utils/b_to_q.py b/mrmustard/lab_dev/circuit_components_utils/b_to_q.py index bbb6c58b9..ff4ebfee1 100644 --- a/mrmustard/lab_dev/circuit_components_utils/b_to_q.py +++ b/mrmustard/lab_dev/circuit_components_utils/b_to_q.py @@ -42,7 +42,7 @@ class BtoQ(Operation): def __init__( self, modes: Sequence[int], - phi: float, + phi: float = 0.0, ): repr = Bargmann.from_function( fn=triples.bargmann_to_quadrature_Abc, n_modes=len(modes), phi=phi diff --git a/mrmustard/lab_dev/circuits.py b/mrmustard/lab_dev/circuits.py index f7da1c8a6..c0778cac6 100644 --- a/mrmustard/lab_dev/circuits.py +++ b/mrmustard/lab_dev/circuits.py @@ -22,8 +22,7 @@ from collections import defaultdict from pydoc import locate -from typing import Optional, Sequence, Union - +from typing import Sequence from mrmustard import math, settings from mrmustard.utils.serialize import save from mrmustard.lab_dev.circuit_components import CircuitComponent @@ -74,7 +73,7 @@ class Circuit: components: A list of circuit components. 
""" - def __init__(self, components: Optional[Sequence[CircuitComponent]] = None) -> None: + def __init__(self, components: Sequence[CircuitComponent] | None = None) -> None: self._components = [c._light_copy() for c in components] if components else [] self._path = [] @@ -481,7 +480,7 @@ def __iter__(self): """ return iter(self.components) - def __rshift__(self, other: Union[CircuitComponent, Circuit]) -> Circuit: + def __rshift__(self, other: CircuitComponent | Circuit) -> Circuit: r""" Returns a ``Circuit`` that contains all the components of ``self`` as well as ``other`` if ``other`` is a ``CircuitComponent``, or ``other.components`` if diff --git a/mrmustard/lab_dev/states/base.py b/mrmustard/lab_dev/states/base.py index 20a906296..dac82730c 100644 --- a/mrmustard/lab_dev/states/base.py +++ b/mrmustard/lab_dev/states/base.py @@ -25,7 +25,7 @@ from __future__ import annotations -from typing import Optional, Sequence, Union +from typing import Sequence from enum import Enum import warnings @@ -131,7 +131,7 @@ def from_bargmann( cls, modes: Sequence[int], triple: tuple[ComplexMatrix, ComplexVector, complex], - name: Optional[str] = None, + name: str | None = None, ) -> State: r""" Initializes a state of type ``cls`` from an ``(A, b, c)`` triple @@ -170,7 +170,7 @@ def from_fock( cls, modes: Sequence[int], array: ComplexTensor, - name: Optional[str] = None, + name: str | None = None, batched: bool = False, ) -> State: r""" @@ -212,8 +212,8 @@ def from_phase_space( modes: Sequence[int], cov: ComplexMatrix, means: ComplexMatrix, - name: Optional[str] = None, - atol_purity: Optional[float] = 1e-5, + name: str | None = None, + atol_purity: float | None = 1e-5, ) -> Ket | DM: # pylint: disable=abstract-method r""" Initializes a state from the covariance matrix and the vector of means of a state in @@ -249,7 +249,7 @@ def from_quadrature( modes: Sequence[int], triple: tuple[ComplexMatrix, ComplexVector, complex], phi: float = 0.0, - name: Optional[str] = None, + name: str | None = None, ) -> State: r""" Initializes a state from a triple (A,b,c) that parametrizes the wavefunction @@ -337,7 +337,7 @@ def visualize_2d( resolution: int = 200, colorscale: str = "RdBu", return_fig: bool = False, - ) -> Union[go.Figure, None]: + ) -> go.Figure | None: r""" 2D visualization of the Wigner function of this state. @@ -461,7 +461,7 @@ def visualize_3d( resolution: int = 200, colorscale: str = "RdBu", return_fig: bool = False, - ) -> Union[go.Figure, None]: + ) -> go.Figure | None: r""" 3D visualization of the Wigner function of this state on a surface plot. @@ -537,9 +537,9 @@ def visualize_3d( def visualize_dm( self, - cutoff: Optional[int] = None, + cutoff: int | None = None, return_fig: bool = False, - ) -> Union[go.Figure, None]: + ) -> go.Figure | None: r""" Plots the absolute value :math:`abs(\rho)` of the density matrix :math:`\rho` of this state on a heatmap. 
@@ -596,8 +596,8 @@ class DM(State): def __init__( self, modes: Sequence[int] = (), - representation: Optional[Bargmann | Fock] = None, - name: Optional[str] = None, + representation: Bargmann | Fock | None = None, + name: str | None = None, ): if representation and representation.ansatz.num_vars != 2 * len(modes): raise ValueError( @@ -654,7 +654,7 @@ def from_phase_space( cls, modes: Sequence[int], triple: tuple, - name: Optional[str] = None, + name: str | None = None, s: float = 0, # pylint: disable=unused-argument ) -> DM: r""" @@ -774,7 +774,7 @@ def __rshift__(self, other: CircuitComponent) -> CircuitComponent: return DM(w.modes, result.representation) return result - def __getitem__(self, modes: Union[int, Sequence[int]]) -> State: + def __getitem__(self, modes: int | Sequence[int]) -> State: r""" Traces out all the modes except those given. The result is returned with modes in increasing order. @@ -869,8 +869,8 @@ class Ket(State): def __init__( self, modes: Sequence[int] = (), - representation: Optional[Bargmann | Fock] = None, - name: Optional[str] = None, + representation: Bargmann | Fock | None = None, + name: str | None = None, ): if representation and representation.ansatz.num_vars != len(modes): raise ValueError( @@ -926,8 +926,8 @@ def from_phase_space( cls, modes: Sequence[int], triple: tuple, - name: Optional[str] = None, - atol_purity: Optional[float] = 1e-5, + name: str | None = None, + atol_purity: float | None = 1e-5, ) -> Ket: cov, means, coeff = triple cov = math.astensor(cov) @@ -1021,7 +1021,7 @@ def expectation(self, operator: CircuitComponent): return result - def __getitem__(self, modes: Union[int, Sequence[int]]) -> State: + def __getitem__(self, modes: int | Sequence[int]) -> State: r""" Reduced density matrix obtained by tracing out all the modes except those in the given ``modes``. Note that the result is returned with modes in increasing order. 
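For reference, the annotation style these diffs converge on is PEP 585 builtin generics (``list``, ``tuple``) and PEP 604 unions (``X | None``) in place of ``typing.List``, ``typing.Tuple``, ``typing.Optional`` and ``typing.Union``. On Python 3.9 the ``X | None`` syntax is only valid in annotations when they are deferred, which is why several modules also gain ``from __future__ import annotations``. A minimal standalone sketch (hypothetical helper, not part of this patch) echoing the ``bounds: tuple[float | None, float | None]`` signatures used throughout:

from __future__ import annotations

from typing import Sequence


def clip_to_bounds(
    value: float,
    bounds: tuple[float | None, float | None] = (None, None),
    modes: Sequence[int] | None = None,
) -> float:
    # Clip ``value`` to the (lower, upper) interval, treating None as unbounded.
    # ``modes`` is unused here and only demonstrates the ``Sequence[int] | None`` pattern.
    lower, upper = bounds
    if lower is not None:
        value = max(value, lower)
    if upper is not None:
        value = min(value, upper)
    return value


assert clip_to_bounds(2.0, (0.0, 1.0)) == 1.0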
diff --git a/mrmustard/lab_dev/states/coherent.py b/mrmustard/lab_dev/states/coherent.py index 9f6f668dd..2b64ee045 100644 --- a/mrmustard/lab_dev/states/coherent.py +++ b/mrmustard/lab_dev/states/coherent.py @@ -18,7 +18,7 @@ from __future__ import annotations -from typing import Optional, Sequence, Tuple, Union +from typing import Sequence from mrmustard.physics.representations import Bargmann from mrmustard.physics import triples @@ -70,12 +70,12 @@ class Coherent(Ket): def __init__( self, modes: Sequence[int], - x: Union[float, Sequence[float]] = 0.0, - y: Union[float, Sequence[float]] = 0.0, + x: float | Sequence[float] = 0.0, + y: float | Sequence[float] = 0.0, x_trainable: bool = False, y_trainable: bool = False, - x_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - y_bounds: Tuple[Optional[float], Optional[float]] = (None, None), + x_bounds: tuple[float | None, float | None] = (None, None), + y_bounds: tuple[float | None, float | None] = (None, None), ): super().__init__(modes=modes, name="Coherent") xs, ys = list(reshape_params(len(modes), x=x, y=y)) diff --git a/mrmustard/lab_dev/states/displaced_squeezed.py b/mrmustard/lab_dev/states/displaced_squeezed.py index 6a7ccf666..83bb74808 100644 --- a/mrmustard/lab_dev/states/displaced_squeezed.py +++ b/mrmustard/lab_dev/states/displaced_squeezed.py @@ -18,7 +18,7 @@ from __future__ import annotations -from typing import Optional, Sequence, Tuple, Union +from typing import Sequence from mrmustard.physics.representations import Bargmann from mrmustard.physics import triples @@ -63,18 +63,18 @@ class DisplacedSqueezed(Ket): def __init__( self, modes: Sequence[int], - x: Union[float, Sequence[float]] = 0.0, - y: Union[float, Sequence[float]] = 0.0, - r: Union[float, Sequence[float]] = 0.0, - phi: Union[float, Sequence[float]] = 0.0, + x: float | Sequence[float] = 0.0, + y: float | Sequence[float] = 0.0, + r: float | Sequence[float] = 0.0, + phi: float | Sequence[float] = 0.0, x_trainable: bool = False, y_trainable: bool = False, r_trainable: bool = False, phi_trainable: bool = False, - x_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - y_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - r_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - phi_bounds: Tuple[Optional[float], Optional[float]] = (None, None), + x_bounds: tuple[float | None, float | None] = (None, None), + y_bounds: tuple[float | None, float | None] = (None, None), + r_bounds: tuple[float | None, float | None] = (None, None), + phi_bounds: tuple[float | None, float | None] = (None, None), ): super().__init__(modes=modes, name="DisplacedSqueezed") params = reshape_params(len(modes), x=x, y=y, r=r, phi=phi) diff --git a/mrmustard/lab_dev/states/quadrature_eigenstate.py b/mrmustard/lab_dev/states/quadrature_eigenstate.py index 17ba16434..f7f6b89d2 100644 --- a/mrmustard/lab_dev/states/quadrature_eigenstate.py +++ b/mrmustard/lab_dev/states/quadrature_eigenstate.py @@ -18,7 +18,7 @@ from __future__ import annotations -from typing import Sequence, Tuple +from typing import Sequence import numpy as np @@ -60,8 +60,8 @@ def __init__( phi: float | Sequence[float] = 0.0, x_trainable: bool = False, phi_trainable: bool = False, - x_bounds: Tuple[float | None, float | None] = (None, None), - phi_bounds: Tuple[float | None, float | None] = (None, None), + x_bounds: tuple[float | None, float | None] = (None, None), + phi_bounds: tuple[float | None, float | None] = (None, None), ): super().__init__(modes=modes, 
name="QuadratureEigenstate") xs, phis = list(reshape_params(len(modes), x=x, phi=phi)) diff --git a/mrmustard/lab_dev/states/squeezed_vacuum.py b/mrmustard/lab_dev/states/squeezed_vacuum.py index e8bc3e4c2..79ee503a0 100644 --- a/mrmustard/lab_dev/states/squeezed_vacuum.py +++ b/mrmustard/lab_dev/states/squeezed_vacuum.py @@ -18,7 +18,7 @@ from __future__ import annotations -from typing import Optional, Sequence, Tuple, Union +from typing import Sequence from mrmustard.physics.representations import Bargmann from mrmustard.physics import triples @@ -57,12 +57,12 @@ class SqueezedVacuum(Ket): def __init__( self, modes: Sequence[int], - r: Union[float, Sequence[float]] = 0.0, - phi: Union[float, Sequence[float]] = 0.0, + r: float | Sequence[float] = 0.0, + phi: float | Sequence[float] = 0.0, r_trainable: bool = False, phi_trainable: bool = False, - r_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - phi_bounds: Tuple[Optional[float], Optional[float]] = (None, None), + r_bounds: tuple[float | None, float | None] = (None, None), + phi_bounds: tuple[float | None, float | None] = (None, None), ): super().__init__(modes=modes, name="SqueezedVacuum") rs, phis = list(reshape_params(len(modes), r=r, phi=phi)) diff --git a/mrmustard/lab_dev/states/thermal.py b/mrmustard/lab_dev/states/thermal.py index fce981b9b..bce628435 100644 --- a/mrmustard/lab_dev/states/thermal.py +++ b/mrmustard/lab_dev/states/thermal.py @@ -18,7 +18,7 @@ from __future__ import annotations -from typing import Optional, Sequence, Tuple, Union +from typing import Sequence from mrmustard.physics.representations import Bargmann from mrmustard.physics import triples @@ -54,9 +54,9 @@ class Thermal(DM): def __init__( self, modes: Sequence[int], - nbar: Union[int, Sequence[int]] = 0, + nbar: int | Sequence[int] = 0, nbar_trainable: bool = False, - nbar_bounds: Tuple[Optional[float], Optional[float]] = (0, None), + nbar_bounds: tuple[float | None, float | None] = (0, None), ) -> None: super().__init__(modes=modes, name="Thermal") (nbars,) = list(reshape_params(len(modes), nbar=nbar)) diff --git a/mrmustard/lab_dev/states/two_mode_squeezed_vacuum.py b/mrmustard/lab_dev/states/two_mode_squeezed_vacuum.py index b002aba13..e04f74686 100644 --- a/mrmustard/lab_dev/states/two_mode_squeezed_vacuum.py +++ b/mrmustard/lab_dev/states/two_mode_squeezed_vacuum.py @@ -18,8 +18,6 @@ from __future__ import annotations -from typing import Optional, Tuple - from mrmustard.physics.representations import Bargmann from mrmustard.physics import triples from .base import Ket @@ -54,13 +52,13 @@ class TwoModeSqueezedVacuum(Ket): def __init__( self, - modes: Tuple[int, int], + modes: tuple[int, int], r: float = 0.0, phi: float = 0.0, r_trainable: bool = False, phi_trainable: bool = False, - r_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - phi_bounds: Tuple[Optional[float], Optional[float]] = (None, None), + r_bounds: tuple[float | None, float | None] = (None, None), + phi_bounds: tuple[float | None, float | None] = (None, None), ): super().__init__(modes=modes, name="TwoModeSqueezedVacuum") rs, phis = list(reshape_params(int(len(modes) / 2), r=r, phi=phi)) diff --git a/mrmustard/lab_dev/transformations/amplifier.py b/mrmustard/lab_dev/transformations/amplifier.py index bf3cf7a43..e1e7f8cf1 100644 --- a/mrmustard/lab_dev/transformations/amplifier.py +++ b/mrmustard/lab_dev/transformations/amplifier.py @@ -18,7 +18,7 @@ from __future__ import annotations -from typing import Optional, Sequence, Tuple, Union +from typing import 
Sequence from .base import Channel from ...physics.representations import Bargmann @@ -79,9 +79,9 @@ class Amplifier(Channel): def __init__( self, modes: Sequence[int], - gain: Union[Optional[float], Optional[list[float]]] = 1.0, + gain: float | Sequence[float] | None = 1.0, gain_trainable: bool = False, - gain_bounds: Tuple[Optional[float], Optional[float]] = (1.0, None), + gain_bounds: tuple[float | None, float | None] = (1.0, None), ): super().__init__(modes_out=modes, modes_in=modes, name="Amp") (gs,) = list(reshape_params(len(modes), gain=gain)) diff --git a/mrmustard/lab_dev/transformations/attenuator.py b/mrmustard/lab_dev/transformations/attenuator.py index b776a0f80..72b88864e 100644 --- a/mrmustard/lab_dev/transformations/attenuator.py +++ b/mrmustard/lab_dev/transformations/attenuator.py @@ -18,7 +18,7 @@ from __future__ import annotations -from typing import Optional, Sequence, Tuple, Union +from typing import Sequence from .base import Channel from ...physics.representations import Bargmann @@ -80,9 +80,9 @@ class Attenuator(Channel): def __init__( self, modes: Sequence[int], - transmissivity: Union[Optional[float], Optional[list[float]]] = 1.0, + transmissivity: float | Sequence[float] | None = 1.0, transmissivity_trainable: bool = False, - transmissivity_bounds: Tuple[Optional[float], Optional[float]] = (0.0, 1.0), + transmissivity_bounds: tuple[float | None, float | None] = (0.0, 1.0), ): super().__init__(modes_out=modes, modes_in=modes, name="Att") (etas,) = list(reshape_params(len(modes), transmissivity=transmissivity)) diff --git a/mrmustard/lab_dev/transformations/base.py b/mrmustard/lab_dev/transformations/base.py index 591a1f69a..68c0ba02f 100644 --- a/mrmustard/lab_dev/transformations/base.py +++ b/mrmustard/lab_dev/transformations/base.py @@ -25,7 +25,7 @@ # pylint: disable=import-outside-toplevel from __future__ import annotations -from typing import Optional, Sequence +from typing import Sequence from mrmustard import math, settings from mrmustard.physics.representations import Bargmann, Fock from mrmustard.physics.bargmann import au2Symplectic, symplectic2Au @@ -46,7 +46,7 @@ def from_quadrature( modes_in: Sequence[int], triple: tuple, phi: float = 0, - name: Optional[str] = None, + name: str | None = None, ) -> Operation: r""" Initialize an Operation from the given quadrature triple (A, b, c). @@ -67,7 +67,7 @@ def from_bargmann( modes_out: Sequence[int], modes_in: Sequence[int], triple: tuple, - name: Optional[str] = None, + name: str | None = None, ) -> Operation: r""" Initialize a Transformation from the given Bargmann triple (A,b,c) @@ -123,8 +123,8 @@ def __init__( self, modes_out: tuple[int, ...] = (), modes_in: tuple[int, ...] = (), - representation: Optional[Bargmann | Fock] = None, - name: Optional[str] = None, + representation: Bargmann | Fock | None = None, + name: str | None = None, ): super().__init__( representation=representation, @@ -228,8 +228,8 @@ def __init__( self, modes_out: tuple[int, ...] = (), modes_in: tuple[int, ...] 
= (), - representation: Optional[Bargmann | Fock] = None, - name: Optional[str] = None, + representation: Bargmann | Fock | None = None, + name: str | None = None, ): super().__init__( representation=representation, diff --git a/mrmustard/lab_dev/transformations/bsgate.py b/mrmustard/lab_dev/transformations/bsgate.py index db1d67ddd..d6fb8ea95 100644 --- a/mrmustard/lab_dev/transformations/bsgate.py +++ b/mrmustard/lab_dev/transformations/bsgate.py @@ -18,8 +18,6 @@ from __future__ import annotations -from typing import Optional, Tuple - from .base import Unitary from ...physics.representations import Bargmann from ...physics import triples @@ -90,13 +88,13 @@ class BSgate(Unitary): def __init__( self, - modes: Tuple[int, int], + modes: tuple[int, int], theta: float = 0.0, phi: float = 0.0, theta_trainable: bool = False, phi_trainable: bool = False, - theta_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - phi_bounds: Tuple[Optional[float], Optional[float]] = (None, None), + theta_bounds: tuple[float | None, float | None] = (None, None), + phi_bounds: tuple[float | None, float | None] = (None, None), ): if len(modes) != 2: raise ValueError(f"Expected a pair of modes, found {modes}.") diff --git a/mrmustard/lab_dev/transformations/dgate.py b/mrmustard/lab_dev/transformations/dgate.py index 08e00b51b..bf3de06d2 100644 --- a/mrmustard/lab_dev/transformations/dgate.py +++ b/mrmustard/lab_dev/transformations/dgate.py @@ -18,7 +18,7 @@ from __future__ import annotations -from typing import Optional, Sequence, Tuple, Union +from typing import Sequence from .base import Unitary from ...physics.representations import Bargmann @@ -80,12 +80,12 @@ class Dgate(Unitary): def __init__( self, modes: Sequence[int] = None, - x: Union[float, Sequence[float]] = 0.0, - y: Union[float, Sequence[float]] = 0.0, + x: float | Sequence[float] = 0.0, + y: float | Sequence[float] = 0.0, x_trainable: bool = False, y_trainable: bool = False, - x_bounds: Tuple[Optional[float], Optional[float]] = (None, None), - y_bounds: Tuple[Optional[float], Optional[float]] = (None, None), + x_bounds: tuple[float | None, float | None] = (None, None), + y_bounds: tuple[float | None, float | None] = (None, None), ) -> None: super().__init__(modes_out=modes, modes_in=modes, name="Dgate") xs, ys = list(reshape_params(len(modes), x=x, y=y)) diff --git a/mrmustard/lab_dev/transformations/fockdamping.py b/mrmustard/lab_dev/transformations/fockdamping.py index 19859d7df..8bcf98991 100644 --- a/mrmustard/lab_dev/transformations/fockdamping.py +++ b/mrmustard/lab_dev/transformations/fockdamping.py @@ -18,7 +18,7 @@ from __future__ import annotations -from typing import Sequence, Tuple +from typing import Sequence from .base import Operation from ...physics.representations import Bargmann @@ -46,7 +46,7 @@ class FockDamping(Operation): >>> assert operator.modes == [0] >>> assert np.allclose(operator.damping.value, [0.1, 0.1]) >>> assert output_state.L2_norm < 1 - + Args: modes: The modes this gate is applied to. damping: The damping parameter. @@ -55,7 +55,7 @@ class FockDamping(Operation): .. details:: - Its ``(A,b,c)`` triple is given by + Its ``(A,b,c)`` triple is given by .. 
math:: A &= e^{-\beta}\begin{bmatrix} @@ -72,7 +72,7 @@ def __init__( modes: Sequence[int], damping: float | Sequence[float] | None = 0.0, damping_trainable: bool = False, - damping_bounds: Tuple[float | None, float | None] = (0.0, None), + damping_bounds: tuple[float | None, float | None] = (0.0, None), ): super().__init__(modes_out=modes, modes_in=modes, name="FockDamping") (betas,) = list(reshape_params(len(modes), damping=damping)) diff --git a/mrmustard/lab_dev/transformations/rgate.py b/mrmustard/lab_dev/transformations/rgate.py index 173852bbe..405f225d4 100644 --- a/mrmustard/lab_dev/transformations/rgate.py +++ b/mrmustard/lab_dev/transformations/rgate.py @@ -18,7 +18,7 @@ from __future__ import annotations -from typing import Optional, Sequence, Tuple, Union +from typing import Sequence from .base import Unitary from ...physics.representations import Bargmann @@ -55,9 +55,9 @@ class Rgate(Unitary): def __init__( self, modes: Sequence[int], - phi: Union[float, list[float]] = 0.0, + phi: float | Sequence[float] = 0.0, phi_trainable: bool = False, - phi_bounds: Tuple[Optional[float], Optional[float]] = (0.0, None), + phi_bounds: tuple[float | None, float | None] = (0.0, None), ): super().__init__(modes_out=modes, modes_in=modes, name="Rgate") (phis,) = list(reshape_params(len(modes), phi=phi)) diff --git a/mrmustard/lab_dev/transformations/s2gate.py b/mrmustard/lab_dev/transformations/s2gate.py index 66924e7e3..1d1f8f756 100644 --- a/mrmustard/lab_dev/transformations/s2gate.py +++ b/mrmustard/lab_dev/transformations/s2gate.py @@ -18,8 +18,6 @@ from __future__ import annotations -from typing import Optional, Tuple - from .base import Unitary from ...physics.representations import Bargmann from ...physics import triples @@ -73,13 +71,13 @@ class S2gate(Unitary): def __init__( self, - modes: Tuple[int, int], + modes: tuple[int, int], r: float = 0.0, phi: float = 0.0, r_trainable: bool = False, phi_trainable: bool = False, - r_bounds: Tuple[Optional[float], Optional[float]] = (0, None), - phi_bounds: Tuple[Optional[float], Optional[float]] = (None, None), + r_bounds: tuple[float | None, float | None] = (0, None), + phi_bounds: tuple[float | None, float | None] = (None, None), ): if len(modes) != 2: raise ValueError(f"Expected a pair of modes, found {modes}.") diff --git a/mrmustard/lab_dev/transformations/sgate.py b/mrmustard/lab_dev/transformations/sgate.py index 4a93d7a6f..6610a6863 100644 --- a/mrmustard/lab_dev/transformations/sgate.py +++ b/mrmustard/lab_dev/transformations/sgate.py @@ -18,7 +18,7 @@ from __future__ import annotations -from typing import Optional, Sequence, Tuple, Union +from typing import Sequence from .base import Unitary from ...physics.representations import Bargmann @@ -83,12 +83,12 @@ class Sgate(Unitary): def __init__( self, modes: Sequence[int], - r: Union[float, list[float]] = 0.0, - phi: Union[float, list[float]] = 0.0, + r: float | Sequence[float] = 0.0, + phi: float | Sequence[float] = 0.0, r_trainable: bool = False, phi_trainable: bool = False, - r_bounds: Tuple[Optional[float], Optional[float]] = (0.0, None), - phi_bounds: Tuple[Optional[float], Optional[float]] = (None, None), + r_bounds: tuple[float | None, float | None] = (0.0, None), + phi_bounds: tuple[float | None, float | None] = (None, None), ): super().__init__(modes_out=modes, modes_in=modes, name="Sgate") rs, phis = list(reshape_params(len(modes), r=r, phi=phi)) diff --git a/mrmustard/lab_dev/utils.py b/mrmustard/lab_dev/utils.py index d660f3b4c..37a57e48b 100644 --- 
a/mrmustard/lab_dev/utils.py +++ b/mrmustard/lab_dev/utils.py @@ -18,7 +18,7 @@ This module contains the utility functions used by the classes in ``mrmustard.lab``. """ -from typing import Callable, Generator, Optional, Tuple +from typing import Callable, Generator from mrmustard import math from mrmustard.math.parameters import update_euclidean, Constant, Variable @@ -28,7 +28,7 @@ def make_parameter( is_trainable: bool, value: any, name: str, - bounds: Tuple[Optional[float], Optional[float]], + bounds: tuple[float | None, float | None], update_fn: Callable = update_euclidean, dtype: any = None, ): diff --git a/mrmustard/math/backend_manager.py b/mrmustard/math/backend_manager.py index e79b5ce3a..74d151428 100644 --- a/mrmustard/math/backend_manager.py +++ b/mrmustard/math/backend_manager.py @@ -14,12 +14,13 @@ """This module contains the backend manager.""" +from __future__ import annotations import importlib.util import sys from functools import lru_cache from itertools import product -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple +from typing import Callable, Sequence import numpy as np from scipy.special import binom @@ -88,7 +89,7 @@ class BackendManager: # pylint: disable=too-many-public-methods, fixme _backend = BackendNumpy() # the configured Euclidean optimizer. - _euclidean_opt: Optional[type] = None + _euclidean_opt: type | None = None # whether or not the backend can be changed _is_immutable = False @@ -97,7 +98,7 @@ def __init__(self) -> None: # binding types and decorators of numpy backend self._bind() - def _apply(self, fn: str, args: Optional[Sequence[Any]] = ()) -> Any: + def _apply(self, fn: str, args: Sequence[any] | None = ()) -> any: r""" Applies a function ``fn`` from the backend in use to the given ``args``. """ @@ -230,7 +231,7 @@ def any(self, array: Tensor) -> bool: """ return self._apply("any", (array,)) - def arange(self, start: int, limit: int = None, delta: int = 1, dtype: Any = None) -> Tensor: + def arange(self, start: int, limit: int = None, delta: int = 1, dtype: any = None) -> Tensor: r"""Returns an array of evenly spaced values within a given interval. Args: @@ -347,7 +348,7 @@ def boolean_mask(self, tensor: Tensor, mask: Tensor) -> Tensor: """ return self._apply("boolean_mask", (tensor, mask)) - def block(self, blocks: List[List[Tensor]], axes=(-2, -1)) -> Tensor: + def block(self, blocks: list[list[Tensor]], axes=(-2, -1)) -> Tensor: r"""Returns a matrix made from the given blocks. Args: @@ -408,9 +409,7 @@ def conj(self, array: Tensor) -> Tensor: """ return self._apply("conj", (array,)) - def constraint_func( - self, bounds: Tuple[Optional[float], Optional[float]] - ) -> Optional[Callable]: + def constraint_func(self, bounds: tuple[float | None, float | None]) -> Callable | None: r"""Returns a constraint function for the given bounds. A constraint function will clip the value to the interval given by the bounds. @@ -432,7 +431,7 @@ def convolution( self, array: Tensor, filters: Tensor, - padding: Optional[str] = None, + padding: str | None = None, data_format="NWC", ) -> Tensor: # TODO: remove strides and data_format? r"""Performs a convolution on array with filters. @@ -598,7 +597,7 @@ def eye_like(self, array: Tensor) -> Tensor: """ return self._apply("eye_like", (array,)) - def from_backend(self, value: Any) -> bool: + def from_backend(self, value: any) -> bool: r"""Whether the given tensor is a tensor of the concrete backend. 
Args: @@ -609,7 +608,7 @@ def from_backend(self, value: Any) -> bool: """ return self._apply("from_backend", (value,)) - def gather(self, array: Tensor, indices: Batch[int], axis: Optional[int] = None) -> Tensor: + def gather(self, array: Tensor, indices: Batch[int], axis: int | None = None) -> Tensor: r"""The values of the array at the given indices. Args: @@ -630,7 +629,7 @@ def gather(self, array: Tensor, indices: Batch[int], axis: Optional[int] = None) ) def hermite_renormalized_batch( - self, A: Tensor, B: Tensor, C: Tensor, shape: Tuple[int] + self, A: Tensor, B: Tensor, C: Tensor, shape: tuple[int] ) -> Tensor: r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor series of :math:`exp(C + Bx + 1/2*Ax^2)` at zero, where the series has :math:`sqrt(n!)` @@ -650,7 +649,7 @@ def hermite_renormalized_batch( return self._apply("hermite_renormalized_batch", (A, B, C, shape)) def hermite_renormalized_diagonal( - self, A: Tensor, B: Tensor, C: Tensor, cutoffs: Tuple[int] + self, A: Tensor, B: Tensor, C: Tensor, cutoffs: tuple[int] ) -> Tensor: r"""Firsts, reorder A and B parameters of Bargmann representation to match conventions in mrmustard.math.compactFock~ Then, calculates the required renormalized multidimensional Hermite polynomial. @@ -658,7 +657,7 @@ def hermite_renormalized_diagonal( return self._apply("hermite_renormalized_diagonal", (A, B, C, cutoffs)) def hermite_renormalized_diagonal_batch( - self, A: Tensor, B: Tensor, C: Tensor, cutoffs: Tuple[int] + self, A: Tensor, B: Tensor, C: Tensor, cutoffs: tuple[int] ) -> Tensor: r"""First, reorder A and B parameters of Bargmann representation to match conventions in mrmustard.math.compactFock~ Then, calculates the required renormalized multidimensional Hermite polynomial. @@ -666,7 +665,7 @@ def hermite_renormalized_diagonal_batch( return self._apply("hermite_renormalized_diagonal_batch", (A, B, C, cutoffs)) def hermite_renormalized_1leftoverMode( - self, A: Tensor, B: Tensor, C: Tensor, cutoffs: Tuple[int] + self, A: Tensor, B: Tensor, C: Tensor, cutoffs: tuple[int] ) -> Tensor: r"""First, reorder A and B parameters of Bargmann representation to match conventions in mrmustard.math.compactFock~ Then, calculate the required renormalized multidimensional Hermite polynomial. @@ -825,7 +824,7 @@ def moveaxis(self, array: Tensor, old: Tensor, new: Tensor) -> Tensor: def new_variable( self, value: Tensor, - bounds: Tuple[Optional[float], Optional[float]], + bounds: tuple[float | None, float | None], name: str, dtype=None, ) -> Tensor: @@ -905,7 +904,7 @@ def outer(self, array1: Tensor, array2: Tensor) -> Tensor: def pad( self, array: Tensor, - paddings: Sequence[Tuple[int, int]], + paddings: Sequence[tuple[int, int]], mode="CONSTANT", constant_values=0, ) -> Tensor: @@ -1181,8 +1180,8 @@ def update_add_tensor(self, tensor: Tensor, indices: Tensor, values: Tensor) -> return self._apply("update_add_tensor", (tensor, indices, values)) def value_and_gradients( - self, cost_fn: Callable, parameters: Dict[str, List[Trainable]] - ) -> Tuple[Tensor, Dict[str, List[Tensor]]]: + self, cost_fn: Callable, parameters: dict[str, list[Trainable]] + ) -> tuple[Tensor, dict[str, list[Tensor]]]: r"""The loss and gradients of the given cost function. 
Args: @@ -1239,7 +1238,7 @@ def map_fn(self, fn: Callable, elements: Tensor) -> Tensor: """ return self._apply("map_fn", (fn, elements)) - def squeeze(self, tensor: Tensor, axis: Optional[List[int]]) -> Tensor: + def squeeze(self, tensor: Tensor, axis: list[int] | None) -> Tensor: """Removes dimensions of size 1 from the shape of a tensor. Args: @@ -1446,7 +1445,7 @@ def J(num_modes: int): return np.block([[O, I], [-I, O]]) def add_at_modes( - self, old: Tensor, new: Optional[Tensor], modes: Sequence[int] + self, old: Tensor, new: Tensor | None, modes: Sequence[int] ) -> Tensor: # NOTE: To be deprecated (XPTensor) """Adds two phase-space tensors (cov matrices, displacement vectors, etc..) on the specified modes.""" if new is None: @@ -1506,7 +1505,7 @@ def right_matmul_at_modes( ) def matvec_at_modes( - self, mat: Optional[Tensor], vec: Tensor, modes: Sequence[int] + self, mat: Tensor | None, vec: Tensor, modes: Sequence[int] ) -> Tensor: # NOTE: To be deprecated (XPTensor) """Matrix-vector multiplication between a phase-space matrix and a vector in the specified modes.""" if mat is None: @@ -1542,7 +1541,7 @@ def binomial_conditional_prob(self, success_prob: Tensor, dim_out: int, dim_in: * self.pow(1.0 - success_prob, self.maximum(in_ - out_, 0.0)) ) - def convolve_probs_1d(self, prob: Tensor, other_probs: List[Tensor]) -> Tensor: + def convolve_probs_1d(self, prob: Tensor, other_probs: list[Tensor]) -> Tensor: """Convolution of a joint probability with a list of single-index probabilities.""" if prob.ndim > 3 or len(other_probs) > 3: diff --git a/mrmustard/math/backend_numpy.py b/mrmustard/math/backend_numpy.py index c56f3e0b4..ede9b3e6e 100644 --- a/mrmustard/math/backend_numpy.py +++ b/mrmustard/math/backend_numpy.py @@ -16,8 +16,10 @@ # pylint: disable = missing-function-docstring, missing-class-docstring, fixme +from __future__ import annotations + from math import lgamma as mlgamma -from typing import List, Optional, Sequence, Tuple, Union +from typing import Sequence import numpy as np import scipy as sp @@ -69,7 +71,7 @@ def any(self, array: np.ndarray) -> np.ndarray: return np.any(array) def arange( - self, start: int, limit: Optional[int] = None, delta: int = 1, dtype=np.float64 + self, start: int, limit: int | None = None, delta: int = 1, dtype=np.float64 ) -> np.ndarray: return np.arange(start, limit, delta, dtype=dtype) @@ -82,7 +84,7 @@ def assign(self, tensor: np.ndarray, value: np.ndarray) -> np.ndarray: tensor = value return tensor - def astensor(self, array: Union[np.ndarray, np.ndarray], dtype=None) -> np.ndarray: + def astensor(self, array: np.ndarray, dtype=None) -> np.ndarray: array = np.array(array) return self.cast(array, dtype=dtype or array.dtype) @@ -98,11 +100,11 @@ def atleast_3d(self, array: np.ndarray, dtype=None) -> np.ndarray: array = array[None, ...] 
return array - def block(self, blocks: List[List[np.ndarray]], axes=(-2, -1)) -> np.ndarray: + def block(self, blocks: list[list[np.ndarray]], axes=(-2, -1)) -> np.ndarray: rows = [self.concat(row, axis=axes[1]) for row in blocks] return self.concat(rows, axis=axes[0]) - def block_diag(self, *blocks: List[np.ndarray]) -> np.ndarray: + def block_diag(self, *blocks: list[np.ndarray]) -> np.ndarray: return sp.linalg.block_diag(*blocks) def boolean_mask(self, tensor: np.ndarray, mask: np.ndarray) -> np.ndarray: @@ -118,7 +120,7 @@ def cast(self, array: np.ndarray, dtype=None) -> np.ndarray: def clip(self, array, a_min, a_max) -> np.ndarray: return np.clip(array, a_min, a_max) - def concat(self, values: List[np.ndarray], axis: int) -> np.ndarray: + def concat(self, values: list[np.ndarray], axis: int) -> np.ndarray: # tf.concat can concatenate lists of scalars, while np.concatenate errors try: return np.concatenate(values, axis) @@ -180,7 +182,7 @@ def set_diag(self, array: np.ndarray, diag: np.ndarray, k: int) -> np.ndarray: return array - def einsum(self, string: str, *tensors) -> Optional[np.ndarray]: + def einsum(self, string: str, *tensors) -> np.ndarray | None: if type(string) is str: return np.einsum(string, *tensors) return None # provide same functionality as numpy.einsum or upgrade to opt_einsum @@ -244,14 +246,14 @@ def minimum(self, a: np.ndarray, b: np.ndarray) -> np.ndarray: return np.minimum(a, b) def moveaxis( - self, array: np.ndarray, old: Union[int, Sequence[int]], new: Union[int, Sequence[int]] + self, array: np.ndarray, old: int | Sequence[int], new: int | Sequence[int] ) -> np.ndarray: return np.moveaxis(array, old, new) def new_variable( self, value, - bounds: Union[Tuple[Optional[float], Optional[float]], None], + bounds: tuple[float | None, float | None] | None, name: str, dtype=np.float64, ): # pylint: disable=unused-argument @@ -276,7 +278,7 @@ def outer(self, array1: np.ndarray, array2: np.ndarray) -> np.ndarray: def pad( self, array: np.ndarray, - paddings: Sequence[Tuple[int, int]], + paddings: Sequence[tuple[int, int]], mode="CONSTANT", constant_values=0, ) -> np.ndarray: @@ -295,7 +297,7 @@ def pow(self, x: np.ndarray, y: float) -> np.ndarray: def kron(self, tensor1: np.ndarray, tensor2: np.ndarray): return np.kron(tensor1, tensor2) - def prod(self, x: np.ndarray, axis: Union[None, int]): + def prod(self, x: np.ndarray, axis: int | None): return np.prod(x, axis=axis) def real(self, array: np.ndarray) -> np.ndarray: @@ -335,7 +337,7 @@ def sum(self, array: np.ndarray, axes: Sequence[int] = None): return ret @Autocast() - def tensordot(self, a: np.ndarray, b: np.ndarray, axes: List[int]) -> np.ndarray: + def tensordot(self, a: np.ndarray, b: np.ndarray, axes: list[int]) -> np.ndarray: return np.tensordot(a, b, axes) def tile(self, array: np.ndarray, repeats: Sequence[int]) -> np.ndarray: @@ -344,7 +346,7 @@ def tile(self, array: np.ndarray, repeats: Sequence[int]) -> np.ndarray: def trace(self, array: np.ndarray, dtype=None) -> np.ndarray: return self.cast(np.trace(array, axis1=-1, axis2=-2), dtype) - def transpose(self, a: np.ndarray, perm: Sequence[int] = None) -> Optional[np.ndarray]: + def transpose(self, a: np.ndarray, perm: Sequence[int] = None) -> np.ndarray | None: if a is None: return None # TODO: remove and address None inputs where tranpose is used return np.transpose(a, axes=perm) @@ -442,7 +444,7 @@ def DefaultEuclideanOptimizer() -> None: return None def hermite_renormalized( - self, A: np.ndarray, B: np.ndarray, C: np.ndarray, shape: Tuple[int] + 
self, A: np.ndarray, B: np.ndarray, C: np.ndarray, shape: tuple[int] ) -> np.ndarray: r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor series of :math:`exp(C + Bx + 1/2*Ax^2)` at zero, where the series has :math:`sqrt(n!)` @@ -477,7 +479,7 @@ def hermite_renormalized( return G def hermite_renormalized_batch( - self, A: np.ndarray, B: np.ndarray, C: np.ndarray, shape: Tuple[int] + self, A: np.ndarray, B: np.ndarray, C: np.ndarray, shape: tuple[int] ) -> np.ndarray: G = vanilla_batch(tuple(shape), A, B, C) return G @@ -487,9 +489,9 @@ def hermite_renormalized_binomial( A: np.ndarray, B: np.ndarray, C: np.ndarray, - shape: Tuple[int], - max_l2: Optional[float], - global_cutoff: Optional[int], + shape: tuple[int], + max_l2: float | None, + global_cutoff: int | None, ) -> np.ndarray: r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor series of :math:`exp(C + Bx + 1/2*Ax^2)` at zero, where the series has :math:`sqrt(n!)` @@ -519,7 +521,7 @@ def hermite_renormalized_binomial( return G - def reorder_AB_bargmann(self, A: np.ndarray, B: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + def reorder_AB_bargmann(self, A: np.ndarray, B: np.ndarray) -> tuple[np.ndarray, np.ndarray]: r"""In mrmustard.math.numba.compactFock~ dimensions of the Fock representation are ordered like [mode0,mode0,mode1,mode1,...] while in mrmustard.physics.bargmann the ordering is [mode0,mode1,...,mode0,mode1,...]. Here we reorder A and B. """ @@ -530,7 +532,7 @@ def reorder_AB_bargmann(self, A: np.ndarray, B: np.ndarray) -> Tuple[np.ndarray, return A, B def hermite_renormalized_diagonal( - self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: Tuple[int] + self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: tuple[int] ) -> np.ndarray: r"""First, reorder A and B parameters of Bargmann representation to match conventions in mrmustard.math.numba.compactFock~ Then, calculate the required renormalized multidimensional Hermite polynomial. @@ -539,7 +541,7 @@ def hermite_renormalized_diagonal( return self.hermite_renormalized_diagonal_reorderedAB(A, B, C, cutoffs=cutoffs) def hermite_renormalized_diagonal_reorderedAB( - self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: Tuple[int] + self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: tuple[int] ) -> np.ndarray: r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor series of :math:`exp(C + Bx - Ax^2)` at zero, where the series has :math:`sqrt(n!)` at the @@ -562,14 +564,14 @@ def hermite_renormalized_diagonal_reorderedAB( return poly0 def hermite_renormalized_diagonal_batch( - self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: Tuple[int] + self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: tuple[int] ) -> np.ndarray: r"""Same as hermite_renormalized_diagonal but works for a batch of different B's.""" A, B = self.reorder_AB_bargmann(A, B) return self.hermite_renormalized_diagonal_reorderedAB_batch(A, B, C, cutoffs=cutoffs) def hermite_renormalized_diagonal_reorderedAB_batch( - self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: Tuple[int] + self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: tuple[int] ) -> np.ndarray: r"""Same as hermite_renormalized_diagonal_reorderedAB but works for a batch of different B's. 
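The ``hermite_renormalized*`` methods touched above all compute coefficients of the same series; spelling out the relation their docstrings describe (the multi-index notation here is ours, not part of the patch):

.. math::

    \exp\left(C + \sum_i B_i z_i + \tfrac{1}{2}\sum_{ij} A_{ij} z_i z_j\right)
    = \sum_{\mathbf{n}} \frac{G_{\mathbf{n}}}{\sqrt{n_1! \cdots n_M!}}\, z_1^{n_1} \cdots z_M^{n_M},

so a call with a given ``shape`` or ``cutoffs`` returns the coefficients :math:`G_{\mathbf{n}}` up to that size, with the :math:`\sqrt{n!}` (rather than :math:`n!`) normalization mentioned in the docstrings.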
@@ -587,7 +589,7 @@ def hermite_renormalized_diagonal_reorderedAB_batch( return poly0 def hermite_renormalized_1leftoverMode( - self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: Tuple[int] + self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: tuple[int] ) -> np.ndarray: r"""First, reorder A and B parameters of Bargmann representation to match conventions in mrmustard.math.numba.compactFock~ Then, calculate the required renormalized multidimensional Hermite polynomial. @@ -596,7 +598,7 @@ def hermite_renormalized_1leftoverMode( return self.hermite_renormalized_1leftoverMode_reorderedAB(A, B, C, cutoffs=cutoffs) def hermite_renormalized_1leftoverMode_reorderedAB( - self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: Tuple[int] + self, A: np.ndarray, B: np.ndarray, C: np.ndarray, cutoffs: tuple[int] ) -> np.ndarray: r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor series of :math:`exp(C + Bx - Ax^2)` at zero, where the series has :math:`sqrt(n!)` at the diff --git a/mrmustard/math/backend_tensorflow.py b/mrmustard/math/backend_tensorflow.py index 85c2a35e9..7fe9da23e 100644 --- a/mrmustard/math/backend_tensorflow.py +++ b/mrmustard/math/backend_tensorflow.py @@ -16,7 +16,8 @@ # pylint: disable = missing-function-docstring, missing-class-docstring, wrong-import-position -from typing import Callable, List, Optional, Sequence, Tuple, Union +from __future__ import annotations +from typing import Callable, Sequence from importlib import metadata import os @@ -91,7 +92,7 @@ def assign(self, tensor: tf.Tensor, value: tf.Tensor) -> tf.Tensor: tensor.assign(value) return tensor - def astensor(self, array: Union[np.ndarray, tf.Tensor], dtype=None) -> tf.Tensor: + def astensor(self, array: np.ndarray | tf.Tensor, dtype=None) -> tf.Tensor: dtype = dtype or np.array(array).dtype.name return tf.convert_to_tensor(array, dtype) @@ -115,7 +116,7 @@ def block_diag(self, mat1: tf.Tensor, mat2: tf.Tensor) -> tf.Tensor: axis=-2, ) - def block(self, blocks: List[List[tf.Tensor]], axes=(-2, -1)) -> tf.Tensor: + def block(self, blocks: list[list[tf.Tensor]], axes=(-2, -1)) -> tf.Tensor: rows = [self.concat(row, axis=axes[1]) for row in blocks] return self.concat(rows, axis=axes[0]) @@ -138,9 +139,7 @@ def concat(self, values: Sequence[tf.Tensor], axis: int) -> tf.Tensor: def conj(self, array: tf.Tensor) -> tf.Tensor: return tf.math.conj(array) - def constraint_func( - self, bounds: Tuple[Optional[float], Optional[float]] - ) -> Optional[Callable]: + def constraint_func(self, bounds: tuple[float | None, float | None]) -> Callable | None: bounds = ( -np.inf if bounds[0] is None else bounds[0], np.inf if bounds[1] is None else bounds[1], @@ -160,7 +159,7 @@ def convolution( self, array: tf.Tensor, filters: tf.Tensor, - padding: Optional[str] = None, + padding: str | None = None, data_format="NWC", ) -> tf.Tensor: padding = padding or "VALID" @@ -246,14 +245,14 @@ def minimum(self, a: tf.Tensor, b: tf.Tensor) -> tf.Tensor: return tf.minimum(a, b) def moveaxis( - self, array: tf.Tensor, old: Union[int, Sequence[int]], new: Union[int, Sequence[int]] + self, array: tf.Tensor, old: int | Sequence[int], new: int | Sequence[int] ) -> tf.Tensor: return tf.experimental.numpy.moveaxis(array, old, new) def new_variable( self, value, - bounds: Union[Tuple[Optional[float], Optional[float]], None], + bounds: tuple[float | None, float | None] | None, name: str, dtype=None, ): @@ -285,7 +284,7 @@ def outer(self, array1: tf.Tensor, array2: tf.Tensor) -> tf.Tensor: def pad( 
self, array: tf.Tensor, - paddings: Sequence[Tuple[int, int]], + paddings: Sequence[tuple[int, int]], mode="CONSTANT", constant_values=0, ) -> tf.Tensor: @@ -302,7 +301,7 @@ def pow(self, x: tf.Tensor, y: float) -> tf.Tensor: def kron(self, tensor1: tf.Tensor, tensor2: tf.Tensor): return tf.experimental.numpy.kron(tensor1, tensor2) - def prod(self, x: tf.Tensor, axis: Union[None, int]): + def prod(self, x: tf.Tensor, axis: int | None): return tf.math.reduce_prod(x, axis=axis) def real(self, array: tf.Tensor) -> tf.Tensor: @@ -339,7 +338,7 @@ def sum(self, array: tf.Tensor, axes: Sequence[int] = None): return tf.reduce_sum(array, axes) @Autocast() - def tensordot(self, a: tf.Tensor, b: tf.Tensor, axes: List[int]) -> tf.Tensor: + def tensordot(self, a: tf.Tensor, b: tf.Tensor, axes: list[int]) -> tf.Tensor: return tf.tensordot(a, b, axes) def tile(self, array: tf.Tensor, repeats: Sequence[int]) -> tf.Tensor: @@ -424,8 +423,8 @@ def DefaultEuclideanOptimizer(self) -> tf.keras.optimizers.legacy.Optimizer: return AdamOpt(learning_rate=0.001) def value_and_gradients( - self, cost_fn: Callable, parameters: List[Trainable] - ) -> Tuple[tf.Tensor, List[tf.Tensor]]: + self, cost_fn: Callable, parameters: list[Trainable] + ) -> tuple[tf.Tensor, list[tf.Tensor]]: r"""Computes the loss and gradients of the given cost function. Args: @@ -442,8 +441,8 @@ def value_and_gradients( @tf.custom_gradient def hermite_renormalized( - self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, shape: Tuple[int] - ) -> Tuple[tf.Tensor, Callable]: + self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, shape: tuple[int] + ) -> tuple[tf.Tensor, Callable]: r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor series of :math:`exp(C + Bx + 1/2*Ax^2)` at zero, where the series has :math:`sqrt(n!)` at the denominator rather than :math:`n!`. It computes all the amplitudes within the @@ -486,7 +485,7 @@ def grad(dLdGconj): return G, grad def hermite_renormalized_batch( - self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, shape: Tuple[int] + self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, shape: tuple[int] ) -> tf.Tensor: _A, _B, _C = self.asnumpy(A), self.asnumpy(B), self.asnumpy(C) @@ -499,9 +498,9 @@ def hermite_renormalized_binomial( A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, - shape: Tuple[int], - max_l2: Optional[float], - global_cutoff: Optional[int], + shape: tuple[int], + max_l2: float | None, + global_cutoff: int | None, ) -> tf.Tensor: r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor series of :math:`exp(C + Bx + 1/2*Ax^2)` at zero, where the series has :math:`sqrt(n!)` @@ -536,7 +535,7 @@ def grad(dLdGconj): return G, grad - def reorder_AB_bargmann(self, A: tf.Tensor, B: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]: + def reorder_AB_bargmann(self, A: tf.Tensor, B: tf.Tensor) -> tuple[tf.Tensor, tf.Tensor]: r"""In mrmustard.math.compactFock.compactFock~ dimensions of the Fock representation are ordered like [mode0,mode0,mode1,mode1,...] while in mrmustard.physics.bargmann the ordering is [mode0,mode1,...,mode0,mode1,...]. Here we reorder A and B. 
""" @@ -549,14 +548,14 @@ def reorder_AB_bargmann(self, A: tf.Tensor, B: tf.Tensor) -> Tuple[tf.Tensor, tf return A, B def hermite_renormalized_diagonal( - self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: Tuple[int] + self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: tuple[int] ) -> tf.Tensor: A, B = self.reorder_AB_bargmann(A, B) return self.hermite_renormalized_diagonal_reorderedAB(A, B, C, cutoffs=cutoffs) @tf.custom_gradient def hermite_renormalized_diagonal_reorderedAB( - self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: Tuple[int] + self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: tuple[int] ) -> tf.Tensor: r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor series of :math:`exp(C + Bx - Ax^2)` at zero, where the series has :math:`sqrt(n!)` at the @@ -614,14 +613,14 @@ def grad(dLdpoly): return poly0, grad def hermite_renormalized_diagonal_batch( - self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: Tuple[int] + self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: tuple[int] ) -> tf.Tensor: r"""Same as hermite_renormalized_diagonal but works for a batch of different B's.""" A, B = self.reorder_AB_bargmann(A, B) return self.hermite_renormalized_diagonal_reorderedAB_batch(A, B, C, cutoffs=cutoffs) def hermite_renormalized_diagonal_reorderedAB_batch( - self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: Tuple[int] + self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: tuple[int] ) -> tf.Tensor: r"""Same as hermite_renormalized_diagonal_reorderedAB but works for a batch of different B's. @@ -643,7 +642,7 @@ def hermite_renormalized_diagonal_reorderedAB_batch( return poly0 def hermite_renormalized_1leftoverMode( - self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: Tuple[int] + self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: tuple[int] ) -> tf.Tensor: r"""First, reorder A and B parameters of Bargmann representation to match conventions in mrmustard.math.compactFock.compactFock~ Then, calculate the required renormalized multidimensional Hermite polynomial. @@ -653,7 +652,7 @@ def hermite_renormalized_1leftoverMode( @tf.custom_gradient def hermite_renormalized_1leftoverMode_reorderedAB( - self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: Tuple[int] + self, A: tf.Tensor, B: tf.Tensor, C: tf.Tensor, cutoffs: tuple[int] ) -> tf.Tensor: r"""Renormalized multidimensional Hermite polynomial given by the "exponential" Taylor series of :math:`exp(C + Bx - Ax^2)` at zero, where the series has :math:`sqrt(n!)` at the diff --git a/mrmustard/math/lattice/strategies/binomial.py b/mrmustard/math/lattice/strategies/binomial.py index d7b867e4b..1ad24f746 100644 --- a/mrmustard/math/lattice/strategies/binomial.py +++ b/mrmustard/math/lattice/strategies/binomial.py @@ -14,7 +14,7 @@ " This module contains binomial strategies " -from typing import Optional +from __future__ import annotations import numpy as np from numba import njit, typed, types @@ -77,8 +77,8 @@ def binomial_dict( A: ComplexMatrix, b: ComplexVector, c: complex, - max_prob: Optional[float] = None, - global_cutoff: Optional[int] = None, + max_prob: float | None = None, + global_cutoff: int | None = None, ) -> dict[tuple[int, ...], complex]: r"""Factorial speedup strategy (fill ket by weight), python version with numba function/loop. Uses a dictionary to store the output. 
@@ -133,7 +133,7 @@ def binomial_numba( c: complex, FP: dict[tuple[tuple[int, ...], int], list[tuple[int, ...]]], max_prob: float = 0.999, - global_cutoff: Optional[int] = None, + global_cutoff: int | None = None, ) -> ComplexTensor: # pragma: no cover r"""Binomial strategy (fill by weight), fully numba version.""" if global_cutoff is None: diff --git a/mrmustard/math/parameters.py b/mrmustard/math/parameters.py index 6829e8f7b..12bc295ed 100644 --- a/mrmustard/math/parameters.py +++ b/mrmustard/math/parameters.py @@ -14,7 +14,9 @@ """This module contains the classes to describe constant and variable parameters used in Mr Mustard.""" -from typing import Callable, Optional, Tuple +from __future__ import annotations + +from typing import Callable from mrmustard.math.backend_manager import BackendManager @@ -147,7 +149,7 @@ def __init__( self, value: any, name: str, - bounds: Tuple[Optional[float], Optional[float]] = (None, None), + bounds: tuple[float | None, float | None] = (None, None), update_fn: Callable = update_euclidean, dtype: any = None, ): @@ -168,7 +170,7 @@ def _get_value(self, value, bounds, name, dtype=None): return math.new_variable(value, bounds, name, dtype) @property - def bounds(self) -> Tuple[Optional[float], Optional[float]]: + def bounds(self) -> tuple[float | None, float | None]: r""" The numerical bounds of this variable. """ @@ -182,7 +184,7 @@ def name(self) -> str: return self._name @property - def update_fn(self) -> Optional[Callable]: + def update_fn(self) -> Callable | None: r""" The function used to update this variable during training. """ @@ -205,9 +207,9 @@ def value(self, value): @staticmethod def orthogonal( - value: Optional[any], + value: any | None, name: str, - bounds: Tuple[Optional[float], Optional[float]] = (None, None), + bounds: tuple[float | None, float | None] = (None, None), N: int = 1, ): r""" @@ -231,7 +233,7 @@ def orthogonal( def symplectic( value: any, name: str, - bounds: Tuple[Optional[float], Optional[float]] = (None, None), + bounds: tuple[float | None, float | None] = (None, None), N: int = 1, ): r""" @@ -255,7 +257,7 @@ def symplectic( def unitary( value: any, name: str, - bounds: Tuple[Optional[float], Optional[float]] = (None, None), + bounds: tuple[float | None, float | None] = (None, None), N: int = 1, ): r""" diff --git a/mrmustard/math/tensor_networks/networks.py b/mrmustard/math/tensor_networks/networks.py index 44d4eef9c..b14cf26e7 100644 --- a/mrmustard/math/tensor_networks/networks.py +++ b/mrmustard/math/tensor_networks/networks.py @@ -16,7 +16,6 @@ from __future__ import annotations -from typing import Optional import numpy as np from opt_einsum import contract as opt_contract @@ -26,7 +25,7 @@ from .tensors import Wire, Tensor -def connect(wire1: Wire, wire2: Wire, dim: Optional[int] = None): +def connect(wire1: Wire, wire2: Wire, dim: int | None = None): r""" Connects two wires in a tensor network. 
diff --git a/mrmustard/math/tensor_networks/tensors.py b/mrmustard/math/tensor_networks/tensors.py index 3c2de3145..fa2671556 100644 --- a/mrmustard/math/tensor_networks/tensors.py +++ b/mrmustard/math/tensor_networks/tensors.py @@ -18,7 +18,6 @@ from abc import ABC, abstractmethod from dataclasses import dataclass, field -from typing import List, Optional, Tuple import uuid @@ -143,20 +142,20 @@ def value(self, shape): def __init__( self, name: str, - modes_in_ket: Optional[list[int]] = None, - modes_out_ket: Optional[list[int]] = None, - modes_in_bra: Optional[list[int]] = None, - modes_out_bra: Optional[list[int]] = None, + modes_in_ket: list[int] | None = None, + modes_out_ket: list[int] | None = None, + modes_in_bra: list[int] | None = None, + modes_out_bra: list[int] | None = None, ) -> None: self._name = name self._update_modes(modes_in_ket, modes_out_ket, modes_in_bra, modes_out_bra) def _update_modes( self, - modes_in_ket: Optional[list[int]] = None, - modes_out_ket: Optional[list[int]] = None, - modes_in_bra: Optional[list[int]] = None, - modes_out_bra: Optional[list[int]] = None, + modes_in_ket: list[int] | None = None, + modes_out_ket: list[int] | None = None, + modes_in_bra: list[int] | None = None, + modes_out_bra: list[int] | None = None, ) -> None: r""" Updates the modes in this tensor by setting: @@ -232,7 +231,7 @@ def modes(self) -> list[int]: raise ValueError("modes are ambiguous for this Tensor.") @property - def modes_in(self) -> List[int]: + def modes_in(self) -> list[int]: r""" The list of input modes that are used by this Tensor. @@ -245,7 +244,7 @@ def modes_in(self) -> List[int]: return self._modes_in_bra @property - def modes_out(self) -> List[int]: + def modes_out(self) -> list[int]: r""" The list of output modes that are used by this Tensor. @@ -271,7 +270,7 @@ def output(self): """ return self._output - def unpack_shape(self, shape: Tuple[int]): + def unpack_shape(self, shape: tuple[int]): r""" Unpack the given ``shape`` into the shapes of the input and output wires on ket and bra sides. @@ -303,7 +302,7 @@ def unpack_shape(self, shape: Tuple[int]): return shape_in_ket, shape_out_ket, shape_in_bra, shape_out_bra @property - def wires(self) -> List[Wire]: + def wires(self) -> list[Wire]: r""" The list of all wires in this tensor, sorted as ``[ket_in, ket_out, bra_in, bra_out]``. """ @@ -315,7 +314,7 @@ def wires(self) -> List[Wire]: ) @abstractmethod - def value(self, shape: Tuple[int]): + def value(self, shape: tuple[int]): r"""The value of this tensor. Args: @@ -327,10 +326,10 @@ def value(self, shape: Tuple[int]): def change_modes( self, - modes_in_ket: Optional[list[int]] = None, - modes_out_ket: Optional[list[int]] = None, - modes_in_bra: Optional[list[int]] = None, - modes_out_bra: Optional[list[int]] = None, + modes_in_ket: list[int] | None = None, + modes_out_ket: list[int] | None = None, + modes_in_bra: list[int] | None = None, + modes_out_bra: list[int] | None = None, ) -> None: r""" Changes the modes in this tensor. @@ -353,7 +352,7 @@ def change_modes( raise ValueError(msg) self._update_modes(modes_in_ket, modes_out_ket, modes_in_bra, modes_out_bra) - def shape(self, default_dim: Optional[int] = None, out_in=False): + def shape(self, default_dim: int | None = None, out_in=False): r""" Returns the shape of the underlying tensor, as inferred from the dimensions of the individual wires. 
@@ -399,7 +398,7 @@ def __init__(self, tensor): modes_out_bra=self._original.output.ket.keys(), ) - def value(self, shape: Tuple[int]): + def value(self, shape: tuple[int]): r"""The value of this tensor. Args: @@ -436,7 +435,7 @@ def __init__(self, tensor): modes_out_bra=self._original.input.bra.keys(), ) - def value(self, shape: Tuple[int]): + def value(self, shape: tuple[int]): r"""The value of this tensor. Args: diff --git a/mrmustard/math/tensor_wrappers/mmtensor.py b/mrmustard/math/tensor_wrappers/mmtensor.py index 84ebba12e..14b756a10 100644 --- a/mrmustard/math/tensor_wrappers/mmtensor.py +++ b/mrmustard/math/tensor_wrappers/mmtensor.py @@ -17,9 +17,11 @@ """ This module contains the implementation of a tensor wrapper class. """ + +from __future__ import annotations + import string from numbers import Number -from typing import List, Optional, Union from mrmustard.math.backend_manager import BackendManager @@ -136,7 +138,7 @@ def __matmul__(self, other): new_axis_labels, ) - def contract(self, relabeling: Optional[List[str]] = None): + def contract(self, relabeling: list[str] | None = None): r""" Contract *this* tensor along the specified indices using einsum. @@ -173,7 +175,7 @@ def contract(self, relabeling: Optional[List[str]] = None): [label for label in unique_labels if label not in repeated], ) - def transpose(self, perm: Union[List[int], List[str]]): + def transpose(self, perm: list[int] | list[str]): """Transpose the tensor using a list of axis labels or indices.""" if set(perm) == set(self.axis_labels): perm = [self.axis_labels.index(label) for label in perm] diff --git a/mrmustard/math/tensor_wrappers/xptensor.py b/mrmustard/math/tensor_wrappers/xptensor.py index c6101b79f..9f3eb1a6a 100644 --- a/mrmustard/math/tensor_wrappers/xptensor.py +++ b/mrmustard/math/tensor_wrappers/xptensor.py @@ -19,12 +19,6 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import ( - List, - Optional, - Tuple, - Union, -) from mrmustard.utils.typing import Matrix, Scalar, Tensor, Vector from mrmustard.math.backend_manager import BackendManager @@ -68,10 +62,10 @@ class XPTensor(ABC): @abstractmethod # so that XPTensor can't be instantiated directly def __init__( self, - tensor: Optional[Tensor], + tensor: Tensor | None, like_0: bool, is_vector: bool, - modes: Union[Tuple[List[int], List[int]], None], + modes: tuple[list[int], list[int]] | None, ): self.like_0 = like_0 self.shape = ( @@ -101,11 +95,11 @@ def dtype(self): return None if self.tensor is None else self.tensor.dtype @property - def outmodes(self) -> List[int]: + def outmodes(self) -> list[int]: return self.modes[0] @property - def inmodes(self) -> List[int]: + def inmodes(self) -> list[int]: return self.modes[1] @property @@ -113,11 +107,11 @@ def num_modes(self) -> int: return len(self.outmodes) @property - def isMatrix(self) -> Optional[bool]: + def isMatrix(self) -> bool | None: return not self.is_vector @property - def isCoherence(self) -> Optional[bool]: + def isCoherence(self) -> bool | None: return self.isMatrix and self.outmodes != self.inmodes @property @@ -137,7 +131,7 @@ def T(self) -> XPMatrix: (self.inmodes, self.outmodes), ) - def to_xpxp(self) -> Optional[Union[Matrix, Vector]]: + def to_xpxp(self) -> Matrix | Vector | None: if self.tensor is None: return None tensor = math.transpose( @@ -145,7 +139,7 @@ def to_xpxp(self) -> Optional[Union[Matrix, Vector]]: ) # from NN22 to N2N2 or from N2 to N2 return math.reshape(tensor, [2 * s for s in self.shape]) - def to_xxpp(self) -> 
Optional[Union[Matrix, Vector]]: + def to_xxpp(self) -> Matrix | Vector | None: if self.tensor is None: return None tensor = math.transpose( @@ -156,10 +150,10 @@ def to_xxpp(self) -> Optional[Union[Matrix, Vector]]: def __array__(self): return self.to_xxpp() - def modes_first(self) -> Optional[Tensor]: + def modes_first(self) -> Tensor | None: return self.tensor - def modes_last(self) -> Optional[Tensor]: + def modes_last(self) -> Tensor | None: if self.tensor is None: return None return math.transpose(self.tensor, (2, 3, 0, 1) if self.isMatrix else (0, 1)) # 22NM or 2N @@ -232,10 +226,10 @@ def __rmul__(self, other: Scalar) -> XPTensor: self.tensor = other * self.tensor return self - def __mul__(self, other: Scalar) -> Optional[XPTensor]: + def __mul__(self, other: Scalar) -> XPTensor | None: return other * self - def __matmul__(self, other: Union[XPMatrix, XPVector]) -> Union[XPMatrix, XPVector, Scalar]: + def __matmul__(self, other: XPMatrix | XPVector) -> XPMatrix | XPVector | Scalar: if not isinstance(other, (XPMatrix, XPVector)): raise TypeError( f"Unsupported operand type(s) for @: '{self.__class__.__qualname__}' and '{other.__class__.__qualname__}'" @@ -269,8 +263,8 @@ def __matmul__(self, other: Union[XPMatrix, XPVector]) -> Union[XPMatrix, XPVect # pylint: disable=too-many-statements def _mode_aware_matmul( - self, other: Union[XPMatrix, XPVector] - ) -> Tuple[Tensor, Tuple[List[int], List[int]]]: + self, other: XPMatrix | XPVector + ) -> tuple[Tensor, tuple[list[int], list[int]]]: r"""Performs matrix multiplication only on the necessary modes and takes care of keeping only the modes that are needed, in case of mismatch. @@ -369,7 +363,7 @@ def _mode_aware_vecvec(self, other: XPVector) -> Scalar: ) # only the common modes (the others are like 0) return math.sum(self.tensor[common] * other.tensor[common]) - def __add__(self, other: Union[XPMatrix, XPVector]) -> Union[XPMatrix, XPVector]: + def __add__(self, other: XPMatrix | XPVector) -> XPMatrix | XPVector: if not isinstance(other, (XPMatrix, XPVector)): raise TypeError( f"unsupported operand type(s) for +: '{self.__class__.__qualname__}' and '{other.__class__.__qualname__}'" @@ -454,13 +448,13 @@ def __add__(self, other: Union[XPMatrix, XPVector]) -> Union[XPMatrix, XPVector] return XPVector(to_update, outmodes) - def __sub__(self, other: Union[XPMatrix, XPVector]) -> Optional[XPTensor]: + def __sub__(self, other: XPMatrix | XPVector) -> XPTensor | None: return self + (-1) * other - def __truediv__(self, other: Scalar) -> Optional[XPTensor]: + def __truediv__(self, other: Scalar) -> XPTensor | None: return (1 / other) * self - def __getitem__(self, modes: Union[int, slice, List[int], Tuple]) -> Union[XPMatrix, XPVector]: + def __getitem__(self, modes: int | slice | list[int] | tuple) -> XPMatrix | XPVector: r"""Returns modes or subsets of modes from the XPTensor or coherences between modes using an intuitive notation. 
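For orientation, ``to_xpxp`` and ``to_xxpp`` above expose the same data in the two standard quadrature orderings; a minimal numpy sketch of that reshuffle for a two-mode vector (plain arrays here, not the ``XPTensor`` internals):

import numpy as np

xxpp = np.array([1.0, 2.0, 10.0, 20.0])   # (x1, x2, p1, p2): all x quadratures first, then all p's
xpxp = xxpp.reshape(2, 2).T.reshape(-1)   # (x1, p1, x2, p2): quadratures interleaved per mode
assert np.allclose(xpxp, [1.0, 10.0, 2.0, 20.0])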
@@ -535,7 +529,7 @@ def __init__( tensor: Tensor = None, like_0: bool = None, like_1: bool = None, - modes: Tuple[List[int], List[int]] = ([], []), + modes: tuple[list[int], list[int]] = ([], []), ): if like_0 is None and like_1 is None: raise ValueError("At least one of like_0 or like_1 must be set") @@ -559,10 +553,10 @@ def __init__( @classmethod def from_xxpp( cls, - tensor: Optional[Union[Matrix, Vector]], - like_0: Optional[bool] = None, - like_1: Optional[bool] = None, - modes: Tuple[List[int], List[int]] = ([], []), + tensor: Matrix | Vector | None, + like_0: bool | None = None, + like_1: bool | None = None, + modes: tuple[list[int], list[int]] = ([], []), ) -> XPMatrix: if tensor is not None: tensor = math.reshape(tensor, [_ for n in tensor.shape for _ in (2, n // 2)]) @@ -572,10 +566,10 @@ def from_xxpp( @classmethod def from_xpxp( cls, - tensor: Optional[Union[Matrix, Vector]], + tensor: Matrix | Vector | None, like_0: bool = None, like_1: bool = None, - modes: Tuple[List[int], List[int]] = ([], []), + modes: tuple[list[int], list[int]] = ([], []), ) -> XPMatrix: if tensor is not None: tensor = math.reshape(tensor, [_ for n in tensor.shape for _ in (n // 2, 2)]) @@ -594,7 +588,7 @@ class XPVector(XPTensor): modes: a list of modes for a diagonal matrix or a vector and a tuple of two lists for a coherence (not optional for a coherence) """ - def __init__(self, tensor: Tensor = None, modes: Union[List[int], None] = None): + def __init__(self, tensor: Tensor = None, modes: list[int] | None = None): if modes is None and tensor is not None: modes = list(range(tensor.shape[0])) if modes is None and tensor is None: @@ -608,8 +602,8 @@ def __init__(self, tensor: Tensor = None, modes: Union[List[int], None] = None): @classmethod def from_xxpp( cls, - tensor: Optional[Union[Matrix, Vector]], - modes: Union[List[int], None] = None, + tensor: Matrix | Vector | None, + modes: list[int] | None = None, ) -> XPMatrix: if tensor is not None: tensor = math.reshape(tensor, (2, -1)) @@ -619,8 +613,8 @@ def from_xxpp( @classmethod def from_xpxp( cls, - tensor: Optional[Union[Matrix, Vector]], - modes: Union[List[int], None] = None, + tensor: Matrix | Vector | None, + modes: list[int] | None = None, ) -> XPMatrix: if tensor is not None: tensor = math.reshape(tensor, (-1, 2)) diff --git a/mrmustard/physics/ansatze.py b/mrmustard/physics/ansatze.py index 6f9d27d90..7135a7e7c 100644 --- a/mrmustard/physics/ansatze.py +++ b/mrmustard/physics/ansatze.py @@ -20,7 +20,7 @@ import itertools from abc import ABC, abstractmethod -from typing import Any, Callable, Union, Optional, Sequence +from typing import Callable, Sequence from warnings import warn import numpy as np @@ -66,7 +66,7 @@ def __init__(self) -> None: self._kwargs = {} @abstractmethod - def from_function(cls, fn: Callable, **kwargs: Any) -> Ansatz: + def from_function(cls, fn: Callable, **kwargs: any) -> Ansatz: r""" Returns an ansatz from a function and kwargs. """ @@ -99,19 +99,19 @@ def __sub__(self, other: Ansatz) -> Ansatz: raise TypeError(f"Cannot subtract {self.__class__} and {other.__class__}.") from e @abstractmethod - def __call__(self, point: Any) -> Scalar: + def __call__(self, point: any) -> Scalar: r""" Evaluates this ansatz at a given point in the domain. """ @abstractmethod - def __truediv__(self, other: Union[Scalar, Ansatz]) -> Ansatz: + def __truediv__(self, other: Scalar | Ansatz) -> Ansatz: r""" Divides this ansatz by another ansatz or by a scalar. 
""" @abstractmethod - def __mul__(self, other: Union[Scalar, Ansatz]) -> Ansatz: + def __mul__(self, other: Scalar | Ansatz) -> Ansatz: r""" Multiplies this ansatz by another ansatz. """ @@ -514,8 +514,8 @@ class PolyExpAnsatz(PolyExpBase): def __init__( self, - A: Optional[Batch[Matrix]] = None, - b: Optional[Batch[Vector]] = None, + A: Batch[Matrix] | None = None, + b: Batch[Vector] | None = None, c: Batch[Tensor | Scalar] = np.array([[1.0]]), name: str = "", ): @@ -547,7 +547,7 @@ def c(self) -> Batch[ComplexTensor]: return self.array @classmethod - def from_function(cls, fn: Callable, **kwargs: Any) -> PolyExpAnsatz: + def from_function(cls, fn: Callable, **kwargs: any) -> PolyExpAnsatz: r""" Returns a PolyExpAnsatz object from a generator function. """ @@ -556,7 +556,7 @@ def from_function(cls, fn: Callable, **kwargs: Any) -> PolyExpAnsatz: ret._kwargs = kwargs return ret - def __call__(self, z: Batch[Vector]) -> Union[Scalar, PolyExpAnsatz]: + def __call__(self, z: Batch[Vector]) -> Scalar | PolyExpAnsatz: r""" Returns either the value of the ansatz or a new ansatz depending on the argument. If the argument contains None, returns a new ansatz. @@ -718,7 +718,7 @@ def _call_none(self, z: Batch[Vector]) -> PolyExpAnsatz: A, b, c = zip(*Abc) return self.__class__(A=A, b=b, c=c) - def __mul__(self, other: Union[Scalar, PolyExpAnsatz]) -> PolyExpAnsatz: + def __mul__(self, other: Scalar | PolyExpAnsatz) -> PolyExpAnsatz: r"""Multiplies this ansatz by a scalar or another ansatz or a plain scalar. Args: @@ -795,7 +795,7 @@ def mul_c(c1, c2): except Exception as e: raise TypeError(f"Cannot multiply {self.__class__} and {other.__class__}.") from e - def __truediv__(self, other: Union[Scalar, PolyExpAnsatz]) -> PolyExpAnsatz: + def __truediv__(self, other: Scalar | PolyExpAnsatz) -> PolyExpAnsatz: r"""Multiplies this ansatz by a scalar or another ansatz or a plain scalar. Args: @@ -1017,7 +1017,7 @@ def num_vars(self) -> int: return len(self.array.shape) - 1 @classmethod - def from_function(cls, fn: Callable, **kwargs: Any) -> ArrayAnsatz: + def from_function(cls, fn: Callable, **kwargs: any) -> ArrayAnsatz: r""" Returns an ArrayAnsatz object from a generator function. """ @@ -1104,7 +1104,7 @@ def __and__(self, other: ArrayAnsatz) -> ArrayAnsatz: new_array = [math.outer(a, b) for a in self.array for b in other.array] return self.__class__(array=new_array) - def __call__(self, point: Any) -> Scalar: + def __call__(self, point: any) -> Scalar: r""" Evaluates this ansatz at a given point in the domain. """ @@ -1122,7 +1122,7 @@ def __eq__(self, other: Ansatz) -> bool: ) return np.allclose(self.array[slices], other.array[slices], atol=1e-10) - def __mul__(self, other: Union[Scalar, ArrayAnsatz]) -> ArrayAnsatz: + def __mul__(self, other: Scalar | ArrayAnsatz) -> ArrayAnsatz: r""" Multiplies this ansatz by another ansatz. @@ -1158,7 +1158,7 @@ def __neg__(self) -> ArrayAnsatz: """ return self.__class__(array=-self.array) - def __truediv__(self, other: Union[Scalar, ArrayAnsatz]) -> ArrayAnsatz: + def __truediv__(self, other: Scalar | ArrayAnsatz) -> ArrayAnsatz: r""" Divides this ansatz by another ansatz. diff --git a/mrmustard/physics/fock.py b/mrmustard/physics/fock.py index c4b981684..bfb0d80dd 100644 --- a/mrmustard/physics/fock.py +++ b/mrmustard/physics/fock.py @@ -18,8 +18,10 @@ This module contains functions for performing calculations on objects in the Fock representations. 
""" +from __future__ import annotations + from functools import lru_cache -from typing import List, Optional, Sequence, Tuple, Union +from typing import Sequence import numpy as np @@ -42,7 +44,7 @@ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -def fock_state(n: Sequence[int], cutoffs: Optional[Union[int, Sequence[int]]] = None) -> Tensor: +def fock_state(n: Sequence[int], cutoffs: int | Sequence[int] | None = None) -> Tensor: r""" The Fock array of a tensor product of one-mode ``Number`` states. @@ -113,7 +115,7 @@ def wigner_to_fock_state( means: Vector, shape: Sequence[int], max_prob: float = 1.0, - max_photons: Optional[int] = None, + max_photons: int | None = None, return_dm: bool = True, ) -> Tensor: r"""Returns the Fock representation of a Gaussian state. @@ -265,7 +267,7 @@ def dm_to_probs(dm: Tensor) -> Tensor: return math.all_diagonals(dm, real=True) -def U_to_choi(U: Tensor, Udual: Optional[Tensor] = None) -> Tensor: +def U_to_choi(U: Tensor, Udual: Tensor | None = None) -> Tensor: r"""Converts a unitary transformation to a Choi tensor. Args: @@ -488,7 +490,7 @@ def apply_choi_to_dm( choi: ComplexTensor, dm: ComplexTensor, choi_in_modes: Sequence[int], - choi_out_modes: Sequence[int] = None, + choi_out_modes: Sequence[int] | None = None, ): r"""Applies a choi operator to a density matrix. It assumes that the density matrix is indexed as left_1, ..., left_n, right_1, ..., right_n. @@ -588,7 +590,7 @@ def apply_choi_to_ket(choi, ket, choi_in_modes, choi_out_modes=None): def contract_states( - stateA, stateB, a_is_dm: bool, b_is_dm: bool, modes: List[int], normalize: bool + stateA, stateB, a_is_dm: bool, b_is_dm: bool, modes: list[int], normalize: bool ): r"""Contracts two states in the specified modes. Assumes that the modes of B are a subset of the modes of A. @@ -673,7 +675,7 @@ def is_mixed_dm(dm): return not np.isclose(math.sum(square * math.transpose(square)), 1.0) -def trace(dm, keep: List[int]): +def trace(dm, keep: list[int]): r"""Computes the partial trace of a density matrix. The indices of the density matrix are in the order (out0, ..., outN-1, in0, ..., inN-1). The indices to keep are a subset of the N 'out' indices @@ -819,7 +821,7 @@ def estimate_quadrature_axis(cutoff, minimum=5, period_resolution=20): def quadrature_distribution( state: Tensor, quadrature_angle: float = 0.0, - x: Optional[Vector] = None, + x: Vector | None = None, ): r"""Given the ket or density matrix of a single-mode state, it generates the probability density distribution :math:`\tr [ \rho |x_\phi> Tuple[float, float]: +def sample_homodyne(state: Tensor, quadrature_angle: float = 0.0) -> tuple[float, float]: r"""Given a single-mode state, it generates the pdf of :math:`\tr [ \rho |x_\phi> Matrix: return math.eye(num_modes * 2, dtype=math.float64) * settings.HBAR / 2 -def vacuum_means(num_modes: int) -> Tuple[Matrix, Vector]: +def vacuum_means(num_modes: int) -> tuple[Matrix, Vector]: r"""Returns the real covariance matrix and real means vector of the vacuum state. Args: @@ -70,7 +72,7 @@ def squeezed_vacuum_cov(r: Vector, phi: Vector) -> Matrix: return math.matmul(S, math.transpose(S)) * settings.HBAR / 2 -def thermal_cov(nbar: Vector) -> Tuple[Matrix, Vector]: +def thermal_cov(nbar: Vector) -> tuple[Matrix, Vector]: r"""Returns the real covariance matrix and real means vector of a thermal state. The dimension depends on the dimensions of ``nbar``. 
@@ -126,7 +128,7 @@ def gaussian_cov(symplectic: Matrix, eigenvalues: Vector = None) -> Matrix: # ~~~~~~~~~~~~~~~~~~~~~~~~ -def rotation_symplectic(angle: Union[Scalar, Vector]) -> Matrix: +def rotation_symplectic(angle: Scalar | Vector) -> Matrix: r"""Symplectic matrix of a rotation gate. The dimension depends on the dimension of the angle. @@ -148,7 +150,7 @@ def rotation_symplectic(angle: Union[Scalar, Vector]) -> Matrix: ) -def squeezing_symplectic(r: Union[Scalar, Vector], phi: Union[Scalar, Vector]) -> Matrix: +def squeezing_symplectic(r: Scalar | Vector, phi: Scalar | Vector) -> Matrix: r"""Symplectic matrix of a squeezing gate. The dimension depends on the dimension of ``r`` and ``phi``. @@ -180,7 +182,7 @@ def squeezing_symplectic(r: Union[Scalar, Vector], phi: Union[Scalar, Vector]) - ) -def displacement(x: Union[Scalar, Vector], y: Union[Scalar, Vector]) -> Vector: +def displacement(x: Scalar | Vector, y: Scalar | Vector) -> Vector: r"""Returns the displacement vector for a displacement by :math:`alpha = x + iy`. The dimension depends on the dimensions of ``x`` and ``y``. @@ -390,7 +392,7 @@ def CPTP( d: Vector, state_modes: Sequence[int], transf_modes: Sequence[int], -) -> Tuple[Matrix, Vector]: +) -> tuple[Matrix, Vector]: r"""Returns the cov matrix and means vector of a state after undergoing a CPTP channel. Computed as ``cov = X \cdot cov \cdot X^T + Y`` and ``d = X \cdot means + d``. @@ -430,9 +432,7 @@ def CPTP( return cov, means -def loss_XYd( - transmissivity: Union[Scalar, Vector], nbar: Union[Scalar, Vector] -) -> Tuple[Matrix, Matrix, None]: +def loss_XYd(transmissivity: Scalar | Vector, nbar: Scalar | Vector) -> tuple[Matrix, Matrix, None]: r"""Returns the ``X``, ``Y`` matrices and the ``d`` vector for the noisy loss (attenuator) channel. .. math:: @@ -459,7 +459,7 @@ def loss_XYd( return X, Y, None -def amp_XYd(gain: Union[Scalar, Vector], nbar: Union[Scalar, Vector]) -> Matrix: +def amp_XYd(gain: Scalar | Vector, nbar: Scalar | Vector) -> Matrix: r"""Returns the ``X``, ``Y`` matrices and the d vector for the noisy amplifier channel. .. math:: @@ -489,7 +489,7 @@ def amp_XYd(gain: Union[Scalar, Vector], nbar: Union[Scalar, Vector]) -> Matrix: return X, Y, None -def noise_Y(noise: Union[Scalar, Vector]) -> Matrix: +def noise_Y(noise: Scalar | Vector) -> Matrix: r"""Returns the ``X``, ``Y`` matrices and the d vector for the additive noise channel ``(Y = noise * (\hbar / 2) * I)`` Args: @@ -503,7 +503,7 @@ def noise_Y(noise: Union[Scalar, Vector]) -> Matrix: def compose_channels_XYd( X1: Matrix, Y1: Matrix, d1: Vector, X2: Matrix, Y2: Matrix, d2: Vector -) -> Tuple[Matrix, Matrix, Vector]: +) -> tuple[Matrix, Matrix, Vector]: r"""Returns the combined ``X``, ``Y``, and ``d`` for two CPTP channels. Args: @@ -547,9 +547,9 @@ def general_dyne( cov: Matrix, means: Vector, proj_cov: Matrix, - proj_means: Optional[Vector] = None, - modes: Optional[Sequence[int]] = None, -) -> Tuple[Scalar, Matrix, Vector]: + proj_means: Vector | None = None, + modes: Sequence[int] | None = None, +) -> tuple[Scalar, Matrix, Vector]: r"""Returns the results of a general-dyne measurement. If ``proj_means`` are not provided (as ``None``), they are sampled from the probability distribution. 
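As a sketch of the update ``general_dyne`` performs (standard Gaussian conditioning on the measured modes, written with ``cov`` and ``means`` partitioned into unmeasured modes :math:`A`, measured modes :math:`B`, and the :math:`AB` coherence block; this is the textbook form, not quoted from the implementation):

.. math::

    \Sigma &= V_B + V_{\text{proj}} \\
    V_{\text{out}} &= V_A - V_{AB}\,\Sigma^{-1}\,V_{AB}^T \\
    \mu_{\text{out}} &= \mu_A + V_{AB}\,\Sigma^{-1}\,(\mu_{\text{proj}} - \mu_B),

with the outcome probability given by the Gaussian density of :math:`\mu_{\text{proj}}` with mean :math:`\mu_B` and covariance :math:`\Sigma` (prefactors depending on :math:`\hbar` are omitted here).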
@@ -656,7 +656,7 @@ def number_cov(cov: Matrix, means: Vector) -> Matrix: ) -def trace(cov: Matrix, means: Vector, Bmodes: Sequence[int]) -> Tuple[Matrix, Vector]: +def trace(cov: Matrix, means: Vector, Bmodes: Sequence[int]) -> tuple[Matrix, Vector]: r"""Returns the covariances and means after discarding the specified modes. Args: @@ -677,7 +677,7 @@ def trace(cov: Matrix, means: Vector, Bmodes: Sequence[int]) -> Tuple[Matrix, Ve return A_cov_block, A_means_vec -def partition_cov(cov: Matrix, Amodes: Sequence[int]) -> Tuple[Matrix, Matrix, Matrix]: +def partition_cov(cov: Matrix, Amodes: Sequence[int]) -> tuple[Matrix, Matrix, Matrix]: r"""Partitions the covariance matrix into the ``A`` and ``B`` subsystems and the AB coherence block. Args: @@ -699,7 +699,7 @@ def partition_cov(cov: Matrix, Amodes: Sequence[int]) -> Tuple[Matrix, Matrix, M return A_block, B_block, AB_block -def partition_means(means: Vector, Amodes: Sequence[int]) -> Tuple[Vector, Vector]: +def partition_means(means: Vector, Amodes: Sequence[int]) -> tuple[Vector, Vector]: r"""Partitions the means vector into the ``A`` and ``B`` subsystems. Args: @@ -730,7 +730,7 @@ def purity(cov: Matrix) -> Scalar: return 1 / math.sqrt(math.det((2 / settings.HBAR) * cov)) -def symplectic_eigenvals(cov: Matrix) -> Any: +def symplectic_eigenvals(cov: Matrix) -> any: r"""Returns the sympletic eigenspectrum of a covariance matrix. For a pure state, we expect the sympletic eigenvalues to be 1. @@ -867,7 +867,7 @@ def log_negativity(cov: Matrix) -> float: return 0 -def join_covs(covs: Sequence[Matrix]) -> Tuple[Matrix, Vector]: +def join_covs(covs: Sequence[Matrix]) -> tuple[Matrix, Vector]: r"""Joins the given covariance matrices into a single covariance matrix. Args: diff --git a/mrmustard/physics/gaussian_integrals.py b/mrmustard/physics/gaussian_integrals.py index 12348cfc4..707b8d08b 100644 --- a/mrmustard/physics/gaussian_integrals.py +++ b/mrmustard/physics/gaussian_integrals.py @@ -16,7 +16,7 @@ This module contains gaussian integral functions and related helper functions. """ -from typing import Sequence, Tuple +from typing import Sequence import numpy as np from mrmustard import math from mrmustard.utils.typing import ComplexMatrix, ComplexVector, ComplexTensor @@ -156,8 +156,8 @@ def complex_gaussian_integral( def join_Abc( - Abc1: Tuple[ComplexMatrix, ComplexVector, ComplexTensor], - Abc2: Tuple[ComplexMatrix, ComplexVector, ComplexTensor], + Abc1: tuple[ComplexMatrix, ComplexVector, ComplexTensor], + Abc2: tuple[ComplexMatrix, ComplexVector, ComplexTensor], ): r"""Joins two ``(A,b,c)`` triples into a single ``(A,b,c)`` triple by block addition of the ``A`` matrices and concatenating the ``b`` vectors. 
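A minimal sketch of the join this docstring describes, for unbatched triples (treating the scalar parts as combining multiplicatively is our assumption; the batched and polynomial variants in this module are more general):

import numpy as np
from scipy.linalg import block_diag

def join_abc_sketch(Abc1, Abc2):
    A1, b1, c1 = Abc1
    A2, b2, c2 = Abc2
    A = block_diag(A1, A2)          # A matrices placed as diagonal blocks
    b = np.concatenate([b1, b2])    # b vectors concatenated
    c = c1 * c2                     # scalar parts multiplied (assumption, unbatched case)
    return A, b, c

# 1x1 example
A, b, c = join_abc_sketch(
    (np.array([[1.0]]), np.array([0.5]), 1.0),
    (np.array([[2.0]]), np.array([0.25]), 2.0),
)
assert A.shape == (2, 2) and b.shape == (2,) and c == 2.0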
@@ -180,8 +180,8 @@ def join_Abc( def join_Abc_real( - Abc1: Tuple[ComplexMatrix, ComplexVector, ComplexTensor], - Abc2: Tuple[ComplexMatrix, ComplexVector, ComplexTensor], + Abc1: tuple[ComplexMatrix, ComplexVector, ComplexTensor], + Abc2: tuple[ComplexMatrix, ComplexVector, ComplexTensor], idx1: Sequence[int], idx2: Sequence[int], ): @@ -272,8 +272,8 @@ def reorder_abc(Abc: tuple, order: Sequence[int]): def contract_two_Abc( - Abc1: Tuple[ComplexMatrix, ComplexVector, ComplexTensor], - Abc2: Tuple[ComplexMatrix, ComplexVector, ComplexTensor], + Abc1: tuple[ComplexMatrix, ComplexVector, ComplexTensor], + Abc2: tuple[ComplexMatrix, ComplexVector, ComplexTensor], idx1: Sequence[int], idx2: Sequence[int], ): @@ -381,8 +381,8 @@ def complex_gaussian_integral_2( def join_Abc_poly( - Abc1: Tuple[ComplexMatrix, ComplexVector, ComplexTensor], - Abc2: Tuple[ComplexMatrix, ComplexVector, ComplexTensor], + Abc1: tuple[ComplexMatrix, ComplexVector, ComplexTensor], + Abc2: tuple[ComplexMatrix, ComplexVector, ComplexTensor], ): r"""Joins two ``(A,b,c)`` triples into a single ``(A,b,c)`` triple by block addition of the ``A`` matrices and concatenating the ``b`` vectors. @@ -441,8 +441,8 @@ def join_Abc_poly( def contract_two_Abc_poly( - Abc1: Tuple[ComplexMatrix, ComplexVector, ComplexTensor], - Abc2: Tuple[ComplexMatrix, ComplexVector, ComplexTensor], + Abc1: tuple[ComplexMatrix, ComplexVector, ComplexTensor], + Abc2: tuple[ComplexMatrix, ComplexVector, ComplexTensor], idx1: Sequence[int], idx2: Sequence[int], ): diff --git a/mrmustard/training/callbacks.py b/mrmustard/training/callbacks.py index 16f8e7e9d..f79c2464d 100644 --- a/mrmustard/training/callbacks.py +++ b/mrmustard/training/callbacks.py @@ -69,12 +69,13 @@ def rolling_cost_cb(optimizer, cost, **kwargs): # pylint: disable = wrong-import-position +from __future__ import annotations from dataclasses import dataclass from datetime import datetime import hashlib from pathlib import Path -from typing import Callable, Optional, Mapping, Sequence, Union +from typing import Callable, Mapping, Sequence import os import numpy as np @@ -130,13 +131,13 @@ def _should_call(self, **kwargs) -> bool: def trigger(self, **kwargs) -> bool: # pylint: disable=unused-argument """User implemented custom trigger conditions.""" - def call(self, **kwargs) -> Optional[Mapping]: # pylint: disable=unused-argument + def call(self, **kwargs) -> Mapping | None: # pylint: disable=unused-argument """User implemented main callback logic.""" - def update_cost_fn(self, **kwargs) -> Optional[Callable]: # pylint: disable=unused-argument + def update_cost_fn(self, **kwargs) -> Callable | None: # pylint: disable=unused-argument """User implemented cost_fn modifier.""" - def update_grads(self, **kwargs) -> Optional[Sequence]: # pylint: disable=unused-argument + def update_grads(self, **kwargs) -> Sequence | None: # pylint: disable=unused-argument """User implemented gradient modifier.""" def update_optimizer(self, optimizer, **kwargs): # pylint: disable=unused-argument @@ -190,17 +191,17 @@ class TensorboardCallback(Callback): # pylint: disable=too-many-instance-attrib """ #: The root logdir for tensorboard logging. - root_logdir: Union[str, Path] = "./tb_logdir" + root_logdir: str | Path = "./tb_logdir" #: The tag for experiment subfolder to group similar optimizations together for easy comparisons. #: Defaults to the hash of all trainable variables' names. 
- experiment_tag: Optional[str] = None + experiment_tag: str | None = None #: Extra prefix to name the optimization experiment. - prefix: Optional[str] = None + prefix: str | None = None #: Transformation on cost for the purpose of better interpretation. - cost_converter: Optional[Callable] = None + cost_converter: Callable | None = None #: Whether to track gradients as well as the values for trainable parameters. track_grads: bool = False diff --git a/mrmustard/training/parameter_update.py b/mrmustard/training/parameter_update.py index 0a1be7c9a..24b0e46e9 100644 --- a/mrmustard/training/parameter_update.py +++ b/mrmustard/training/parameter_update.py @@ -15,14 +15,14 @@ """TODO: document this module """ -from typing import Tuple, Sequence +from typing import Sequence from mrmustard.utils.typing import Tensor from mrmustard import math from .parameter import Trainable -def update_symplectic(grads_and_vars: Sequence[Tuple[Tensor, Trainable]], symplectic_lr: float): +def update_symplectic(grads_and_vars: Sequence[tuple[Tensor, Trainable]], symplectic_lr: float): r"""Updates the symplectic parameters using the given symplectic gradients. Implemented from: Wang J, Sun H, Fiori S. A Riemannian-steepest-descent approach @@ -38,7 +38,7 @@ def update_symplectic(grads_and_vars: Sequence[Tuple[Tensor, Trainable]], symple math.assign(S, new_value) -def update_orthogonal(grads_and_vars: Sequence[Tuple[Tensor, Trainable]], orthogonal_lr: float): +def update_orthogonal(grads_and_vars: Sequence[tuple[Tensor, Trainable]], orthogonal_lr: float): r"""Updates the orthogonal parameters using the given orthogonal gradients. Implemented from: Y Yao, F Miatto, N Quesada - arXiv preprint arXiv:2209.06069, 2022. @@ -49,7 +49,7 @@ def update_orthogonal(grads_and_vars: Sequence[Tuple[Tensor, Trainable]], orthog math.assign(O, new_value) -def update_unitary(grads_and_vars: Sequence[Tuple[Tensor, Trainable]], unitary_lr: float): +def update_unitary(grads_and_vars: Sequence[tuple[Tensor, Trainable]], unitary_lr: float): r"""Updates the unitary parameters using the given unitary gradients. Implemented from: Y Yao, F Miatto, N Quesada - arXiv preprint arXiv:2209.06069, 2022. @@ -60,7 +60,7 @@ def update_unitary(grads_and_vars: Sequence[Tuple[Tensor, Trainable]], unitary_l math.assign(U, new_value) -def update_euclidean(grads_and_vars: Sequence[Tuple[Tensor, Trainable]], euclidean_lr: float): +def update_euclidean(grads_and_vars: Sequence[tuple[Tensor, Trainable]], euclidean_lr: float): """Updates the parameters using the euclidian gradients.""" math.euclidean_opt.lr = euclidean_lr math.euclidean_opt.apply_gradients(grads_and_vars) From c5417f9221986923f28ae0dd1f71302c71912fd1 Mon Sep 17 00:00:00 2001 From: Anthony Date: Tue, 20 Aug 2024 17:27:54 -0400 Subject: [PATCH 2/3] thanks kasper --- mrmustard/lab_dev/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mrmustard/lab_dev/utils.py b/mrmustard/lab_dev/utils.py index 37a57e48b..82e25925c 100644 --- a/mrmustard/lab_dev/utils.py +++ b/mrmustard/lab_dev/utils.py @@ -17,6 +17,7 @@ """ This module contains the utility functions used by the classes in ``mrmustard.lab``. 
""" +from __future__ import annotations from typing import Callable, Generator From 5718df6ee4bf398a956e5116d56dd411108fc8c7 Mon Sep 17 00:00:00 2001 From: Anthony Date: Wed, 21 Aug 2024 09:22:31 -0400 Subject: [PATCH 3/3] Filippo CR --- mrmustard/lab/utils.py | 4 ++-- mrmustard/lab_dev/circuit_components.py | 4 ++-- mrmustard/lab_dev/utils.py | 6 +++--- mrmustard/math/backend_manager.py | 8 ++++---- mrmustard/math/parameter_set.py | 4 ++-- mrmustard/math/parameters.py | 18 +++++++++--------- mrmustard/physics/ansatze.py | 12 ++++++------ mrmustard/physics/gaussian.py | 4 ++-- 8 files changed, 30 insertions(+), 30 deletions(-) diff --git a/mrmustard/lab/utils.py b/mrmustard/lab/utils.py index cf7f242dc..c7ea508de 100644 --- a/mrmustard/lab/utils.py +++ b/mrmustard/lab/utils.py @@ -20,14 +20,14 @@ from __future__ import annotations -from typing import Callable +from typing import Any, Callable from mrmustard.math.parameters import update_euclidean, Constant, Variable def make_parameter( is_trainable: bool, - value: any, + value: Any, name: str, bounds: tuple[float | None, float | None], update_fn: Callable = update_euclidean, diff --git a/mrmustard/lab_dev/circuit_components.py b/mrmustard/lab_dev/circuit_components.py index f83be9e44..989b53b67 100644 --- a/mrmustard/lab_dev/circuit_components.py +++ b/mrmustard/lab_dev/circuit_components.py @@ -21,7 +21,7 @@ from inspect import signature from pydoc import locate -from typing import Sequence +from typing import Any, Sequence import numbers from functools import cached_property @@ -107,7 +107,7 @@ def __init__( if self._representation: self._representation = self._representation.reorder(tuple(perm)) - def _serialize(self) -> tuple[dict[str, any], dict[str, ArrayLike]]: + def _serialize(self) -> tuple[dict[str, Any], dict[str, ArrayLike]]: """ Inner serialization to be used by Circuit.serialize(). diff --git a/mrmustard/lab_dev/utils.py b/mrmustard/lab_dev/utils.py index 82e25925c..e9d12017c 100644 --- a/mrmustard/lab_dev/utils.py +++ b/mrmustard/lab_dev/utils.py @@ -19,7 +19,7 @@ """ from __future__ import annotations -from typing import Callable, Generator +from typing import Any, Callable, Generator from mrmustard import math from mrmustard.math.parameters import update_euclidean, Constant, Variable @@ -27,11 +27,11 @@ def make_parameter( is_trainable: bool, - value: any, + value: Any, name: str, bounds: tuple[float | None, float | None], update_fn: Callable = update_euclidean, - dtype: any = None, + dtype: Any = None, ): r""" Returns a constant or variable parameter with given name, value, bounds, and update function. diff --git a/mrmustard/math/backend_manager.py b/mrmustard/math/backend_manager.py index 74d151428..cdaafda02 100644 --- a/mrmustard/math/backend_manager.py +++ b/mrmustard/math/backend_manager.py @@ -20,7 +20,7 @@ import sys from functools import lru_cache from itertools import product -from typing import Callable, Sequence +from typing import Any, Callable, Sequence import numpy as np from scipy.special import binom @@ -98,7 +98,7 @@ def __init__(self) -> None: # binding types and decorators of numpy backend self._bind() - def _apply(self, fn: str, args: Sequence[any] | None = ()) -> any: + def _apply(self, fn: str, args: Sequence[Any] | None = ()) -> Any: r""" Applies a function ``fn`` from the backend in use to the given ``args``. 
""" @@ -231,7 +231,7 @@ def any(self, array: Tensor) -> bool: """ return self._apply("any", (array,)) - def arange(self, start: int, limit: int = None, delta: int = 1, dtype: any = None) -> Tensor: + def arange(self, start: int, limit: int = None, delta: int = 1, dtype: Any = None) -> Tensor: r"""Returns an array of evenly spaced values within a given interval. Args: @@ -597,7 +597,7 @@ def eye_like(self, array: Tensor) -> Tensor: """ return self._apply("eye_like", (array,)) - def from_backend(self, value: any) -> bool: + def from_backend(self, value: Any) -> bool: r"""Whether the given tensor is a tensor of the concrete backend. Args: diff --git a/mrmustard/math/parameter_set.py b/mrmustard/math/parameter_set.py index 5c00cc070..58ff5b3a5 100644 --- a/mrmustard/math/parameter_set.py +++ b/mrmustard/math/parameter_set.py @@ -14,7 +14,7 @@ """This module contains the classes to describe sets of parameters.""" -from typing import Sequence, Union +from typing import Any, Sequence, Union from mrmustard.math.backend_manager import BackendManager @@ -116,7 +116,7 @@ def tagged_variables(self, tag: str) -> dict[str, Variable]: ret[f"{tag}/{k}"] = v return ret - def to_dict(self) -> dict[str, any]: + def to_dict(self) -> dict[str, Any]: r""" Returns a dictionary representation of this parameter set such that it is compatible with the signature of built-in circuit components. diff --git a/mrmustard/math/parameters.py b/mrmustard/math/parameters.py index 12bc295ed..14294d743 100644 --- a/mrmustard/math/parameters.py +++ b/mrmustard/math/parameters.py @@ -16,7 +16,7 @@ from __future__ import annotations -from typing import Callable +from typing import Any, Callable from mrmustard.math.backend_manager import BackendManager @@ -98,7 +98,7 @@ class Constant: dtype: The dtype of this constant. """ - def __init__(self, value: any, name: str, dtype: any = None): + def __init__(self, value: Any, name: str, dtype: Any = None): if math.from_backend(value) and not math.is_trainable(value): self._value = value elif hasattr(value, "dtype"): @@ -115,7 +115,7 @@ def name(self) -> str: return self._name @property - def value(self) -> any: + def value(self) -> Any: r""" The value of this constant. """ @@ -147,11 +147,11 @@ class Variable: def __init__( self, - value: any, + value: Any, name: str, bounds: tuple[float | None, float | None] = (None, None), update_fn: Callable = update_euclidean, - dtype: any = None, + dtype: Any = None, ): self._value = self._get_value(value, bounds, name, dtype) self._name = name @@ -195,7 +195,7 @@ def update_fn(self, value): self._update_fn = value @property - def value(self) -> any: + def value(self) -> Any: r""" The value of this variable. 
""" @@ -207,7 +207,7 @@ def value(self, value): @staticmethod def orthogonal( - value: any | None, + value: Any | None, name: str, bounds: tuple[float | None, float | None] = (None, None), N: int = 1, @@ -231,7 +231,7 @@ def orthogonal( @staticmethod def symplectic( - value: any, + value: Any, name: str, bounds: tuple[float | None, float | None] = (None, None), N: int = 1, @@ -255,7 +255,7 @@ def symplectic( @staticmethod def unitary( - value: any, + value: Any, name: str, bounds: tuple[float | None, float | None] = (None, None), N: int = 1, diff --git a/mrmustard/physics/ansatze.py b/mrmustard/physics/ansatze.py index 7135a7e7c..9517979e5 100644 --- a/mrmustard/physics/ansatze.py +++ b/mrmustard/physics/ansatze.py @@ -20,7 +20,7 @@ import itertools from abc import ABC, abstractmethod -from typing import Callable, Sequence +from typing import Any, Callable, Sequence from warnings import warn import numpy as np @@ -66,7 +66,7 @@ def __init__(self) -> None: self._kwargs = {} @abstractmethod - def from_function(cls, fn: Callable, **kwargs: any) -> Ansatz: + def from_function(cls, fn: Callable, **kwargs: Any) -> Ansatz: r""" Returns an ansatz from a function and kwargs. """ @@ -99,7 +99,7 @@ def __sub__(self, other: Ansatz) -> Ansatz: raise TypeError(f"Cannot subtract {self.__class__} and {other.__class__}.") from e @abstractmethod - def __call__(self, point: any) -> Scalar: + def __call__(self, point: Any) -> Scalar: r""" Evaluates this ansatz at a given point in the domain. """ @@ -547,7 +547,7 @@ def c(self) -> Batch[ComplexTensor]: return self.array @classmethod - def from_function(cls, fn: Callable, **kwargs: any) -> PolyExpAnsatz: + def from_function(cls, fn: Callable, **kwargs: Any) -> PolyExpAnsatz: r""" Returns a PolyExpAnsatz object from a generator function. """ @@ -1017,7 +1017,7 @@ def num_vars(self) -> int: return len(self.array.shape) - 1 @classmethod - def from_function(cls, fn: Callable, **kwargs: any) -> ArrayAnsatz: + def from_function(cls, fn: Callable, **kwargs: Any) -> ArrayAnsatz: r""" Returns an ArrayAnsatz object from a generator function. """ @@ -1104,7 +1104,7 @@ def __and__(self, other: ArrayAnsatz) -> ArrayAnsatz: new_array = [math.outer(a, b) for a in self.array for b in other.array] return self.__class__(array=new_array) - def __call__(self, point: any) -> Scalar: + def __call__(self, point: Any) -> Scalar: r""" Evaluates this ansatz at a given point in the domain. """ diff --git a/mrmustard/physics/gaussian.py b/mrmustard/physics/gaussian.py index 976dde1b4..41eca0e4a 100644 --- a/mrmustard/physics/gaussian.py +++ b/mrmustard/physics/gaussian.py @@ -18,7 +18,7 @@ from __future__ import annotations -from typing import Sequence +from typing import Any, Sequence from mrmustard import math, settings from mrmustard.math.tensor_wrappers.xptensor import XPMatrix, XPVector @@ -730,7 +730,7 @@ def purity(cov: Matrix) -> Scalar: return 1 / math.sqrt(math.det((2 / settings.HBAR) * cov)) -def symplectic_eigenvals(cov: Matrix) -> any: +def symplectic_eigenvals(cov: Matrix) -> Any: r"""Returns the sympletic eigenspectrum of a covariance matrix. For a pure state, we expect the sympletic eigenvalues to be 1.