diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index af786e561..abe71be77 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -17,17 +17,28 @@ from __future__ import print_function
 import numpy as np
 from tensornetwork.backends import backend_factory
-from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index
+from tensornetwork.block_tensor.index import Index, fuse_index_pair
 from tensornetwork.block_tensor.charge import fuse_degeneracies, fuse_charges, fuse_degeneracies, BaseCharge, fuse_ndarray_charges, intersect
 import numpy as np
 import scipy as sp
 import itertools
 import time
-from typing import List, Union, Any, Tuple, Type, Optional, Dict, Iterable, Sequence
+from typing import List, Union, Any, Tuple, Type, Optional, Dict, Iterable, Sequence, Text
 Tensor = Any
 
 
-def get_flat_order(indices, order):
+def get_flat_order(indices: List[Index],
+                   order: Union[List[int], np.ndarray]) -> np.ndarray:
+  """
+  Compute the order of the flattened charges of `indices`
+  corresponding to an `order` of `indices`, i.e. map a
+  permutation of `indices` to the corresponding permutation
+  of their flattened charges.
+  Args:
+    indices: A list of `Index` objects.
+    order: A permutation of `indices`.
+  Returns:
+    np.ndarray: The permutation of the flattened charges
+      corresponding to `order`.
+  """
   flat_charges, _ = get_flat_meta_data(indices)
   flat_labels = np.arange(len(flat_charges))
   cum_num_legs = np.append(0, np.cumsum([len(i.flat_charges) for i in indices]))
@@ -38,6 +49,12 @@ def get_flat_order(indices, order):
 
 
 def get_flat_meta_data(indices):
+  """
+  Return charges and flows of the flattened `indices`.
+  Args:
+    indices: A list of `Index` objects.
+  Returns: The flattened charges and the flattened flows.
+  """
   charges = []
   flows = []
   for i in indices:
@@ -818,6 +835,29 @@ def flat_flows(self):
       flat.extend(i.flat_flows)
     return flat
 
+  def __matmul__(self, other):
+    """Matrix multiplication of two rank-2 tensors."""
+    if self.rank != 2:
+      raise ValueError('__matmul__ only implemented for matrices')
+
+    if other.rank != 2:
+      raise ValueError('__matmul__ only implemented for matrices')
+    return tensordot(self, other, ([1], [0]))
+
+  def conj(self):
+    """
+    Return the complex conjugate of the tensor.
+    Complex conjugation reverses the direction (flow)
+    of all indices of the tensor.
+    Returns:
+      BlockSparseTensor: The complex conjugated tensor.
+    """
+    indices = [
+        Index(i.flat_charges, list(np.logical_not(i.flat_flows)), i.name)
+        for i in self.indices
+    ]
+    return BlockSparseTensor(np.conj(self.data), indices)
+
   def transpose(
       self,
       order: Union[List[int], np.ndarray],
@@ -839,7 +879,6 @@ def transpose(
       return BlockSparseTensor(self.data, self.indices)
     flat_charges, flat_flows = get_flat_meta_data(self.indices)
     flat_order = get_flat_order(self.indices, order)
-    print(flat_order)
     tr_partition = _find_best_partition(
         [len(flat_charges[n]) for n in flat_order])
@@ -938,8 +977,9 @@ def reshape(tensor: BlockSparseTensor,
   Reshape `tensor` into `shape`.
   `reshape` works essentially the same as the dense version, with the
   notable exception that the tensor can only be reshaped into a form
-  compatible with its elementary indices. The elementary indices are
-  the indices at the leaves of the `Index` objects `tensors.indices`.
+  compatible with its elementary shape. The elementary shape is
+  the shape determined by the flattened charges of all `Index` objects
+  in `tensor.indices`.
   For example, while the following reshaping is possible for regular
   dense numpy tensor,
   ```
@@ -948,14 +988,14 @@
   ```
   the same code for BlockSparseTensor
   ```
-  q1 = np.random.randint(0,10,6)
-  q2 = np.random.randint(0,10,6)
-  q3 = np.random.randint(0,10,6)
-  i1 = Index(charges=q1,flow=1)
-  i2 = Index(charges=q2,flow=-1)
-  i3 = Index(charges=q3,flow=1)
+  q1 = U1Charge(np.random.randint(0,10,6))
+  q2 = U1Charge(np.random.randint(0,10,6))
+  q3 = U1Charge(np.random.randint(0,10,6))
+  i1 = Index(charges=q1,flow=False)
+  i2 = Index(charges=q2,flow=True)
+  i3 = Index(charges=q3,flow=False)
   A=BlockSparseTensor.randn(indices=[i1,i2,i3])
-  print(nA.shape) #prints (6,6,6)
+  print(A.shape) #prints (6,6,6)
   reshape(A, (2,3,6,6)) #raises ValueError
   ```
   raises a `ValueError` since (2,3,6,6)
@@ -975,8 +1015,8 @@ def reshape(tensor: BlockSparseTensor,
 
 def transpose(tensor: BlockSparseTensor,
               order: Union[List[int], np.ndarray]) -> "BlockSparseTensor":
   """
-  Transpose `tensor` into the new order `order`. This routine currently shuffles
-  data.
+  Transpose `tensor` into the new order `order`.
+  This routine currently shuffles data.
   Args:
     tensor: The tensor to be transposed.
     order: The new order of indices.
@@ -1207,7 +1247,8 @@ def tensordot(
 def svd(matrix: BlockSparseTensor,
         full_matrices: Optional[bool] = True,
         compute_uv: Optional[bool] = True,
-        hermitian: Optional[bool] = False):
+        hermitian: Optional[bool] = False
+       ) -> Tuple[BlockSparseTensor, BlockSparseTensor, BlockSparseTensor]:
   """
   Compute the singular value decomposition of
   `matrix`. The matrix if factorized into `u * s * vh`, with
@@ -1220,10 +1261,14 @@ def svd(matrix: BlockSparseTensor,
       and `v.shape[0]=s.shape[1]`
     compute_yv: If `True`, return `u` and `v`.
     hermitian: If `True`, assume hermiticity of `matrix`.
+  Returns:
+    If `compute_uv` is `True`: Three BlockSparseTensors `U, S, V`.
+    If `compute_uv` is `False`: A BlockSparseTensor `S` containing the
+      singular values.
   """
 
   if matrix.rank != 2:
-    raise NotImplementedError("SVD currently supports only rank-2 tensors.")
+    raise NotImplementedError("svd currently supports only rank-2 tensors.")
 
   flat_charges = matrix.indices[0]._charges + matrix.indices[1]._charges
   flat_flows = matrix.flat_flows
@@ -1296,3 +1341,186 @@ def svd(matrix: BlockSparseTensor,
       np.concatenate([np.ravel(v) for v in v_blocks]), indices_v)
 
   return S
+
+
+def qr(matrix: BlockSparseTensor, mode: Optional[Text] = 'reduced'
+      ) -> Tuple[BlockSparseTensor, BlockSparseTensor]:
+  """
+  Compute the qr decomposition of an `M` by `N` matrix `matrix`.
+  The matrix is factorized into `q*r`, with `q` an orthogonal
+  matrix and `r` an upper triangular matrix.
+  Args:
+    matrix: A matrix (i.e. a rank-2 tensor) of type `BlockSparseTensor`
+    mode : Can take values {'reduced', 'complete', 'r'}; 'raw' is
+      currently not supported. If K = min(M, N), then
+
+      * 'reduced'  : returns q, r with dimensions (M, K), (K, N) (default)
+      * 'complete' : returns q, r with dimensions (M, M), (M, N)
+      * 'r'        : returns r only with dimensions (K, N)
+
+  Returns:
+    (BlockSparseTensor, BlockSparseTensor): If mode = `reduced` or `complete`.
+    BlockSparseTensor: If mode = `r`.
+ """ + if mode == 'raw': + raise NotImplementedError('mode `raw` currenntly not supported') + if matrix.rank != 2: + raise NotImplementedError("qr currently supports only rank-2 tensors.") + + flat_charges = matrix.indices[0]._charges + matrix.indices[1]._charges + flat_flows = matrix.flat_flows + partition = len(matrix.indices[0].flat_charges) + blocks, charges, shapes = _find_diagonal_sparse_blocks( + flat_charges, flat_flows, partition) + + q_blocks = [] + r_blocks = [] + for n in range(len(blocks)): + out = np.linalg.qr(np.reshape(matrix.data[blocks[n]], shapes[:, n]), mode) + if mode in ('reduced', 'complete'): + q_blocks.append(out[0]) + r_blocks.append(out[1]) + elif mode == 'r': + r_blocks.append(out) + else: + raise ValueError('unknown value {} for input `mode`'.format(mode)) + + left_r_charge = charges.__new__(type(charges)) + left_r_charge_labels = np.concatenate([ + np.full(r_blocks[n].shape[0], fill_value=n, dtype=np.int16) + for n in range(len(r_blocks)) + ]) + + left_r_charge.__init__(charges.unique_charges, left_r_charge_labels, + charges.charge_types) + indices_r = [Index(left_r_charge, False), matrix.indices[1]] + + R = BlockSparseTensor( + np.concatenate([np.ravel(r) for r in r_blocks]), indices_r) + if mode in ('reduced', 'complete'): + right_q_charge = charges.__new__(type(charges)) + right_q_charge_labels = np.concatenate([ + np.full(q_blocks[n].shape[1], fill_value=n, dtype=np.int16) + for n in range(len(q_blocks)) + ]) + right_q_charge.__init__(charges.unique_charges, right_q_charge_labels, + charges.charge_types) + + indices_q = [Index(right_q_charge, True), matrix.indices[0]] + #TODO: reuse data from _find_diagonal_sparse_blocks above + #to avoid the transpose + return BlockSparseTensor( + np.concatenate([np.ravel(q.T) for q in q_blocks]), indices_q).transpose( + (1, 0)), R + + return R + + +def eigh(matrix: BlockSparseTensor, + UPLO: Optional[Text] = 'L') -> [BlockSparseTensor, BlockSparseTensor]: + """ + Compute the eigen decomposition of a hermitian `M` by `M` matrix `matrix`. + Args: + matrix: A matrix (i.e. 
+    UPLO: Use the lower ('L') or upper ('U') triangular part of
+      `matrix` (numpy convention).
+  Returns:
+    (BlockSparseTensor, BlockSparseTensor): The eigenvalues and eigenvectors.
+  """
+  if matrix.rank != 2:
+    raise NotImplementedError("eigh currently supports only rank-2 tensors.")
+
+  flat_charges = matrix.indices[0]._charges + matrix.indices[1]._charges
+  flat_flows = matrix.flat_flows
+  partition = len(matrix.indices[0].flat_charges)
+  blocks, charges, shapes = _find_diagonal_sparse_blocks(
+      flat_charges, flat_flows, partition)
+
+  eigvals = []
+  v_blocks = []
+  for n in range(len(blocks)):
+    e, v = np.linalg.eigh(
+        np.reshape(matrix.data[blocks[n]], shapes[:, n]), UPLO)
+    eigvals.append(np.diag(e))
+    v_blocks.append(v)
+
+  left_v_charge = charges.__new__(type(charges))
+  left_v_charge_labels = np.concatenate([
+      np.full(v_blocks[n].shape[0], fill_value=n, dtype=np.int16)
+      for n in range(len(v_blocks))
+  ])
+
+  left_v_charge.__init__(charges.unique_charges, left_v_charge_labels,
+                         charges.charge_types)
+  indices_v = [Index(left_v_charge, False), matrix.indices[1]]
+
+  V = BlockSparseTensor(
+      np.concatenate([np.ravel(v) for v in v_blocks]), indices_v)
+  eigvalscharge = charges.__new__(type(charges))
+  eigvalscharge_labels = np.concatenate([
+      np.full(eigvals[n].shape[1], fill_value=n, dtype=np.int16)
+      for n in range(len(eigvals))
+  ])
+  eigvalscharge.__init__(charges.unique_charges, eigvalscharge_labels,
+                         charges.charge_types)
+
+  indices_e = [Index(eigvalscharge, True), matrix.indices[0]]
+  #TODO: reuse data from _find_diagonal_sparse_blocks above
+  #to avoid the transpose
+  return BlockSparseTensor(
+      np.concatenate([np.ravel(e.T) for e in eigvals]), indices_e).transpose(
+          (1, 0)), V
+
+
+def eig(matrix: BlockSparseTensor) -> Tuple[BlockSparseTensor, BlockSparseTensor]:
+  """
+  Compute the eigendecomposition of an `M` by `M` matrix `matrix`.
+  Args:
+    matrix: A matrix (i.e. a rank-2 tensor) of type `BlockSparseTensor`.
+
+  Returns:
+    (BlockSparseTensor, BlockSparseTensor): The eigenvalues and eigenvectors.
+
+  """
+  if matrix.rank != 2:
+    raise NotImplementedError("eig currently supports only rank-2 tensors.")
+
+  flat_charges = matrix.indices[0]._charges + matrix.indices[1]._charges
+  flat_flows = matrix.flat_flows
+  partition = len(matrix.indices[0].flat_charges)
+  blocks, charges, shapes = _find_diagonal_sparse_blocks(
+      flat_charges, flat_flows, partition)
+
+  eigvals = []
+  v_blocks = []
+  for n in range(len(blocks)):
+    e, v = np.linalg.eig(np.reshape(matrix.data[blocks[n]], shapes[:, n]))
+    eigvals.append(np.diag(e))
+    v_blocks.append(v)
+
+  left_v_charge = charges.__new__(type(charges))
+  left_v_charge_labels = np.concatenate([
+      np.full(v_blocks[n].shape[0], fill_value=n, dtype=np.int16)
+      for n in range(len(v_blocks))
+  ])
+
+  left_v_charge.__init__(charges.unique_charges, left_v_charge_labels,
+                         charges.charge_types)
+  indices_v = [Index(left_v_charge, False), matrix.indices[1]]
+
+  V = BlockSparseTensor(
+      np.concatenate([np.ravel(v) for v in v_blocks]), indices_v)
+  eigvalscharge = charges.__new__(type(charges))
+  eigvalscharge_labels = np.concatenate([
+      np.full(eigvals[n].shape[1], fill_value=n, dtype=np.int16)
+      for n in range(len(eigvals))
+  ])
+  eigvalscharge.__init__(charges.unique_charges, eigvalscharge_labels,
+                         charges.charge_types)
+
+  indices_e = [Index(eigvalscharge, True), matrix.indices[0]]
+  #TODO: reuse data from _find_diagonal_sparse_blocks above
+  #to avoid the transpose
+  return BlockSparseTensor(
+      np.concatenate([np.ravel(e.T) for e in eigvals]), indices_e).transpose(
+          (1, 0)), V
diff --git a/tensornetwork/block_tensor/block_tensor_test.py b/tensornetwork/block_tensor/block_tensor_test.py
index adc77fc6a..3baf0cd8d 100644
--- a/tensornetwork/block_tensor/block_tensor_test.py
+++ b/tensornetwork/block_tensor/block_tensor_test.py
@@ -3,12 +3,13 @@
 from tensornetwork.block_tensor.charge import U1Charge, fuse_charges
 from tensornetwork.block_tensor.index import Index
-from tensornetwork.block_tensor.block_tensor import compute_num_nonzero, reduce_charges, BlockSparseTensor, fuse_ndarrays, tensordot
+from tensornetwork.block_tensor.block_tensor import compute_num_nonzero, reduce_charges, BlockSparseTensor, fuse_ndarrays, tensordot, svd, qr
 
 np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128]
+np_tensordot_dtypes = [np.float16, np.float64, np.complex128]
 
 
-def get_contractable_tensors(R1, R2, cont):
+def get_contractable_tensors(R1, R2, cont, dtype):
   DsA = np.random.randint(5, 10, R1)
   DsB = np.random.randint(5, 10, R2)
   assert R1 >= cont
@@ -49,8 +50,8 @@ def get_contractable_tensors(R1, R2, cont):
   for n in sorted(compB):
     indices_final.append(indicesB[n])
   shapes = tuple([i.dim for i in indices_final])
-  A = BlockSparseTensor.random(indices=indicesA)
-  B = BlockSparseTensor.random(indices=indicesB)
+  A = BlockSparseTensor.random(indices=indicesA, dtype=dtype)
+  B = BlockSparseTensor.random(indices=indicesB, dtype=dtype)
   return A, B, indsA, indsB
 
 
@@ -133,9 +134,10 @@ def test_reshape_transpose():
   np.testing.assert_allclose(dense, B.todense())
 
 
+@pytest.mark.parametrize("dtype", np_tensordot_dtypes)
 @pytest.mark.parametrize("R1, R2, cont", [(4, 4, 2), (4, 3, 3), (3, 4, 3)])
-def test_tensordot(R1, R2, cont):
-  A, B, indsA, indsB = get_contractable_tensors(R1, R2, cont)
+def test_tensordot(R1, R2, cont, dtype):
+  A, B, indsA, indsB = get_contractable_tensors(R1, R2, cont, dtype)
   res = tensordot(A, B, (indsA, indsB))
   dense_res = np.tensordot(A.todense(), B.todense(), (indsA, indsB))
   np.testing.assert_allclose(dense_res, res.todense())
@@ -168,9 +170,10 @@ def test_tensordot_reshape():
   np.testing.assert_allclose(dense, res.todense())
 
 
+@pytest.mark.parametrize("dtype", np_tensordot_dtypes)
 @pytest.mark.parametrize("R1, R2, cont", [(4, 4, 2), (4, 3, 3), (3, 4, 3)])
-def test_tensordot_final_order(R1, R2, cont):
-  A, B, indsA, indsB = get_contractable_tensors(R1, R2, cont)
+def test_tensordot_final_order(R1, R2, cont, dtype):
+  A, B, indsA, indsB = get_contractable_tensors(R1, R2, cont, dtype)
   final_order = np.arange(R1 + R2 - 2 * cont)
   np.random.shuffle(final_order)
   res = tensordot(A, B, (indsA, indsB), final_order=final_order)
@@ -179,18 +182,66 @@ def test_tensordot_final_order(R1, R2, cont):
   np.testing.assert_allclose(dense_res, res.todense())
 
 
+@pytest.mark.parametrize("dtype", np_dtypes)
 @pytest.mark.parametrize("R1, R2", [(2, 2), (3, 3), (4, 4), (1, 1)])
-def test_tensordot_inner(R1, R2):
-  A, B, indsA, indsB = get_contractable_tensors(R1, R2, 0)
+def test_tensordot_inner(R1, R2, dtype):
+  A, B, indsA, indsB = get_contractable_tensors(R1, R2, 0, dtype)
   res = tensordot(A, B, (indsA, indsB))
   dense_res = np.tensordot(A.todense(), B.todense(), (indsA, indsB))
   np.testing.assert_allclose(dense_res, res.todense())
 
 
+@pytest.mark.parametrize("dtype", np_dtypes)
 @pytest.mark.parametrize("R1, R2", [(2, 2), (2, 1), (1, 2), (1, 1)])
-def test_tensordot_outer(R1, R2):
-  A, B, indsA, indsB = get_contractable_tensors(R1, R2, 0)
+def test_tensordot_outer(R1, R2, dtype):
+  A, B, indsA, indsB = get_contractable_tensors(R1, R2, 0, dtype)
   res = tensordot(A, B, axes=0)
   dense_res = np.tensordot(A.todense(), B.todense(), axes=0)
   np.testing.assert_allclose(dense_res, res.todense())
+
+
+@pytest.mark.parametrize("dtype", np_dtypes)
+@pytest.mark.parametrize("R, R1, R2", [(2, 1, 1), (3, 2, 1), (3, 1, 2)])
+def test_svd_prod(dtype, R, R1, R2):
+  D = 30
+  charges = [U1Charge.random(-5, 5, D) for n in range(R)]
+  flows = [True] * R
+  A = BlockSparseTensor.random(
+      [Index(charges[n], flows[n]) for n in range(R)], dtype=dtype)
+  A = A.reshape([D**R1, D**R2])
+  U, S, V = svd(A, full_matrices=False)
+  A_ = U @ S @ V
+  np.testing.assert_allclose(A.data, A_.data)
+
+
+@pytest.mark.parametrize("dtype", np_dtypes)
+@pytest.mark.parametrize("R, R1, R2", [(2, 1, 1), (3, 2, 1), (3, 1, 2)])
+def test_svd_singvals(dtype, R, R1, R2):
+  D = 30
+  charges = [U1Charge.random(-5, 5, D) for n in range(R)]
+  flows = [True] * R
+  A = BlockSparseTensor.random(
+      [Index(charges[n], flows[n]) for n in range(R)], dtype=dtype)
+  A = A.reshape([D**R1, D**R2])
+  U1, S1, V1 = svd(A, full_matrices=False)
+  S2 = svd(A, full_matrices=False, compute_uv=False)
+  np.testing.assert_allclose(S1.data, S2.data)
+  Sdense = np.linalg.svd(A.todense(), compute_uv=False)
+  np.testing.assert_allclose(
+      np.sort(Sdense[Sdense > 1E-15]), np.sort(S2.data[S2.data > 0.0]))
+
+
+@pytest.mark.parametrize("mode", ['complete', 'reduced'])
+@pytest.mark.parametrize("dtype", np_dtypes)
+@pytest.mark.parametrize("R, R1, R2", [(2, 1, 1), (3, 2, 1), (3, 1, 2)])
+def test_qr_prod(dtype, R, R1, R2, mode):
+  D = 30
+  charges = [U1Charge.random(-5, 5, D) for n in range(R)]
+  flows = [True] * R
+  A = BlockSparseTensor.random(
+      [Index(charges[n], flows[n]) for n in range(R)], dtype=dtype)
+  A = A.reshape([D**R1, D**R2])
+  Q, R_ = qr(A, mode=mode)
+  A_ = Q @ R_
+  np.testing.assert_allclose(A.data, A_.data)
diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py
index 209ef477d..422e5d0c0 100644
--- a/tensornetwork/block_tensor/index.py
+++ b/tensornetwork/block_tensor/index.py
@@ -254,17 +254,3 @@ def fuse_indices(indices: List[Index], flow: Optional[int] = False) -> Index:
   for n in range(1, len(indices)):
     index = fuse_index_pair(index, indices[n], flow=flow)
   return index
-
-
-def split_index(index: Index) -> Tuple[Index, Index]:
-  """
-  Split an index (leg) of a symmetric tensor into two legs.
-  Args:
-    index: A tensor Index.
-  Returns:
-    Tuple[Index, Index]: The result of splitting `index`.
-  """
-  if index.is_leave:
-    raise ValueError("cannot split an elementary index")
-
-  return index.left_child, index.right_child
diff --git a/tensornetwork/block_tensor/index_test.py b/tensornetwork/block_tensor/index_test.py
index 438984952..2d0bf846a 100644
--- a/tensornetwork/block_tensor/index_test.py
+++ b/tensornetwork/block_tensor/index_test.py
@@ -1,6 +1,6 @@
 import numpy as np
 # pylint: disable=line-too-long
-from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_indices
+from tensornetwork.block_tensor.index import Index, fuse_index_pair, fuse_indices
 from tensornetwork.block_tensor.charge import U1Charge, BaseCharge
 
 
@@ -17,8 +17,6 @@ def test_index_fusion_mul():
   i2 = Index(charges=q2, flow=False, name='index2')  #index on leg 2
   i12 = i1 * i2
-  assert i12.left_child is i1
-  assert i12.right_child is i2
   for n in range(len(i12.charges.charges)):
     assert np.all(i12.charges.charges == (q1 + q2).charges)
 
@@ -35,96 +33,10 @@ def test_fuse_indices():
   i2 = Index(charges=q2, flow=False, name='index2')  #index on leg 2
   i12 = fuse_indices([i1, i2])
-  assert i12.left_child is i1
-  assert i12.right_child is i2
   for n in range(len(i12.charges.charges)):
     assert np.all(i12.charges.charges == (q1 + q2).charges)
 
 
-def test_split_index():
-  D = 10
-  B = 4
-  dtype = np.int16
-  q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1,
-                                  D).astype(dtype))  #quantum numbers on leg 1
-  q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1,
-                                  D).astype(dtype))  #quantum numbers on leg 1
-
-  i1 = Index(charges=q1, flow=False, name='index1')  #index on leg 1
-  i2 = Index(charges=q2, flow=False, name='index2')  #index on leg 2
-
-  i12 = i1 * i2
-  i1_, i2_ = split_index(i12)
-  assert i1 is i1_
-  assert i2 is i2_
-  np.testing.assert_allclose(q1.charges, i1.charges.charges)
-  np.testing.assert_allclose(q2.charges, i2.charges.charges)
-  np.testing.assert_allclose(q1.charges, i1_.charges.charges)
-  np.testing.assert_allclose(q2.charges, i2_.charges.charges)
-  assert i1_.name == 'index1'
-  assert i2_.name == 'index2'
-  assert i1_.flow == i1.flow
-  assert i2_.flow == i2.flow
-
-
-def test_elementary_indices():
-  D = 10
-  B = 4
-  dtype = np.int16
-  q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype))
-  q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype))
-  q3 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype))
-  q4 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype))
-  i1 = Index(charges=q1, flow=False, name='index1')
-  i2 = Index(charges=q2, flow=False, name='index2')
-  i3 = Index(charges=q3, flow=False, name='index3')
-  i4 = Index(charges=q4, flow=False, name='index4')
-
-  i12 = i1 * i2
-  i34 = i3 * i4
-  elmt12 = i12.get_elementary_indices()
-  assert elmt12[0] is i1
-  assert elmt12[1] is i2
-
-  i1234 = i12 * i34
-  elmt1234 = i1234.get_elementary_indices()
-  assert elmt1234[0] is i1
-  assert elmt1234[1] is i2
-  assert elmt1234[2] is i3
-  assert elmt1234[3] is i4
-  assert elmt1234[0].name == 'index1'
-  assert elmt1234[1].name == 'index2'
-  assert elmt1234[2].name == 'index3'
-  assert elmt1234[3].name == 'index4'
-  assert elmt1234[0].flow == i1.flow
-  assert elmt1234[1].flow == i2.flow
-  assert elmt1234[2].flow == i3.flow
-  assert elmt1234[3].flow == i4.flow
-
-  np.testing.assert_allclose(q1.charges, i1.charges.charges)
-  np.testing.assert_allclose(q2.charges, i2.charges.charges)
-  np.testing.assert_allclose(q3.charges, i3.charges.charges)
-  np.testing.assert_allclose(q4.charges, i4.charges.charges)
-
-
-def test_leave():
-  D = 10
-  B = 4
-  dtype = np.int16
-  q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1,
-                                  D).astype(dtype))  #quantum numbers on leg 1
-  q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1,
-                                  D).astype(dtype))  #quantum numbers on leg 1
-
-  i1 = Index(charges=q1, flow=False, name='index1')
-  i2 = Index(charges=q2, flow=False, name='index2')
-  assert i1.is_leave
-  assert i2.is_leave
-
-  i12 = i1 * i2
-  assert not i12.is_leave
-
-
 def test_copy():
   D = 10
   B = 4
@@ -144,8 +56,8 @@ def test_copy():
   i1234 = i12 * i34
 
   i1234_copy = i1234.copy()
-  elmt1234 = i1234_copy.get_elementary_indices()
-  assert elmt1234[0] is not i1
-  assert elmt1234[1] is not i2
-  assert elmt1234[2] is not i3
-  assert elmt1234[3] is not i4
+  flat1234 = i1234_copy.flat_charges
+  assert flat1234[0] is not i1.flat_charges[0]
+  assert flat1234[1] is not i2.flat_charges[0]
+  assert flat1234[2] is not i3.flat_charges[0]
+  assert flat1234[3] is not i4.flat_charges[0]
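
Usage note (not part of the patch): the following is a minimal sketch of how the `svd` and `qr` functions added in this diff compose with `__matmul__`, mirroring the patterns used in `test_svd_prod` and `test_qr_prod` above. It assumes the `U1Charge.random`, `Index`, and `BlockSparseTensor.random` APIs behave exactly as in those tests; the charge range and dimension are arbitrary illustration values.

```
# Sketch of the decomposition API added in this diff; mirrors the new tests
# in block_tensor_test.py. Assumes U1Charge.random(min, max, D), Index, and
# BlockSparseTensor.random work as used there.
import numpy as np
from tensornetwork.block_tensor.charge import U1Charge
from tensornetwork.block_tensor.index import Index
from tensornetwork.block_tensor.block_tensor import BlockSparseTensor, svd, qr

D = 20
# A random U1-symmetric matrix: one random U1 charge per leg.
q1 = U1Charge.random(-2, 2, D)
q2 = U1Charge.random(-2, 2, D)
A = BlockSparseTensor.random(
    [Index(q1, True), Index(q2, True)], dtype=np.float64)

# Blockwise SVD: the product U @ S @ V reproduces A on its nonzero blocks.
U, S, V = svd(A, full_matrices=False)
np.testing.assert_allclose((U @ S @ V).data, A.data)

# Blockwise QR: Q @ R likewise reproduces A.
Q, R = qr(A, mode='reduced')
np.testing.assert_allclose((Q @ R).data, A.data)
```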