This repository was archived by the owner on Nov 7, 2024. It is now read-only.
Merged
63 commits
63cddc4
started implementing block-sparse tensors
mganahl Oct 22, 2019
2910b27
removed files
mganahl Oct 22, 2019
6dafdd7
Merge remote-tracking branch 'upstream/master' into block_sparse
mganahl Oct 24, 2019
46f1e10
working on AbelianIndex
mganahl Oct 25, 2019
9ba1d21
Merge remote-tracking branch 'upstream/master' into block_sparse
mganahl Nov 28, 2019
91f32a6
working in block sparisty
mganahl Nov 29, 2019
58feabc
added reshape
mganahl Nov 30, 2019
307f2dc
added Index, an index type for symmetric tensors
mganahl Nov 30, 2019
1ebbc7f
added small tutorial
mganahl Nov 30, 2019
1eb3d6f
added docstring
mganahl Nov 30, 2019
d25d8aa
fixed bug in retrieve_diagonal_blocks
mganahl Nov 30, 2019
ae8cda6
TODO added
mganahl Nov 30, 2019
bbac9c4
improved initialization a bit
mganahl Nov 30, 2019
db828c7
more efficient initialization
mganahl Dec 1, 2019
99204f7
just formatting
mganahl Dec 1, 2019
73a9628
added random
mganahl Dec 1, 2019
efa64a4
added fuse_degeneracies
mganahl Dec 1, 2019
7619162
fixed bug in reshape
mganahl Dec 1, 2019
2be30a9
dosctring, typing
mganahl Dec 1, 2019
742824f
removed TODO
mganahl Dec 1, 2019
2e6c395
removed confusing code line
mganahl Dec 1, 2019
ab13d4a
bug removed
mganahl Dec 1, 2019
d375b1d
comment
mganahl Dec 1, 2019
2727cd0
added __mul__ to Index
mganahl Dec 2, 2019
283e364
added sparse_shape
mganahl Dec 2, 2019
7328ad4
more in tutorial
mganahl Dec 2, 2019
e5b6147
comment
mganahl Dec 2, 2019
eb91c79
added new test function
mganahl Dec 2, 2019
a544dbc
testing function hacking
mganahl Dec 2, 2019
0457cca
docstring
mganahl Dec 2, 2019
95958a7
small speed up
mganahl Dec 3, 2019
ac3d980
Remove gui directory (migrated to another repo) (#399)
coryell Dec 3, 2019
5d2d2ba
a slightly more elegant code
mganahl Dec 7, 2019
04eadf3
use one more np function
mganahl Dec 7, 2019
2ea5674
removed some crazy slow code
mganahl Dec 7, 2019
5d8c86a
faster code
mganahl Dec 7, 2019
22a642e
Merge remote-tracking branch 'upstream/experimental_blocksparse' into…
mganahl Dec 7, 2019
4eae410
Update README.md (#404)
Dec 9, 2019
04c8573
add return_data
mganahl Dec 9, 2019
7c2d5a0
Merge remote-tracking branch 'upstream/experimental_blocksparse' into…
mganahl Dec 9, 2019
29bb154
Merge remote-tracking branch 'upstream/master' into block_sparse
mganahl Dec 9, 2019
33d1a40
doc
mganahl Dec 9, 2019
fb1978a
bug fix
mganahl Dec 9, 2019
5228f56
Merge remote-tracking branch 'upstream/experimental_blocksparse' into…
mganahl Dec 9, 2019
0d4a625
a little faster
mganahl Dec 11, 2019
82a4148
substantial speedup
mganahl Dec 11, 2019
7bd7be7
renaming
mganahl Dec 11, 2019
7a8c7df
Merge remote-tracking branch 'upstream/experimental_blocksparse' into…
mganahl Dec 11, 2019
d9c094b
removed todo
mganahl Dec 11, 2019
06c3f3c
some comments
mganahl Dec 11, 2019
426fd1a
comments
mganahl Dec 11, 2019
7f3e148
fixed some bug in reshape
mganahl Dec 11, 2019
19c3fe8
comments
mganahl Dec 11, 2019
5c8fd3e
default value changed
mganahl Dec 11, 2019
94c8c2c
fixed bug, old version is now faster again
mganahl Dec 12, 2019
7eec7f0
cleaned up reshape
mganahl Dec 12, 2019
c188ab9
started adding tests
mganahl Dec 12, 2019
d7ab7ab
Merge remote-tracking branch 'upstream/experimental_blocksparse' into…
mganahl Dec 12, 2019
d228f61
Merge remote-tracking branch 'upstream/experimental_blocksparse' into…
mganahl Dec 14, 2019
46aeec1
replace kron with broadcasting
mganahl Dec 14, 2019
c73a511
Merge remote-tracking branch 'upstream/experimental_blocksparse' into…
mganahl Dec 15, 2019
6844f2c
column-major -> row-major
mganahl Dec 15, 2019
4f4ba93
documentation
mganahl Dec 15, 2019
117 changes: 111 additions & 6 deletions tensornetwork/block_tensor/block_tensor.py
@@ -74,7 +74,7 @@ def compute_num_nonzero(charges: List[np.ndarray],
#compute the degeneracies of `fused_charges` charges
fused_degeneracies = fuse_degeneracies(accumulated_degeneracies,
leg_degeneracies)
#compute the new degeneracies resulting of fusing the vectors of unique charges
#compute the new degeneracies resulting from fusing
#`accumulated_charges` and `leg_charge_2`
accumulated_charges = np.unique(fused_charges)
accumulated_degeneracies = []
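The hunk above accumulates charges leg by leg: every pair of charges (q1, q2) fuses to q1 + q2, the degeneracies multiply, and equal fused charges are then merged. A minimal NumPy sketch of that idea (illustrative only: the example charges and variable names are made up, and the actual fuse_degeneracies implementation in block_tensor.py may differ):

import numpy as np

# assumed example charges on two legs
charges_1 = np.array([-1, 0, 0, 1])
charges_2 = np.array([0, 1, 1])

unique_1, degen_1 = np.unique(charges_1, return_counts=True)
unique_2, degen_2 = np.unique(charges_2, return_counts=True)

# fuse: pairwise sums of unique charges; degeneracies multiply
# (broadcasting rather than np.kron, cf. the "replace kron with broadcasting" commit)
fused_charges = (unique_1[:, None] + unique_2[None, :]).ravel()
fused_degeneracies = (degen_1[:, None] * degen_2[None, :]).ravel()

# merge degeneracies of equal fused charges
accumulated_charges = np.unique(fused_charges)
accumulated_degeneracies = np.array(
    [np.sum(fused_degeneracies[fused_charges == c]) for c in accumulated_charges])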
@@ -107,6 +107,7 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray],
dict: Dictionary mapping a tuple of charges to a shape tuple.
Each element corresponds to a non-zero valued block of the tensor.
"""
#FIXME: this routine is slow
check_flows(flows)
degeneracies = []
unique_charges = []
@@ -189,6 +190,108 @@ def retrieve_non_zero_diagonal_blocks(
row_degeneracies = dict(zip(unique_row_charges, row_dims))
column_degeneracies = dict(zip(unique_column_charges, column_dims))

# we only care about charges common to row and columns
mask = np.isin(row_charges, common_charges)
relevant_row_charges = row_charges[mask]

#some numpy magic to get the index locations of the blocks
#we generate a vector of length `len(relevant_row_charges)` which,
#for each charge `c` in `relevant_row_charges`, holds the
#degeneracy of the matching column charge `-c`
degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64)
#for each charge `c` in `common_charges` we generate a boolean mask
#for indexing the positions where `relevant_row_charges` has a value of `c`.
masks = {}
for c in common_charges:
mask = relevant_row_charges == c
masks[c] = mask
degeneracy_vector[mask] = column_degeneracies[-c]

# the result of the cumulative sum is a vector containing
# the stop positions of the non-zero values of each row
# within the data vector.
# E.g. for `relevant_row_charges` = [0,1,0,0,3], and
# column_degeneracies[0] = 10
# column_degeneracies[1] = 20
# column_degeneracies[3] = 30
# we have
# `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30]
# The starting positions of consecutive elements (in row-major order) in
# each row with charge `c=0` within the data vector are then simply obtained using
# masks[0] = [True, False, True, True, False]
# and `stop_positions[masks[0]] - column_degeneracies[0]`
stop_positions = np.cumsum(degeneracy_vector)
blocks = {}

for c in common_charges:
#numpy broadcasting is substantially faster than kron!
a = np.expand_dims(stop_positions[masks[c]] - column_degeneracies[-c], 0)
b = np.expand_dims(np.arange(column_degeneracies[-c]), 1)
if not return_data:
blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])]
else:
blocks[c] = np.reshape(data[a + b],
(row_degeneracies[c], column_degeneracies[-c]))
return blocks
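The comments above describe the cumulative-sum bookkeeping and the broadcasting trick in words; the following self-contained NumPy sketch repeats the index computation on the worked example from the comment (variable names are illustrative and not part of block_tensor.py; charge flows are ignored for simplicity):

import numpy as np

relevant_row_charges = np.array([0, 1, 0, 0, 3])
column_degeneracies = {0: 10, 1: 20, 3: 30}  # per-charge column degeneracy

# degeneracy of the column block matching each row, and its stop position in `data`
degeneracy_vector = np.array([column_degeneracies[c] for c in relevant_row_charges])
stop_positions = np.cumsum(degeneracy_vector)  # [10, 30, 40, 50, 80]

c = 0
mask = relevant_row_charges == c                         # [True, False, True, True, False]
starts = stop_positions[mask] - column_degeneracies[c]   # [0, 30, 40]

# broadcasting instead of np.kron: (1, n_rows) + (col_deg, 1) -> (col_deg, n_rows)
a = np.expand_dims(starts, 0)
b = np.expand_dims(np.arange(column_degeneracies[c]), 1)
locations = a + b  # flat positions of the charge-0 block's elements within the data vector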


def retrieve_non_zero_diagonal_blocks_column_major(
data: np.ndarray,
charges: List[np.ndarray],
flows: List[Union[bool, int]],
return_data: Optional[bool] = True) -> Dict:
"""
Deprecated

Given the meta data and underlying data of a symmetric matrix, compute
all diagonal blocks and return them in a dict, assuming column-major
ordering.
Args:
data: An np.ndarray of the data. The number of elements in `data`
has to match the number of non-zero elements defined by `charges`
and `flows`
charges: List of np.ndarray, one for each leg.
Each np.ndarray `charges[leg]` is of shape `(D[leg],)`.
The bond dimension `D[leg]` can vary on each leg.
flows: A list of integers, one for each leg,
with values `1` or `-1`, denoting the flow direction
of the charges on each leg. `1` denotes an inflowing charge,
`-1` an outflowing charge.
return_data: If `True`, the return dictionary maps quantum numbers `q` to
actual `np.ndarray` with the data. This involves a copy of data.
If `False`, the returned dict maps quantum numbers to a list
[locations, shape], where `locations` is an np.ndarray of type np.int64
containing the locations of the tensor elements within A.data, i.e.
`A.data[locations]` contains the elements belonging to the tensor with
quantum numbers `(q,q)`. `shape` is the shape of the corresponding array.

Returns:
dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray
or a python list of locations and shapes, depending on the value of `return_data`.
"""
if len(charges) != 2:
raise ValueError("input has to be a two-dimensional symmetric matrix")
check_flows(flows)
if len(flows) != len(charges):
raise ValueError("`len(flows)` is different from `len(charges)`")

#we multiply the flows into the charges
row_charges = flows[0] * charges[0] # a list of charges on each row
column_charges = flows[1] * charges[1] # a list of charges on each column

#get the unique charges
unique_row_charges, row_dims = np.unique(row_charges, return_counts=True)
unique_column_charges, column_dims = np.unique(
column_charges, return_counts=True)
#get the charges common to rows and columns (only those matter)
common_charges = np.intersect1d(
unique_row_charges, -unique_column_charges, assume_unique=True)

#convenience container for storing the degeneracies of each
#row and column charge
row_degeneracies = dict(zip(unique_row_charges, row_dims))
column_degeneracies = dict(zip(unique_column_charges, column_dims))

# we only care about charges common to row and columns
mask = np.isin(column_charges, -common_charges)
relevant_column_charges = column_charges[mask]
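The return_data flag documented above only changes how the blocks are handed back. A hedged usage sketch of the two formats (hypothetical calling code; A, row_charges and column_charges are assumed names, not taken from the diff):

import numpy as np

# lazy variant: flat index locations plus block shape, materialized on demand
blocks = retrieve_non_zero_diagonal_blocks_column_major(
    A.data, [row_charges, column_charges], flows=[1, -1], return_data=False)
for q, (locations, shape) in blocks.items():
    block = np.reshape(A.data[locations], shape)  # dense (q, q) block

# eager variant: the dict values are already dense np.ndarrays (copies of the data)
dense_blocks = retrieve_non_zero_diagonal_blocks_column_major(
    A.data, [row_charges, column_charges], flows=[1, -1], return_data=True)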
@@ -240,6 +343,8 @@ def retrieve_non_zero_diagonal_blocks_deprecated(
flows: List[Union[bool, int]],
return_data: Optional[bool] = False) -> Dict:
"""
Deprecated

Given the meta data and underlying data of a symmetric matrix, compute
all diagonal blocks and return them in a dict.
This is a deprecated version which in general performs worse than the
@@ -298,14 +403,14 @@ def retrieve_non_zero_diagonal_blocks_deprecated(
#for each charge `c` in `relevant_column_charges` holds the
#row-degeneracy of charge `c`

degeneracy_vector = row_dims[column_locations]
degeneracy_vector = column_dims[row_locations]
stop_positions = np.cumsum(degeneracy_vector)
blocks = {}
for c in common_charges:
#numpy broadcasting is substantially faster than kron!
a = np.expand_dims(
stop_positions[column_charges == -c] - row_degeneracies[c], 0)
b = np.expand_dims(np.arange(row_degeneracies[c]), 1)
stop_positions[row_charges == c] - column_degeneracies[-c], 0)
b = np.expand_dims(np.arange(column_degeneracies[-c]), 1)
if not return_data:
blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])]
else:
@@ -344,6 +449,7 @@ class BlockSparseTensor:
The class design follows Glen's proposal (Design 0).
The class currently only supports a single U(1) symmetry
and only numpy.ndarray.

Attributes:
* self.data: A 1d np.ndarray storing the underlying
data of the tensor
@@ -448,8 +554,7 @@ def rank(self):
def sparse_shape(self) -> Tuple:
"""
The sparse shape of the tensor.
Returns a copy of self.indices. Note that copying
can be relatively expensive for deeply nested indices.
Returns a copy of self.indices.
Returns:
Tuple: A tuple of `Index` objects.
"""