From 63cddc41db2b1ff45a984515dd1a299503c5f89a Mon Sep 17 00:00:00 2001
From: Martin
Date: Tue, 22 Oct 2019 13:11:38 -0400
Subject: [PATCH 01/60] started implementing block-sparse tensors
---
tensornetwork/block_tensor/#block_tensor.py# | 131 +++++++++++++++++++
tensornetwork/block_tensor/.#block_tensor.py | 1 +
tensornetwork/block_tensor/block_tensor.py | 130 ++++++++++++++++++
tensornetwork/block_tensor/block_tensor.py~ | 95 ++++++++++++++
4 files changed, 357 insertions(+)
create mode 100644 tensornetwork/block_tensor/#block_tensor.py#
create mode 120000 tensornetwork/block_tensor/.#block_tensor.py
create mode 100644 tensornetwork/block_tensor/block_tensor.py
create mode 100644 tensornetwork/block_tensor/block_tensor.py~
diff --git a/tensornetwork/block_tensor/#block_tensor.py# b/tensornetwork/block_tensor/#block_tensor.py#
new file mode 100644
index 000000000..64356f38e
--- /dev/null
+++ b/tensornetwork/block_tensor/#block_tensor.py#
@@ -0,0 +1,131 @@
+import collections
+import numpy as np
+import operator
+import warnings
+import os
+import sys
+#import qutilities as qutils
+#import utils as cutils
+import functools as fct
+import copy
+
+
+class AbelianIndex:
+ """
+ An index object for creation of abelian, block-sparse tensors
+ `AbelianIndex` is a storage class for storing abelian quantum numbers
+ of a tensor index. `AbelianIndex` is a wrapper for a python `dict`
+ mapping quantum numbers to integers (the dimension of the block)
+
+ """
+
+ @classmethod
+ def fromlist(cls, quantumnumbers, dimensions, flow, label=None):
+ if all(map(np.isscalar, quantumnumbers)):
+ QNs = list(quantumnumbers)
+ elif all(list(map(lambda x: not np.isscalar(x), quantumnumbers))):
+ QNs = list(map(np.asarray, quantumnumbers))
+ else:
+ raise TypeError(
+ "TensorIndex.fromlist(cls,dictionary,flow,label=None): quantum numbers have inconsistent types"
+ )
+ return cls(QNs, dimensions, flow, label)
+
+ @classmethod
+ def fromdict(cls, dictionary, flow, label=None):
+ if all(map(np.isscalar, dictionary.keys())):
+ QNs = list(dictionary.keys())
+ elif all(list(map(lambda x: not np.isscalar(x), dictionary.keys()))):
+ QNs = list(map(np.asarray, dictionary.keys()))
+ else:
+ raise TypeError(
+ "TensorIndex.fromdict(cls,dictionary,flow,label=None): quantum numbers have inconsistent types"
+ )
+
+ return cls(QNs, list(dictionary.values()), flow, label)
+
+ def __init__(self, quantumnumbers, dimensions, flow, label=None):
+ if __debug__:
+ if len(quantumnumbers) != len(dimensions):
+ raise ValueError(
+ "TensorIndex.__init__: len(quantumnumbers)!=len(dimensions)")
+
+ try:
+ unique = dict(zip(quantumnumbers, dimensions))
+ except TypeError:
+ unique = dict(zip(map(tuple, quantumnumbers), dimensions))
+
+ if __debug__:
+ if len(unique) != len(quantumnumbers):
+ warnings.warn(
+ "in TensorIndex.__init__: found some duplicate quantum numbers; duplicates have been removed"
+ )
+
+ if __debug__:
+ try:
+ mask = np.asarray(list(map(len, unique.keys()))) == len(
+ list(unique.keys())[0])
+ if not all(mask):
+ raise ValueError(
+ "in TensorIndex.__init__: found quantum number keys of differing length {0}\n all quantum number have to have identical length"
+ .format(list(map(len, unique.keys()))))
+ except TypeError:
+ if not all(list(map(np.isscalar, unique.keys()))):
+ raise TypeError(
+ "in TensorIndex.__init__: found quantum number keys of mixed type. all quantum numbers have to be either integers or iterables"
+ )
+ self._data = np.array(
+ list(zip(map(np.asarray, unique.keys()), dimensions)), dtype=object)
+
+ self._flow = flow
+ self.label = label
+
+ def __getitem__(self, n):
+ return self._data[n[0], n[1]]
+
+ def Q(self, n):
+ return self._data[n, 0]
+
+ def D(self, n):
+ return self._data[n, 1]
+
+ def __len__(self):
+ return self._data.shape[0]
+
+ def setflow(self, val):
+ if val == 0:
+ raise ValueError(
+ "TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only"
+ )
+ self._flow = np.sign(val)
+ return self
+
+ def rename(self, label):
+ self.label = label
+ return self
+
+ @property
+ def flow(self):
+ return self._flow
+
+ @flow.setter
+ def flow(self, val):
+ if val == 0:
+ raise ValueError(
+ "TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only"
+ )
+ self._flow = np.sign(val)
+
+ @property
+ def shape(self):
+ return self._data.shape
+
+ @property
+ def DataFrame(self):
+ return pd.DataFrame.from_records(data=self._data, columns=['qn', 'D'])
+
+ def __str__(self):
+ print('')
+ print('TensorIndex, label={0}, flow={1}'.format(self.label, self.flow))
+ print(self.DataFrame)
+ return ''
diff --git a/tensornetwork/block_tensor/.#block_tensor.py b/tensornetwork/block_tensor/.#block_tensor.py
new file mode 120000
index 000000000..be400a111
--- /dev/null
+++ b/tensornetwork/block_tensor/.#block_tensor.py
@@ -0,0 +1 @@
+martin@Mister-Pickle.local.14868
\ No newline at end of file
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
new file mode 100644
index 000000000..b070c1ec4
--- /dev/null
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -0,0 +1,130 @@
+import collections
+import numpy as np
+import operator
+import warnings
+import os
+import sys
+#import qutilities as qutils
+#import utils as cutils
+import functools as fct
+import copy
+
+
+class AbelianIndex:
+ """
+ An index object for creation of abelian, block-sparse tensors
+ `AbelianIndex` is a storage class for storing abelian quantum numbers
+ of a tensor index. `AbelianIndex` is a wrapper for a python `dict`
+ mapping quantum numbers to integers (the dimension of the block)
+ """
+
+ @classmethod
+ def fromlist(cls, quantumnumbers, dimensions, flow, label=None):
+ if all(map(np.isscalar, quantumnumbers)):
+ QNs = list(quantumnumbers)
+ elif all(list(map(lambda x: not np.isscalar(x), quantumnumbers))):
+ QNs = list(map(np.asarray, quantumnumbers))
+ else:
+ raise TypeError(
+ "TensorIndex.fromlist(cls,dictionary,flow,label=None): quantum numbers have inconsistent types"
+ )
+ return cls(QNs, dimensions, flow, label)
+
+ @classmethod
+ def fromdict(cls, dictionary, flow, label=None):
+ if all(map(np.isscalar, dictionary.keys())):
+ QNs = list(dictionary.keys())
+ elif all(list(map(lambda x: not np.isscalar(x), dictionary.keys()))):
+ QNs = list(map(np.asarray, dictionary.keys()))
+ else:
+ raise TypeError(
+ "TensorIndex.fromdict(cls,dictionary,flow,label=None): quantum numbers have inconsistent types"
+ )
+
+ return cls(QNs, list(dictionary.values()), flow, label)
+
+ def __init__(self, quantumnumbers, dimensions, flow, label=None):
+ if __debug__:
+ if len(quantumnumbers) != len(dimensions):
+ raise ValueError(
+ "TensorIndex.__init__: len(quantumnumbers)!=len(dimensions)")
+
+ try:
+ unique = dict(zip(quantumnumbers, dimensions))
+ except TypeError:
+ unique = dict(zip(map(tuple, quantumnumbers), dimensions))
+
+ if __debug__:
+ if len(unique) != len(quantumnumbers):
+ warnings.warn(
+ "in TensorIndex.__init__: found some duplicate quantum numbers; duplicates have been removed"
+ )
+
+ if __debug__:
+ try:
+ mask = np.asarray(list(map(len, unique.keys()))) == len(
+ list(unique.keys())[0])
+ if not all(mask):
+ raise ValueError(
+ "in TensorIndex.__init__: found quantum number keys of differing length {0}\n all quantum number have to have identical length"
+ .format(list(map(len, unique.keys()))))
+ except TypeError:
+ if not all(list(map(np.isscalar, unique.keys()))):
+ raise TypeError(
+ "in TensorIndex.__init__: found quantum number keys of mixed type. all quantum numbers have to be either integers or iterables"
+ )
+ self._data = np.array(
+ list(zip(map(np.asarray, unique.keys()), dimensions)), dtype=object)
+
+ self._flow = flow
+ self.label = label
+
+ def __getitem__(self, n):
+ return self._data[n[0], n[1]]
+
+ def Q(self, n):
+ return self._data[n, 0]
+
+ def D(self, n):
+ return self._data[n, 1]
+
+ def __len__(self):
+ return self._data.shape[0]
+
+ def setflow(self, val):
+ if val == 0:
+ raise ValueError(
+ "TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only"
+ )
+ self._flow = np.sign(val)
+ return self
+
+ def rename(self, label):
+ self.label = label
+ return self
+
+ @property
+ def flow(self):
+ return self._flow
+
+ @flow.setter
+ def flow(self, val):
+ if val == 0:
+ raise ValueError(
+ "TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only"
+ )
+ self._flow = np.sign(val)
+
+ @property
+ def shape(self):
+ return self._data.shape
+
+ @property
+ def DataFrame(self):
+ return pd.DataFrame.from_records(data=self._data, columns=['qn', 'D'])
+
+ def __str__(self):
+ print('')
+ print('TensorIndex, label={0}, flow={1}'.format(self.label, self.flow))
+ print(self.DataFrame)
+ return ''
diff --git a/tensornetwork/block_tensor/block_tensor.py~ b/tensornetwork/block_tensor/block_tensor.py~
new file mode 100644
index 000000000..90e848755
--- /dev/null
+++ b/tensornetwork/block_tensor/block_tensor.py~
@@ -0,0 +1,95 @@
+class TensorIndex(object):
+ @classmethod
+ def fromlist(cls,quantumnumbers,dimensions,flow,label=None):
+ if all(map(np.isscalar,quantumnumbers)):
+ QNs=list(quantumnumbers)
+ elif all(list(map(lambda x: not np.isscalar(x),quantumnumbers))):
+ QNs=list(map(np.asarray,quantumnumbers))
+ else:
+ raise TypeError("TensorIndex.fromlist(cls,dictionary,flow,label=None): quantum numbers have inconsistent types")
+ return cls(QNs,dimensions,flow,label)
+
+ @classmethod
+ def fromdict(cls,dictionary,flow,label=None):
+ if all(map(np.isscalar,dictionary.keys())):
+ QNs=list(dictionary.keys())
+ elif all(list(map(lambda x: not np.isscalar(x),dictionary.keys()))):
+ QNs=list(map(np.asarray,dictionary.keys()))
+ else:
+ raise TypeError("TensorIndex.fromdict(cls,dictionary,flow,label=None): quantum numbers have inconsistent types")
+
+ return cls(QNs,list(dictionary.values()),flow,label)
+
+ def __init__(self,quantumnumbers,dimensions,flow,label=None):
+ if __debug__:
+ if len(quantumnumbers)!=len(dimensions):
+ raise ValueError("TensorIndex.__init__: len(quantumnumbers)!=len(dimensions)")
+
+ try:
+ unique=dict(zip(quantumnumbers,dimensions))
+ except TypeError:
+ unique=dict(zip(map(tuple,quantumnumbers),dimensions))
+
+
+ if __debug__:
+ if len(unique)!=len(quantumnumbers):
+ warnings.warn("in TensorIndex.__init__: found some duplicate quantum numbers; duplicates have been removed")
+
+ if __debug__:
+ try:
+ mask=np.asarray(list(map(len,unique.keys())))==len(list(unique.keys())[0])
+ if not all(mask):
+ raise ValueError("in TensorIndex.__init__: found quantum number keys of differing length {0}\n all quantum number have to have identical length".format(list(map(len,unique.keys()))))
+ except TypeError:
+ if not all(list(map(np.isscalar,unique.keys()))):
+ raise TypeError("in TensorIndex.__init__: found quantum number keys of mixed type. all quantum numbers have to be either integers or iterables")
+ self._data=np.array(list(zip(map(np.asarray,unique.keys()),dimensions)),dtype=object)
+
+ self._flow=flow
+ self.label=label
+
+ def __getitem__(self,n):
+ return self._data[n[0],n[1]]
+
+ def Q(self,n):
+ return self._data[n,0]
+
+ def D(self,n):
+ return self._data[n,1]
+
+ def __len__(self):
+ return self._data.shape[0]
+
+ def setflow(self,val):
+ if val==0:
+ raise ValueError("TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only")
+ self._flow=np.sign(val)
+ return self
+
+ def rename(self,label):
+ self.label=label
+ return self
+
+ @property
+ def flow(self):
+ return self._flow
+
+ @flow.setter
+ def flow(self,val):
+ if val==0:
+ raise ValueError("TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only")
+ self._flow=np.sign(val)
+
+ @property
+ def shape(self):
+ return self._data.shape
+
+ @property
+ def DataFrame(self):
+ return pd.DataFrame.from_records(data=self._data,columns=['qn','D'])
+
+ def __str__(self):
+ print('')
+ print('TensorIndex, label={0}, flow={1}'.format(self.label,self.flow))
+ print(self.DataFrame)
+ return ''
From 2910b27316180b720445693676f5b62067e75388 Mon Sep 17 00:00:00 2001
From: Martin
Date: Tue, 22 Oct 2019 13:12:13 -0400
Subject: [PATCH 02/60] removed files
---
tensornetwork/block_tensor/#block_tensor.py# | 131 -------------------
tensornetwork/block_tensor/block_tensor.py~ | 95 --------------
2 files changed, 226 deletions(-)
delete mode 100644 tensornetwork/block_tensor/#block_tensor.py#
delete mode 100644 tensornetwork/block_tensor/block_tensor.py~
diff --git a/tensornetwork/block_tensor/#block_tensor.py# b/tensornetwork/block_tensor/#block_tensor.py#
deleted file mode 100644
index 64356f38e..000000000
--- a/tensornetwork/block_tensor/#block_tensor.py#
+++ /dev/null
@@ -1,131 +0,0 @@
-import collections
-import numpy as np
-import operator
-import warnings
-import os
-import sys
-#import qutilities as qutils
-#import utils as cutils
-import functools as fct
-import copy
-
-
-class AbelianIndex:
- """
- An index object for creation of abelian, block-sparse tensors
- `AbelianIndex` is a storage class for storing abelian quantum numbers
- of a tensor index. `AbelianIndex` is a wrapper for a python `dict`
- mapping quantum numbers to integers (the dimension of the block)
-
- """
-
- @classmethod
- def fromlist(cls, quantumnumbers, dimensions, flow, label=None):
- if all(map(np.isscalar, quantumnumbers)):
- QNs = list(quantumnumbers)
- elif all(list(map(lambda x: not np.isscalar(x), quantumnumbers))):
- QNs = list(map(np.asarray, quantumnumbers))
- else:
- raise TypeError(
- "TensorIndex.fromlist(cls,dictionary,flow,label=None): quantum numbers have inconsistent types"
- )
- return cls(QNs, dimensions, flow, label)
-
- @classmethod
- def fromdict(cls, dictionary, flow, label=None):
- if all(map(np.isscalar, dictionary.keys())):
- QNs = list(dictionary.keys())
- elif all(list(map(lambda x: not np.isscalar(x), dictionary.keys()))):
- QNs = list(map(np.asarray, dictionary.keys()))
- else:
- raise TypeError(
- "TensorIndex.fromdict(cls,dictionary,flow,label=None): quantum numbers have inconsistent types"
- )
-
- return cls(QNs, list(dictionary.values()), flow, label)
-
- def __init__(self, quantumnumbers, dimensions, flow, label=None):
- if __debug__:
- if len(quantumnumbers) != len(dimensions):
- raise ValueError(
- "TensorIndex.__init__: len(quantumnumbers)!=len(dimensions)")
-
- try:
- unique = dict(zip(quantumnumbers, dimensions))
- except TypeError:
- unique = dict(zip(map(tuple, quantumnumbers), dimensions))
-
- if __debug__:
- if len(unique) != len(quantumnumbers):
- warnings.warn(
- "in TensorIndex.__init__: found some duplicate quantum numbers; duplicates have been removed"
- )
-
- if __debug__:
- try:
- mask = np.asarray(list(map(len, unique.keys()))) == len(
- list(unique.keys())[0])
- if not all(mask):
- raise ValueError(
- "in TensorIndex.__init__: found quantum number keys of differing length {0}\n all quantum number have to have identical length"
- .format(list(map(len, unique.keys()))))
- except TypeError:
- if not all(list(map(np.isscalar, unique.keys()))):
- raise TypeError(
- "in TensorIndex.__init__: found quantum number keys of mixed type. all quantum numbers have to be either integers or iterables"
- )
- self._data = np.array(
- list(zip(map(np.asarray, unique.keys()), dimensions)), dtype=object)
-
- self._flow = flow
- self.label = label
-
- def __getitem__(self, n):
- return self._data[n[0], n[1]]
-
- def Q(self, n):
- return self._data[n, 0]
-
- def D(self, n):
- return self._data[n, 1]
-
- def __len__(self):
- return self._data.shape[0]
-
- def setflow(self, val):
- if val == 0:
- raise ValueError(
- "TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only"
- )
- self._flow = np.sign(val)
- return self
-
- def rename(self, label):
- self.label = label
- return self
-
- @property
- def flow(self):
- return self._flow
-
- @flow.setter
- def flow(self, val):
- if val == 0:
- raise ValueError(
- "TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only"
- )
- self._flow = np.sign(val)
-
- @property
- def shape(self):
- return self._data.shape
-
- @property
- def DataFrame(self):
- return pd.DataFrame.from_records(data=self._data, columns=['qn', 'D'])
-
- def __str__(self):
- print('')
- print('TensorIndex, label={0}, flow={1}'.format(self.label, self.flow))
- print(self.DataFrame)
- return ''
diff --git a/tensornetwork/block_tensor/block_tensor.py~ b/tensornetwork/block_tensor/block_tensor.py~
deleted file mode 100644
index 90e848755..000000000
--- a/tensornetwork/block_tensor/block_tensor.py~
+++ /dev/null
@@ -1,95 +0,0 @@
-class TensorIndex(object):
- @classmethod
- def fromlist(cls,quantumnumbers,dimensions,flow,label=None):
- if all(map(np.isscalar,quantumnumbers)):
- QNs=list(quantumnumbers)
- elif all(list(map(lambda x: not np.isscalar(x),quantumnumbers))):
- QNs=list(map(np.asarray,quantumnumbers))
- else:
- raise TypeError("TensorIndex.fromlist(cls,dictionary,flow,label=None): quantum numbers have inconsistent types")
- return cls(QNs,dimensions,flow,label)
-
- @classmethod
- def fromdict(cls,dictionary,flow,label=None):
- if all(map(np.isscalar,dictionary.keys())):
- QNs=list(dictionary.keys())
- elif all(list(map(lambda x: not np.isscalar(x),dictionary.keys()))):
- QNs=list(map(np.asarray,dictionary.keys()))
- else:
- raise TypeError("TensorIndex.fromdict(cls,dictionary,flow,label=None): quantum numbers have inconsistent types")
-
- return cls(QNs,list(dictionary.values()),flow,label)
-
- def __init__(self,quantumnumbers,dimensions,flow,label=None):
- if __debug__:
- if len(quantumnumbers)!=len(dimensions):
- raise ValueError("TensorIndex.__init__: len(quantumnumbers)!=len(dimensions)")
-
- try:
- unique=dict(zip(quantumnumbers,dimensions))
- except TypeError:
- unique=dict(zip(map(tuple,quantumnumbers),dimensions))
-
-
- if __debug__:
- if len(unique)!=len(quantumnumbers):
- warnings.warn("in TensorIndex.__init__: found some duplicate quantum numbers; duplicates have been removed")
-
- if __debug__:
- try:
- mask=np.asarray(list(map(len,unique.keys())))==len(list(unique.keys())[0])
- if not all(mask):
- raise ValueError("in TensorIndex.__init__: found quantum number keys of differing length {0}\n all quantum number have to have identical length".format(list(map(len,unique.keys()))))
- except TypeError:
- if not all(list(map(np.isscalar,unique.keys()))):
- raise TypeError("in TensorIndex.__init__: found quantum number keys of mixed type. all quantum numbers have to be either integers or iterables")
- self._data=np.array(list(zip(map(np.asarray,unique.keys()),dimensions)),dtype=object)
-
- self._flow=flow
- self.label=label
-
- def __getitem__(self,n):
- return self._data[n[0],n[1]]
-
- def Q(self,n):
- return self._data[n,0]
-
- def D(self,n):
- return self._data[n,1]
-
- def __len__(self):
- return self._data.shape[0]
-
- def setflow(self,val):
- if val==0:
- raise ValueError("TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only")
- self._flow=np.sign(val)
- return self
-
- def rename(self,label):
- self.label=label
- return self
-
- @property
- def flow(self):
- return self._flow
-
- @flow.setter
- def flow(self,val):
- if val==0:
- raise ValueError("TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only")
- self._flow=np.sign(val)
-
- @property
- def shape(self):
- return self._data.shape
-
- @property
- def DataFrame(self):
- return pd.DataFrame.from_records(data=self._data,columns=['qn','D'])
-
- def __str__(self):
- print('')
- print('TensorIndex, label={0}, flow={1}'.format(self.label,self.flow))
- print(self.DataFrame)
- return ''
From 46f1e10144675c4c123bd6b9fc48deeb0c07bc8a Mon Sep 17 00:00:00 2001
From: Martin
Date: Fri, 25 Oct 2019 08:51:48 -0400
Subject: [PATCH 03/60] working on AbelianIndex
---
tensornetwork/block_tensor/block_tensor.py | 79 ++++++++--------------
1 file changed, 30 insertions(+), 49 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index b070c1ec4..f99bef8f8 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -8,6 +8,7 @@
#import utils as cutils
import functools as fct
import copy
+from typing import Iterable, Optional, Text
class AbelianIndex:
@@ -15,68 +16,50 @@ class AbelianIndex:
An index object for creation of abelian, block-sparse tensors
`AbelianIndex` is a storage class for storing abelian quantum numbers
of a tensor index. `AbelianIndex` is a wrapper for a python `dict`
- mapping quantum numbers to integers (the dimension of the block)
+ mapping quantum numbers to integers (the dimension of the block).
+ `AbelianIndex` can have a `flow` denoting the "flow of charge".
"""
@classmethod
- def fromlist(cls, quantumnumbers, dimensions, flow, label=None):
+ def fromlist(cls,
+ quantumnumbers: Iterable,
+ dimensions: Iterable[int],
+ flow: int,
+ label: Optional[Text] = None):
if all(map(np.isscalar, quantumnumbers)):
QNs = list(quantumnumbers)
elif all(list(map(lambda x: not np.isscalar(x), quantumnumbers))):
- QNs = list(map(np.asarray, quantumnumbers))
+ QNs = list(map(np.asarray,
+ quantumnumbers)) #turn quantum numbers into np.ndarray
else:
- raise TypeError(
- "TensorIndex.fromlist(cls,dictionary,flow,label=None): quantum numbers have inconsistent types"
- )
+ raise TypeError("quantum numbers have inconsistent types")
return cls(QNs, dimensions, flow, label)
- @classmethod
- def fromdict(cls, dictionary, flow, label=None):
- if all(map(np.isscalar, dictionary.keys())):
- QNs = list(dictionary.keys())
- elif all(list(map(lambda x: not np.isscalar(x), dictionary.keys()))):
- QNs = list(map(np.asarray, dictionary.keys()))
- else:
- raise TypeError(
- "TensorIndex.fromdict(cls,dictionary,flow,label=None): quantum numbers have inconsistent types"
- )
-
- return cls(QNs, list(dictionary.values()), flow, label)
-
- def __init__(self, quantumnumbers, dimensions, flow, label=None):
- if __debug__:
- if len(quantumnumbers) != len(dimensions):
- raise ValueError(
- "TensorIndex.__init__: len(quantumnumbers)!=len(dimensions)")
-
+ def __init__(self,
+ quantumnumbers: Iterable,
+ dimensions: Iterable[int],
+ flow: int,
+ label: Optional[Text] = None):
try:
unique = dict(zip(quantumnumbers, dimensions))
except TypeError:
unique = dict(zip(map(tuple, quantumnumbers), dimensions))
-
- if __debug__:
- if len(unique) != len(quantumnumbers):
- warnings.warn(
- "in TensorIndex.__init__: found some duplicate quantum numbers; duplicates have been removed"
+ if len(unique) != len(quantumnumbers):
+ warnings.warn("removing duplicate quantum numbers")
+ try:
+ lengths = np.asarray([len(k) for k in unique.keys()])
+ if not all(lengths == lenghts[0])
+ raise ValueError(
+ "quantum number have differing lengths")
+ except TypeError:
+ if not all(list(map(np.isscalar, unique.keys()))):
+ raise TypeError(
+ "quantum numbers have mixed types")
)
-
- if __debug__:
- try:
- mask = np.asarray(list(map(len, unique.keys()))) == len(
- list(unique.keys())[0])
- if not all(mask):
- raise ValueError(
- "in TensorIndex.__init__: found quantum number keys of differing length {0}\n all quantum number have to have identical length"
- .format(list(map(len, unique.keys()))))
- except TypeError:
- if not all(list(map(np.isscalar, unique.keys()))):
- raise TypeError(
- "in TensorIndex.__init__: found quantum number keys of mixed type. all quantum numbers have to be either integers or iterables"
- )
- self._data = np.array(
+ self.data = np.array(
list(zip(map(np.asarray, unique.keys()), dimensions)), dtype=object)
- self._flow = flow
+ self.flow = flow
self.label = label
def __getitem__(self, n):
@@ -96,12 +79,10 @@ def setflow(self, val):
raise ValueError(
"TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only"
)
- self._flow = np.sign(val)
- return self
+ self.flow = 1 if val > 0 else -1
def rename(self, label):
self.label = label
- return self
@property
def flow(self):
From 91f32a684ec62d2f0a5577422caffaaf4ee1631d Mon Sep 17 00:00:00 2001
From: Martin
Date: Fri, 29 Nov 2019 09:19:03 -0500
Subject: [PATCH 04/60] working on block sparsity
---
tensornetwork/block_tensor/.#block_tensor.py | 1 -
tensornetwork/block_tensor/block_tensor.py | 374 ++++++++++++++-----
2 files changed, 276 insertions(+), 99 deletions(-)
delete mode 120000 tensornetwork/block_tensor/.#block_tensor.py
diff --git a/tensornetwork/block_tensor/.#block_tensor.py b/tensornetwork/block_tensor/.#block_tensor.py
deleted file mode 120000
index be400a111..000000000
--- a/tensornetwork/block_tensor/.#block_tensor.py
+++ /dev/null
@@ -1 +0,0 @@
-martin@Mister-Pickle.local.14868
\ No newline at end of file
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index f99bef8f8..57cb611b4 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -1,111 +1,289 @@
-import collections
+# Copyright 2019 The TensorNetwork Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+import numpy as np
+from tensornetwork.network_components import Node, contract, contract_between
+# pylint: disable=line-too-long
+from tensornetwork.backends import backend_factory
+
import numpy as np
-import operator
-import warnings
-import os
-import sys
-#import qutilities as qutils
-#import utils as cutils
-import functools as fct
-import copy
-from typing import Iterable, Optional, Text
-
-
-class AbelianIndex:
+import itertools
+from typing import List, Union, Any, Tuple, Type, Optional
+Tensor = Any
+
+
+def check_flows(flows) -> None:
+ if (set(flows) != {1}) and (set(flows) != {-1}) and (set(flows) != {-1, 1}):
+ raise ValueError(
+ "flows = {} contains values different from 1 and -1".format(flows))
+
+ if set(flows) == {1}:
+ raise ValueError("flows = {} has no outflowing index".format(flows))
+ if set(flows) == {-1}:
+ raise ValueError("flows = {} has no inflowing index".format(flows))
+
+
+def fuse_quantum_numbers(q1: Union[List, np.ndarray],
+ q2: Union[List, np.ndarray]) -> np.ndarray:
"""
- An index object for creation of abelian, block-sparse tensors
- `AbelianIndex` is a storage class for storing abelian quantum numbers
- of a tensor index. `AbelianIndex` is a wrapper for a python `dict`
- mapping quantum numbers to integers (the dimension of the block).
- `AbelianIndex` can have a `flow` denoting the "flow of charge".
+ Fuse quantumm numbers `q1` with `q2` by simple addition (valid
+ for U(1) charges). `q1` and `q2` are typically two consecutive
+ elements of `BlockSparseTensor.quantum_numbers`.
+ Given `q1 = [0,1,2]` and `q2 = [10,100]`, this returns
+ `[10, 11, 12, 100, 101, 102]`.
+ When using column-major ordering of indices in `BlockSparseTensor`,
+ the position of q1 should be "to the left" of the position of q2.
+ Args:
+ q1: Iterable of integers
+ q2: Iterable of integers
+ Returns:
+ np.ndarray: The result of fusing `q1` with `q2`.
"""
+ return np.reshape(
+ np.asarray(q2)[:, None] + np.asarray(q1)[None, :],
+ len(q1) * len(q2))
- @classmethod
- def fromlist(cls,
- quantumnumbers: Iterable,
- dimensions: Iterable[int],
- flow: int,
- label: Optional[Text] = None):
- if all(map(np.isscalar, quantumnumbers)):
- QNs = list(quantumnumbers)
- elif all(list(map(lambda x: not np.isscalar(x), quantumnumbers))):
- QNs = list(map(np.asarray,
- quantumnumbers)) #turn quantum numbers into np.ndarray
- else:
- raise TypeError("quantum numbers have inconsistent types")
- return cls(QNs, dimensions, flow, label)
-
- def __init__(self,
- quantumnumbers: Iterable,
- dimensions: Iterable[int],
- flow: int,
- label: Optional[Text] = None):
- try:
- unique = dict(zip(quantumnumbers, dimensions))
- except TypeError:
- unique = dict(zip(map(tuple, quantumnumbers), dimensions))
- if len(unique) != len(quantumnumbers):
- warnings.warn("removing duplicate quantum numbers")
- try:
- lengths = np.asarray([len(k) for k in unique.keys()])
- if not all(lengths == lenghts[0])
- raise ValueError(
- "quantum number have differing lengths")
- except TypeError:
- if not all(list(map(np.isscalar, unique.keys()))):
- raise TypeError(
- "quantum numbers have mixed types")
- )
- self.data = np.array(
- list(zip(map(np.asarray, unique.keys()), dimensions)), dtype=object)
-
- self.flow = flow
- self.label = label
-
- def __getitem__(self, n):
- return self._data[n[0], n[1]]
-
- def Q(self, n):
- return self._data[n, 0]
-
- def D(self, n):
- return self._data[n, 1]
-
- def __len__(self):
- return self._data.shape[0]
-
- def setflow(self, val):
- if val == 0:
+
+def reshape(symmetric_tensor: BlockSparseTensor, shape: Tuple[int]):
+ n = 0
+ for s in shape:
+ dim = 1
+ while dim != s:
+ dim *= symmetric_tensor.shape[n]
+ n += 1
+ if dim > s:
raise ValueError(
- "TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only"
- )
- self.flow = 1 if val > 0 else -1
+ 'desired shape = {} is incompatible with the symmetric tensor shape = {}'
+ .format(shape, symmetric_tensor.shape))
- def rename(self, label):
- self.label = label
- @property
- def flow(self):
- return self._flow
+def compute_num_nonzero(quantum_numbers: List[np.ndarray],
+ flows: List[Union[bool, int]]) -> int:
+ """
+ Compute the number of non-zero elements, given the meta-data of
+ a symmetric tensor.
+ Args:
+ quantum_numbers: List of np.ndarray, one for each leg.
+ Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`.
+ The bond dimension `D[leg]` can vary on each leg, the number of
+ symmetries `Q` has to be the same for each leg.
+ flows: A list of integers, one for each leg,
+ with values `1` or `-1`, denoting the flow direction
+ of the charges on each leg. `1` is inflowing, `-1` is outflowing
+ charge.
+ Returns:
+ dict: Dictionary mapping a tuple of charges to a shape tuple.
+ Each element corresponds to a non-zero valued block of the tensor.
+ """
+
+ if len(quantum_numbers) == 1:
+ return len(quantum_numbers)
+ net_charges = flows[0] * quantum_numbers[0]
+ for i in range(1, len(flows)):
+ net_charges = np.reshape(
+ flows[i] * quantum_numbers[i][:, None] + net_charges[None, :],
+ len(quantum_numbers[i]) * len(net_charges))
+
+ return len(np.nonzero(net_charges == 0)[0])
+
+
+def compute_nonzero_block_shapes(quantum_numbers: List[np.ndarray],
+ flows: List[Union[bool, int]]) -> dict:
+ """
+ Compute the blocks and their respective shapes of a symmetric tensor,
+ given its meta-data.
+ Args:
+ quantum_numbers: List of np.ndarray, one for each leg.
+ Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`.
+ The bond dimension `D[leg]` can vary on each leg, the number of
+ symmetries `Q` has to be the same for each leg.
+ flows: A list of integers, one for each leg,
+ with values `1` or `-1`, denoting the flow direction
+ of the charges on each leg. `1` is inflowing, `-1` is outflowing
+ charge.
+ Returns:
+ dict: Dictionary mapping a tuple of charges to a shape tuple.
+ Each element corresponds to a non-zero valued block of the tensor.
+ """
+ check_flows(flows)
+ degeneracies = []
+ charges = []
+ rank = len(quantum_numbers)
+ #find the unique quantum numbers and their degeneracy on each leg
+ for leg in range(rank):
+ c, d = np.unique(quantum_numbers[leg], return_counts=True)
+ charges.append(c)
+ degeneracies.append(dict(zip(c, d)))
+
+ #find all possible combination of leg charges c0, c1, ...
+ #(with one charge per leg 0, 1, ...)
+ #such that sum([flows[0] * c0, flows[1] * c1, ...]) = 0
+ charge_combinations = list(
+ itertools.product(
+ *[charges[leg] * flows[leg] for leg in range(len(charges))]))
+ net_charges = np.array([np.sum(c) for c in charge_combinations])
+ zero_idxs = np.nonzero(net_charges == 0)[0]
+ charge_shape_dict = {}
+ for idx in zero_idxs:
+ charges = charge_combinations[idx]
+ shapes = [
+ degeneracies[leg][flows[leg] * charges[leg]] for leg in range(rank)
+ ]
+ charge_shape_dict[charges] = shapes
+ return charge_shape_dict
+
+
+def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
+ quantum_numbers: List[np.ndarray],
+ flows: List[Union[bool, int]]) -> dict:
+ """
+ Given the meta data and underlying data of a symmetric matrix, compute
+ all diagonal blocks and return them in a dict.
+ Args:
+ data: An np.ndarray of the data. The number of elements in `data`
+ has to match the number of non-zero elements defined by `quantum_numbers`
+ and `flows`
+ quantum_numbers: List of np.ndarray, one for each leg.
+ Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`.
+ The bond dimension `D[leg]` can vary on each leg, the number of
+ symmetries `Q` has to be the same for each leg.
+ flows: A list of integers, one for each leg,
+ with values `1` or `-1`, denoting the flow direction
+ of the charges on each leg. `1` is inflowing, `-1` is outflowing
+ charge.
+ """
+ if len(quantum_numbers) != 2:
+ raise ValueError("input has to be a two-dimensional symmetric matrix")
+ check_flows(flows)
+ if len(flows) != len(quantum_numbers):
+ raise ValueError("`len(flows)` is different from `len(quantum_numbers)`")
+
+ row_charges = quantum_numbers[0] # a list of charges on each row
+ column_charges = quantum_numbers[1] # a list of charges on each column
+ # for each matrix column find the number of non-zero elements in it
+ # Note: the matrix is assumed to be symmetric, i.e. only elements where
+ # ingoing and outgoing charge are identical are non-zero
+ num_non_zero = [len(np.nonzero(row_charges == c)[0]) for c in column_charges]
+
+ #get the unique charges
+ #Note: row and column unique charges are the same due to symmetry
+ unique_charges, row_dims = np.unique(row_charges, return_counts=True)
+ _, column_dims = np.unique(column_charges, return_counts=True)
+
+ # get the degenaricies of each row and column charge
+ row_degeneracies = dict(zip(unique_charges, row_dims))
+ column_degeneracies = dict(zip(unique_charges, column_dims))
+ blocks = {}
+ for c in unique_charges:
+ start = 0
+ idxs = []
+ for column in range(len(column_charges)):
+ charge = column_charges[column]
+ if charge != c:
+ start += num_non_zero[column]
+ else:
+ idxs.extend(start + np.arange(num_non_zero[column]))
- @flow.setter
- def flow(self, val):
- if val == 0:
+ blocks[c] = np.reshape(data[idxs],
+ (row_degeneracies[c], column_degeneracies[c]))
+ return blocks
+
+
+class BlockSparseTensor:
+ """
+ Minimal class implementation of block sparsity.
+ The class currently onluy supports a single U(1) symmetry.
+ Currently only nump.ndarray is supported.
+ Attributes:
+ * self.data: A 1d np.ndarray storing the underlying
+ data of the tensor
+ * self.quantum_numbers: A list of `np.ndarray` of shape
+ (D, Q), where D is the bond dimension, and Q the number
+ of different symmetries (this is 1 for now).
+ * self.flows: A list of integers of length `k`.
+ `self.flows` determines the flows direction of charges
+ on each leg of the tensor. A value of `-1` denotes
+ outflowing charge, a value of `1` denotes inflowing
+ charge.
+
+ The tensor data is stored in self.data, a 1d np.ndarray.
+ """
+
+ def __init__(self, data: np.ndarray, quantum_numbers: List[np.ndarray],
+ flows: List[Union[bool, int]]) -> None:
+ """
+ Args:
+ data: An np.ndarray of the data. The number of elements in `data`
+ has to match the number of non-zero elements defined by `quantum_numbers`
+ and `flows`
+ quantum_numbers: List of np.ndarray, one for each leg.
+ Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`.
+ The bond dimension `D[leg]` can vary on each leg, the number of
+ symmetries `Q` has to be the same for each leg.
+ flows: A list of integers, one for each leg,
+ with values `1` or `-1`, denoting the flow direction
+ of the charges on each leg. `1` is inflowing, `-1` is outflowing
+ charge.
+ """
+ block_dict = compute_nonzero_block_shapes(quantum_numbers, flows)
+ num_non_zero_elements = np.sum([np.prod(s) for s in block_dict.values()])
+
+ if num_non_zero_elements != len(data.flat):
+ raise ValueError("number of tensor elements defined "
+ "by `quantum_numbers` is different from"
+ " len(data)={}".format(len(data.flat)))
+ check_flows(flows)
+ if len(flows) != len(quantum_numbers):
raise ValueError(
- "TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only"
- )
- self._flow = np.sign(val)
+ "len(flows) = {} is different from len(quantum_numbers) = {}".format(
+ len(flows), len(quantum_numbers)))
+ self.data = np.asarray(data.flat) #do not copy data
+ self.flows = flows
+ self.quantum_numbers = quantum_numbers
+
+ @classmethod
+ def randn(cls,
+ quantum_numbers: List[np.ndarray],
+ flows: List[Union[bool, int]],
+ dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor":
+ """
+ Initialize a random symmetric tensor from random normal distribution.
+ Args:
+ quantum_numbers: List of np.ndarray, one for each leg.
+ Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`.
+ The bond dimension `D[leg]` can vary on each leg, the number of
+ symmetries `Q` has to be the same for each leg.
+ flows: A list of integers, one for each leg,
+ with values `1` or `-1`, denoting the flow direction
+ of the charges on each leg. `1` is inflowing, `-1` is outflowing
+ charge.
+ dtype: An optional numpy dtype. The dtype of the tensor
+ Returns:
+ BlockSparseTensor
+ """
+ num_non_zero_elements = compute_num_nonzero(quantum_numbers, flows)
+ backend = backend_factory.get_backend('numpy')
+ data = backend.randn((num_non_zero_elements,), dtype=dtype)
+ return cls(data=data, quantum_numbers=quantum_numbers, flows=flows)
@property
- def shape(self):
- return self._data.shape
+ def shape(self) -> Tuple:
+ return tuple([np.shape(q)[0] for q in self.quantum_numbers])
@property
- def DataFrame(self):
- return pd.DataFrame.from_records(data=self._data, columns=['qn', 'D'])
-
- def __str__(self):
- print('')
- print('TensorIndex, label={0}, flow={1}'.format(self.label, self.flow))
- print(self.DataFrame)
- return ''
+ def dtype(self) -> Type[np.number]:
+ return self.data.dtype
From 58feabc58ac38fff39e0540d6ef7469e636452c6 Mon Sep 17 00:00:00 2001
From: Martin
Date: Fri, 29 Nov 2019 22:08:20 -0500
Subject: [PATCH 05/60] added reshape
and lots of other stuff
---
tensornetwork/block_tensor/block_tensor.py | 215 +++++++++++----------
1 file changed, 108 insertions(+), 107 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 57cb611b4..9d78479a8 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -19,7 +19,7 @@
from tensornetwork.network_components import Node, contract, contract_between
# pylint: disable=line-too-long
from tensornetwork.backends import backend_factory
-
+from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index
import numpy as np
import itertools
from typing import List, Union, Any, Tuple, Type, Optional
@@ -31,54 +31,15 @@ def check_flows(flows) -> None:
raise ValueError(
"flows = {} contains values different from 1 and -1".format(flows))
- if set(flows) == {1}:
- raise ValueError("flows = {} has no outflowing index".format(flows))
- if set(flows) == {-1}:
- raise ValueError("flows = {} has no inflowing index".format(flows))
-
-def fuse_quantum_numbers(q1: Union[List, np.ndarray],
- q2: Union[List, np.ndarray]) -> np.ndarray:
- """
- Fuse quantumm numbers `q1` with `q2` by simple addition (valid
- for U(1) charges). `q1` and `q2` are typically two consecutive
- elements of `BlockSparseTensor.quantum_numbers`.
- Given `q1 = [0,1,2]` and `q2 = [10,100]`, this returns
- `[10, 11, 12, 100, 101, 102]`.
- When using column-major ordering of indices in `BlockSparseTensor`,
- the position of q1 should be "to the left" of the position of q2.
- Args:
- q1: Iterable of integers
- q2: Iterable of integers
- Returns:
- np.ndarray: The result of fusing `q1` with `q2`.
- """
- return np.reshape(
- np.asarray(q2)[:, None] + np.asarray(q1)[None, :],
- len(q1) * len(q2))
-
-
-def reshape(symmetric_tensor: BlockSparseTensor, shape: Tuple[int]):
- n = 0
- for s in shape:
- dim = 1
- while dim != s:
- dim *= symmetric_tensor.shape[n]
- n += 1
- if dim > s:
- raise ValueError(
- 'desired shape = {} is incompatible with the symmetric tensor shape = {}'
- .format(shape, symmetric_tensor.shape))
-
-
-def compute_num_nonzero(quantum_numbers: List[np.ndarray],
+def compute_num_nonzero(charges: List[np.ndarray],
flows: List[Union[bool, int]]) -> int:
"""
Compute the number of non-zero elements, given the meta-data of
a symmetric tensor.
Args:
- quantum_numbers: List of np.ndarray, one for each leg.
- Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`.
+ charges: List of np.ndarray, one for each leg.
+ Each np.ndarray `charges[leg]` is of shape `(D[leg], Q)`.
The bond dimension `D[leg]` can vary on each leg, the number of
symmetries `Q` has to be the same for each leg.
flows: A list of integers, one for each leg,
@@ -90,25 +51,25 @@ def compute_num_nonzero(quantum_numbers: List[np.ndarray],
Each element corresponds to a non-zero valued block of the tensor.
"""
- if len(quantum_numbers) == 1:
- return len(quantum_numbers)
- net_charges = flows[0] * quantum_numbers[0]
+ if len(charges) == 1:
+ return len(charges)
+ net_charges = flows[0] * charges[0]
for i in range(1, len(flows)):
net_charges = np.reshape(
- flows[i] * quantum_numbers[i][:, None] + net_charges[None, :],
- len(quantum_numbers[i]) * len(net_charges))
+ flows[i] * charges[i][:, None] + net_charges[None, :],
+ len(charges[i]) * len(net_charges))
return len(np.nonzero(net_charges == 0)[0])
-def compute_nonzero_block_shapes(quantum_numbers: List[np.ndarray],
+def compute_nonzero_block_shapes(charges: List[np.ndarray],
flows: List[Union[bool, int]]) -> dict:
"""
Compute the blocks and their respective shapes of a symmetric tensor,
given its meta-data.
Args:
- quantum_numbers: List of np.ndarray, one for each leg.
- Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`.
+ charges: List of np.ndarray, one for each leg.
+ Each np.ndarray `charges[leg]` is of shape `(D[leg], Q)`.
The bond dimension `D[leg]` can vary on each leg, the number of
symmetries `Q` has to be the same for each leg.
flows: A list of integers, one for each leg,
@@ -121,44 +82,44 @@ def compute_nonzero_block_shapes(quantum_numbers: List[np.ndarray],
"""
check_flows(flows)
degeneracies = []
- charges = []
- rank = len(quantum_numbers)
+ unique_charges = []
+ rank = len(charges)
#find the unique quantum numbers and their degeneracy on each leg
for leg in range(rank):
- c, d = np.unique(quantum_numbers[leg], return_counts=True)
- charges.append(c)
+ c, d = np.unique(charges[leg], return_counts=True)
+ unique_charges.append(c)
degeneracies.append(dict(zip(c, d)))
#find all possible combination of leg charges c0, c1, ...
#(with one charge per leg 0, 1, ...)
#such that sum([flows[0] * c0, flows[1] * c1, ...]) = 0
charge_combinations = list(
- itertools.product(
- *[charges[leg] * flows[leg] for leg in range(len(charges))]))
+ itertools.product(*[
+ unique_charges[leg] * flows[leg]
+ for leg in range(len(unique_charges))
+ ]))
net_charges = np.array([np.sum(c) for c in charge_combinations])
zero_idxs = np.nonzero(net_charges == 0)[0]
charge_shape_dict = {}
for idx in zero_idxs:
- charges = charge_combinations[idx]
- shapes = [
- degeneracies[leg][flows[leg] * charges[leg]] for leg in range(rank)
- ]
- charge_shape_dict[charges] = shapes
+ c = charge_combinations[idx]
+ shapes = [degeneracies[leg][flows[leg] * c[leg]] for leg in range(rank)]
+ charge_shape_dict[c] = shapes
return charge_shape_dict
def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
- quantum_numbers: List[np.ndarray],
+ charges: List[np.ndarray],
flows: List[Union[bool, int]]) -> dict:
"""
Given the meta data and underlying data of a symmetric matrix, compute
all diagonal blocks and return them in a dict.
Args:
data: An np.ndarray of the data. The number of elements in `data`
- has to match the number of non-zero elements defined by `quantum_numbers`
+ has to match the number of non-zero elements defined by `charges`
and `flows`
- quantum_numbers: List of np.ndarray, one for each leg.
- Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`.
+ charges: List of np.ndarray, one for each leg.
+ Each np.ndarray `charges[leg]` is of shape `(D[leg], Q)`.
The bond dimension `D[leg]` can vary on each leg, the number of
symmetries `Q` has to be the same for each leg.
flows: A list of integers, one for each leg,
@@ -166,14 +127,14 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
of the charges on each leg. `1` is inflowing, `-1` is outflowing
charge.
"""
- if len(quantum_numbers) != 2:
+ if len(charges) != 2:
raise ValueError("input has to be a two-dimensional symmetric matrix")
check_flows(flows)
- if len(flows) != len(quantum_numbers):
- raise ValueError("`len(flows)` is different from `len(quantum_numbers)`")
+ if len(flows) != len(charges):
+ raise ValueError("`len(flows)` is different from `len(charges)`")
- row_charges = quantum_numbers[0] # a list of charges on each row
- column_charges = quantum_numbers[1] # a list of charges on each column
+ row_charges = charges[0] # a list of charges on each row
+ column_charges = charges[1] # a list of charges on each column
# for each matrix column find the number of non-zero elements in it
# Note: the matrix is assumed to be symmetric, i.e. only elements where
# ingoing and outgoing charge are identical are non-zero
@@ -211,7 +172,7 @@ class BlockSparseTensor:
Attributes:
* self.data: A 1d np.ndarray storing the underlying
data of the tensor
- * self.quantum_numbers: A list of `np.ndarray` of shape
+ * self.charges: A list of `np.ndarray` of shape
(D, Q), where D is the bond dimension, and Q the number
of different symmetries (this is 1 for now).
* self.flows: A list of integers of length `k`.
@@ -223,67 +184,107 @@ class BlockSparseTensor:
The tensor data is stored in self.data, a 1d np.ndarray.
"""
- def __init__(self, data: np.ndarray, quantum_numbers: List[np.ndarray],
- flows: List[Union[bool, int]]) -> None:
+ def __init__(self, data: np.ndarray, indices: List[Index]) -> None:
"""
Args:
data: An np.ndarray of the data. The number of elements in `data`
- has to match the number of non-zero elements defined by `quantum_numbers`
+ has to match the number of non-zero elements defined by `charges`
and `flows`
- quantum_numbers: List of np.ndarray, one for each leg.
- Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`.
- The bond dimension `D[leg]` can vary on each leg, the number of
- symmetries `Q` has to be the same for each leg.
- flows: A list of integers, one for each leg,
- with values `1` or `-1`, denoting the flow direction
- of the charges on each leg. `1` is inflowing, `-1` is outflowing
- charge.
+ indices: List of `Index` objecst, one for each leg.
"""
- block_dict = compute_nonzero_block_shapes(quantum_numbers, flows)
- num_non_zero_elements = np.sum([np.prod(s) for s in block_dict.values()])
+ self.indices = indices
+ check_flows(self.flows)
+ num_non_zero_elements = compute_num_nonzero(self.charges, self.flows)
if num_non_zero_elements != len(data.flat):
raise ValueError("number of tensor elements defined "
- "by `quantum_numbers` is different from"
+ "by `charges` is different from"
" len(data)={}".format(len(data.flat)))
- check_flows(flows)
- if len(flows) != len(quantum_numbers):
- raise ValueError(
- "len(flows) = {} is different from len(quantum_numbers) = {}".format(
- len(flows), len(quantum_numbers)))
+
self.data = np.asarray(data.flat) #do not copy data
- self.flows = flows
- self.quantum_numbers = quantum_numbers
@classmethod
- def randn(cls,
- quantum_numbers: List[np.ndarray],
- flows: List[Union[bool, int]],
+ def randn(cls, indices: List[Index],
dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor":
"""
Initialize a random symmetric tensor from random normal distribution.
Args:
- quantum_numbers: List of np.ndarray, one for each leg.
- Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`.
- The bond dimension `D[leg]` can vary on each leg, the number of
- symmetries `Q` has to be the same for each leg.
- flows: A list of integers, one for each leg,
- with values `1` or `-1`, denoting the flow direction
- of the charges on each leg. `1` is inflowing, `-1` is outflowing
- charge.
+ indices: List of `Index` objecst, one for each leg.
dtype: An optional numpy dtype. The dtype of the tensor
Returns:
BlockSparseTensor
"""
- num_non_zero_elements = compute_num_nonzero(quantum_numbers, flows)
+ charges = [i.charges for i in indices]
+ flows = [i.flow for i in indices]
+ num_non_zero_elements = compute_num_nonzero(charges, flows)
backend = backend_factory.get_backend('numpy')
data = backend.randn((num_non_zero_elements,), dtype=dtype)
- return cls(data=data, quantum_numbers=quantum_numbers, flows=flows)
+ return cls(data=data, indices=indices)
@property
def shape(self) -> Tuple:
- return tuple([np.shape(q)[0] for q in self.quantum_numbers])
+ return tuple([i.dimension for i in self.indices])
@property
def dtype(self) -> Type[np.number]:
return self.data.dtype
+
+ @property
+ def flows(self):
+ return [i.flow for i in self.indices]
+
+ @property
+ def charges(self):
+ return [i.charges for i in self.indices]
+
+
+def reshape(tensor: BlockSparseTensor, shape: Tuple[int]):
+ # a few simple checks
+ if np.prod(shape) != np.prod(tensor.shape):
+ raise ValueError("A tensor with {} elements cannot be "
+ "reshaped into a tensor with {} elements".format(
+ np.prod(tensor.shape), np.prod(shape)))
+ #copy indices
+ result = BlockSparseTensor(
+ data=tensor.data.copy(), indices=[i.copy() for i in tensor.indices])
+
+ for n in range(len(shape)):
+ if shape[n] > result.shape[n]:
+ while shape[n] > result.shape[n]:
+ #fuse indices
+ i1, i2 = result.indices.pop(n), result.indices.pop(n)
+ #note: the resulting flow is set to one since the flow
+ #is multiplied into the charges. As a result the tensor
+ #will then be invariant in any case.
+ result.indices.insert(n, fuse_index_pair(i1, i2))
+ if result.shape[n] > shape[n]:
+ elementary_indices = []
+ for i in tensor.indices:
+ elementary_indices.extend(i.get_elementary_indices())
+ raise ValueError("The shape {} is incompatible with the "
+ "elementary shape {} of the tensor.".format(
+ shape,
+ tuple(
+ [e.dimension for e in elementary_indices])))
+
+ elif shape[n] < result.shape[n]:
+ while shape[n] < result.shape[n]:
+ #split index at n
+ try:
+ i1, i2 = split_index(result.indices.pop(n))
+ except ValueError:
+ elementary_indices = []
+ for i in tensor.indices:
+ elementary_indices.extend(i.get_elementary_indices())
+ raise ValueError("The shape {} is incompatible with the "
+ "elementary shape {} of the tensor.".format(
+ shape,
+ tuple(
+ [e.dimension for e in elementary_indices])))
+ result.indices.insert(n, i1)
+ result.indices.insert(n + 1, i2)
+ if result.shape[n] < shape[n]:
+ raise ValueError(
+ "shape {} is incompatible with the elementary result shape".format(
+ shape))
+ return result
From 307f2dc4ed005eaf318661a166553b8ec1cb5d3f Mon Sep 17 00:00:00 2001
From: Martin
Date: Fri, 29 Nov 2019 22:08:41 -0500
Subject: [PATCH 06/60] added Index, an index type for symmetric tensors
---
tensornetwork/block_tensor/index.py | 177 ++++++++++++++++++++++++++++
1 file changed, 177 insertions(+)
create mode 100644 tensornetwork/block_tensor/index.py
diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py
new file mode 100644
index 000000000..d17203dfb
--- /dev/null
+++ b/tensornetwork/block_tensor/index.py
@@ -0,0 +1,177 @@
+# Copyright 2019 The TensorNetwork Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+import numpy as np
+from tensornetwork.network_components import Node, contract, contract_between
+# pylint: disable=line-too-long
+from tensornetwork.backends import backend_factory
+
+import numpy as np
+import copy
+from typing import List, Union, Any, Optional, Tuple, Text
+
+
+class Index:
+ """
+ An index class to store indices of a symmetric tensor.
+ An index keeps track of all its childs by storing references
+ to them (i.e. it is a binary tree).
+ """
+
+ def __init__(self,
+ charges: Union[List, np.ndarray],
+ flow: int,
+ name: Optional[Text] = None,
+ left_child: Optional["Index"] = None,
+ right_child: Optional["Index"] = None):
+ self.charges = np.asarray(charges)
+ self.flow = flow
+ self.left_child = left_child
+ self.right_child = right_child
+ self.name = name if name else 'index'
+
+ @property
+ def dimension(self):
+ return len(self.charges)
+
+ def _copy_helper(self, index: "Index", copied_index: "Index") -> None:
+ """
+ Helper function for copy
+ """
+ if index.left_child != None:
+ left_copy = Index(
+ charges=index.left_child.charges.copy(),
+ flow=copy.copy(index.left_child.flow),
+ name=index.left_child.name)
+ copied_index.left_child = left_copy
+ self._copy_helper(index.left_child, left_copy)
+ if index.right_child != None:
+ right_copy = Index(
+ charges=index.right_child.charges.copy(),
+ flow=copy.copy(index.right_child.flow),
+ name=index.right_child.name)
+ copied_index.right_child = right_copy
+ self._copy_helper(index.right_child, right_copy)
+
+ def copy(self):
+ """
+ Returns:
+ Index: A deep copy of `Index`. Note that all children of
+ `Index` are copied as well.
+ """
+ index_copy = Index(
+ charges=self.charges.copy(), flow=copy.copy(self.flow), name=self.name)
+
+ self._copy_helper(self, index_copy)
+ return index_copy
+
+ def _leave_helper(self, index: "Index", leave_list: List) -> None:
+ if index.left_child:
+ self._leave_helper(index.left_child, leave_list)
+ if index.right_child:
+ self._leave_helper(index.right_child, leave_list)
+ if (index.left_child is None) and (index.right_child is None):
+ leave_list.append(index)
+
+ def get_elementary_indices(self) -> List:
+ """
+ Returns:
+ List: A list containing the elementary indices (the leaves)
+ of `Index`.
+ """
+ leave_list = []
+ self._leave_helper(self, leave_list)
+ return leave_list
+
+
+def fuse_charges(q1: Union[List, np.ndarray], flow1: int,
+ q2: Union[List, np.ndarray], flow2: int) -> np.ndarray:
+ """
+ Fuse charges `q1` with charges `q2` by simple addition (valid
+ for U(1) charges). `q1` and `q2` typically belong to two consecutive
+ legs of `BlockSparseTensor`.
+ Given `q1 = [0,1,2]` and `q2 = [10,100]`, this returns
+ `[10, 11, 12, 100, 101, 102]`.
+ When using column-major ordering of indices in `BlockSparseTensor`,
+ the position of q1 should be "to the left" of the position of q2.
+ Args:
+ q1: Iterable of integers
+ flow1: Flow direction of charge `q1`.
+ q2: Iterable of integers
+ flow2: Flow direction of charge `q2`.
+ Returns:
+ np.ndarray: The result of fusing `q1` with `q2`.
+ """
+ return np.reshape(
+ flow2 * np.asarray(q2)[:, None] + flow1 * np.asarray(q1)[None, :],
+ len(q1) * len(q2))
+
+
+def fuse_index_pair(left_index: Index,
+ right_index: Index,
+ flow: Optional[int] = 1) -> Index:
+ """
+ Fuse two consecutive indices (legs) of a symmetric tensor.
+ Args:
+ left_index: A tensor Index.
+ right_index: A tensor Index.
+ flow: An optional flow of the resulting `Index` object.
+ Returns:
+ Index: The result of fusing `index1` and `index2`.
+ """
+ #Fuse the charges of the two indices
+ if left_index is right_index:
+ raise ValueError(
+ "index1 and index2 are the same object. Can only fuse distinct objects")
+
+ fused_charges = fuse_charges(left_index.charges, left_index.flow,
+ right_index.charges, right_index.flow)
+ return Index(
+ charges=fused_charges,
+ flow=flow,
+ left_child=left_index,
+ right_child=right_index)
+
+
+def fuse_indices(indices: List[Index], flow: Optional[int] = 1) -> Index:
+ """
+ Fuse a list of indices (legs) of a symmetric tensor.
+ Args:
+ indices: A list of tensor Index objects
+ flow: An optional flow of the resulting `Index` object.
+ Returns:
+ Index: The result of fusing `indices`.
+ """
+
+ index = indices[0]
+ for n in range(1, len(indices)):
+ index = fuse_index_pair(index, indices[n], flow=flow)
+ return index
+
+
+def split_index(index: Index) -> Tuple[Index, Index]:
+ """
+ Split an index (leg) of a symmetric tensor into two legs.
+ Args:
+ index: A tensor Index.
+ Returns:
+ Tuple[Index, Index]: The result of splitting `index`.
+ """
+ if (not index.left_child) or (not index.right_child):
+ raise ValueError("cannot split an elementary index")
+
+ return index.left_child, index.right_child
From 1ebbc7faa6868723e49c2cac88fa9239a4a7b5a2 Mon Sep 17 00:00:00 2001
From: Martin
Date: Fri, 29 Nov 2019 22:28:53 -0500
Subject: [PATCH 07/60] added small tutorial
---
tensornetwork/block_tensor/tutorial.py | 44 ++++++++++++++++++++++++++
1 file changed, 44 insertions(+)
create mode 100644 tensornetwork/block_tensor/tutorial.py
diff --git a/tensornetwork/block_tensor/tutorial.py b/tensornetwork/block_tensor/tutorial.py
new file mode 100644
index 000000000..0cb0c5ede
--- /dev/null
+++ b/tensornetwork/block_tensor/tutorial.py
@@ -0,0 +1,44 @@
+# Copyright 2019 The TensorNetwork Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+import tensornetwork as tn
+import numpy as np
+import tensornetwork.block_tensor.block_tensor as BT
+import tensornetwork.block_tensor.index as IDX
+
+B = 4 # possible charges on each leg can be between [0,B)
+##########################################################
+##### Generate a rank 4 symmetrix tensor #######
+##########################################################
+
+# generate random charges on each leg of the tensor
+D1, D2, D3, D4 = 4, 6, 8, 10 #bond dimensions on each leg
+q1 = np.random.randint(0, B, D1)
+q2 = np.random.randint(0, B, D2)
+q3 = np.random.randint(0, B, D3)
+q4 = np.random.randint(0, B, D4)
+
+# generate Index objects for each leg. neccessary for initialization of
+# BlockSparseTensor
+i1 = IDX.Index(charges=q1, flow=1)
+i2 = IDX.Index(charges=q2, flow=-1)
+i3 = IDX.Index(charges=q3, flow=1)
+i4 = IDX.Index(charges=q4, flow=-1)
+
+# initialize a random symmetric tensor
+A = BT.BlockSparseTensor.randn(indices=[i1, i2, i3, i4], dtype=np.complex128)
+B = BT.reshape(A, (4, 48, 10))
From 1eb3d6f63fa65267482f0f6c07c697a22c50f8c6 Mon Sep 17 00:00:00 2001
From: Martin
Date: Fri, 29 Nov 2019 22:29:56 -0500
Subject: [PATCH 08/60] added docstring
---
tensornetwork/block_tensor/block_tensor.py | 34 +++++++++++++++++++++-
1 file changed, 33 insertions(+), 1 deletion(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 9d78479a8..3ac3691c3 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -239,6 +239,39 @@ def charges(self):
def reshape(tensor: BlockSparseTensor, shape: Tuple[int]):
+ """
+ Reshape `tensor` into `shape`.
+ `reshape` works essentially the same as the dense version, with the
+ notable exception that the tensor can only be reshaped into a form
+ compatible with its elementary indices. The elementary indices are
+ the indices at the leaves of the `Index` objects `tensors.indices`.
+ For example, while the following reshaping is possible for regular
+ dense numpy tensor,
+ ```
+ A = np.random.rand(6,6,6)
+ np.reshape(A, (2,3,6,6))
+ ```
+ the same code for BlockSparseTensor
+ ```
+ q1 = np.random.randint(0,10,6)
+ q2 = np.random.randint(0,10,6)
+ q3 = np.random.randint(0,10,6)
+ i1 = Index(charges=q1,flow=1)
+ i2 = Index(charges=q2,flow=-1)
+ i3 = Index(charges=q3,flow=1)
+ A=BlockSparseTensor.randn(indices=[i1,i2,i3])
+ print(A.shape) #prints (6,6,6)
+ reshape(A, (2,3,6,6)) #raises ValueError
+ ```
+ raises a `ValueError` since (2,3,6,6)
+ is incompatible with the elementary shape (6,6,6) of the tensor.
+
+ Args:
+ tensor: A symmetric tensor.
+ shape: The new shape.
+ Returns:
+ BlockSparseTensor: A new tensor reshaped into `shape`
+ """
# a few simple checks
if np.prod(shape) != np.prod(tensor.shape):
raise ValueError("A tensor with {} elements cannot be "
@@ -266,7 +299,6 @@ def reshape(tensor: BlockSparseTensor, shape: Tuple[int]):
shape,
tuple(
[e.dimension for e in elementary_indices])))
-
elif shape[n] < result.shape[n]:
while shape[n] < result.shape[n]:
#split index at n
From d25d8aa72e3502922805633e6b5b1bd3a50585c0 Mon Sep 17 00:00:00 2001
From: Martin
Date: Fri, 29 Nov 2019 23:23:11 -0500
Subject: [PATCH 09/60] fixed bug in retrieve_diagonal_blocks
---
tensornetwork/block_tensor/block_tensor.py | 172 +++++++++++++--------
1 file changed, 108 insertions(+), 64 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 3ac3691c3..55e78858e 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -47,8 +47,7 @@ def compute_num_nonzero(charges: List[np.ndarray],
of the charges on each leg. `1` is inflowing, `-1` is outflowing
charge.
Returns:
- dict: Dictionary mapping a tuple of charges to a shape tuple.
- Each element corresponds to a non-zero valued block of the tensor.
+ int: The number of non-zero elements.
"""
if len(charges) == 1:
@@ -127,48 +126,51 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
of the charges on each leg. `1` is inflowing, `-1` is outflowing
charge.
"""
+
if len(charges) != 2:
raise ValueError("input has to be a two-dimensional symmetric matrix")
check_flows(flows)
if len(flows) != len(charges):
raise ValueError("`len(flows)` is different from `len(charges)`")
- row_charges = charges[0] # a list of charges on each row
- column_charges = charges[1] # a list of charges on each column
+ row_charges = flows[0] * charges[0] # a list of charges on each row
+ column_charges = flows[1] * charges[1] # a list of charges on each column
# for each matrix column find the number of non-zero elements in it
# Note: the matrix is assumed to be symmetric, i.e. only elements where
# ingoing and outgoing charge are identical are non-zero
- num_non_zero = [len(np.nonzero(row_charges == c)[0]) for c in column_charges]
-
+ num_non_zero = [
+ len(np.nonzero((row_charges + c) == 0)[0]) for c in column_charges
+ ]
#get the unique charges
- #Note: row and column unique charges are the same due to symmetry
- unique_charges, row_dims = np.unique(row_charges, return_counts=True)
- _, column_dims = np.unique(column_charges, return_counts=True)
+ unique_row_charges, row_dims = np.unique(row_charges, return_counts=True)
+ unique_column_charges, column_dims = np.unique(
+ column_charges, return_counts=True)
- # get the degenaricies of each row and column charge
- row_degeneracies = dict(zip(unique_charges, row_dims))
- column_degeneracies = dict(zip(unique_charges, column_dims))
+ # get the degeneracies of each row and column charge
+ row_degeneracies = dict(zip(unique_row_charges, row_dims))
+ column_degeneracies = dict(zip(unique_column_charges, column_dims))
blocks = {}
- for c in unique_charges:
+ for c in unique_row_charges:
start = 0
idxs = []
for column in range(len(column_charges)):
charge = column_charges[column]
- if charge != c:
+ if (charge + c) != 0:
start += num_non_zero[column]
else:
idxs.extend(start + np.arange(num_non_zero[column]))
-
- blocks[c] = np.reshape(data[idxs],
- (row_degeneracies[c], column_degeneracies[c]))
+ if idxs:
+ blocks[c] = np.reshape(data[idxs],
+ (row_degeneracies[c], column_degeneracies[-c]))
return blocks
class BlockSparseTensor:
"""
Minimal class implementation of block sparsity.
- The class currently onluy supports a single U(1) symmetry.
- Currently only nump.ndarray is supported.
+ The class design follows Glen's proposal (Design 0).
+ The class currently only supports a single U(1) symmetry
+ and only nump.ndarray.
Attributes:
* self.data: A 1d np.ndarray storing the underlying
data of the tensor
@@ -221,6 +223,10 @@ def randn(cls, indices: List[Index],
data = backend.randn((num_non_zero_elements,), dtype=dtype)
return cls(data=data, indices=indices)
+ @property
+ def rank(self):
+ return len(self.indices)
+
@property
def shape(self) -> Tuple:
return tuple([i.dimension for i in self.indices])
@@ -237,6 +243,88 @@ def flows(self):
def charges(self):
return [i.charges for i in self.indices]
+ def reshape(self, shape):
+ """
+ Reshape `tensor` into `shape` in place.
+ `BlockSparseTensor.reshape` works essentially the same as the dense
+ version, with the notable exception that the tensor can only be
+ reshaped into a form compatible with its elementary indices.
+ The elementary indices are the indices at the leaves of the `Index`
+ objects `tensors.indices`.
+ For example, while the following reshaping is possible for regular
+ dense numpy tensor,
+ ```
+ A = np.random.rand(6,6,6)
+ np.reshape(A, (2,3,6,6))
+ ```
+ the same code for BlockSparseTensor
+ ```
+ q1 = np.random.randint(0,10,6)
+ q2 = np.random.randint(0,10,6)
+ q3 = np.random.randint(0,10,6)
+ i1 = Index(charges=q1,flow=1)
+ i2 = Index(charges=q2,flow=-1)
+ i3 = Index(charges=q3,flow=1)
+ A=BlockSparseTensor.randn(indices=[i1,i2,i3])
+ print(A.shape) #prints (6,6,6)
+ A.reshape((2,3,6,6)) #raises ValueError
+ ```
+ raises a `ValueError` since (2,3,6,6)
+ is incompatible with the elementary shape (6,6,6) of the tensor.
+
+ Args:
+ tensor: A symmetric tensor.
+ shape: The new shape.
+ Returns:
+ BlockSparseTensor: A new tensor reshaped into `shape`
+ """
+
+ # a few simple checks
+ if np.prod(shape) != np.prod(self.shape):
+ raise ValueError("A tensor with {} elements cannot be "
+ "reshaped into a tensor with {} elements".format(
+ np.prod(self.shape), np.prod(shape)))
+
+ def raise_error():
+ elementary_indices = []
+ for i in self.indices:
+ elementary_indices.extend(i.get_elementary_indices())
+ raise ValueError("The shape {} is incompatible with the "
+ "elementary shape {} of the tensor.".format(
+ shape,
+ tuple([e.dimension for e in elementary_indices])))
+
+ for n in range(len(shape)):
+ if shape[n] > self.shape[n]:
+ while shape[n] > self.shape[n]:
+ #fuse indices
+ i1, i2 = self.indices.pop(n), self.indices.pop(n)
+ #note: the resulting flow is set to one since the flow
+ #is multiplied into the charges. As a result the tensor
+ #will then be invariant in any case.
+ self.indices.insert(n, fuse_index_pair(i1, i2))
+ if self.shape[n] > shape[n]:
+ raise_error()
+ elif shape[n] < self.shape[n]:
+ while shape[n] < self.shape[n]:
+ #split index at n
+ try:
+ i1, i2 = split_index(self.indices.pop(n))
+ except ValueError:
+ raise_error()
+ self.indices.insert(n, i1)
+ self.indices.insert(n + 1, i2)
+ if self.shape[n] < shape[n]:
+ raise_error()
+
+ def get_diagonal_blocks(self):
+ if self.rank != 2:
+ raise ValueError(
+ "`get_diagonal_blocks` can only be called on a matrix, but found rank={}"
+ .format(self.rank))
+ return retrieve_non_zero_diagonal_blocks(
+ data=self.data, charges=self.charges, flows=self.flows)
+
def reshape(tensor: BlockSparseTensor, shape: Tuple[int]):
"""
@@ -272,51 +360,7 @@ def reshape(tensor: BlockSparseTensor, shape: Tuple[int]):
Returns:
BlockSparseTensor: A new tensor reshaped into `shape`
"""
- # a few simple checks
- if np.prod(shape) != np.prod(tensor.shape):
- raise ValueError("A tensor with {} elements cannot be "
- "reshaped into a tensor with {} elements".format(
- np.prod(tensor.shape), np.prod(shape)))
- #copy indices
result = BlockSparseTensor(
data=tensor.data.copy(), indices=[i.copy() for i in tensor.indices])
-
- for n in range(len(shape)):
- if shape[n] > result.shape[n]:
- while shape[n] > result.shape[n]:
- #fuse indices
- i1, i2 = result.indices.pop(n), result.indices.pop(n)
- #note: the resulting flow is set to one since the flow
- #is multiplied into the charges. As a result the tensor
- #will then be invariant in any case.
- result.indices.insert(n, fuse_index_pair(i1, i2))
- if result.shape[n] > shape[n]:
- elementary_indices = []
- for i in tensor.indices:
- elementary_indices.extend(i.get_elementary_indices())
- raise ValueError("The shape {} is incompatible with the "
- "elementary shape {} of the tensor.".format(
- shape,
- tuple(
- [e.dimension for e in elementary_indices])))
- elif shape[n] < result.shape[n]:
- while shape[n] < result.shape[n]:
- #split index at n
- try:
- i1, i2 = split_index(result.indices.pop(n))
- except ValueError:
- elementary_indices = []
- for i in tensor.indices:
- elementary_indices.extend(i.get_elementary_indices())
- raise ValueError("The shape {} is incompatible with the "
- "elementary shape {} of the tensor.".format(
- shape,
- tuple(
- [e.dimension for e in elementary_indices])))
- result.indices.insert(n, i1)
- result.indices.insert(n + 1, i2)
- if result.shape[n] < shape[n]:
- raise ValueError(
- "shape {} is incompatible with the elementary result shape".format(
- shape))
+ result.reshape(shape)
return result
From ae8cda65f1e050dff2ddc7399f5dfb5574a5c169 Mon Sep 17 00:00:00 2001
From: Martin
Date: Fri, 29 Nov 2019 23:31:28 -0500
Subject: [PATCH 10/60] TODO added
---
tensornetwork/block_tensor/block_tensor.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 55e78858e..510710901 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -49,11 +49,13 @@ def compute_num_nonzero(charges: List[np.ndarray],
Returns:
int: The number of non-zero elements.
"""
+ #TODO: this is not very efficient for large bond dimensions
if len(charges) == 1:
return len(charges)
net_charges = flows[0] * charges[0]
for i in range(1, len(flows)):
+ print(len(net_charges))
net_charges = np.reshape(
flows[i] * charges[i][:, None] + net_charges[None, :],
len(charges[i]) * len(net_charges))
From bbac9c4e75ebedd41e3841b7674dd82e56f4d134 Mon Sep 17 00:00:00 2001
From: Martin
Date: Fri, 29 Nov 2019 23:52:44 -0500
Subject: [PATCH 11/60] improved initialization a bit
---
tensornetwork/block_tensor/block_tensor.py | 34 ++++++++++++++++++----
1 file changed, 28 insertions(+), 6 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 510710901..e9a9e560f 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -50,15 +50,37 @@ def compute_num_nonzero(charges: List[np.ndarray],
int: The number of non-zero elements.
"""
#TODO: this is not very efficient for large bond dimensions
-
if len(charges) == 1:
return len(charges)
- net_charges = flows[0] * charges[0]
- for i in range(1, len(flows)):
- print(len(net_charges))
+
+ neg_flows = np.nonzero(np.asarray(flows) == -1)[0]
+ pos_flows = np.nonzero(np.asarray(flows) == 1)[0]
+ neg_max = 0
+ neg_min = 0
+ for i in neg_flows:
+ neg_max += np.max(charges[i])
+ neg_min += np.min(charges[i])
+
+ pos_max = 0
+ pos_min = 0
+ for i in pos_flows:
+ pos_max += np.max(charges[i])
+ pos_min += np.min(charges[i])
+
+ net_charges = charges[pos_flows[0]]
+ net_charges = net_charges[net_charges <= neg_max]
+ for i in range(1, len(pos_flows)):
+ net_charges = np.reshape(
+ charges[pos_flows[i]][:, None] + net_charges[None, :],
+ len(charges[pos_flows[i]]) * len(net_charges))
+ net_charges = net_charges[net_charges <= neg_max]
+ net_charges = net_charges[net_charges >= neg_min]
+
+ for i in range(len(neg_flows)):
net_charges = np.reshape(
- flows[i] * charges[i][:, None] + net_charges[None, :],
- len(charges[i]) * len(net_charges))
+ -1 * charges[neg_flows[i]][:, None] + net_charges[None, :],
+ len(charges[neg_flows[i]]) * len(net_charges))
+ net_charges = net_charges[net_charges <= neg_max]
return len(np.nonzero(net_charges == 0)[0])
From db828c7140bc774f68858a50394d8c2adcff0a61 Mon Sep 17 00:00:00 2001
From: Martin
Date: Sun, 1 Dec 2019 14:16:21 -0500
Subject: [PATCH 12/60] more efficient initialization
---
tensornetwork/block_tensor/block_tensor.py | 78 ++++++++++++----------
1 file changed, 42 insertions(+), 36 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index e9a9e560f..0817119d6 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -16,10 +16,11 @@
from __future__ import division
from __future__ import print_function
import numpy as np
-from tensornetwork.network_components import Node, contract, contract_between
# pylint: disable=line-too-long
+from tensornetwork.network_components import Node, contract, contract_between
from tensornetwork.backends import backend_factory
-from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index
+# pylint: disable=line-too-long
+from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges
import numpy as np
import itertools
from typing import List, Union, Any, Tuple, Type, Optional
@@ -38,8 +39,9 @@ def compute_num_nonzero(charges: List[np.ndarray],
Compute the number of non-zero elements, given the meta-data of
a symmetric tensor.
Args:
- charges: List of np.ndarray, one for each leg.
- Each np.ndarray `charges[leg]` is of shape `(D[leg], Q)`.
+ charges: List of np.ndarray, one for each leg of the
+ underlying tensor. Each np.ndarray `charges[leg]`
+ is of shape `(D[leg], Q)`.
The bond dimension `D[leg]` can vary on each leg, the number of
symmetries `Q` has to be the same for each leg.
flows: A list of integers, one for each leg,
@@ -51,38 +53,42 @@ def compute_num_nonzero(charges: List[np.ndarray],
"""
#TODO: this is not very efficient for large bond dimensions
if len(charges) == 1:
- return len(charges)
-
- neg_flows = np.nonzero(np.asarray(flows) == -1)[0]
- pos_flows = np.nonzero(np.asarray(flows) == 1)[0]
- neg_max = 0
- neg_min = 0
- for i in neg_flows:
- neg_max += np.max(charges[i])
- neg_min += np.min(charges[i])
-
- pos_max = 0
- pos_min = 0
- for i in pos_flows:
- pos_max += np.max(charges[i])
- pos_min += np.min(charges[i])
-
- net_charges = charges[pos_flows[0]]
- net_charges = net_charges[net_charges <= neg_max]
- for i in range(1, len(pos_flows)):
- net_charges = np.reshape(
- charges[pos_flows[i]][:, None] + net_charges[None, :],
- len(charges[pos_flows[i]]) * len(net_charges))
- net_charges = net_charges[net_charges <= neg_max]
- net_charges = net_charges[net_charges >= neg_min]
-
- for i in range(len(neg_flows)):
- net_charges = np.reshape(
- -1 * charges[neg_flows[i]][:, None] + net_charges[None, :],
- len(charges[neg_flows[i]]) * len(net_charges))
- net_charges = net_charges[net_charges <= neg_max]
-
- return len(np.nonzero(net_charges == 0)[0])
+ return len(np.nonzero(charges == 0)[0])
+ #get unique charges and their degeneracies on each leg
+ charge_degeneracies = [
+ np.unique(charge, return_counts=True) for charge in charges
+ ]
+ accumulated_charges, accumulated_degeneracies = charge_degeneracies[0]
+ #multiply the flow into the charges of first leg
+ accumulated_charges *= flows[0]
+ for n in range(1, len(charge_degeneracies)):
+ #list of unique charges and list of their degeneracies
+ #on the next unfused leg of the tensor
+ leg_charge, leg_degeneracies = charge_degeneracies[n]
+
+ #fuse the unique charges
+ #Note: entries in `fused_charges` are not unique anymore.
+ #flow1 = 1 because the flow of leg 0 has already been
+ #mulitplied above
+ fused_charges = fuse_charges(
+ q1=accumulated_charges, flow1=1, q2=leg_charge, flow2=flows[n])
+ #compute the degeneracies of `fused_charges` charges
+ #fused_degeneracies = np.kron(leg_degeneracies, accumulated_degeneracies)
+ fused_degeneracies = np.kron(leg_degeneracies, accumulated_degeneracies)
+ #compute the new degeneracies resulting of fusing the vectors of unique charges
+ #`accumulated_charges` and `leg_charge_2`
+ accumulated_charges = np.unique(fused_charges)
+ accumulated_degeneracies = []
+ for n in range(len(accumulated_charges)):
+ accumulated_degeneracies.append(
+ np.sum(fused_degeneracies[fused_charges == accumulated_charges[n]]))
+
+ accumulated_degeneracies = np.asarray(accumulated_degeneracies)
+ if len(np.nonzero(accumulated_charges == 0)[0]) == 0:
+ raise ValueError(
+ "given leg-charges `charges` and flows `flows` are incompatible "
+ "with a symmetric tensor")
+ return np.sum(accumulated_degeneracies[accumulated_charges == 0])
def compute_nonzero_block_shapes(charges: List[np.ndarray],
From 99204f741520ccc8ff9d662684afeec96c074580 Mon Sep 17 00:00:00 2001
From: Martin
Date: Sun, 1 Dec 2019 14:35:03 -0500
Subject: [PATCH 13/60] just formatting
---
tensornetwork/backends/numpy/numpy_backend.py | 25 +++++++++----------
1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/tensornetwork/backends/numpy/numpy_backend.py b/tensornetwork/backends/numpy/numpy_backend.py
index 7d0527b83..0246d32eb 100644
--- a/tensornetwork/backends/numpy/numpy_backend.py
+++ b/tensornetwork/backends/numpy/numpy_backend.py
@@ -43,9 +43,8 @@ def svd_decomposition(self,
max_singular_values: Optional[int] = None,
max_truncation_error: Optional[float] = None
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
- return decompositions.svd_decomposition(self.np, tensor, split_axis,
- max_singular_values,
- max_truncation_error)
+ return decompositions.svd_decomposition(
+ self.np, tensor, split_axis, max_singular_values, max_truncation_error)
def qr_decomposition(
self,
@@ -224,16 +223,16 @@ def eigs(self,
U = U.astype(dtype)
return list(eta), [U[:, n] for n in range(numeig)]
- def eigsh_lanczos(self,
- A: Callable,
- initial_state: Optional[Tensor] = None,
- num_krylov_vecs: Optional[int] = 200,
- numeig: Optional[int] = 1,
- tol: Optional[float] = 1E-8,
- delta: Optional[float] = 1E-8,
- ndiag: Optional[int] = 20,
- reorthogonalize: Optional[bool] = False
- ) -> Tuple[List, List]:
+ def eigsh_lanczos(
+ self,
+ A: Callable,
+ initial_state: Optional[Tensor] = None,
+ num_krylov_vecs: Optional[int] = 200,
+ numeig: Optional[int] = 1,
+ tol: Optional[float] = 1E-8,
+ delta: Optional[float] = 1E-8,
+ ndiag: Optional[int] = 20,
+ reorthogonalize: Optional[bool] = False) -> Tuple[List, List]:
"""
Lanczos method for finding the lowest eigenvector-eigenvalue pairs
of a linear operator `A`. If no `initial_state` is provided
From 73a9628d82e361a2691902516c9cf3ff81dc5128 Mon Sep 17 00:00:00 2001
From: Martin
Date: Sun, 1 Dec 2019 14:35:14 -0500
Subject: [PATCH 14/60] added random
---
tensornetwork/block_tensor/block_tensor.py | 32 ++++++++++++++++++++--
1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 0817119d6..089cd42ff 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -20,7 +20,7 @@
from tensornetwork.network_components import Node, contract, contract_between
from tensornetwork.backends import backend_factory
# pylint: disable=line-too-long
-from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges
+from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies
import numpy as np
import itertools
from typing import List, Union, Any, Tuple, Type, Optional
@@ -73,8 +73,8 @@ def compute_num_nonzero(charges: List[np.ndarray],
fused_charges = fuse_charges(
q1=accumulated_charges, flow1=1, q2=leg_charge, flow2=flows[n])
#compute the degeneracies of `fused_charges` charges
- #fused_degeneracies = np.kron(leg_degeneracies, accumulated_degeneracies)
- fused_degeneracies = np.kron(leg_degeneracies, accumulated_degeneracies)
+ fused_degeneracies = fuse_degeneracies(accumulated_degeneracies,
+ leg_degeneracies)
#compute the new degeneracies resulting of fusing the vectors of unique charges
#`accumulated_charges` and `leg_charge_2`
accumulated_charges = np.unique(fused_charges)
@@ -253,6 +253,32 @@ def randn(cls, indices: List[Index],
data = backend.randn((num_non_zero_elements,), dtype=dtype)
return cls(data=data, indices=indices)
+ @classmethod
+ def random(cls, indices: List[Index],
+ dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor":
+ """
+ Initialize a random symmetric tensor from random normal distribution.
+ Args:
+ indices: List of `Index` objecst, one for each leg.
+ dtype: An optional numpy dtype. The dtype of the tensor
+ Returns:
+ BlockSparseTensor
+ """
+ charges = [i.charges for i in indices]
+ flows = [i.flow for i in indices]
+ num_non_zero_elements = compute_num_nonzero(charges, flows)
+ dtype = dtype if dtype is not None else self.np.float64
+
+ def init_random():
+ if ((np.dtype(dtype) is np.dtype(np.complex128)) or
+ (np.dtype(dtype) is np.dtype(np.complex64))):
+ return np.random.rand(num_non_zero_elements).astype(
+ dtype) - 0.5 + 1j * (
+ np.random.rand(num_non_zero_elements).astype(dtype) - 0.5)
+ return np.random.randn(num_non_zero_elements).astype(dtype) - 0.5
+
+ return cls(data=init_random(), indices=indices)
+
@property
def rank(self):
return len(self.indices)
From efa64a49b439efd599e854a0c2f613c4a4258935 Mon Sep 17 00:00:00 2001
From: Martin
Date: Sun, 1 Dec 2019 14:35:46 -0500
Subject: [PATCH 15/60] added fuse_degeneracies
---
tensornetwork/block_tensor/index.py | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py
index d17203dfb..327734123 100644
--- a/tensornetwork/block_tensor/index.py
+++ b/tensornetwork/block_tensor/index.py
@@ -121,6 +121,27 @@ def fuse_charges(q1: Union[List, np.ndarray], flow1: int,
len(q1) * len(q2))
+def fuse_degeneracies(degen1: Union[List, np.ndarray],
+ degen2: Union[List, np.ndarray]) -> np.ndarray:
+ """
+ Fuse degeneracies `degen1` and `degen2` of two leg-charges
+ by simple kronecker product. `degen1` and `degen2` typically belong to two
+ consecutive legs of `BlockSparseTensor`.
+ Given `q1 = [0,1,2]` and `q2 = [10,100]`, this returns
+ `[10, 11, 12, 100, 101, 102]`.
+ When using column-major ordering of indices in `BlockSparseTensor`,
+ the position of q1 should be "to the left" of the position of q2.
+ Args:
+ q1: Iterable of integers
+ flow1: Flow direction of charge `q1`.
+ q2: Iterable of integers
+ flow2: Flow direction of charge `q2`.
+ Returns:
+ np.ndarray: The result of fusing `q1` with `q2`.
+ """
+ return np.kron(degen2, degen1)
+
+
def fuse_index_pair(left_index: Index,
right_index: Index,
flow: Optional[int] = 1) -> Index:
From 76191627e3ee7cfaf53a9702a8f61d5cdc24151a Mon Sep 17 00:00:00 2001
From: Martin
Date: Sun, 1 Dec 2019 14:57:27 -0500
Subject: [PATCH 16/60] fixed bug in reshape
---
tensornetwork/block_tensor/block_tensor.py | 17 +++++++++++++----
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 089cd42ff..c54efc950 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -341,14 +341,23 @@ def reshape(self, shape):
"reshaped into a tensor with {} elements".format(
np.prod(self.shape), np.prod(shape)))
+ #keep a copy of the old indices for the case where reshaping fails
+ #FIXME: this is pretty hacky!
+ index_copy = [i.copy() for i in self.indices]
+
def raise_error():
+ #if this error is raised `shape` is incompatible
+ #with the elementary indices. We have to reset them
+ #to the original.
+ self.indices = index_copy
elementary_indices = []
for i in self.indices:
elementary_indices.extend(i.get_elementary_indices())
- raise ValueError("The shape {} is incompatible with the "
- "elementary shape {} of the tensor.".format(
- shape,
- tuple([e.dimension for e in elementary_indices])))
+ print(elementary_indices)
+ raise ValueError("The shape {} is incompatible with the "
+ "elementary shape {} of the tensor.".format(
+ shape,
+ tuple([e.dimension for e in elementary_indices])))
for n in range(len(shape)):
if shape[n] > self.shape[n]:
From 2be30a9be5c6b2d187864609aa24ccb3c752b92c Mon Sep 17 00:00:00 2001
From: Martin
Date: Sun, 1 Dec 2019 15:45:25 -0500
Subject: [PATCH 17/60] dosctring, typing
---
tensornetwork/block_tensor/block_tensor.py | 61 +++++++++++++++++++---
1 file changed, 55 insertions(+), 6 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index c54efc950..05cf647aa 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -23,7 +23,8 @@
from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies
import numpy as np
import itertools
-from typing import List, Union, Any, Tuple, Type, Optional
+import time
+from typing import List, Union, Any, Tuple, Type, Optional, Dict
Tensor = Any
@@ -92,7 +93,7 @@ def compute_num_nonzero(charges: List[np.ndarray],
def compute_nonzero_block_shapes(charges: List[np.ndarray],
- flows: List[Union[bool, int]]) -> dict:
+ flows: List[Union[bool, int]]) -> Dict:
"""
Compute the blocks and their respective shapes of a symmetric tensor,
given its meta-data.
@@ -139,10 +140,11 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray],
def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
charges: List[np.ndarray],
- flows: List[Union[bool, int]]) -> dict:
+ flows: List[Union[bool, int]]) -> Dict:
"""
Given the meta data and underlying data of a symmetric matrix, compute
all diagonal blocks and return them in a dict.
+ !!!!!!!!! This is currently very slow!!!!!!!!!!!!
Args:
data: An np.ndarray of the data. The number of elements in `data`
has to match the number of non-zero elements defined by `charges`
@@ -156,6 +158,37 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
of the charges on each leg. `1` is inflowing, `-1` is outflowing
charge.
"""
+ #TODO: this is currently way too slow!!!!
+ #Run the following benchmark for testing (typical MPS use case)
+ #retrieving the blocks is ~ 10 times as slow as mulitplying all of them
+
+ # D=4000
+ # B=10
+ # q1 = np.random.randint(0,B,D)
+ # q2 = np.asarray([0,1])
+ # q3 = np.random.randint(0,B,D)
+ # i1 = Index(charges=q1,flow=1)
+ # i2 = Index(charges=q2,flow=1)
+ # i3 = Index(charges=q3,flow=-1)
+ # indices=[i1,i2,i3]
+ # A=BT.BlockSparseTensor.random(indices=indices, dtype=np.complex128)
+ # ts = []
+ # A.reshape((D*2, D))
+ # def multiply_blocks(blocks):
+ # for b in blocks.values():
+ # np.dot(b.T, b)
+ # t1s=[]
+ # t2s=[]
+ # for n in range(10):
+ # print(n)
+ # t1 = time.time()
+ # b = A.get_diagonal_blocks()
+ # t1s.append(time.time() - t1)
+ # t1 = time.time()
+ # multiply_blocks(b)
+ # t2s.append(time.time() - t1)
+ # print('average retrieval time', np.average(t1s))
+ # print('average multiplication time',np.average(t2s))
if len(charges) != 2:
raise ValueError("input has to be a two-dimensional symmetric matrix")
@@ -180,9 +213,12 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
row_degeneracies = dict(zip(unique_row_charges, row_dims))
column_degeneracies = dict(zip(unique_column_charges, column_dims))
blocks = {}
+
for c in unique_row_charges:
start = 0
idxs = []
+ #TODO: this for loop can be replaced with something
+ #more sophisticated (i.e. using numpy lookups and sums)
for column in range(len(column_charges)):
charge = column_charges[column]
if (charge + c) != 0:
@@ -190,7 +226,7 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
else:
idxs.extend(start + np.arange(num_non_zero[column]))
if idxs:
- blocks[c] = np.reshape(data[idxs],
+ blocks[c] = np.reshape(data[np.asarray(idxs)],
(row_degeneracies[c], column_degeneracies[-c]))
return blocks
@@ -299,6 +335,13 @@ def flows(self):
def charges(self):
return [i.charges for i in self.indices]
+ def transpose(self, order):
+ """
+ Transpose the tensor into the new order `order`
+
+ """
+ raise NotImplementedError('transpose is not implemented!!')
+
def reshape(self, shape):
"""
Reshape `tensor` into `shape` in place.
@@ -382,7 +425,13 @@ def raise_error():
if self.shape[n] < shape[n]:
raise_error()
- def get_diagonal_blocks(self):
+ def get_diagonal_blocks(self) -> Dict:
+ """
+ Obtain the diagonal blocks of symmetric matrix.
+ BlockSparseTensor has to be a matrix.
+ Returns:
+ dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix)
+ """
if self.rank != 2:
raise ValueError(
"`get_diagonal_blocks` can only be called on a matrix, but found rank={}"
@@ -391,7 +440,7 @@ def get_diagonal_blocks(self):
data=self.data, charges=self.charges, flows=self.flows)
-def reshape(tensor: BlockSparseTensor, shape: Tuple[int]):
+def reshape(tensor: BlockSparseTensor, shape: Tuple[int]) -> BlockSparseTensor:
"""
Reshape `tensor` into `shape`.
`reshape` works essentially the same as the dense version, with the
From 742824f1c2a63211f66df5444ce6a453eba1ce66 Mon Sep 17 00:00:00 2001
From: Martin
Date: Sun, 1 Dec 2019 15:57:49 -0500
Subject: [PATCH 18/60] removed TODO
---
tensornetwork/block_tensor/block_tensor.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 05cf647aa..7b83a2778 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -52,7 +52,6 @@ def compute_num_nonzero(charges: List[np.ndarray],
Returns:
int: The number of non-zero elements.
"""
- #TODO: this is not very efficient for large bond dimensions
if len(charges) == 1:
return len(np.nonzero(charges == 0)[0])
#get unique charges and their degeneracies on each leg
From 2e6c3957b30a68494e4f23f64de75667468e11d4 Mon Sep 17 00:00:00 2001
From: Martin
Date: Sun, 1 Dec 2019 15:58:35 -0500
Subject: [PATCH 19/60] removed confusing code line
---
tensornetwork/block_tensor/block_tensor.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 7b83a2778..6dacc7330 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -88,7 +88,7 @@ def compute_num_nonzero(charges: List[np.ndarray],
raise ValueError(
"given leg-charges `charges` and flows `flows` are incompatible "
"with a symmetric tensor")
- return np.sum(accumulated_degeneracies[accumulated_charges == 0])
+ return accumulated_degeneracies[accumulated_charges == 0]
def compute_nonzero_block_shapes(charges: List[np.ndarray],
From ab13d4a24573eca1eab7e6f997aaf3cc27844278 Mon Sep 17 00:00:00 2001
From: Martin
Date: Sun, 1 Dec 2019 15:59:14 -0500
Subject: [PATCH 20/60] bug removed
---
tensornetwork/block_tensor/block_tensor.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 6dacc7330..b768a918e 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -88,7 +88,7 @@ def compute_num_nonzero(charges: List[np.ndarray],
raise ValueError(
"given leg-charges `charges` and flows `flows` are incompatible "
"with a symmetric tensor")
- return accumulated_degeneracies[accumulated_charges == 0]
+ return accumulated_degeneracies[accumulated_charges == 0][0]
def compute_nonzero_block_shapes(charges: List[np.ndarray],
From d375b1d6744d6e1fb5b25f804ef62fe030538954 Mon Sep 17 00:00:00 2001
From: Martin
Date: Sun, 1 Dec 2019 16:41:13 -0500
Subject: [PATCH 21/60] comment
---
tensornetwork/block_tensor/block_tensor.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index b768a918e..154ae4993 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -159,7 +159,7 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
"""
#TODO: this is currently way too slow!!!!
#Run the following benchmark for testing (typical MPS use case)
- #retrieving the blocks is ~ 10 times as slow as mulitplying all of them
+ #retrieving the blocks is ~ 10 times as slow as multiplying them
# D=4000
# B=10
From 2727cd07797768b065b2d7e83960be7d36030fcc Mon Sep 17 00:00:00 2001
From: Martin
Date: Mon, 2 Dec 2019 09:21:15 -0500
Subject: [PATCH 22/60] added __mul__ to Index
---
tensornetwork/block_tensor/index.py | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py
index 327734123..1549a422e 100644
--- a/tensornetwork/block_tensor/index.py
+++ b/tensornetwork/block_tensor/index.py
@@ -97,6 +97,15 @@ def get_elementary_indices(self) -> List:
self._leave_helper(self, leave_list)
return leave_list
+ def __mul__(self, index: "Index") -> "Index":
+ """
+ Merge `index` and self into a single larger index.
+ The flow of the resulting index is set to 1.
+ Flows of `self` and `index` are multiplied into
+ the charges upon fusing.
+ """
+ return fuse_index_pair(self, index)
+
def fuse_charges(q1: Union[List, np.ndarray], flow1: int,
q2: Union[List, np.ndarray], flow2: int) -> np.ndarray:
From 283e36478b8cec4c037c0c6d40d47e8b5eb7ed14 Mon Sep 17 00:00:00 2001
From: Martin
Date: Mon, 2 Dec 2019 09:21:39 -0500
Subject: [PATCH 23/60] added sparse_shape
and updated reshape to accept both int and Index lists
---
tensornetwork/block_tensor/block_tensor.py | 79 ++++++++++++++++------
1 file changed, 60 insertions(+), 19 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 154ae4993..225dacc45 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -24,7 +24,7 @@
import numpy as np
import itertools
import time
-from typing import List, Union, Any, Tuple, Type, Optional, Dict
+from typing import List, Union, Any, Tuple, Type, Optional, Dict, Iterable
Tensor = Any
@@ -170,8 +170,7 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
# i2 = Index(charges=q2,flow=1)
# i3 = Index(charges=q3,flow=-1)
# indices=[i1,i2,i3]
- # A=BT.BlockSparseTensor.random(indices=indices, dtype=np.complex128)
- # ts = []
+ # A = BlockSparseTensor.random(indices=indices, dtype=np.complex128)
# A.reshape((D*2, D))
# def multiply_blocks(blocks):
# for b in blocks.values():
@@ -235,7 +234,7 @@ class BlockSparseTensor:
Minimal class implementation of block sparsity.
The class design follows Glen's proposal (Design 0).
The class currently only supports a single U(1) symmetry
- and only nump.ndarray.
+ and only numpy.ndarray.
Attributes:
* self.data: A 1d np.ndarray storing the underlying
data of the tensor
@@ -318,8 +317,40 @@ def init_random():
def rank(self):
return len(self.indices)
+ #TODO: we should consider to switch the names
+ #`BlockSparseTensor.sparse_shape` and `BlockSparseTensor.shape`,
+  #i.e. have `BlockSparseTensor.shape` return the sparse shape of the tensor.
+ #This may be more convenient for building tensor-type and backend
+ #agnostic code. For example, in MPS code we essentially never
+ #explicitly set a shape to a certain value (apart from initialization).
+ #That is, code like this
+ #```
+ #tensor = np.random.rand(10,10,10)
+ #```
+ #is never used. Rather one inquires shapes of tensors and
+ #multiplies them to get new shapes:
+ #```
+ #new_tensor = reshape(tensor, [tensor.shape[0]*tensor.shape[1], tensor.shape[2]])
+ #```
+  #Thus the return type of `BlockSparseTensor.shape` is never inspected explicitly
+ #(apart from debugging).
+ @property
+ def sparse_shape(self) -> Tuple:
+ """
+ The sparse shape of the tensor.
+ Returns a copy of self.indices. Note that copying
+ can be relatively expensive for deeply nested indices.
+ Returns:
+ Tuple: A tuple of `Index` objects.
+ """
+
+ return tuple([i.copy() for i in self.indices])
+
@property
def shape(self) -> Tuple:
+ """
+ The dense shape of the tensor.
+ """
return tuple([i.dimension for i in self.indices])
@property
@@ -339,9 +370,10 @@ def transpose(self, order):
Transpose the tensor into the new order `order`
"""
+
raise NotImplementedError('transpose is not implemented!!')
- def reshape(self, shape):
+ def reshape(self, shape: Union[Iterable[Index], Iterable[int]]) -> None:
"""
Reshape `tensor` into `shape` in place.
`BlockSparseTensor.reshape` works essentially the same as the dense
@@ -372,16 +404,23 @@ def reshape(self, shape):
Args:
tensor: A symmetric tensor.
- shape: The new shape.
+ shape: The new shape. Can either be a list of `Index`
+ or a list of `int`.
Returns:
BlockSparseTensor: A new tensor reshaped into `shape`
"""
-
+ dense_shape = []
+ for s in shape:
+ if isinstance(s, Index):
+ dense_shape.append(s.dimension)
+ else:
+ dense_shape.append(s)
# a few simple checks
- if np.prod(shape) != np.prod(self.shape):
+
+ if np.prod(dense_shape) != np.prod(self.shape):
raise ValueError("A tensor with {} elements cannot be "
"reshaped into a tensor with {} elements".format(
- np.prod(self.shape), np.prod(shape)))
+ np.prod(self.shape), np.prod(dense_shape)))
#keep a copy of the old indices for the case where reshaping fails
#FIXME: this is pretty hacky!
@@ -398,22 +437,22 @@ def raise_error():
print(elementary_indices)
raise ValueError("The shape {} is incompatible with the "
"elementary shape {} of the tensor.".format(
- shape,
+ dense_shape,
tuple([e.dimension for e in elementary_indices])))
- for n in range(len(shape)):
- if shape[n] > self.shape[n]:
- while shape[n] > self.shape[n]:
+ for n in range(len(dense_shape)):
+ if dense_shape[n] > self.shape[n]:
+ while dense_shape[n] > self.shape[n]:
#fuse indices
i1, i2 = self.indices.pop(n), self.indices.pop(n)
#note: the resulting flow is set to one since the flow
#is multiplied into the charges. As a result the tensor
#will then be invariant in any case.
self.indices.insert(n, fuse_index_pair(i1, i2))
- if self.shape[n] > shape[n]:
+ if self.shape[n] > dense_shape[n]:
raise_error()
- elif shape[n] < self.shape[n]:
- while shape[n] < self.shape[n]:
+ elif dense_shape[n] < self.shape[n]:
+ while dense_shape[n] < self.shape[n]:
#split index at n
try:
i1, i2 = split_index(self.indices.pop(n))
@@ -421,7 +460,7 @@ def raise_error():
raise_error()
self.indices.insert(n, i1)
self.indices.insert(n + 1, i2)
- if self.shape[n] < shape[n]:
+ if self.shape[n] < dense_shape[n]:
raise_error()
def get_diagonal_blocks(self) -> Dict:
@@ -439,7 +478,8 @@ def get_diagonal_blocks(self) -> Dict:
data=self.data, charges=self.charges, flows=self.flows)
-def reshape(tensor: BlockSparseTensor, shape: Tuple[int]) -> BlockSparseTensor:
+def reshape(tensor: BlockSparseTensor,
+ shape: Union[Iterable[Index], Iterable[int]]) -> BlockSparseTensor:
"""
Reshape `tensor` into `shape`.
`reshape` works essentially the same as the dense version, with the
@@ -469,7 +509,8 @@ def reshape(tensor: BlockSparseTensor, shape: Tuple[int]) -> BlockSparseTensor:
Args:
tensor: A symmetric tensor.
- shape: The new shape.
+ shape: The new shape. Can either be a list of `Index`
+ or a list of `int`.
Returns:
BlockSparseTensor: A new tensor reshaped into `shape`
"""
From 7328ad406561e72359d89dd04521583b9fb360dc Mon Sep 17 00:00:00 2001
From: Martin
Date: Mon, 2 Dec 2019 09:27:19 -0500
Subject: [PATCH 24/60] more in tutorial
---
tensornetwork/block_tensor/tutorial.py | 15 ++++++++++++++-
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/tensornetwork/block_tensor/tutorial.py b/tensornetwork/block_tensor/tutorial.py
index 0cb0c5ede..fe824ee87 100644
--- a/tensornetwork/block_tensor/tutorial.py
+++ b/tensornetwork/block_tensor/tutorial.py
@@ -41,4 +41,17 @@
# initialize a random symmetric tensor
A = BT.BlockSparseTensor.randn(indices=[i1, i2, i3, i4], dtype=np.complex128)
-B = BT.reshape(A, (4, 48, 10))
+B = BT.reshape(A, (4, 48, 10)) #creates a new tensor (copy)
+shape_A = A.shape #returns the dense shape of A
+A.reshape([shape_A[0] * shape_A[1], shape_A[2],
+ shape_A[3]]) #in place reshaping
+A.reshape(shape_A) #reshape back into original shape
+
+sparse_shape = A.sparse_shape #returns a copy of `A.indices`. Each `Index` object is copied
+
+new_sparse_shape = [
+ sparse_shape[0] * sparse_shape[1], sparse_shape[2], sparse_shape[3]
+]
+B = BT.reshape(A, new_sparse_shape) #return a copy
+A.reshape(new_sparse_shape) #in place reshaping
+A.reshape(sparse_shape) #bring A back into original shape
From e5b614772e53253448c224bd2ea9b36429724bff Mon Sep 17 00:00:00 2001
From: Martin
Date: Mon, 2 Dec 2019 09:28:06 -0500
Subject: [PATCH 25/60] comment
---
tensornetwork/block_tensor/tutorial.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tensornetwork/block_tensor/tutorial.py b/tensornetwork/block_tensor/tutorial.py
index fe824ee87..01e5eabf0 100644
--- a/tensornetwork/block_tensor/tutorial.py
+++ b/tensornetwork/block_tensor/tutorial.py
@@ -47,7 +47,7 @@
shape_A[3]]) #in place reshaping
A.reshape(shape_A) #reshape back into original shape
-sparse_shape = A.sparse_shape #returns a copy of `A.indices`. Each `Index` object is copied
+sparse_shape = A.sparse_shape #returns a deep copy of `A.indices`.
new_sparse_shape = [
sparse_shape[0] * sparse_shape[1], sparse_shape[2], sparse_shape[3]
From eb91c7942a29dfa11fc5b62b5a8872354196f479 Mon Sep 17 00:00:00 2001
From: "martin.ganahl@gmail.com"
Date: Mon, 2 Dec 2019 14:05:11 -0500
Subject: [PATCH 26/60] added new test function
---
tensornetwork/block_tensor/block_tensor.py | 81 ++++++++++++++++++----
1 file changed, 68 insertions(+), 13 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 225dacc45..7c6c2b499 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -40,11 +40,10 @@ def compute_num_nonzero(charges: List[np.ndarray],
Compute the number of non-zero elements, given the meta-data of
a symmetric tensor.
Args:
- charges: List of np.ndarray, one for each leg of the
+ charges: List of np.ndarray of int, one for each leg of the
underlying tensor. Each np.ndarray `charges[leg]`
- is of shape `(D[leg], Q)`.
- The bond dimension `D[leg]` can vary on each leg, the number of
- symmetries `Q` has to be the same for each leg.
+ is of shape `(D[leg],)`.
+ The bond dimension `D[leg]` can vary on each leg.
flows: A list of integers, one for each leg,
with values `1` or `-1`, denoting the flow direction
of the charges on each leg. `1` is inflowing, `-1` is outflowing
@@ -98,9 +97,8 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray],
given its meta-data.
Args:
charges: List of np.ndarray, one for each leg.
- Each np.ndarray `charges[leg]` is of shape `(D[leg], Q)`.
- The bond dimension `D[leg]` can vary on each leg, the number of
- symmetries `Q` has to be the same for each leg.
+ Each np.ndarray `charges[leg]` is of shape `(D[leg],)`.
+ The bond dimension `D[leg]` can vary on each leg.
flows: A list of integers, one for each leg,
with values `1` or `-1`, denoting the flow direction
of the charges on each leg. `1` is inflowing, `-1` is outflowing
@@ -149,9 +147,8 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
has to match the number of non-zero elements defined by `charges`
and `flows`
charges: List of np.ndarray, one for each leg.
- Each np.ndarray `charges[leg]` is of shape `(D[leg], Q)`.
- The bond dimension `D[leg]` can vary on each leg, the number of
- symmetries `Q` has to be the same for each leg.
+ Each np.ndarray `charges[leg]` is of shape `(D[leg],)`.
+ The bond dimension `D[leg]` can vary on each leg.
flows: A list of integers, one for each leg,
with values `1` or `-1`, denoting the flow direction
of the charges on each leg. `1` is inflowing, `-1` is outflowing
@@ -229,6 +226,64 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
return blocks
+def retrieve_non_zero_diagonal_blocks_test(data: np.ndarray,
+ charges: List[np.ndarray],
+ flows: List[Union[bool, int]]
+ ) -> Dict:
+ """
+ Testing function, does the same as `retrieve_non_zero_diagonal_blocks`,
+ but should be faster
+ """
+
+ if len(charges) != 2:
+ raise ValueError("input has to be a two-dimensional symmetric matrix")
+ check_flows(flows)
+ if len(flows) != len(charges):
+ raise ValueError("`len(flows)` is different from `len(charges)`")
+
+ #a 1d array of the net charges.
+ net_charges = fuse_charges(
+ q1=charges[0], flow1=flows[0], q2=charges[1], flow2=flows[1])
+ #a 1d array containing row charges added with zero column charges
+ #used to find the positions of the unique charges
+ tmp = fuse_charges(
+ q1=charges[0],
+ flow1=flows[0],
+ q2=np.zeros(charges[1].shape[0], dtype=charges[1].dtype),
+ flow2=1)
+ unique_charges = np.unique(charges[0] * flows[0])
+ symmetric_indices = net_charges == 0
+ charge_lookup = tmp[symmetric_indices]
+ blocks = {}
+ for c in unique_charges:
+ blocks[c] = data[charge_lookup == c]
+ return blocks
+
+
+def compute_mapping_table(charges: List[np.ndarray],
+ flows: List[Union[bool, int]]) -> int:
+ """
+ Compute a mapping table mapping the linear positions of the non-zero
+ elements to their multi-index label.
+ Args:
+ charges: List of np.ndarray of int, one for each leg of the
+ underlying tensor. Each np.ndarray `charges[leg]`
+ is of shape `(D[leg],)`.
+ The bond dimension `D[leg]` can vary on each leg.
+ flows: A list of integers, one for each leg,
+ with values `1` or `-1`, denoting the flow direction
+ of the charges on each leg. `1` is inflowing, `-1` is outflowing
+ charge.
+ Returns:
+ np.ndarray: An (N, r) np.ndarray of dtype np.int16,
+ with `N` the number of non-zero elements, and `r`
+ the rank of the tensor.
+ """
+ tables = np.meshgrid([np.arange(c.shape[0]) for c in charges], indexing='ij')
+ tables = tables[::-1] #reverse the order
+ pass
+
+
class BlockSparseTensor:
"""
Minimal class implementation of block sparsity.
@@ -239,8 +294,9 @@ class BlockSparseTensor:
* self.data: A 1d np.ndarray storing the underlying
data of the tensor
* self.charges: A list of `np.ndarray` of shape
- (D, Q), where D is the bond dimension, and Q the number
- of different symmetries (this is 1 for now).
+ (D,), where D is the bond dimension. Once we go beyond
+ a single U(1) symmetry, this has to be updated.
+
* self.flows: A list of integers of length `k`.
`self.flows` determines the flows direction of charges
on each leg of the tensor. A value of `-1` denotes
@@ -368,7 +424,6 @@ def charges(self):
def transpose(self, order):
"""
Transpose the tensor into the new order `order`
-
"""
raise NotImplementedError('transpose is not implemented!!')
From a544dbc719d10a85cd6646fd3aad5821a4dccf55 Mon Sep 17 00:00:00 2001
From: Martin
Date: Mon, 2 Dec 2019 15:15:15 -0500
Subject: [PATCH 27/60] testing function hacking
---
tensornetwork/block_tensor/block_tensor.py | 40 +++++++++++++---------
1 file changed, 24 insertions(+), 16 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 7c6c2b499..2df966a77 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -226,13 +226,11 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
return blocks
-def retrieve_non_zero_diagonal_blocks_test(data: np.ndarray,
- charges: List[np.ndarray],
- flows: List[Union[bool, int]]
- ) -> Dict:
+def retrieve_non_zero_diagonal_blocks_test(
+ data: np.ndarray, charges: List[np.ndarray],
+ flows: List[Union[bool, int]]) -> Dict:
"""
- Testing function, does the same as `retrieve_non_zero_diagonal_blocks`,
- but should be faster
+ Testing function, does the same as `retrieve_non_zero_diagonal_blocks`.
"""
if len(charges) != 2:
@@ -241,22 +239,32 @@ def retrieve_non_zero_diagonal_blocks_test(data: np.ndarray,
if len(flows) != len(charges):
raise ValueError("`len(flows)` is different from `len(charges)`")
+ #get the unique charges
+ unique_row_charges, row_dims = np.unique(
+ flows[0] * charges[0], return_counts=True)
+ unique_column_charges, column_dims = np.unique(
+ flows[1] * charges[1], return_counts=True)
+
#a 1d array of the net charges.
net_charges = fuse_charges(
q1=charges[0], flow1=flows[0], q2=charges[1], flow2=flows[1])
#a 1d array containing row charges added with zero column charges
- #used to find the positions of the unique charges
- tmp = fuse_charges(
- q1=charges[0],
- flow1=flows[0],
- q2=np.zeros(charges[1].shape[0], dtype=charges[1].dtype),
- flow2=1)
- unique_charges = np.unique(charges[0] * flows[0])
+  #used to find the indices in data corresponding to a given charge
+ #(see below)
+ tmp = np.tile(charges[0] * flows[0], len(charges[1]))
+
symmetric_indices = net_charges == 0
charge_lookup = tmp[symmetric_indices]
+
+ row_degeneracies = dict(zip(unique_row_charges, row_dims))
+ column_degeneracies = dict(zip(unique_column_charges, column_dims))
blocks = {}
- for c in unique_charges:
- blocks[c] = data[charge_lookup == c]
+
+ common_charges = np.intersect1d(unique_row_charges, -unique_column_charges)
+ for c in common_charges:
+ blocks[c] = np.reshape(data[charge_lookup == c],
+ (row_degeneracies[c], column_degeneracies[-c]))
+
return blocks
@@ -281,7 +289,7 @@ def compute_mapping_table(charges: List[np.ndarray],
"""
tables = np.meshgrid([np.arange(c.shape[0]) for c in charges], indexing='ij')
tables = tables[::-1] #reverse the order
- pass
+ raise NotImplementedError()
class BlockSparseTensor:
From 0457cca404c6a40ec3db7e52022139204bd67bca Mon Sep 17 00:00:00 2001
From: Martin
Date: Mon, 2 Dec 2019 15:16:30 -0500
Subject: [PATCH 28/60] docstring
---
tensornetwork/block_tensor/block_tensor.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 2df966a77..4de358f2c 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -231,6 +231,7 @@ def retrieve_non_zero_diagonal_blocks_test(
flows: List[Union[bool, int]]) -> Dict:
"""
Testing function, does the same as `retrieve_non_zero_diagonal_blocks`.
+ This is very slow for high rank tensors with many blocks
"""
if len(charges) != 2:
From 95958a740d387693bb65766f606c4ce6839bbd8e Mon Sep 17 00:00:00 2001
From: Martin
Date: Tue, 3 Dec 2019 14:33:58 -0500
Subject: [PATCH 29/60] small speed up
---
tensornetwork/block_tensor/block_tensor.py | 33 +++++++++++++---------
1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 4de358f2c..73e1063b0 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -193,33 +193,36 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
row_charges = flows[0] * charges[0] # a list of charges on each row
column_charges = flows[1] * charges[1] # a list of charges on each column
- # for each matrix column find the number of non-zero elements in it
- # Note: the matrix is assumed to be symmetric, i.e. only elements where
- # ingoing and outgoing charge are identical are non-zero
- num_non_zero = [
- len(np.nonzero((row_charges + c) == 0)[0]) for c in column_charges
- ]
+
#get the unique charges
unique_row_charges, row_dims = np.unique(row_charges, return_counts=True)
unique_column_charges, column_dims = np.unique(
column_charges, return_counts=True)
+ common_charges = np.intersect1d(flows[0] * unique_row_charges,
+ flows[1] * unique_column_charges)
+
+ # for each matrix column find the number of non-zero elements in it
+ # Note: the matrix is assumed to be symmetric, i.e. only elements where
+ # ingoing and outgoing charge are identical are non-zero
# get the degeneracies of each row and column charge
row_degeneracies = dict(zip(unique_row_charges, row_dims))
column_degeneracies = dict(zip(unique_column_charges, column_dims))
blocks = {}
-
- for c in unique_row_charges:
+ #TODO: the nested loops could probably be easily moved to cython
+ for c in common_charges:
start = 0
idxs = []
#TODO: this for loop can be replaced with something
#more sophisticated (i.e. using numpy lookups and sums)
for column in range(len(column_charges)):
charge = column_charges[column]
+ if charge not in common_charges:
+ continue
if (charge + c) != 0:
- start += num_non_zero[column]
+ start += row_degeneracies[c]
else:
- idxs.extend(start + np.arange(num_non_zero[column]))
+ idxs.extend(start + np.arange(row_degeneracies[c]))
if idxs:
blocks[c] = np.reshape(data[np.asarray(idxs)],
(row_degeneracies[c], column_degeneracies[-c]))
@@ -230,10 +233,12 @@ def retrieve_non_zero_diagonal_blocks_test(
data: np.ndarray, charges: List[np.ndarray],
flows: List[Union[bool, int]]) -> Dict:
"""
- Testing function, does the same as `retrieve_non_zero_diagonal_blocks`.
- This is very slow for high rank tensors with many blocks
+ For testing purposes. Produces the same output as `retrieve_non_zero_diagonal_blocks`,
+ but computes it in a different way.
+ This is currently very slow for high rank tensors with many blocks, but can be faster than
+ `retrieve_non_zero_diagonal_blocks` in certain other cases.
+ It's pretty memory heavy too.
"""
-
if len(charges) != 2:
raise ValueError("input has to be a two-dimensional symmetric matrix")
check_flows(flows)
@@ -247,11 +252,13 @@ def retrieve_non_zero_diagonal_blocks_test(
flows[1] * charges[1], return_counts=True)
#a 1d array of the net charges.
+ #this can use a lot of memory
net_charges = fuse_charges(
q1=charges[0], flow1=flows[0], q2=charges[1], flow2=flows[1])
#a 1d array containing row charges added with zero column charges
   #used to find the indices in data corresponding to a given charge
#(see below)
+ #this can be very large
tmp = np.tile(charges[0] * flows[0], len(charges[1]))
symmetric_indices = net_charges == 0
From ac3d980dcfdd74da0fb1fe0499afaaa29fe0a9dc Mon Sep 17 00:00:00 2001
From: Cutter Coryell <14116109+coryell@users.noreply.github.com>
Date: Tue, 3 Dec 2019 12:34:50 -0800
Subject: [PATCH 30/60] Remove gui directory (migrated to another repo) (#399)
---
gui/README.md | 5 -
gui/css/index.css | 78 -------
gui/index.html | 39 ----
gui/js/app.js | 46 ----
gui/js/edge.js | 88 -------
gui/js/initialState.js | 78 -------
gui/js/mixins.js | 80 -------
gui/js/node.js | 344 ---------------------------
gui/js/output.js | 84 -------
gui/js/toolbar.js | 519 -----------------------------------------
gui/js/workspace.js | 182 ---------------
11 files changed, 1543 deletions(-)
delete mode 100644 gui/README.md
delete mode 100644 gui/css/index.css
delete mode 100644 gui/index.html
delete mode 100644 gui/js/app.js
delete mode 100644 gui/js/edge.js
delete mode 100644 gui/js/initialState.js
delete mode 100644 gui/js/mixins.js
delete mode 100644 gui/js/node.js
delete mode 100644 gui/js/output.js
delete mode 100644 gui/js/toolbar.js
delete mode 100644 gui/js/workspace.js
diff --git a/gui/README.md b/gui/README.md
deleted file mode 100644
index 45c410a62..000000000
--- a/gui/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# TensorNetwork GUI
-
-⚠️ **UNDER CONSTRUCTION** 🏗️
-
-A graphical interface for defining tensor networks. Compiles to TensorNetwork Python code.
diff --git a/gui/css/index.css b/gui/css/index.css
deleted file mode 100644
index 1dcce637d..000000000
--- a/gui/css/index.css
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
-Copyright 2019 The TensorNetwork Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-.app {
- display: flex;
- flex-direction: row;
- font: normal 15px sans-serif;
-}
-
-svg.workspace {
- float: left;
- background-color: #f9f9f9;
-}
-
-svg.workspace .drag-selector {
- stroke: #fff;
- stroke-width: 2;
- fill: rgba(200, 200, 200, 0.5);
-}
-
-a.export {
- position: absolute;
-}
-
-svg text {
- user-select: none;
- -moz-user-select: none;
- -ms-user-select: none;
- -webkit-user-select: none;
-}
-
-.toolbar {
- width: 300px;
- background-color: #fff;
- box-shadow: 0 1px 3px rgba(0,0,0,0.1), 0 1px 2px rgba(0,0,0,0.2);
-}
-
-section {
- padding: 10px 20px;
- border-bottom: 1px solid #ddd;
-}
-
-.tensor-creator .svg-container {
- height: 200px;
-}
-
-.delete {
- text-align: right;
- float: right;
- color: darkred;
-}
-
-.button-holder {
- padding: 20px 0;
-}
-
-.code-output {
- position: absolute;
- top: 600px;
- width: 900px;
- padding: 10px;
-}
-
-label {
- padding: 10px;
-}
\ No newline at end of file
diff --git a/gui/index.html b/gui/index.html
deleted file mode 100644
index f235bcc6a..000000000
--- a/gui/index.html
+++ /dev/null
@@ -1,39 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- TensorNetwork GUI
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/gui/js/app.js b/gui/js/app.js
deleted file mode 100644
index 71c8dc85b..000000000
--- a/gui/js/app.js
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2019 The TensorNetwork Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-let app = new Vue({
- el: '#app',
- data: {
- state: initialState // now state object is reactive, whereas initialState is not
- },
- methods: {
- exportSVG: function(event) {
- event.preventDefault();
- let serializer = new XMLSerializer();
- let workspace = document.getElementById('workspace');
- let blob = new Blob([serializer.serializeToString(workspace)], {type:"image/svg+xml;charset=utf-8"});
- let url = URL.createObjectURL(blob);
- let link = document.createElement('a');
- link.href = url;
- link.download = "export.svg";
- document.body.appendChild(link);
- link.click();
- document.body.removeChild(link);
- }
- },
- template: `
-
Node {{node.name}} has {{node.axes.length}} axes:
-
-
- Axis {{i}} ({{axisName}})
- is connected to axis {{neighborAt(i)[1]}}
- ({{getAxis(neighborAt(i))}})
- of node {{getNode(neighborAt(i)[0]).name}}
- by edge "{{edgeNameAt(i)}}"
-
- is free
-
-
-
- `
- }
-);
diff --git a/gui/js/output.js b/gui/js/output.js
deleted file mode 100644
index 6347db6d1..000000000
--- a/gui/js/output.js
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2019 The TensorNetwork Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-Vue.component(
- 'code-output',
- {
- props: {
- state: Object
- },
- computed: {
- outputCode: function() {
- let code = `import numpy as np\nimport tensornetwork as tn\n`;
-
- code += `\n# Node definitions\n`;
- code += `# TODO: replace np.zeros with actual values\n\n`;
-
- for (let i = 0; i < this.state.nodes.length; i++) {
- let node = this.state.nodes[i];
- let values = this.placeholderValues(node);
- let axes = this.axisNames(node);
- code += `${node.name} = tn.Node(${values}, name="${node.name}"${axes})\n`;
- }
-
- code += `\n# Edge definitions\n\n`;
-
- for (let i = 0; i < this.state.edges.length; i++) {
- let edge = this.state.edges[i];
- let name = this.edgeName(edge);
- code += `tn.connect(${edge[0][0]}[${edge[0][1]}], ${edge[1][0]}[${edge[1][1]}]${name})\n`;
- }
-
- return code;
- }
- },
- methods: {
- placeholderValues: function(node) {
- let code = `np.zeros((`;
- for (let i = 0; i < node.axes.length; i++) {
- code += `0, `;
- }
- code += `))`;
- return code;
- },
- axisNames: function(node) {
- let code = `, axis_names=[`;
- let willOutput = false;
- for (let i = 0; i < node.axes.length; i++) {
- let axis = node.axes[i].name;
- if (axis) {
- willOutput = true;
- code += `"${axis}", `
- }
- else {
- code += `None, `
- }
- }
- code += `]`;
- return willOutput ? code : ``;
- },
- edgeName: function(edge) {
- let name = edge[2];
- return name ? `, name="${name}"` : ``;
- }
- },
- template: `
-
-
TensorNetwork Output
-
{{outputCode}}
-
- `
- }
-);
-
diff --git a/gui/js/toolbar.js b/gui/js/toolbar.js
deleted file mode 100644
index 823ffe150..000000000
--- a/gui/js/toolbar.js
+++ /dev/null
@@ -1,519 +0,0 @@
-// Copyright 2019 The TensorNetwork Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-Vue.component(
- 'toolbar',
- {
- props: {
- state: Object
- },
- data: function() {
- return {
- copyNodeName: '',
- }
- },
- methods: {
- deselectNode: function() {
- this.state.selectedNodes = [];
- },
- deleteNode: function(event) {
- event.preventDefault();
- let selectedName = this.state.selectedNodes[0].name;
-
- this.state.edges = this.state.edges.filter(function(edge) {
- if (edge[0][0] === selectedName || edge[1][0] === selectedName) {
- return false;
- }
- else {
- return true;
- }
- });
- this.state.nodes = this.state.nodes.filter(function(node) {
- return node.name !== selectedName;
- });
- this.selectedNodes = [];
- },
- copyNode: function(event) {
- event.preventDefault();
- let workspace = document.getElementById('workspace').getBoundingClientRect();
-
- let node = JSON.parse(JSON.stringify(this.node));
- node.name = this.copyNodeName;
- node.position = {x: workspace.width / 2, y: workspace.height / 2};
-
- this.state.nodes.push(node);
- this.state.selectedNodes = [node];
- this.copyNodeName = '';
- },
- rotate: function(angle) {
- this.node.rotation += angle;
- }
- },
- computed: {
- node: function() {
- return this.state.selectedNodes[0];
- },
- copyNodeDisabled: function() {
- return this.nameTaken || this.copyNodeName == null || this.copyNodeName === '';
- },
- nameTaken: function() {
- for (let i = 0; i < this.state.nodes.length; i++) {
- if (this.copyNodeName === this.state.nodes[i].name) {
- return true;
- }
- }
- return false;
- }
- },
- template: `
-
-
-
-
-
Selecting nodes
-
Click a node to select it for editing.
-
Drag-select or shift-click multiple nodes to drag as a group and adjust alignment and
- spacing.
- Shift-click a node in the workspace to deselect it.
-
-
-
Align Vertically
-
-
-
-
Align Horizontally
-
-
-
-
Space Vertically
-
-
-
-
Space Horizontally
-
-
-
- `
- }
-);
diff --git a/gui/js/workspace.js b/gui/js/workspace.js
deleted file mode 100644
index dea17bbca..000000000
--- a/gui/js/workspace.js
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2019 The TensorNetwork Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-Vue.component(
- 'workspace',
- {
- props: {
- state: Object
- },
- data: function() {
- return {
- width: 900,
- height: 600,
- dragSelector: {
- dragging: false,
- startX: null,
- startY: null,
- endX: null,
- endY: null
- },
- protoEdge: {
- x: null,
- y: null,
- node: null,
- axis: null,
- dragging: false
- }
- };
- },
- methods: {
- onMouseDown: function(event) {
- this.state.selectedNodes = [];
-
- document.addEventListener('mousemove', this.onMouseMove);
- document.addEventListener('mouseup', this.onMouseUp);
-
- let workspace = document.getElementById('workspace').getBoundingClientRect();
-
- this.dragSelector.dragging = true;
- this.dragSelector.startX = event.pageX - workspace.left;
- this.dragSelector.startY = event.pageY - workspace.top;
- this.dragSelector.endX = event.pageX - workspace.left;
- this.dragSelector.endY = event.pageY - workspace.top;
- },
- onMouseMove: function(event) {
- let workspace = document.getElementById('workspace').getBoundingClientRect();
-
- this.dragSelector.endX = event.pageX - workspace.left;
- this.dragSelector.endY = event.pageY - workspace.top;
- },
- onMouseUp: function() {
- document.removeEventListener('mousemove', this.onMouseMove);
- document.removeEventListener('mouseup', this.onMouseUp);
-
- this.dragSelector.dragging = false;
-
- let x1 = this.dragSelector.startX;
- let x2 = this.dragSelector.endX;
- let y1 = this.dragSelector.startY;
- let y2 = this.dragSelector.endY;
-
- this.state.selectedNodes = [];
- let selected = this.state.selectedNodes;
- this.state.nodes.forEach(function(node) {
- let x = node.position.x;
- let y = node.position.y;
- if ((x1 <= x && x <= x2) || (x2 <= x && x <= x1)) {
- if ((y1 <= y && y <= y2) || (y2 <= y && y <= y1)) {
- selected.push(node);
- }
- }
- });
- this.state.selectedNodes.sort(function(node1, node2) {
- let distance1 = (node1.position.x - x1) ** 2 + (node1.position.y - y1) ** 2;
- let distance2 = (node2.position.x - x1) ** 2 + (node2.position.y - y1) ** 2;
- return distance1 - distance2;
- })
- },
- onAxisMouseDown: function(node, axis) {
- if (this.axisOccupied(node, axis)) {
- return;
- }
- document.addEventListener('mousemove', this.dragAxis);
- document.addEventListener('mouseup', this.releaseAxisDrag);
- this.protoEdge.node = node;
- this.protoEdge.axis = axis;
- },
- dragAxis: function(event) {
- let workspace = document.getElementById('workspace').getBoundingClientRect();
- this.protoEdge.dragging = true;
- this.protoEdge.x = event.clientX - workspace.left;
- this.protoEdge.y = event.clientY - workspace.top;
- },
- releaseAxisDrag: function() {
- document.removeEventListener('mousemove', this.dragAxis);
- document.removeEventListener('mouseup', this.releaseAxisDrag);
- this.protoEdge.dragging = false;
- this.protoEdge.node = null;
- this.protoEdge.axis = null;
- },
- onAxisMouseUp: function(node, axis) {
- if (this.protoEdge.dragging) {
- if (this.axisOccupied(node, axis)) {
- return;
- }
- if (this.protoEdge.node.name === node.name
- && this.protoEdge.axis === axis) {
- return; // don't allow connection of an axis to itself
- }
- this.state.edges.push([
- [this.protoEdge.node.name, this.protoEdge.axis],
- [node.name, axis],
- null
- ])
- }
- },
- axisOccupied: function(node, axis) {
- for (let i = 0; i < this.state.edges.length; i++) {
- let edge = this.state.edges[i];
- if ((node.name === edge[0][0] && axis === edge[0][1])
- || (node.name === edge[1][0] && axis === edge[1][1])) {
- return true;
- }
- }
- return false;
- }
- },
- template: `
-
- `
- }
-);
-
-Vue.component(
- 'drag-selector',
- {
- props: {
- startX: Number,
- startY: Number,
- endX: Number,
- endY: Number,
- },
- computed: {
- x: function() {
- return Math.min(this.startX, this.endX);
- },
- y: function() {
- return Math.min(this.startY, this.endY);
- },
- width: function() {
- return Math.abs(this.startX - this.endX);
- },
- height: function() {
- return Math.abs(this.startY - this.endY);
- }
- },
- template: `
-
- `
- }
-);
From 5d2d2bad0a0162fd6f3350776a923e6559d23e4c Mon Sep 17 00:00:00 2001
From: mganahl
Date: Fri, 6 Dec 2019 21:53:08 -0500
Subject: [PATCH 31/60] a slightly more elegant code
---
tensornetwork/block_tensor/block_tensor.py | 35 +++++++++++-----------
1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 73e1063b0..d51c73dad 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -198,8 +198,7 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
unique_row_charges, row_dims = np.unique(row_charges, return_counts=True)
unique_column_charges, column_dims = np.unique(
column_charges, return_counts=True)
- common_charges = np.intersect1d(flows[0] * unique_row_charges,
- flows[1] * unique_column_charges)
+ common_charges = np.intersect1d(unique_row_charges, -unique_column_charges)
# for each matrix column find the number of non-zero elements in it
# Note: the matrix is assumed to be symmetric, i.e. only elements where
@@ -209,23 +208,23 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
row_degeneracies = dict(zip(unique_row_charges, row_dims))
column_degeneracies = dict(zip(unique_column_charges, column_dims))
blocks = {}
- #TODO: the nested loops could probably be easily moved to cython
- for c in common_charges:
- start = 0
- idxs = []
+
+ number_of_seen_elements = 0
+ idxs = {c: [] for c in common_charges}
+ for column in range(len(column_charges)):
#TODO: this for loop can be replaced with something
- #more sophisticated (i.e. using numpy lookups and sums)
- for column in range(len(column_charges)):
- charge = column_charges[column]
- if charge not in common_charges:
- continue
- if (charge + c) != 0:
- start += row_degeneracies[c]
- else:
- idxs.extend(start + np.arange(row_degeneracies[c]))
- if idxs:
- blocks[c] = np.reshape(data[np.asarray(idxs)],
- (row_degeneracies[c], column_degeneracies[-c]))
+ #more sophisticated (if.e. using numpy lookups and sums)
+ charge = column_charges[column]
+ if -charge not in common_charges:
+ continue
+
+ idxs[-charge].extend(number_of_seen_elements +
+ np.arange(row_degeneracies[-charge]))
+ number_of_seen_elements += row_degeneracies[-charge]
+
+ for c, idx in idxs.items():
+ blocks[c] = np.reshape(data[np.asarray(idx)],
+ (row_degeneracies[c], column_degeneracies[-c]))
return blocks
From 04eadf377be532ee50dd6751748f9cf0e0e38666 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Fri, 6 Dec 2019 22:14:44 -0500
Subject: [PATCH 32/60] use one more np function
---
tensornetwork/block_tensor/block_tensor.py | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index d51c73dad..803c9ce01 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -198,7 +198,9 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
unique_row_charges, row_dims = np.unique(row_charges, return_counts=True)
unique_column_charges, column_dims = np.unique(
column_charges, return_counts=True)
- common_charges = np.intersect1d(unique_row_charges, -unique_column_charges)
+ common_charges = np.intersect1d(
+ unique_row_charges, -unique_column_charges, assume_unique=True)
+ #common_charges = np.intersect1d(row_charges, -column_charges)
# for each matrix column find the number of non-zero elements in it
# Note: the matrix is assumed to be symmetric, i.e. only elements where
@@ -211,13 +213,9 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
number_of_seen_elements = 0
idxs = {c: [] for c in common_charges}
- for column in range(len(column_charges)):
- #TODO: this for loop can be replaced with something
- #more sophisticated (if.e. using numpy lookups and sums)
- charge = column_charges[column]
- if -charge not in common_charges:
- continue
-
+ mask = np.isin(column_charges, -common_charges)
+ #TODO: move this for loop to cython
+ for charge in column_charges[mask]:
idxs[-charge].extend(number_of_seen_elements +
np.arange(row_degeneracies[-charge]))
number_of_seen_elements += row_degeneracies[-charge]
From 2ea5674e426bec8d753fe1551e9f2facb642b0a1 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Fri, 6 Dec 2019 22:30:30 -0500
Subject: [PATCH 33/60] removed some crazy slow code
---
tensornetwork/block_tensor/block_tensor.py | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 803c9ce01..a23928545 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -216,12 +216,14 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
mask = np.isin(column_charges, -common_charges)
#TODO: move this for loop to cython
for charge in column_charges[mask]:
- idxs[-charge].extend(number_of_seen_elements +
- np.arange(row_degeneracies[-charge]))
+ idxs[-charge].append(
+ np.arange(number_of_seen_elements,
+ row_degeneracies[-charge] + number_of_seen_elements))
number_of_seen_elements += row_degeneracies[-charge]
for c, idx in idxs.items():
- blocks[c] = np.reshape(data[np.asarray(idx)],
+ indexes = np.concatenate(idx)
+ blocks[c] = np.reshape(data[indexes],
(row_degeneracies[c], column_degeneracies[-c]))
return blocks
From 5d8c86ad75d1b9bbe4ee994620af121193ae48aa Mon Sep 17 00:00:00 2001
From: mganahl
Date: Fri, 6 Dec 2019 22:32:45 -0500
Subject: [PATCH 34/60] faster code
---
tensornetwork/block_tensor/block_tensor.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index a23928545..a35a8941a 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -214,7 +214,6 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
number_of_seen_elements = 0
idxs = {c: [] for c in common_charges}
mask = np.isin(column_charges, -common_charges)
- #TODO: move this for loop to cython
for charge in column_charges[mask]:
idxs[-charge].append(
np.arange(number_of_seen_elements,
From 4eae410ae5bbc8bac2ca26f44c929c3ff50d765b Mon Sep 17 00:00:00 2001
From: Chase Roberts
Date: Sun, 8 Dec 2019 23:38:49 -0800
Subject: [PATCH 35/60] Update README.md (#404)
---
README.md | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 18e0b50f5..d83fcaa59 100644
--- a/README.md
+++ b/README.md
@@ -37,7 +37,11 @@ pip3 install tensornetwork
For details about the TensorNetwork API, see the [reference documentation.](https://tensornetwork.readthedocs.io)
-We also have a basic [tutorial colab](https://colab.research.google.com/drive/1Fp9DolkPT-P_Dkg_s9PLbTOKSq64EVSu) for a more "hands-on" example.
+## Tutorials
+
+[Basic API tutorial](https://colab.research.google.com/drive/1Fp9DolkPT-P_Dkg_s9PLbTOKSq64EVSu)
+
+[Tensor Networks inside Neural Networks using Keras](https://colab.research.google.com/drive/1JUh84N5sbfQYk6HWowWCGl0IZ1idQi6z)
## Basic Example
Here, we build a simple 2 node contraction.
From 04c8573cdbc652669f8fc13c95f6399877162595 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Mon, 9 Dec 2019 13:49:41 -0500
Subject: [PATCH 36/60] add return_data
---
tensornetwork/block_tensor/block_tensor.py | 45 ++++++++++++++++++----
1 file changed, 38 insertions(+), 7 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index a35a8941a..0e393f05f 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -135,13 +135,14 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray],
return charge_shape_dict
-def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
- charges: List[np.ndarray],
- flows: List[Union[bool, int]]) -> Dict:
+def retrieve_non_zero_diagonal_blocks(
+ data: np.ndarray,
+ charges: List[np.ndarray],
+ flows: List[Union[bool, int]],
+ return_data: Optional[bool] = True) -> Dict:
"""
Given the meta data and underlying data of a symmetric matrix, compute
all diagonal blocks and return them in a dict.
- !!!!!!!!! This is currently very slow!!!!!!!!!!!!
Args:
data: An np.ndarray of the data. The number of elements in `data`
has to match the number of non-zero elements defined by `charges`
@@ -153,6 +154,16 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
with values `1` or `-1`, denoting the flow direction
of the charges on each leg. `1` is inflowing, `-1` is outflowing
charge.
+ return_data: If `True`, the return dictionary maps quantum numbers `q` to
+ actual `np.ndarray` with the data. This involves a copy of data.
+ If `False`, the returned dict maps quantum numbers of a list
+ [locations, shape], where `locations` is an np.ndarray of type np.int64
+ containing the locations of the tensor elements within A.data, i.e.
+ `A.data[locations]` contains the elements belonging to the tensor with
+ quantum numbers `(q,q). `shape` is the shape of the corresponding array.
+ Returns:
+ dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray
+ or a python list of locations and shapes, depending on the value of `return_data`.
"""
#TODO: this is currently way too slow!!!!
#Run the following benchmark for testing (typical MPS use case)
@@ -209,7 +220,6 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
# get the degeneracies of each row and column charge
row_degeneracies = dict(zip(unique_row_charges, row_dims))
column_degeneracies = dict(zip(unique_column_charges, column_dims))
- blocks = {}
number_of_seen_elements = 0
idxs = {c: [] for c in common_charges}
@@ -220,10 +230,22 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray,
row_degeneracies[-charge] + number_of_seen_elements))
number_of_seen_elements += row_degeneracies[-charge]
+ blocks = {}
+ if not return_data:
+ for c, idx in idxs.items():
+ num_elements = np.sum([len(t) for t in idx])
+ indexes = np.empty(num_elements, dtype=np.int64)
+ np.concatenate(idx, out=indexes)
+ blocks[c] = [indexes, (row_degeneracies[c], column_degeneracies[-c])]
+ return blocks
+
for c, idx in idxs.items():
- indexes = np.concatenate(idx)
+ num_elements = np.sum([len(t) for t in idx])
+ indexes = np.empty(num_elements, dtype=np.int64)
+ np.concatenate(idx, out=indexes)
blocks[c] = np.reshape(data[indexes],
(row_degeneracies[c], column_degeneracies[-c]))
+
return blocks
@@ -532,12 +554,21 @@ def raise_error():
if self.shape[n] < dense_shape[n]:
raise_error()
- def get_diagonal_blocks(self) -> Dict:
+ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict:
"""
Obtain the diagonal blocks of symmetric matrix.
BlockSparseTensor has to be a matrix.
+ Args:
+ return_data: If `True`, the return dictionary maps quantum numbers `q` to
+ actual `np.ndarray` with the data. This involves a copy of data.
+ If `False`, the returned dict maps quantum numbers of a list
+ [locations, shape], where `locations` is an np.ndarray of type np.int64
+ containing the locations of the tensor elements within A.data, i.e.
+ `A.data[locations]` contains the elements belonging to the tensor with
+ quantum numbers `(q,q). `shape` is the shape of the corresponding array.
Returns:
dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix)
+
"""
if self.rank != 2:
raise ValueError(
From 33d1a40e9d678416e0f5dba9c89e833929602ced Mon Sep 17 00:00:00 2001
From: mganahl
Date: Mon, 9 Dec 2019 13:56:36 -0500
Subject: [PATCH 37/60] doc
---
tensornetwork/block_tensor/block_tensor.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 0e393f05f..e5392b7a8 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -161,6 +161,7 @@ def retrieve_non_zero_diagonal_blocks(
containing the locations of the tensor elements within A.data, i.e.
`A.data[locations]` contains the elements belonging to the tensor with
quantum numbers `(q,q). `shape` is the shape of the corresponding array.
+
Returns:
dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray
or a python list of locations and shapes, depending on the value of `return_data`.
From fb1978a9d50cb9cc91abc62e465cc56827a49eef Mon Sep 17 00:00:00 2001
From: mganahl
Date: Mon, 9 Dec 2019 14:12:11 -0500
Subject: [PATCH 38/60] bug fix
---
tensornetwork/block_tensor/block_tensor.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index e5392b7a8..1ad942293 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -576,7 +576,10 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict:
"`get_diagonal_blocks` can only be called on a matrix, but found rank={}"
.format(self.rank))
return retrieve_non_zero_diagonal_blocks(
- data=self.data, charges=self.charges, flows=self.flows)
+ data=self.data,
+ charges=self.charges,
+ flows=self.flows,
+ return_data=return_data)
def reshape(tensor: BlockSparseTensor,
From 0d4a6258a780e9034de19ac35886bc5bd59cffdf Mon Sep 17 00:00:00 2001
From: mganahl
Date: Tue, 10 Dec 2019 23:01:14 -0500
Subject: [PATCH 39/60] a little faster
---
tensornetwork/block_tensor/block_tensor.py | 80 +++++++++++++++++-----
1 file changed, 63 insertions(+), 17 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 1ad942293..053a118dd 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -207,11 +207,18 @@ def retrieve_non_zero_diagonal_blocks(
column_charges = flows[1] * charges[1] # a list of charges on each column
#get the unique charges
+ t1 = time.time()
unique_row_charges, row_dims = np.unique(row_charges, return_counts=True)
+ # print('finding unique row charges', time.time() - t1)
+ # t1 = time.time()
unique_column_charges, column_dims = np.unique(
column_charges, return_counts=True)
+ # print('finding unique column charges', time.time() - t1)
+ # t1 = time.time()
common_charges = np.intersect1d(
unique_row_charges, -unique_column_charges, assume_unique=True)
+ # print('finding unique intersections', time.time() - t1)
+ # t1 = time.time()
#common_charges = np.intersect1d(row_charges, -column_charges)
# for each matrix column find the number of non-zero elements in it
@@ -223,31 +230,70 @@ def retrieve_non_zero_diagonal_blocks(
column_degeneracies = dict(zip(unique_column_charges, column_dims))
number_of_seen_elements = 0
- idxs = {c: [] for c in common_charges}
+ #idxs = {c: [] for c in common_charges}
+ idxs = {
+ c: np.empty(
+ row_degeneracies[c] * column_degeneracies[-c], dtype=np.int64)
+ for c in common_charges
+ }
+ idxs_stops = {c: 0 for c in common_charges}
+ t1 = time.time()
mask = np.isin(column_charges, -common_charges)
- for charge in column_charges[mask]:
- idxs[-charge].append(
- np.arange(number_of_seen_elements,
- row_degeneracies[-charge] + number_of_seen_elements))
+ masked_charges = column_charges[mask]
+ print('finding mask', time.time() - t1)
+ # print(len(column_charges), len(masked_charges))
+ t1 = time.time()
+ elements = {c: np.arange(row_degeneracies[c]) for c in common_charges}
+ for charge in masked_charges:
+ # idxs[-charge].append((number_of_seen_elements,
+ # row_degeneracies[-charge] + number_of_seen_elements))
+
+ idxs[-charge][
+ idxs_stops[-charge]:idxs_stops[-charge] +
+ row_degeneracies[-charge]] = number_of_seen_elements + elements[-charge]
+
+ # np.arange(
+ # number_of_seen_elements,
+ # row_degeneracies[-charge] + number_of_seen_elements)
+
number_of_seen_elements += row_degeneracies[-charge]
+ idxs_stops[-charge] += row_degeneracies[-charge]
+ print('getting start and stop', time.time() - t1)
+ # t1 = time.time()
+ # for charge in masked_charges:
+ # tmp = np.arange(number_of_seen_elements,
+ # row_degeneracies[-charge] + number_of_seen_elements)
+ # number_of_seen_elements += row_degeneracies[-charge]
+ # print('running the partial loop', time.time() - t1)
+
+ #######################################################################################
+ #looks like this takes pretty long for rectangular matrices where shape[1] >> shape[0]
+ #it's mostly np.arange that causes the overhead.
+ # t1 = time.time()
+ # for charge in masked_charges:
+ # idxs[-charge].append(
+ # np.arange(number_of_seen_elements,
+ # row_degeneracies[-charge] + number_of_seen_elements))
+ # number_of_seen_elements += row_degeneracies[-charge]
+ # print('running the full loop', time.time() - t1)
+ #######################################################################################
blocks = {}
if not return_data:
for c, idx in idxs.items():
- num_elements = np.sum([len(t) for t in idx])
- indexes = np.empty(num_elements, dtype=np.int64)
- np.concatenate(idx, out=indexes)
- blocks[c] = [indexes, (row_degeneracies[c], column_degeneracies[-c])]
+ #num_elements = np.sum([len(t) for t in idx])
+ #indexes = np.empty(num_elements, dtype=np.int64)
+ #np.concatenate(idx, out=indexes)
+ blocks[c] = [idx, (row_degeneracies[c], column_degeneracies[-c])]
return blocks
- for c, idx in idxs.items():
- num_elements = np.sum([len(t) for t in idx])
- indexes = np.empty(num_elements, dtype=np.int64)
- np.concatenate(idx, out=indexes)
- blocks[c] = np.reshape(data[indexes],
- (row_degeneracies[c], column_degeneracies[-c]))
-
- return blocks
+ # for c, idx in idxs.items():
+ # num_elements = np.sum([len(t) for t in idx])
+ # indexes = np.empty(num_elements, dtype=np.int64)
+ # np.concatenate(idx, out=indexes)
+ # blocks[c] = np.reshape(data[indexes],
+ # (row_degeneracies[c], column_degeneracies[-c]))
+ #return blocks
def retrieve_non_zero_diagonal_blocks_test(
From 82a4148401cd852255e7debc6f76f4be0b2008e0 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Wed, 11 Dec 2019 00:05:16 -0500
Subject: [PATCH 40/60] substantial speedup
---
tensornetwork/block_tensor/block_tensor.py | 117 +++++++++++++++++++--
1 file changed, 110 insertions(+), 7 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 053a118dd..0bf4ceb62 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -206,6 +206,109 @@ def retrieve_non_zero_diagonal_blocks(
row_charges = flows[0] * charges[0] # a list of charges on each row
column_charges = flows[1] * charges[1] # a list of charges on each column
+ #get the unique charges
+ unique_row_charges, row_dims = np.unique(row_charges, return_counts=True)
+ unique_column_charges, column_dims = np.unique(
+ column_charges, return_counts=True)
+ common_charges = np.intersect1d(
+ unique_row_charges, -unique_column_charges, assume_unique=True)
+
+ row_degeneracies = dict(zip(unique_row_charges, row_dims))
+ column_degeneracies = dict(zip(unique_column_charges, column_dims))
+
+ mask = np.isin(column_charges, -common_charges)
+ masked_charges = column_charges[mask]
+ degeneracy_vector = np.empty(len(masked_charges), dtype=np.int64)
+ masks = {}
+ for c in common_charges:
+ mask = masked_charges == -c
+ masks[c] = mask
+ degeneracy_vector[mask] = row_degeneracies[c]
+ summed_degeneracies = np.cumsum(degeneracy_vector)
+ blocks = {}
+
+ for c in common_charges:
+ a = np.expand_dims(summed_degeneracies[masks[c]] - row_degeneracies[c], 0)
+ b = np.expand_dims(np.arange(row_degeneracies[c]), 1)
+ if not return_data:
+ blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])]
+ else:
+ blocks[c] = np.reshape(data[a + b],
+ (row_degeneracies[c], column_degeneracies[-c]))
+ return blocks
+
+
+def retrieve_non_zero_diagonal_blocks_bkp(
+ data: np.ndarray,
+ charges: List[np.ndarray],
+ flows: List[Union[bool, int]],
+ return_data: Optional[bool] = True) -> Dict:
+ """
+ Given the meta data and underlying data of a symmetric matrix, compute
+ all diagonal blocks and return them in a dict.
+ Args:
+ data: An np.ndarray of the data. The number of elements in `data`
+ has to match the number of non-zero elements defined by `charges`
+ and `flows`
+ charges: List of np.ndarray, one for each leg.
+ Each np.ndarray `charges[leg]` is of shape `(D[leg],)`.
+ The bond dimension `D[leg]` can vary on each leg.
+ flows: A list of integers, one for each leg,
+ with values `1` or `-1`, denoting the flow direction
+ of the charges on each leg. `1` is inflowing, `-1` is outflowing
+ charge.
+ return_data: If `True`, the return dictionary maps quantum numbers `q` to
+ actual `np.ndarray` with the data. This involves a copy of data.
+ If `False`, the returned dict maps quantum numbers of a list
+ [locations, shape], where `locations` is an np.ndarray of type np.int64
+ containing the locations of the tensor elements within A.data, i.e.
+ `A.data[locations]` contains the elements belonging to the tensor with
+ quantum numbers `(q,q). `shape` is the shape of the corresponding array.
+
+ Returns:
+ dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray
+ or a python list of locations and shapes, depending on the value of `return_data`.
+ """
+ #TODO: this is currently way too slow!!!!
+ #Run the following benchmark for testing (typical MPS use case)
+ #retrieving the blocks is ~ 10 times as slow as multiplying them
+
+ # D=4000
+ # B=10
+ # q1 = np.random.randint(0,B,D)
+ # q2 = np.asarray([0,1])
+ # q3 = np.random.randint(0,B,D)
+ # i1 = Index(charges=q1,flow=1)
+ # i2 = Index(charges=q2,flow=1)
+ # i3 = Index(charges=q3,flow=-1)
+ # indices=[i1,i2,i3]
+ # A = BlockSparseTensor.random(indices=indices, dtype=np.complex128)
+ # A.reshape((D*2, D))
+ # def multiply_blocks(blocks):
+ # for b in blocks.values():
+ # np.dot(b.T, b)
+ # t1s=[]
+ # t2s=[]
+ # for n in range(10):
+ # print(n)
+ # t1 = time.time()
+ # b = A.get_diagonal_blocks()
+ # t1s.append(time.time() - t1)
+ # t1 = time.time()
+ # multiply_blocks(b)
+ # t2s.append(time.time() - t1)
+ # print('average retrieval time', np.average(t1s))
+ # print('average multiplication time',np.average(t2s))
+
+ if len(charges) != 2:
+ raise ValueError("input has to be a two-dimensional symmetric matrix")
+ check_flows(flows)
+ if len(flows) != len(charges):
+ raise ValueError("`len(flows)` is different from `len(charges)`")
+
+ row_charges = flows[0] * charges[0] # a list of charges on each row
+ column_charges = flows[1] * charges[1] # a list of charges on each column
+
#get the unique charges
t1 = time.time()
unique_row_charges, row_dims = np.unique(row_charges, return_counts=True)
@@ -287,13 +390,13 @@ def retrieve_non_zero_diagonal_blocks(
blocks[c] = [idx, (row_degeneracies[c], column_degeneracies[-c])]
return blocks
- # for c, idx in idxs.items():
- # num_elements = np.sum([len(t) for t in idx])
- # indexes = np.empty(num_elements, dtype=np.int64)
- # np.concatenate(idx, out=indexes)
- # blocks[c] = np.reshape(data[indexes],
- # (row_degeneracies[c], column_degeneracies[-c]))
- #return blocks
+ for c, idx in idxs.items():
+ num_elements = np.sum([len(t) for t in idx])
+ indexes = np.empty(num_elements, dtype=np.int64)
+ np.concatenate(idx, out=indexes)
+ blocks[c] = np.reshape(data[indexes],
+ (row_degeneracies[c], column_degeneracies[-c]))
+ return blocks
def retrieve_non_zero_diagonal_blocks_test(
From 7bd7be72eefea3007b88b9a8273b9661ac8feecf Mon Sep 17 00:00:00 2001
From: mganahl
Date: Wed, 11 Dec 2019 00:05:40 -0500
Subject: [PATCH 41/60] renaming
---
tensornetwork/block_tensor/block_tensor.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 0bf4ceb62..c39fa38e7 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -238,7 +238,7 @@ def retrieve_non_zero_diagonal_blocks(
return blocks
-def retrieve_non_zero_diagonal_blocks_bkp(
+def retrieve_non_zero_diagonal_blocks_deprecated(
data: np.ndarray,
charges: List[np.ndarray],
flows: List[Union[bool, int]],
From d9c094b3a46d410266c7ba40abded46b8bfcac62 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Wed, 11 Dec 2019 00:14:26 -0500
Subject: [PATCH 42/60] removed todo
---
tensornetwork/block_tensor/block_tensor.py | 31 ----------------------
1 file changed, 31 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index c39fa38e7..8849f2b30 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -166,37 +166,6 @@ def retrieve_non_zero_diagonal_blocks(
dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray
or a python list of locations and shapes, depending on the value of `return_data`.
"""
- #TODO: this is currently way too slow!!!!
- #Run the following benchmark for testing (typical MPS use case)
- #retrieving the blocks is ~ 10 times as slow as multiplying them
-
- # D=4000
- # B=10
- # q1 = np.random.randint(0,B,D)
- # q2 = np.asarray([0,1])
- # q3 = np.random.randint(0,B,D)
- # i1 = Index(charges=q1,flow=1)
- # i2 = Index(charges=q2,flow=1)
- # i3 = Index(charges=q3,flow=-1)
- # indices=[i1,i2,i3]
- # A = BlockSparseTensor.random(indices=indices, dtype=np.complex128)
- # A.reshape((D*2, D))
- # def multiply_blocks(blocks):
- # for b in blocks.values():
- # np.dot(b.T, b)
- # t1s=[]
- # t2s=[]
- # for n in range(10):
- # print(n)
- # t1 = time.time()
- # b = A.get_diagonal_blocks()
- # t1s.append(time.time() - t1)
- # t1 = time.time()
- # multiply_blocks(b)
- # t2s.append(time.time() - t1)
- # print('average retrieval time', np.average(t1s))
- # print('average multiplication time',np.average(t2s))
-
if len(charges) != 2:
raise ValueError("input has to be a two-dimensional symmetric matrix")
check_flows(flows)
From 06c3f3cad6bd2537aac8fbf192e85f8576fe6ba8 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Wed, 11 Dec 2019 00:17:24 -0500
Subject: [PATCH 43/60] some comments
---
tensornetwork/block_tensor/block_tensor.py | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 8849f2b30..b3d773078 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -172,6 +172,7 @@ def retrieve_non_zero_diagonal_blocks(
if len(flows) != len(charges):
raise ValueError("`len(flows)` is different from `len(charges)`")
+ #we multiply the flows into the charges
row_charges = flows[0] * charges[0] # a list of charges on each row
column_charges = flows[1] * charges[1] # a list of charges on each column
@@ -179,14 +180,20 @@ def retrieve_non_zero_diagonal_blocks(
unique_row_charges, row_dims = np.unique(row_charges, return_counts=True)
unique_column_charges, column_dims = np.unique(
column_charges, return_counts=True)
+ #get the charges common to rows and columns (only those matter)
common_charges = np.intersect1d(
unique_row_charges, -unique_column_charges, assume_unique=True)
+ #convenience container for obtaining the degeneracies of each
+ #charge
row_degeneracies = dict(zip(unique_row_charges, row_dims))
column_degeneracies = dict(zip(unique_column_charges, column_dims))
+ # we only care about common charges
mask = np.isin(column_charges, -common_charges)
masked_charges = column_charges[mask]
+
+ #some numpy magic to get the index locations of the blocks
degeneracy_vector = np.empty(len(masked_charges), dtype=np.int64)
masks = {}
for c in common_charges:
From 426fd1a2d3b9270d63accf512eb6bd33a40654f0 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Wed, 11 Dec 2019 10:30:19 -0500
Subject: [PATCH 44/60] comments
---
tensornetwork/block_tensor/block_tensor.py | 36 +++++++++++++++++-----
1 file changed, 28 insertions(+), 8 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index b3d773078..0e751fcfc 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -184,27 +184,47 @@ def retrieve_non_zero_diagonal_blocks(
common_charges = np.intersect1d(
unique_row_charges, -unique_column_charges, assume_unique=True)
- #convenience container for obtaining the degeneracies of each
- #charge
+ #convenience container for storing the degeneracies of each
+ #row and column charge
row_degeneracies = dict(zip(unique_row_charges, row_dims))
column_degeneracies = dict(zip(unique_column_charges, column_dims))
- # we only care about common charges
+ # we only care about charges common to row and columns
mask = np.isin(column_charges, -common_charges)
- masked_charges = column_charges[mask]
+ relevant_column_charges = column_charges[mask]
#some numpy magic to get the index locations of the blocks
- degeneracy_vector = np.empty(len(masked_charges), dtype=np.int64)
+ #we generate a vector of `len(relevant_column_charges)` which,
+ #for each charge `c` in `relevant_column_charges` holds the
+ #row-degeneracy of charge `c`
+ degeneracy_vector = np.empty(len(relevant_column_charges), dtype=np.int64)
+ #for each charge `c` in `common_charges` we generate a boolean mask
+ #for indexing the positions where `relevant_column_charges` has a value of `c`.
masks = {}
for c in common_charges:
- mask = masked_charges == -c
+ mask = relevant_column_charges == -c
masks[c] = mask
degeneracy_vector[mask] = row_degeneracies[c]
- summed_degeneracies = np.cumsum(degeneracy_vector)
+
+ # the result of the cumulative sum is a vector containing
+ # the stop positions of the non-zero values of each column
+ # within the data vector.
+ # E.g. for `relevant_column_charges` = [0,1,0,0,3], and
+ # row_degeneracies[0] = 10
+ # row_degeneracies[1] = 20
+ # row_degeneracies[3] = 30
+ # we have
+ # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30]
+ # The starting positions of consecutive elements (in column-major order) in
+ # each column with charge `c=0` within the data vector are then simply obtained using
+ # masks[0] = [True, False, True, True, False]
+ # and `stop_positions[masks[0]] - row_degeneracies[0]`
+ stop_positions = np.cumsum(degeneracy_vector)
blocks = {}
for c in common_charges:
- a = np.expand_dims(summed_degeneracies[masks[c]] - row_degeneracies[c], 0)
+ #numpy broadcasting is substantially faster than kron!
+ a = np.expand_dims(stop_positions[masks[c]] - row_degeneracies[c], 0)
b = np.expand_dims(np.arange(row_degeneracies[c]), 1)
if not return_data:
blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])]
From 7f3e148c215b372b7ad60d2c5eae922a1239c529 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Wed, 11 Dec 2019 13:39:36 -0500
Subject: [PATCH 45/60] fixed some bug in reshape
---
tensornetwork/block_tensor/block_tensor.py | 227 +++++++--------------
1 file changed, 69 insertions(+), 158 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 0e751fcfc..8c06f5c83 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -135,7 +135,7 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray],
return charge_shape_dict
-def retrieve_non_zero_diagonal_blocks(
+def retrieve_non_zero_diagonal_blocks_deprecated(
data: np.ndarray,
charges: List[np.ndarray],
flows: List[Union[bool, int]],
@@ -143,6 +143,8 @@ def retrieve_non_zero_diagonal_blocks(
"""
Given the meta data and underlying data of a symmetric matrix, compute
all diagonal blocks and return them in a dict.
+ This is a deprecated version which in general performs worse than the
+ current main implementation.
Args:
data: An np.ndarray of the data. The number of elements in `data`
has to match the number of non-zero elements defined by `charges`
@@ -234,7 +236,7 @@ def retrieve_non_zero_diagonal_blocks(
return blocks
-def retrieve_non_zero_diagonal_blocks_deprecated(
+def retrieve_non_zero_diagonal_blocks(
data: np.ndarray,
charges: List[np.ndarray],
flows: List[Union[bool, int]],
@@ -265,180 +267,51 @@ def retrieve_non_zero_diagonal_blocks_deprecated(
dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray
or a python list of locations and shapes, depending on the value of `return_data`.
"""
- #TODO: this is currently way too slow!!!!
- #Run the following benchmark for testing (typical MPS use case)
- #retrieving the blocks is ~ 10 times as slow as multiplying them
-
- # D=4000
- # B=10
- # q1 = np.random.randint(0,B,D)
- # q2 = np.asarray([0,1])
- # q3 = np.random.randint(0,B,D)
- # i1 = Index(charges=q1,flow=1)
- # i2 = Index(charges=q2,flow=1)
- # i3 = Index(charges=q3,flow=-1)
- # indices=[i1,i2,i3]
- # A = BlockSparseTensor.random(indices=indices, dtype=np.complex128)
- # A.reshape((D*2, D))
- # def multiply_blocks(blocks):
- # for b in blocks.values():
- # np.dot(b.T, b)
- # t1s=[]
- # t2s=[]
- # for n in range(10):
- # print(n)
- # t1 = time.time()
- # b = A.get_diagonal_blocks()
- # t1s.append(time.time() - t1)
- # t1 = time.time()
- # multiply_blocks(b)
- # t2s.append(time.time() - t1)
- # print('average retrieval time', np.average(t1s))
- # print('average multiplication time',np.average(t2s))
-
if len(charges) != 2:
raise ValueError("input has to be a two-dimensional symmetric matrix")
check_flows(flows)
if len(flows) != len(charges):
raise ValueError("`len(flows)` is different from `len(charges)`")
+ #we multiply the flows into the charges
row_charges = flows[0] * charges[0] # a list of charges on each row
column_charges = flows[1] * charges[1] # a list of charges on each column
- #get the unique charges
- t1 = time.time()
- unique_row_charges, row_dims = np.unique(row_charges, return_counts=True)
- # print('finding unique row charges', time.time() - t1)
- # t1 = time.time()
- unique_column_charges, column_dims = np.unique(
- column_charges, return_counts=True)
- # print('finding unique column charges', time.time() - t1)
- # t1 = time.time()
- common_charges = np.intersect1d(
- unique_row_charges, -unique_column_charges, assume_unique=True)
- # print('finding unique intersections', time.time() - t1)
- # t1 = time.time()
- #common_charges = np.intersect1d(row_charges, -column_charges)
+ # we only care about charges common to rows and columns
+ common_charges = np.unique(np.intersect1d(row_charges, -column_charges))
+ row_charges = row_charges[np.isin(row_charges, common_charges)]
+ column_charges = column_charges[np.isin(column_charges, -common_charges)]
- # for each matrix column find the number of non-zero elements in it
- # Note: the matrix is assumed to be symmetric, i.e. only elements where
- # ingoing and outgoing charge are identical are non-zero
+ #get the unique charges
+ unique_row_charges, row_locations, row_dims = np.unique(
+ row_charges, return_inverse=True, return_counts=True)
+ unique_column_charges, column_locations, column_dims = np.unique(
+ column_charges, return_inverse=True, return_counts=True)
- # get the degeneracies of each row and column charge
+ #convenience container for storing the degeneracies of each
+ #row and column charge
row_degeneracies = dict(zip(unique_row_charges, row_dims))
column_degeneracies = dict(zip(unique_column_charges, column_dims))
- number_of_seen_elements = 0
- #idxs = {c: [] for c in common_charges}
- idxs = {
- c: np.empty(
- row_degeneracies[c] * column_degeneracies[-c], dtype=np.int64)
- for c in common_charges
- }
- idxs_stops = {c: 0 for c in common_charges}
- t1 = time.time()
- mask = np.isin(column_charges, -common_charges)
- masked_charges = column_charges[mask]
- print('finding mask', time.time() - t1)
- # print(len(column_charges), len(masked_charges))
- t1 = time.time()
- elements = {c: np.arange(row_degeneracies[c]) for c in common_charges}
- for charge in masked_charges:
- # idxs[-charge].append((number_of_seen_elements,
- # row_degeneracies[-charge] + number_of_seen_elements))
-
- idxs[-charge][
- idxs_stops[-charge]:idxs_stops[-charge] +
- row_degeneracies[-charge]] = number_of_seen_elements + elements[-charge]
-
- # np.arange(
- # number_of_seen_elements,
- # row_degeneracies[-charge] + number_of_seen_elements)
-
- number_of_seen_elements += row_degeneracies[-charge]
- idxs_stops[-charge] += row_degeneracies[-charge]
- print('getting start and stop', time.time() - t1)
- # t1 = time.time()
- # for charge in masked_charges:
- # tmp = np.arange(number_of_seen_elements,
- # row_degeneracies[-charge] + number_of_seen_elements)
- # number_of_seen_elements += row_degeneracies[-charge]
- # print('running the partial loop', time.time() - t1)
-
- #######################################################################################
- #looks like this takes pretty long for rectangular matrices where shape[1] >> shape[0]
- #it's mostly np.arange that causes the overhead.
- # t1 = time.time()
- # for charge in masked_charges:
- # idxs[-charge].append(
- # np.arange(number_of_seen_elements,
- # row_degeneracies[-charge] + number_of_seen_elements))
- # number_of_seen_elements += row_degeneracies[-charge]
- # print('running the full loop', time.time() - t1)
- #######################################################################################
-
- blocks = {}
- if not return_data:
- for c, idx in idxs.items():
- #num_elements = np.sum([len(t) for t in idx])
- #indexes = np.empty(num_elements, dtype=np.int64)
- #np.concatenate(idx, out=indexes)
- blocks[c] = [idx, (row_degeneracies[c], column_degeneracies[-c])]
- return blocks
-
- for c, idx in idxs.items():
- num_elements = np.sum([len(t) for t in idx])
- indexes = np.empty(num_elements, dtype=np.int64)
- np.concatenate(idx, out=indexes)
- blocks[c] = np.reshape(data[indexes],
- (row_degeneracies[c], column_degeneracies[-c]))
- return blocks
-
-
-def retrieve_non_zero_diagonal_blocks_test(
- data: np.ndarray, charges: List[np.ndarray],
- flows: List[Union[bool, int]]) -> Dict:
- """
- For testing purposes. Produces the same output as `retrieve_non_zero_diagonal_blocks`,
- but computes it in a different way.
- This is currently very slow for high rank tensors with many blocks, but can be faster than
- `retrieve_non_zero_diagonal_blocks` in certain other cases.
- It's pretty memory heavy too.
- """
- if len(charges) != 2:
- raise ValueError("input has to be a two-dimensional symmetric matrix")
- check_flows(flows)
- if len(flows) != len(charges):
- raise ValueError("`len(flows)` is different from `len(charges)`")
-
- #get the unique charges
- unique_row_charges, row_dims = np.unique(
- flows[0] * charges[0], return_counts=True)
- unique_column_charges, column_dims = np.unique(
- flows[1] * charges[1], return_counts=True)
-
- #a 1d array of the net charges.
- #this can use a lot of memory
- net_charges = fuse_charges(
- q1=charges[0], flow1=flows[0], q2=charges[1], flow2=flows[1])
- #a 1d array containing row charges added with zero column charges
- #used to find the indices of in data corresponding to a given charge
- #(see below)
- #this can be very large
- tmp = np.tile(charges[0] * flows[0], len(charges[1]))
+ #some numpy magic to get the index locations of the blocks
+ #we generate a vector of `len(relevant_column_charges)` which,
+ #for each charge `c` in `relevant_column_charges` holds the
+ #row-degeneracy of charge `c`
- symmetric_indices = net_charges == 0
- charge_lookup = tmp[symmetric_indices]
+ degeneracy_vector = row_dims[column_locations]
+ stop_positions = np.cumsum(degeneracy_vector)
- row_degeneracies = dict(zip(unique_row_charges, row_dims))
- column_degeneracies = dict(zip(unique_column_charges, column_dims))
blocks = {}
-
- common_charges = np.intersect1d(unique_row_charges, -unique_column_charges)
for c in common_charges:
- blocks[c] = np.reshape(data[charge_lookup == c],
- (row_degeneracies[c], column_degeneracies[-c]))
-
+ #numpy broadcasting is substantially faster than kron!
+ a = np.expand_dims(
+ stop_positions[column_locations == -c] - row_degeneracies[c], 0)
+ b = np.expand_dims(np.arange(row_degeneracies[c]), 1)
+ if not return_data:
+ blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])]
+ else:
+ blocks[c] = np.reshape(data[a + b],
+ (row_degeneracies[c], column_degeneracies[-c]))
return blocks
@@ -610,6 +483,16 @@ def transpose(self, order):
raise NotImplementedError('transpose is not implemented!!')
+ def reset_shape(self) -> None:
+ """
+ Bring the tensor back into its elementary shape.
+ """
+ elementary_indices = []
+ for i in self.indices:
+ elementary_indices.extend(i.get_elementary_indices())
+
+ self.indices = elementary_indices
+
def reshape(self, shape: Union[Iterable[Index], Iterable[int]]) -> None:
"""
Reshape `tensor` into `shape` in place.
@@ -677,6 +560,7 @@ def raise_error():
dense_shape,
tuple([e.dimension for e in elementary_indices])))
+ self.reset_shape()
for n in range(len(dense_shape)):
if dense_shape[n] > self.shape[n]:
while dense_shape[n] > self.shape[n]:
@@ -726,6 +610,33 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict:
flows=self.flows,
return_data=return_data)
+ def get_diagonal_blocks_deprecated(
+ self, return_data: Optional[bool] = True) -> Dict:
+ """
+ Obtain the diagonal blocks of symmetric matrix.
+ BlockSparseTensor has to be a matrix.
+ Args:
+ return_data: If `True`, the return dictionary maps quantum numbers `q` to
+ actual `np.ndarray` with the data. This involves a copy of data.
+ If `False`, the returned dict maps quantum numbers of a list
+ [locations, shape], where `locations` is an np.ndarray of type np.int64
+ containing the locations of the tensor elements within A.data, i.e.
+ `A.data[locations]` contains the elements belonging to the tensor with
+ quantum numbers `(q,q)`. `shape` is the shape of the corresponding array.
+ Returns:
+ dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix)
+
+ """
+ if self.rank != 2:
+ raise ValueError(
+ "`get_diagonal_blocks` can only be called on a matrix, but found rank={}"
+ .format(self.rank))
+ return retrieve_non_zero_diagonal_blocks_deprecated(
+ data=self.data,
+ charges=self.charges,
+ flows=self.flows,
+ return_data=return_data)
+
def reshape(tensor: BlockSparseTensor,
shape: Union[Iterable[Index], Iterable[int]]) -> BlockSparseTensor:
From 19c3fe8fc12d393aadd0ec92a6de6200feb25687 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Wed, 11 Dec 2019 13:41:31 -0500
Subject: [PATCH 46/60] comments
---
tensornetwork/block_tensor/block_tensor.py | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 8c06f5c83..cf6bb8f67 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -547,20 +547,19 @@ def reshape(self, shape: Union[Iterable[Index], Iterable[int]]) -> None:
index_copy = [i.copy() for i in self.indices]
def raise_error():
- #if this error is raised `shape` is incompatible
- #with the elementary indices. We have to reset them
- #to the original.
+ #if this error is raised then `shape` is incompatible
+ #with the elementary indices. We then reset the shape
+ #to what it was before the call to `reshape`.
self.indices = index_copy
elementary_indices = []
for i in self.indices:
elementary_indices.extend(i.get_elementary_indices())
- print(elementary_indices)
raise ValueError("The shape {} is incompatible with the "
"elementary shape {} of the tensor.".format(
dense_shape,
tuple([e.dimension for e in elementary_indices])))
- self.reset_shape()
+ self.reset_shape() #bring tensor back into its elementary shape
for n in range(len(dense_shape)):
if dense_shape[n] > self.shape[n]:
while dense_shape[n] > self.shape[n]:
From 5c8fd3e982aea8d9ca0df0f26c9369f4430fa894 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Wed, 11 Dec 2019 13:46:06 -0500
Subject: [PATCH 47/60] default value changed
---
tensornetwork/block_tensor/block_tensor.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index cf6bb8f67..cb2976a00 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -240,7 +240,7 @@ def retrieve_non_zero_diagonal_blocks(
data: np.ndarray,
charges: List[np.ndarray],
flows: List[Union[bool, int]],
- return_data: Optional[bool] = True) -> Dict:
+ return_data: Optional[bool] = False) -> Dict:
"""
Given the meta data and underlying data of a symmetric matrix, compute
all diagonal blocks and return them in a dict.
From 94c8c2cbe344d83387b6ce2b79bb063ecf48a86a Mon Sep 17 00:00:00 2001
From: mganahl
Date: Wed, 11 Dec 2019 22:09:09 -0500
Subject: [PATCH 48/60] fixed bug, old version is now faster again
---
tensornetwork/block_tensor/block_tensor.py | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index cb2976a00..57d7d8607 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -135,7 +135,7 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray],
return charge_shape_dict
-def retrieve_non_zero_diagonal_blocks_deprecated(
+def retrieve_non_zero_diagonal_blocks(
data: np.ndarray,
charges: List[np.ndarray],
flows: List[Union[bool, int]],
@@ -143,8 +143,6 @@ def retrieve_non_zero_diagonal_blocks_deprecated(
"""
Given the meta data and underlying data of a symmetric matrix, compute
all diagonal blocks and return them in a dict.
- This is a deprecated version which in general performs worse than the
- current main implementation.
Args:
data: An np.ndarray of the data. The number of elements in `data`
has to match the number of non-zero elements defined by `charges`
@@ -236,7 +234,7 @@ def retrieve_non_zero_diagonal_blocks_deprecated(
return blocks
-def retrieve_non_zero_diagonal_blocks(
+def retrieve_non_zero_diagonal_blocks_deprecated(
data: np.ndarray,
charges: List[np.ndarray],
flows: List[Union[bool, int]],
@@ -244,6 +242,9 @@ def retrieve_non_zero_diagonal_blocks(
"""
Given the meta data and underlying data of a symmetric matrix, compute
all diagonal blocks and return them in a dict.
+ This is a deprecated version which in general performs worse than the
+ current main implementation.
+
Args:
data: An np.ndarray of the data. The number of elements in `data`
has to match the number of non-zero elements defined by `charges`
@@ -287,7 +288,6 @@ def retrieve_non_zero_diagonal_blocks(
row_charges, return_inverse=True, return_counts=True)
unique_column_charges, column_locations, column_dims = np.unique(
column_charges, return_inverse=True, return_counts=True)
-
#convenience container for storing the degeneracies of each
#row and column charge
row_degeneracies = dict(zip(unique_row_charges, row_dims))
@@ -300,12 +300,11 @@ def retrieve_non_zero_diagonal_blocks(
degeneracy_vector = row_dims[column_locations]
stop_positions = np.cumsum(degeneracy_vector)
-
blocks = {}
for c in common_charges:
#numpy broadcasting is substantially faster than kron!
a = np.expand_dims(
- stop_positions[column_locations == -c] - row_degeneracies[c], 0)
+ stop_positions[column_charges == -c] - row_degeneracies[c], 0)
b = np.expand_dims(np.arange(row_degeneracies[c]), 1)
if not return_data:
blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])]
From 7eec7f05fcb05b6cd4d3be21608a2f9c711e611e Mon Sep 17 00:00:00 2001
From: mganahl
Date: Wed, 11 Dec 2019 22:13:48 -0500
Subject: [PATCH 49/60] cleaned up reshape
---
tensornetwork/block_tensor/block_tensor.py | 11 +----------
1 file changed, 1 insertion(+), 10 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 57d7d8607..a4388ce79 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -571,16 +571,7 @@ def raise_error():
if self.shape[n] > dense_shape[n]:
raise_error()
elif dense_shape[n] < self.shape[n]:
- while dense_shape[n] < self.shape[n]:
- #split index at n
- try:
- i1, i2 = split_index(self.indices.pop(n))
- except ValueError:
- raise_error()
- self.indices.insert(n, i1)
- self.indices.insert(n + 1, i2)
- if self.shape[n] < dense_shape[n]:
- raise_error()
+ raise_error()
def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict:
"""
From c188ab9ccbb56a6e9be5bbe11f25cf2e1f7b02b2 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Wed, 11 Dec 2019 22:31:15 -0500
Subject: [PATCH 50/60] started adding tests
---
tensornetwork/block_tensor/index_test.py | 46 ++++++++++++++++++++++++
1 file changed, 46 insertions(+)
create mode 100644 tensornetwork/block_tensor/index_test.py
diff --git a/tensornetwork/block_tensor/index_test.py b/tensornetwork/block_tensor/index_test.py
new file mode 100644
index 000000000..ff331a36a
--- /dev/null
+++ b/tensornetwork/block_tensor/index_test.py
@@ -0,0 +1,46 @@
+import numpy as np
+# pylint: disable=line-too-long
+from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies
+
+
+def test_fuse_charges():
+ q1 = np.asarray([0, 1])
+ q2 = np.asarray([2, 3, 4])
+ fused_charges = fuse_charges(q1, 1, q2, 1)
+ assert np.all(fused_charges == np.asarray([2, 3, 3, 4, 4, 5]))
+ fused_charges = fuse_charges(q1, 1, q2, -1)
+ assert np.all(fused_charges == np.asarray([-2, -1, -3, -2, -4, -3]))
+
+
+def test_index_fusion_mul():
+ D = 100
+ B = 4
+ dtype = np.int16
+ q1 = np.random.randint(-B // 2, B // 2 + 1,
+ D).astype(dtype) #quantum numbers on leg 1
+ q2 = np.random.randint(-B // 2, B // 2 + 1,
+ D).astype(dtype) #quantum numbers on leg 2
+ i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1
+ i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2
+
+ i12 = i1 * i2
+ assert i12.left_child is i1
+ assert i12.right_child is i2
+ assert np.all(i12.charges == fuse_charges(q1, 1, q2, 1))
+
+
+def test_index_fusion():
+ D = 100
+ B = 4
+ dtype = np.int16
+ q1 = np.random.randint(-B // 2, B // 2 + 1,
+ D).astype(dtype) #quantum numbers on leg 1
+ q2 = np.random.randint(-B // 2, B // 2 + 1,
+ D).astype(dtype) #quantum numbers on leg 2
+ i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1
+ i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2
+
+ i12 = fuse_index_pair(i1, i2)
+ assert i12.left_child is i1
+ assert i12.right_child is i2
+ assert np.all(i12.charges == fuse_charges(q1, 1, q2, 1))
From 46aeec135bbb0e024efcd77c50b2068628a20e18 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Fri, 13 Dec 2019 22:19:02 -0500
Subject: [PATCH 51/60] replace kron with broadcasting
---
tensornetwork/block_tensor/index.py | 43 +++++++++++++++++++----------
1 file changed, 28 insertions(+), 15 deletions(-)
diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py
index fc6b36cd8..326311ec1 100644
--- a/tensornetwork/block_tensor/index.py
+++ b/tensornetwork/block_tensor/index.py
@@ -36,15 +36,19 @@ def __init__(self,
name: Optional[Text] = None,
left_child: Optional["Index"] = None,
right_child: Optional["Index"] = None):
- self.charges = np.asarray(charges)
+ self._charges = np.asarray(charges)
self.flow = flow
self.left_child = left_child
self.right_child = right_child
self.name = name if name else 'index'
+ @property
+ def is_leave(self):
+ return (self.left_child is None) and (self.right_child is None)
+
@property
def dimension(self):
- return len(self.charges)
+ return np.prod([len(i.charges) for i in self.get_elementary_indices()])
def _copy_helper(self, index: "Index", copied_index: "Index") -> None:
"""
@@ -52,16 +56,17 @@ def _copy_helper(self, index: "Index", copied_index: "Index") -> None:
"""
if index.left_child != None:
left_copy = Index(
- charges=index.left_child.charges.copy(),
+ charges=copy.copy(index.left_child.charges),
flow=copy.copy(index.left_child.flow),
- name=index.left_child.name)
+ name=copy.copy(index.left_child.name))
+
copied_index.left_child = left_copy
self._copy_helper(index.left_child, left_copy)
if index.right_child != None:
right_copy = Index(
- charges=index.right_child.charges.copy(),
+ charges=copy.copy(index.right_child.charges),
flow=copy.copy(index.right_child.flow),
- name=index.right_child.name)
+ name=copy.copy(index.right_child.name))
copied_index.right_child = right_copy
self._copy_helper(index.right_child, right_copy)
@@ -72,7 +77,7 @@ def copy(self):
`Index` are copied as well.
"""
index_copy = Index(
- charges=self.charges.copy(), flow=copy.copy(self.flow), name=self.name)
+ charges=self._charges.copy(), flow=copy.copy(self.flow), name=self.name)
self._copy_helper(self, index_copy)
return index_copy
@@ -100,10 +105,20 @@ def __mul__(self, index: "Index") -> "Index":
Merge `index` and self into a single larger index.
The flow of the resulting index is set to 1.
Flows of `self` and `index` are multiplied into
- the charges upon fusing.
+ the charges upon fusing.
"""
return fuse_index_pair(self, index)
+ @property
+ def charges(self):
+ if self.is_leave:
+ return self._charges
+ fused_charges = fuse_charges(self.left_child.charges, self.left_child.flow,
+ self.right_child.charges,
+ self.right_child.flow)
+
+ return fused_charges
+
def fuse_charges(q1: Union[List, np.ndarray], flow1: int,
q2: Union[List, np.ndarray], flow2: int) -> np.ndarray:
@@ -146,7 +161,8 @@ def fuse_degeneracies(degen1: Union[List, np.ndarray],
Returns:
np.ndarray: The result of fusing `q1` with `q2`.
"""
- return np.kron(degen2, degen1)
+ return np.reshape(degen2[:, None] * degen1[None, :],
+ len(degen1) * len(degen2))
def fuse_index_pair(left_index: Index,
@@ -166,13 +182,10 @@ def fuse_index_pair(left_index: Index,
raise ValueError(
"index1 and index2 are the same object. Can only fuse distinct objects")
- fused_charges = fuse_charges(left_index.charges, left_index.flow,
- right_index.charges, right_index.flow)
+ # fused_charges = fuse_charges(left_index.charges, left_index.flow,
+ # right_index.charges, right_index.flow)
return Index(
- charges=fused_charges,
- flow=flow,
- left_child=left_index,
- right_child=right_index)
+ charges=None, flow=flow, left_child=left_index, right_child=right_index)
def fuse_indices(indices: List[Index], flow: Optional[int] = 1) -> Index:
From 6844f2cc487ab3e9f913961935f5829ba2c56945 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Sun, 15 Dec 2019 14:55:41 -0500
Subject: [PATCH 52/60] column-major -> row-major
---
tensornetwork/block_tensor/block_tensor.py | 114 ++++++++++++++++++++-
1 file changed, 110 insertions(+), 4 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index a4388ce79..db9bd393a 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -74,7 +74,7 @@ def compute_num_nonzero(charges: List[np.ndarray],
#compute the degeneracies of `fused_charges` charges
fused_degeneracies = fuse_degeneracies(accumulated_degeneracies,
leg_degeneracies)
- #compute the new degeneracies resulting of fusing the vectors of unique charges
+ #compute the new degeneracies resulting from fusing
#`accumulated_charges` and `leg_charge_2`
accumulated_charges = np.unique(fused_charges)
accumulated_degeneracies = []
@@ -107,6 +107,7 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray],
dict: Dictionary mapping a tuple of charges to a shape tuple.
Each element corresponds to a non-zero valued block of the tensor.
"""
+ #FIXME: this routine is slow
check_flows(flows)
degeneracies = []
unique_charges = []
@@ -189,6 +190,108 @@ def retrieve_non_zero_diagonal_blocks(
row_degeneracies = dict(zip(unique_row_charges, row_dims))
column_degeneracies = dict(zip(unique_column_charges, column_dims))
+ # we only care about charges common to row and columns
+ mask = np.isin(row_charges, common_charges)
+ relevant_row_charges = row_charges[mask]
+
+ #some numpy magic to get the index locations of the blocks
+ #we generate a vector of `len(relevant_row_charges)` which,
+ #for each charge `c` in `relevant_row_charges` holds the
+ #column-degeneracy of charge `c`
+ degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64)
+ #for each charge `c` in `common_charges` we generate a boolean mask
+ #for indexing the positions where `relevant_column_charges` has a value of `c`.
+ masks = {}
+ for c in common_charges:
+ mask = relevant_row_charges == c
+ masks[c] = mask
+ degeneracy_vector[mask] = column_degeneracies[-c]
+
+ # the result of the cumulative sum is a vector containing
+ # the stop positions of the non-zero values of each row
+ # within the data vector.
+ # E.g. for `relevant_row_charges` = [0,1,0,0,3], and
+ # column_degeneracies[0] = 10
+ # column_degeneracies[1] = 20
+ # column_degeneracies[3] = 30
+ # we have
+ # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30]
+ # The starting positions of consecutive elements (in row-major order) in
+ # each row with charge `c=0` within the data vector are then simply obtained using
+ # masks[0] = [True, False, True, True, False]
+ # and `stop_positions[masks[0]] - column_degeneracies[0]`
+ stop_positions = np.cumsum(degeneracy_vector)
+ blocks = {}
+
+ for c in common_charges:
+ #numpy broadcasting is substantially faster than kron!
+ a = np.expand_dims(stop_positions[masks[c]] - column_degeneracies[-c], 0)
+ b = np.expand_dims(np.arange(column_degeneracies[-c]), 1)
+ if not return_data:
+ blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])]
+ else:
+ blocks[c] = np.reshape(data[a + b],
+ (row_degeneracies[c], column_degeneracies[-c]))
+ return blocks
+
+
+def retrieve_non_zero_diagonal_blocks_column_major(
+ data: np.ndarray,
+ charges: List[np.ndarray],
+ flows: List[Union[bool, int]],
+ return_data: Optional[bool] = True) -> Dict:
+ """
+ Deprecated
+
+ Given the meta data and underlying data of a symmetric matrix, compute
+ all diagonal blocks and return them in a dict, assuming column-major
+ ordering.
+ Args:
+ data: An np.ndarray of the data. The number of elements in `data`
+ has to match the number of non-zero elements defined by `charges`
+ and `flows`
+ charges: List of np.ndarray, one for each leg.
+ Each np.ndarray `charges[leg]` is of shape `(D[leg],)`.
+ The bond dimension `D[leg]` can vary on each leg.
+ flows: A list of integers, one for each leg,
+ with values `1` or `-1`, denoting the flow direction
+ of the charges on each leg. `1` is inflowing, `-1` is outflowing
+ charge.
+ return_data: If `True`, the return dictionary maps quantum numbers `q` to
+ actual `np.ndarray` with the data. This involves a copy of data.
+ If `False`, the returned dict maps quantum numbers of a list
+ [locations, shape], where `locations` is an np.ndarray of type np.int64
+ containing the locations of the tensor elements within A.data, i.e.
+ `A.data[locations]` contains the elements belonging to the tensor with
+ quantum numbers `(q,q)`. `shape` is the shape of the corresponding array.
+
+ Returns:
+ dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray
+ or a python list of locations and shapes, depending on the value of `return_data`.
+ """
+ if len(charges) != 2:
+ raise ValueError("input has to be a two-dimensional symmetric matrix")
+ check_flows(flows)
+ if len(flows) != len(charges):
+ raise ValueError("`len(flows)` is different from `len(charges)`")
+
+ #we multiply the flows into the charges
+ row_charges = flows[0] * charges[0] # a list of charges on each row
+ column_charges = flows[1] * charges[1] # a list of charges on each column
+
+ #get the unique charges
+ unique_row_charges, row_dims = np.unique(row_charges, return_counts=True)
+ unique_column_charges, column_dims = np.unique(
+ column_charges, return_counts=True)
+ #get the charges common to rows and columns (only those matter)
+ common_charges = np.intersect1d(
+ unique_row_charges, -unique_column_charges, assume_unique=True)
+
+ #convenience container for storing the degeneracies of each
+ #row and column charge
+ row_degeneracies = dict(zip(unique_row_charges, row_dims))
+ column_degeneracies = dict(zip(unique_column_charges, column_dims))
+
# we only care about charges common to row and columns
mask = np.isin(column_charges, -common_charges)
relevant_column_charges = column_charges[mask]
@@ -240,6 +343,8 @@ def retrieve_non_zero_diagonal_blocks_deprecated(
flows: List[Union[bool, int]],
return_data: Optional[bool] = False) -> Dict:
"""
+ Deprecated
+
Given the meta data and underlying data of a symmetric matrix, compute
all diagonal blocks and return them in a dict.
This is a deprecated version which in general performs worse than the
@@ -298,14 +403,14 @@ def retrieve_non_zero_diagonal_blocks_deprecated(
#for each charge `c` in `relevant_column_charges` holds the
#row-degeneracy of charge `c`
- degeneracy_vector = row_dims[column_locations]
+ degeneracy_vector = column_dims[row_locations]
stop_positions = np.cumsum(degeneracy_vector)
blocks = {}
for c in common_charges:
#numpy broadcasting is substantially faster than kron!
a = np.expand_dims(
- stop_positions[column_charges == -c] - row_degeneracies[c], 0)
- b = np.expand_dims(np.arange(row_degeneracies[c]), 1)
+ stop_positions[row_charges == c] - column_degeneracies[-c], 0)
+ b = np.expand_dims(np.arange(column_degeneracies[-c]), 1)
if not return_data:
blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])]
else:
@@ -344,6 +449,7 @@ class BlockSparseTensor:
The class design follows Glen's proposal (Design 0).
The class currently only supports a single U(1) symmetry
and only numpy.ndarray.
+
Attributes:
* self.data: A 1d np.ndarray storing the underlying
data of the tensor
From 4f4ba935b3bd148c8d8c9d67da5c697c6d355aaa Mon Sep 17 00:00:00 2001
From: mganahl
Date: Sun, 15 Dec 2019 14:58:04 -0500
Subject: [PATCH 53/60] documentation
---
tensornetwork/block_tensor/block_tensor.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index db9bd393a..31c8298e6 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -554,8 +554,7 @@ def rank(self):
def sparse_shape(self) -> Tuple:
"""
The sparse shape of the tensor.
- Returns a copy of self.indices. Note that copying
- can be relatively expensive for deeply nested indices.
+ Returns a copy of self.indices.
Returns:
Tuple: A tuple of `Index` objects.
"""
From d583e2b5dedaa7e0de2cadcafc0382c0de95ac6d Mon Sep 17 00:00:00 2001
From: mganahl
Date: Tue, 17 Dec 2019 12:33:26 -0500
Subject: [PATCH 54/60] added function to compute unique charges and charge
degeneracies
Function avoids explicit full fusion of all legs, and instead only keeps track of the unique charges and their degeneracies upon fusion
---
tensornetwork/block_tensor/block_tensor.py | 176 +++++++++++++++++----
1 file changed, 147 insertions(+), 29 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 31c8298e6..8f3bbf023 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -34,11 +34,11 @@ def check_flows(flows) -> None:
"flows = {} contains values different from 1 and -1".format(flows))
-def compute_num_nonzero(charges: List[np.ndarray],
- flows: List[Union[bool, int]]) -> int:
+def compute_fused_charge_degeneracies(charges: List[np.ndarray],
+ flows: List[Union[bool, int]]) -> Dict:
"""
- Compute the number of non-zero elements, given the meta-data of
- a symmetric tensor.
+ For a list of charges, compute all possible fused charges resulting
+ from fusing `charges`, together with their respective degeneracies.
Args:
charges: List of np.ndarray of int, one for each leg of the
underlying tensor. Each np.ndarray `charges[leg]`
@@ -49,40 +49,64 @@ def compute_num_nonzero(charges: List[np.ndarray],
of the charges on each leg. `1` is inflowing, `-1` is outflowing
charge.
Returns:
- int: The number of non-zero elements.
+ dict: Mapping fused charges (int) to degeneracies (int)
"""
if len(charges) == 1:
- return len(np.nonzero(charges == 0)[0])
- #get unique charges and their degeneracies on each leg
- charge_degeneracies = [
- np.unique(charge, return_counts=True) for charge in charges
- ]
- accumulated_charges, accumulated_degeneracies = charge_degeneracies[0]
+ return dict(zip(np.unique(charges[0], return_counts=True)))
+
+ # get unique charges and their degeneracies on the first leg.
+ # We are fusing from "left" to "right".
+ accumulated_charges, accumulated_degeneracies = np.unique(
+ charges[0], return_counts=True)
#multiply the flow into the charges of first leg
accumulated_charges *= flows[0]
- for n in range(1, len(charge_degeneracies)):
+ for n in range(1, len(charges)):
#list of unique charges and list of their degeneracies
#on the next unfused leg of the tensor
- leg_charge, leg_degeneracies = charge_degeneracies[n]
+ leg_charges, leg_degeneracies = np.unique(charges[n], return_counts=True)
#fuse the unique charges
#Note: entries in `fused_charges` are not unique anymore.
#flow1 = 1 because the flow of leg 0 has already been
#mulitplied above
fused_charges = fuse_charges(
- q1=accumulated_charges, flow1=1, q2=leg_charge, flow2=flows[n])
+ q1=accumulated_charges, flow1=1, q2=leg_charges, flow2=flows[n])
#compute the degeneracies of `fused_charges` charges
+ #`fused_degeneracies` is a list of degeneracies such that
+ # `fused_degeneracies[n]` is the degeneracy of
+ # charge `c = fused_charges[n]`.
fused_degeneracies = fuse_degeneracies(accumulated_degeneracies,
leg_degeneracies)
#compute the new degeneracies resulting from fusing
- #`accumulated_charges` and `leg_charge_2`
+ #`accumulated_charges` and `leg_charges`
accumulated_charges = np.unique(fused_charges)
- accumulated_degeneracies = []
+ accumulated_degeneracies = np.empty(
+ len(accumulated_charges), dtype=np.int64)
for n in range(len(accumulated_charges)):
- accumulated_degeneracies.append(
- np.sum(fused_degeneracies[fused_charges == accumulated_charges[n]]))
+ accumulated_degeneracies[n] = np.sum(
+ fused_degeneracies[fused_charges == accumulated_charges[n]])
+ return accumulated_charges, accumulated_degeneracies
+
- accumulated_degeneracies = np.asarray(accumulated_degeneracies)
+def compute_num_nonzero(charges: List[np.ndarray],
+ flows: List[Union[bool, int]]) -> int:
+ """
+ Compute the number of non-zero elements, given the meta-data of
+ a symmetric tensor.
+ Args:
+ charges: List of np.ndarray of int, one for each leg of the
+ underlying tensor. Each np.ndarray `charges[leg]`
+ is of shape `(D[leg],)`.
+ The bond dimension `D[leg]` can vary on each leg.
+ flows: A list of integers, one for each leg,
+ with values `1` or `-1`, denoting the flow direction
+ of the charges on each leg. `1` is inflowing, `-1` is outflowing
+ charge.
+ Returns:
+ int: The number of non-zero elements.
+ """
+ accumulated_charges, accumulated_degeneracies = compute_fused_charge_degeneracies(
+ charges, flows)
if len(np.nonzero(accumulated_charges == 0)[0]) == 0:
raise ValueError(
"given leg-charges `charges` and flows `flows` are incompatible "
@@ -235,6 +259,105 @@ def retrieve_non_zero_diagonal_blocks(
return blocks
+def retrieve_non_zero_diagonal_blocks_test_2(
+ data: np.ndarray,
+ charges: List[np.ndarray],
+ flows: List[Union[bool, int]],
+ return_data: Optional[bool] = True) -> Dict:
+ """
+ Given the meta data and underlying data of a symmetric matrix, compute
+ all diagonal blocks and return them in a dict.
+ Args:
+ data: An np.ndarray of the data. The number of elements in `data`
+ has to match the number of non-zero elements defined by `charges`
+ and `flows`
+ charges: List of np.ndarray, one for each leg.
+ Each np.ndarray `charges[leg]` is of shape `(D[leg],)`.
+ The bond dimension `D[leg]` can vary on each leg.
+ flows: A list of integers, one for each leg,
+ with values `1` or `-1`, denoting the flow direction
+ of the charges on each leg. `1` is inflowing, `-1` is outflowing
+ charge.
+ return_data: If `True`, the return dictionary maps quantum numbers `q` to
+ actual `np.ndarray` with the data. This involves a copy of data.
+ If `False`, the returned dict maps quantum numbers to a list
+ [locations, shape], where `locations` is an np.ndarray of type np.int64
+ containing the locations of the tensor elements within A.data, i.e.
+ `A.data[locations]` contains the elements belonging to the tensor with
+ quantum numbers `(q,q)`. `shape` is the shape of the corresponding array.
+
+ Returns:
+ dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray
+ or a python list of locations and shapes, depending on the value of `return_data`.
+ """
+ if len(charges) != 2:
+ raise ValueError("input has to be a two-dimensional symmetric matrix")
+ check_flows(flows)
+ if len(flows) != len(charges):
+ raise ValueError("`len(flows)` is different from `len(charges)`")
+
+ #we multiply the flows into the charges
+ row_charges = flows[0] * charges[0] # a list of charges on each row
+ column_charges = flows[1] * charges[1] # a list of charges on each column
+
+ #get the unique charges
+ unique_row_charges, row_dims = np.unique(row_charges, return_counts=True)
+ unique_column_charges, column_dims = np.unique(
+ column_charges, return_counts=True)
+ #get the charges common to rows and columns (only those matter)
+ common_charges = np.intersect1d(
+ unique_row_charges, -unique_column_charges, assume_unique=True)
+
+ #convenience container for storing the degeneracies of each
+ #row and column charge
+ row_degeneracies = dict(zip(unique_row_charges, row_dims))
+ column_degeneracies = dict(zip(unique_column_charges, column_dims))
+
+ # we only care about charges common to row and columns
+ mask = np.isin(row_charges, common_charges)
+ relevant_row_charges = row_charges[mask]
+
+ #some numpy magic to get the index locations of the blocks
+ #we generate a vector of `len(relevant_row_charges)` which,
+ #for each charge `c` in `relevant_row_charges` holds the
+ #column-degeneracy of charge `c`
+ degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64)
+ #for each charge `c` in `common_charges` we generate a boolean mask
+ #for indexing the positions where `relevant_row_charges` has a value of `c`.
+ masks = {}
+ for c in common_charges:
+ mask = relevant_row_charges == c
+ masks[c] = mask
+ degeneracy_vector[mask] = column_degeneracies[-c]
+
+ # the result of the cumulative sum is a vector containing
+ # the stop positions of the non-zero values of each row
+ # within the data vector.
+ # E.g. for `relevant_row_charges` = [0,1,0,0,3], and
+ # column_degeneracies[0] = 10
+ # column_degeneracies[1] = 20
+ # column_degeneracies[3] = 30
+ # we have
+ # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30]
+ # The starting positions of consecutive elements (in row-major order) in
+ # each row with charge `c=0` within the data vector are then simply obtained using
+ # masks[0] = [True, False, True, True, False]
+ # and `stop_positions[masks[0]] - column_degeneracies[0]`
+ stop_positions = np.cumsum(degeneracy_vector)
+ blocks = {}
+
+ for c in common_charges:
+ #numpy broadcasting is substantially faster than kron!
+ a = np.expand_dims(stop_positions[masks[c]] - column_degeneracies[-c], 0)
+ b = np.expand_dims(np.arange(column_degeneracies[-c]), 1)
+ if not return_data:
+ blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])]
+ else:
+ blocks[c] = np.reshape(data[a + b],
+ (row_degeneracies[c], column_degeneracies[-c]))
+ return blocks
+
+
def retrieve_non_zero_diagonal_blocks_column_major(
data: np.ndarray,
charges: List[np.ndarray],
@@ -438,6 +561,10 @@ def compute_mapping_table(charges: List[np.ndarray],
with `N` the number of non-zero elements, and `r`
the rank of the tensor.
"""
+ # we are using row-major encoding, meaning that the last index
+ # is moving quickest when iterating through the linear data
+ # transposing is done by taking, for each value of the indices i_0 to i_N-2,
+ # the chunk of i_N-1 values that gives non-zero elements
tables = np.meshgrid([np.arange(c.shape[0]) for c in charges], indexing='ij')
tables = tables[::-1] #reverse the order
raise NotImplementedError()
@@ -551,23 +678,15 @@ def rank(self):
#Thduis the return type of `BlockSparseTensor.shape` is never inspected explicitly
#(apart from debugging).
@property
- def sparse_shape(self) -> Tuple:
+ def shape(self) -> Tuple:
"""
The sparse shape of the tensor.
Returns a copy of self.indices.
Returns:
Tuple: A tuple of `Index` objects.
"""
-
return tuple([i.copy() for i in self.indices])
- @property
- def shape(self) -> Tuple:
- """
- The dense shape of the tensor.
- """
- return tuple([i.dimension for i in self.indices])
-
@property
def dtype(self) -> Type[np.number]:
return self.data.dtype
@@ -584,7 +703,6 @@ def transpose(self, order):
"""
Transpose the tensor into the new order `order`
"""
-
raise NotImplementedError('transpose is not implemented!!')
def reset_shape(self) -> None:
From b9f45cbeb329fce90f642aad2e31f44ee25af632 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Tue, 17 Dec 2019 22:37:04 -0500
Subject: [PATCH 55/60] improved block finding, fixed bug in reshape
re-introduced BlockSparseTensor.dense_shape
new method for fusing charges and degeneracies (faster for very rectangular matrices)
---
tensornetwork/block_tensor/block_tensor.py | 125 ++++++++++++++++-----
1 file changed, 94 insertions(+), 31 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index 8f3bbf023..d783a71e0 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -20,7 +20,7 @@
from tensornetwork.network_components import Node, contract, contract_between
from tensornetwork.backends import backend_factory
# pylint: disable=line-too-long
-from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies
+from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charge_pair, fuse_degeneracies, fuse_charges
import numpy as np
import itertools
import time
@@ -52,7 +52,7 @@ def compute_fused_charge_degeneracies(charges: List[np.ndarray],
dict: Mapping fused charges (int) to degeneracies (int)
"""
if len(charges) == 1:
- return dict(zip(np.unique(charges[0], return_counts=True)))
+ return np.unique(charges[0], return_counts=True)
# get unique charges and their degeneracies on the first leg.
# We are fusing from "left" to "right".
@@ -69,7 +69,7 @@ def compute_fused_charge_degeneracies(charges: List[np.ndarray],
#Note: entries in `fused_charges` are not unique anymore.
#flow1 = 1 because the flow of leg 0 has already been
#mulitplied above
- fused_charges = fuse_charges(
+ fused_charges = fuse_charge_pair(
q1=accumulated_charges, flow1=1, q2=leg_charges, flow2=flows[n])
#compute the degeneracies of `fused_charges` charges
#`fused_degeneracies` is a list of degeneracies such that
@@ -160,12 +160,14 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray],
return charge_shape_dict
-def retrieve_non_zero_diagonal_blocks(
+def retrieve_non_zero_diagonal_blocks_old_version(
data: np.ndarray,
charges: List[np.ndarray],
flows: List[Union[bool, int]],
return_data: Optional[bool] = True) -> Dict:
"""
+ Deprecated: this version is about 2 times slower (worst case) than the currently used
+ implementation
Given the meta data and underlying data of a symmetric matrix, compute
all diagonal blocks and return them in a dict.
Args:
@@ -259,22 +261,35 @@ def retrieve_non_zero_diagonal_blocks(
return blocks
-def retrieve_non_zero_diagonal_blocks_test_2(
+def retrieve_non_zero_diagonal_blocks(
data: np.ndarray,
- charges: List[np.ndarray],
- flows: List[Union[bool, int]],
+ row_charges: List[Union[List, np.ndarray]],
+ column_charges: List[Union[List, np.ndarray]],
+ row_flows: List[Union[bool, int]],
+ column_flows: List[Union[bool, int]],
return_data: Optional[bool] = True) -> Dict:
"""
Given the meta data and underlying data of a symmetric matrix, compute
all diagonal blocks and return them in a dict.
+ `row_charges` and `column_charges` are lists of np.ndarray. The tensor
+ is viewed as a matrix with rows given by fusing `row_charges` and
+ columns given by fusing `column_charges`. Note that `column_charges`
+ are never explicitly fused (`row_charges` are).
Args:
data: An np.ndarray of the data. The number of elements in `data`
has to match the number of non-zero elements defined by `charges`
and `flows`
- charges: List of np.ndarray, one for each leg.
- Each np.ndarray `charges[leg]` is of shape `(D[leg],)`.
+ row_charges: List of np.ndarray, one for each leg of the row-indices.
+ Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`.
The bond dimension `D[leg]` can vary on each leg.
- flows: A list of integers, one for each leg,
+ column_charges: List of np.ndarray, one for each leg of the column-indices.
+ Each np.ndarray `column_charges[leg]` is of shape `(D[leg],)`.
+ The bond dimension `D[leg]` can vary on each leg.
+ row_flows: A list of integers, one for each entry in `row_charges`,
+ with values `1` or `-1`, denoting the flow direction
+ of the charges on each leg. `1` is inflowing, `-1` is outflowing
+ charge.
+ column_flows: A list of integers, one for each entry in `column_charges`,
with values `1` or `-1`, denoting the flow direction
of the charges on each leg. `1` is inflowing, `-1` is outflowing
charge.
@@ -290,20 +305,25 @@ def retrieve_non_zero_diagonal_blocks_test_2(
dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray
or a python list of locations and shapes, depending on the value of `return_data`.
"""
- if len(charges) != 2:
- raise ValueError("input has to be a two-dimensional symmetric matrix")
+ flows = row_flows.copy()
+ flows.extend(column_flows)
check_flows(flows)
- if len(flows) != len(charges):
- raise ValueError("`len(flows)` is different from `len(charges)`")
-
- #we multiply the flows into the charges
- row_charges = flows[0] * charges[0] # a list of charges on each row
- column_charges = flows[1] * charges[1] # a list of charges on each column
-
- #get the unique charges
- unique_row_charges, row_dims = np.unique(row_charges, return_counts=True)
- unique_column_charges, column_dims = np.unique(
- column_charges, return_counts=True)
+ if len(flows) != (len(row_charges) + len(column_charges)):
+ raise ValueError(
+ "`len(flows)` is different from `len(row_charges) + len(column_charges)`"
+ )
+
+ #since we are using row-major we have to fuse the row charges anyway.
+ fused_row_charges = fuse_charges(row_charges, row_flows)
+ #get the unique row-charges
+ unique_row_charges, row_dims = np.unique(
+ fused_row_charges, return_counts=True)
+
+ #get the unique column-charges
+ #we only care about their degeneracies, not their order; that's much faster
+ #to compute since we don't have to fuse all charges explicitly
+ unique_column_charges, column_dims = compute_fused_charge_degeneracies(
+ column_charges, column_flows)
#get the charges common to rows and columns (only those matter)
common_charges = np.intersect1d(
unique_row_charges, -unique_column_charges, assume_unique=True)
@@ -314,8 +334,8 @@ def retrieve_non_zero_diagonal_blocks_test_2(
column_degeneracies = dict(zip(unique_column_charges, column_dims))
# we only care about charges common to row and columns
- mask = np.isin(row_charges, common_charges)
- relevant_row_charges = row_charges[mask]
+ mask = np.isin(fused_row_charges, common_charges)
+ relevant_row_charges = fused_row_charges[mask]
#some numpy magic to get the index locations of the blocks
#we generate a vector of `len(relevant_row_charges) which,
@@ -677,6 +697,16 @@ def rank(self):
#```
#Thduis the return type of `BlockSparseTensor.shape` is never inspected explicitly
#(apart from debugging).
+
+ @property
+ def dense_shape(self) -> Tuple:
+ """
+ The dense shape of the tensor.
+ Returns:
+ Tuple: A tuple of `int`.
+ """
+ return tuple([i.dimension for i in self.indices])
+
@property
def shape(self) -> Tuple:
"""
@@ -758,8 +788,7 @@ def reshape(self, shape: Union[Iterable[Index], Iterable[int]]) -> None:
else:
dense_shape.append(s)
# a few simple checks
-
- if np.prod(dense_shape) != np.prod(self.shape):
+ if np.prod(dense_shape) != np.prod(self.dense_shape):
raise ValueError("A tensor with {} elements cannot be "
"reshaped into a tensor with {} elements".format(
np.prod(self.shape), np.prod(dense_shape)))
@@ -783,17 +812,17 @@ def raise_error():
self.reset_shape() #bring tensor back into its elementary shape
for n in range(len(dense_shape)):
- if dense_shape[n] > self.shape[n]:
- while dense_shape[n] > self.shape[n]:
+ if dense_shape[n] > self.dense_shape[n]:
+ while dense_shape[n] > self.dense_shape[n]:
#fuse indices
i1, i2 = self.indices.pop(n), self.indices.pop(n)
#note: the resulting flow is set to one since the flow
#is multiplied into the charges. As a result the tensor
#will then be invariant in any case.
self.indices.insert(n, fuse_index_pair(i1, i2))
- if self.shape[n] > dense_shape[n]:
+ if self.dense_shape[n] > dense_shape[n]:
raise_error()
- elif dense_shape[n] < self.shape[n]:
+ elif dense_shape[n] < self.dense_shape[n]:
raise_error()
def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict:
@@ -816,7 +845,41 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict:
raise ValueError(
"`get_diagonal_blocks` can only be called on a matrix, but found rank={}"
.format(self.rank))
+
+ row_indices = self.indices[0].get_elementary_indices()
+ column_indices = self.indices[1].get_elementary_indices()
+
return retrieve_non_zero_diagonal_blocks(
+ data=self.data,
+ row_charges=[i.charges for i in row_indices],
+ column_charges=[i.charges for i in column_indices],
+ row_flows=[i.flow for i in row_indices],
+ column_flows=[i.flow for i in column_indices],
+ return_data=return_data)
+
+ def get_diagonal_blocks_old_version(
+ self, return_data: Optional[bool] = True) -> Dict:
+ """
+ Obtain the diagonal blocks of symmetric matrix.
+ BlockSparseTensor has to be a matrix.
+ Args:
+ return_data: If `True`, the return dictionary maps quantum numbers `q` to
+ actual `np.ndarray` with the data. This involves a copy of data.
+ If `False`, the returned dict maps quantum numbers to a list
+ [locations, shape], where `locations` is an np.ndarray of type np.int64
+ containing the locations of the tensor elements within A.data, i.e.
+ `A.data[locations]` contains the elements belonging to the tensor with
+ quantum numbers `(q,q)`. `shape` is the shape of the corresponding array.
+ Returns:
+ dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix)
+
+ """
+ if self.rank != 2:
+ raise ValueError(
+ "`get_diagonal_blocks` can only be called on a matrix, but found rank={}"
+ .format(self.rank))
+
+ return retrieve_non_zero_diagonal_blocks_old_version(
data=self.data,
charges=self.charges,
flows=self.flows,
From 69309eb3853fd37fec342449540d4f9593f6c164 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Tue, 17 Dec 2019 22:37:45 -0500
Subject: [PATCH 56/60] fuse_charge_pair added
fuse_charges added
---
tensornetwork/block_tensor/index.py | 40 ++++++++++++++++++++++-------
1 file changed, 31 insertions(+), 9 deletions(-)
diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py
index 326311ec1..ffb004d1e 100644
--- a/tensornetwork/block_tensor/index.py
+++ b/tensornetwork/block_tensor/index.py
@@ -42,6 +42,9 @@ def __init__(self,
self.right_child = right_child
self.name = name if name else 'index'
+ def __repr__(self):
+ return str(self.dimension)
+
@property
def is_leave(self):
return (self.left_child is None) and (self.right_child is None)
@@ -113,22 +116,22 @@ def __mul__(self, index: "Index") -> "Index":
def charges(self):
if self.is_leave:
return self._charges
- fused_charges = fuse_charges(self.left_child.charges, self.left_child.flow,
- self.right_child.charges,
- self.right_child.flow)
+ fused_charges = fuse_charge_pair(
+ self.left_child.charges, self.left_child.flow, self.right_child.charges,
+ self.right_child.flow)
return fused_charges
-def fuse_charges(q1: Union[List, np.ndarray], flow1: int,
- q2: Union[List, np.ndarray], flow2: int) -> np.ndarray:
+def fuse_charge_pair(q1: Union[List, np.ndarray], flow1: int,
+ q2: Union[List, np.ndarray], flow2: int) -> np.ndarray:
"""
Fuse charges `q1` with charges `q2` by simple addition (valid
for U(1) charges). `q1` and `q2` typically belong to two consecutive
legs of `BlockSparseTensor`.
Given `q1 = [0,1,2]` and `q2 = [10,100]`, this returns
`[10, 11, 12, 100, 101, 102]`.
- When using column-major ordering of indices in `BlockSparseTensor`,
+ When using row-major ordering of indices in `BlockSparseTensor`,
the position of q1 should be "to the left" of the position of q2.
Args:
q1: Iterable of integers
@@ -143,6 +146,27 @@ def fuse_charges(q1: Union[List, np.ndarray], flow1: int,
len(q1) * len(q2))
+def fuse_charges(charges: List[Union[List, np.ndarray]],
+ flows: List[int]) -> np.ndarray:
+ """
+ Fuse all `charges` by simple addition (valid
+ for U(1) charges).
+ Args:
+ charges: A list of charges to be fused.
+ flows: A list of flows, one for each element in `charges`.
+ Returns:
+ np.ndarray: The result of fusing `charges`.
+ """
+ if len(charges) == 1:
+ #nothing to do
+ return charges[0]
+ fused_charges = charges[0] * flows[0]
+ for n in range(1, len(charges)):
+ fused_charges = fuse_charge_pair(
+ q1=fused_charges, flow1=1, q2=charges[n], flow2=flows[n])
+ return fused_charges
+
+
def fuse_degeneracies(degen1: Union[List, np.ndarray],
degen2: Union[List, np.ndarray]) -> np.ndarray:
"""
@@ -151,7 +175,7 @@ def fuse_degeneracies(degen1: Union[List, np.ndarray],
consecutive legs of `BlockSparseTensor`.
Given `q1 = [0,1,2]` and `q2 = [10,100]`, this returns
`[10, 11, 12, 100, 101, 102]`.
- When using column-major ordering of indices in `BlockSparseTensor`,
+ When using row-major ordering of indices in `BlockSparseTensor`,
the position of q1 should be "to the left" of the position of q2.
Args:
q1: Iterable of integers
@@ -182,8 +206,6 @@ def fuse_index_pair(left_index: Index,
raise ValueError(
"index1 and index2 are the same object. Can only fuse distinct objects")
- # fused_charges = fuse_charges(left_index.charges, left_index.flow,
- # right_index.charges, right_index.flow)
return Index(
charges=None, flow=flow, left_child=left_index, right_child=right_index)
From 5026ed36073ccad1363d62752541c5599cb0e220 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Tue, 17 Dec 2019 22:44:16 -0500
Subject: [PATCH 57/60] use is_leave
---
tensornetwork/block_tensor/index.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py
index ffb004d1e..96e8ba2d6 100644
--- a/tensornetwork/block_tensor/index.py
+++ b/tensornetwork/block_tensor/index.py
@@ -234,7 +234,7 @@ def split_index(index: Index) -> Tuple[Index, Index]:
Returns:
Tuple[Index, Index]: The result of splitting `index`.
"""
- if (not index.left_child) or (not index.right_child):
+ if index.is_leave:
raise ValueError("cannot split an elementary index")
return index.left_child, index.right_child
From 8ada65d20d0755bf535ce8f22e203f079a5685a4 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Tue, 17 Dec 2019 22:44:24 -0500
Subject: [PATCH 58/60] new tests
---
tensornetwork/block_tensor/index_test.py | 40 ++++++++++++++++++++----
1 file changed, 34 insertions(+), 6 deletions(-)
diff --git a/tensornetwork/block_tensor/index_test.py b/tensornetwork/block_tensor/index_test.py
index ff331a36a..8cdda8720 100644
--- a/tensornetwork/block_tensor/index_test.py
+++ b/tensornetwork/block_tensor/index_test.py
@@ -1,14 +1,14 @@
import numpy as np
# pylint: disable=line-too-long
-from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies
+from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies, fuse_charge_pair
-def test_fuse_charges():
+def test_fuse_charge_pair():
q1 = np.asarray([0, 1])
q2 = np.asarray([2, 3, 4])
- fused_charges = fuse_charges(q1, 1, q2, 1)
+ fused_charges = fuse_charge_pair(q1, 1, q2, 1)
assert np.all(fused_charges == np.asarray([2, 3, 3, 4, 4, 5]))
- fused_charges = fuse_charges(q1, 1, q2, -1)
+ fused_charges = fuse_charge_pair(q1, 1, q2, -1)
assert np.all(fused_charges == np.asarray([-2, -1, -3, -2, -4, -3]))
@@ -26,7 +26,7 @@ def test_index_fusion_mul():
i12 = i1 * i2
assert i12.left_child is i1
assert i12.right_child is i2
- assert np.all(i12.charges == fuse_charges(q1, 1, q2, 1))
+ assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1))
def test_index_fusion():
@@ -43,4 +43,32 @@ def test_index_fusion():
i12 = fuse_index_pair(i1, i2)
assert i12.left_child is i1
assert i12.right_child is i2
- assert np.all(i12.charges == fuse_charges(q1, 1, q2, 1))
+ assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1))
+
+
+def test_elementary_indices():
+ D = 10
+ B = 4
+ dtype = np.int16
+ q1 = np.random.randint(-B // 2, B // 2 + 1,
+ D).astype(dtype) #quantum numbers on leg 1
+ q2 = np.random.randint(-B // 2, B // 2 + 1,
+ D).astype(dtype) #quantum numbers on leg 2
+ i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1
+ i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2
+
+ i3 = Index(charges=q1, flow=1, name='index1') #index on leg 1
+ i4 = Index(charges=q2, flow=1, name='index2') #index on leg 2
+
+ i12 = i1 * i2
+ i34 = i3 * i4
+ elmt12 = i12.get_elementary_indices()
+ assert elmt12[0] is i1
+ assert elmt12[1] is i2
+
+ i1234 = i12 * i34
+ elmt1234 = i1234.get_elementary_indices()
+ assert elmt1234[0] is i1
+ assert elmt1234[1] is i2
+ assert elmt1234[2] is i3
+ assert elmt1234[3] is i4
From 11ab7c0c0ec7b4bbfcf0657b0cdb4d95b3035b74 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Wed, 18 Dec 2019 08:38:32 -0500
Subject: [PATCH 59/60] removed TODO, BlockSparseTensor.shape returns ref
instead of copy
---
tensornetwork/block_tensor/block_tensor.py | 28 +++++++---------------
1 file changed, 8 insertions(+), 20 deletions(-)
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py
index d783a71e0..515e4cdd1 100644
--- a/tensornetwork/block_tensor/block_tensor.py
+++ b/tensornetwork/block_tensor/block_tensor.py
@@ -680,24 +680,6 @@ def init_random():
def rank(self):
return len(self.indices)
- #TODO: we should consider to switch the names
- #`BlockSparseTensor.sparse_shape` and `BlockSparseTensor.shape`,
- #i.e. have `BlockSparseTensor.shape`return the sparse shape of the tensor.
- #This may be more convenient for building tensor-type and backend
- #agnostic code. For example, in MPS code we essentially never
- #explicitly set a shape to a certain value (apart from initialization).
- #That is, code like this
- #```
- #tensor = np.random.rand(10,10,10)
- #```
- #is never used. Rather one inquires shapes of tensors and
- #multiplies them to get new shapes:
- #```
- #new_tensor = reshape(tensor, [tensor.shape[0]*tensor.shape[1], tensor.shape[2]])
- #```
- #Thduis the return type of `BlockSparseTensor.shape` is never inspected explicitly
- #(apart from debugging).
-
@property
def dense_shape(self) -> Tuple:
"""
@@ -711,11 +693,10 @@ def dense_shape(self) -> Tuple:
def shape(self) -> Tuple:
"""
The sparse shape of the tensor.
- Returns a copy of self.indices.
Returns:
Tuple: A tuple of `Index` objects.
"""
- return tuple([i.copy() for i in self.indices])
+ return tuple(self.indices)
@property
def dtype(self) -> Type[np.number]:
@@ -829,6 +810,9 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict:
"""
Obtain the diagonal blocks of symmetric matrix.
BlockSparseTensor has to be a matrix.
+ For matrices with shape[0] << shape[1], this routine avoids explicit fusion
+ of column charges.
+
Args:
return_data: If `True`, the return dictionary maps quantum numbers `q` to
actual `np.ndarray` with the data. This involves a copy of data.
@@ -860,6 +844,8 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict:
def get_diagonal_blocks_old_version(
self, return_data: Optional[bool] = True) -> Dict:
"""
+ Deprecated
+
Obtain the diagonal blocks of symmetric matrix.
BlockSparseTensor has to be a matrix.
Args:
@@ -888,6 +874,8 @@ def get_diagonal_blocks_old_version(
def get_diagonal_blocks_deprecated(
self, return_data: Optional[bool] = True) -> Dict:
"""
+ Deprecated
+
Obtain the diagonal blocks of symmetric matrix.
BlockSparseTensor has to be a matrix.
Args:
From 04c40283b98d8b31e1e0dac13a0cca6a3a3def84 Mon Sep 17 00:00:00 2001
From: mganahl
Date: Wed, 18 Dec 2019 08:38:51 -0500
Subject: [PATCH 60/60] added tests
---
tensornetwork/block_tensor/index_test.py | 38 ++++++++++++++++++------
1 file changed, 29 insertions(+), 9 deletions(-)
diff --git a/tensornetwork/block_tensor/index_test.py b/tensornetwork/block_tensor/index_test.py
index 8cdda8720..780034133 100644
--- a/tensornetwork/block_tensor/index_test.py
+++ b/tensornetwork/block_tensor/index_test.py
@@ -50,15 +50,12 @@ def test_elementary_indices():
D = 10
B = 4
dtype = np.int16
- q1 = np.random.randint(-B // 2, B // 2 + 1,
- D).astype(dtype) #quantum numbers on leg 1
- q2 = np.random.randint(-B // 2, B // 2 + 1,
- D).astype(dtype) #quantum numbers on leg 2
- i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1
- i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2
-
- i3 = Index(charges=q1, flow=1, name='index1') #index on leg 1
- i4 = Index(charges=q2, flow=1, name='index2') #index on leg 2
+ q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)
+ q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)
+ i1 = Index(charges=q1, flow=1, name='index1')
+ i2 = Index(charges=q2, flow=1, name='index2')
+ i3 = Index(charges=q1, flow=1, name='index3')
+ i4 = Index(charges=q2, flow=1, name='index4')
i12 = i1 * i2
i34 = i3 * i4
@@ -72,3 +69,26 @@ def test_elementary_indices():
assert elmt1234[1] is i2
assert elmt1234[2] is i3
assert elmt1234[3] is i4
+
+
+def test_copy():
+ D = 10
+ B = 4
+ dtype = np.int16
+ q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)
+ q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)
+ i1 = Index(charges=q1, flow=1, name='index1')
+ i2 = Index(charges=q2, flow=1, name='index2')
+ i3 = Index(charges=q1, flow=-1, name='index3')
+ i4 = Index(charges=q2, flow=-1, name='index4')
+
+ i12 = i1 * i2
+ i34 = i3 * i4
+ i1234 = i12 * i34
+ i1234_copy = i1234.copy()
+
+ elmt1234 = i1234_copy.get_elementary_indices()
+ assert elmt1234[0] is not i1
+ assert elmt1234[1] is not i2
+ assert elmt1234[2] is not i3
+ assert elmt1234[3] is not i4