From 63cddc41db2b1ff45a984515dd1a299503c5f89a Mon Sep 17 00:00:00 2001 From: Martin Date: Tue, 22 Oct 2019 13:11:38 -0400 Subject: [PATCH 001/212] started implementing block-sparse tensors --- tensornetwork/block_tensor/#block_tensor.py# | 131 +++++++++++++++++++ tensornetwork/block_tensor/.#block_tensor.py | 1 + tensornetwork/block_tensor/block_tensor.py | 130 ++++++++++++++++++ tensornetwork/block_tensor/block_tensor.py~ | 95 ++++++++++++++ 4 files changed, 357 insertions(+) create mode 100644 tensornetwork/block_tensor/#block_tensor.py# create mode 120000 tensornetwork/block_tensor/.#block_tensor.py create mode 100644 tensornetwork/block_tensor/block_tensor.py create mode 100644 tensornetwork/block_tensor/block_tensor.py~ diff --git a/tensornetwork/block_tensor/#block_tensor.py# b/tensornetwork/block_tensor/#block_tensor.py# new file mode 100644 index 000000000..64356f38e --- /dev/null +++ b/tensornetwork/block_tensor/#block_tensor.py# @@ -0,0 +1,131 @@ +import collections +import numpy as np +import operator +import warnings +import os +import sys +#import qutilities as qutils +#import utils as cutils +import functools as fct +import copy + + +class AbelianIndex: + """ + An index object for creation of abelian, block-sparse tensors + `AbelianIndex` is a storage class for storing abelian quantum numbers + of a tensor index. 
`AbelianIndex` is a wrapper for a python `dict` + mapping quantum numbers to integers (the dimension of the block) + + """ + + @classmethod + def fromlist(cls, quantumnumbers, dimensions, flow, label=None): + if all(map(np.isscalar, quantumnumbers)): + QNs = list(quantumnumbers) + elif all(list(map(lambda x: not np.isscalar(x), quantumnumbers))): + QNs = list(map(np.asarray, quantumnumbers)) + else: + raise TypeError( + "TensorIndex.fromlist(cls,dictionary,flow,label=None): quantum numbers have inconsistent types" + ) + return cls(QNs, dimensions, flow, label) + + @classmethod + def fromdict(cls, dictionary, flow, label=None): + if all(map(np.isscalar, dictionary.keys())): + QNs = list(dictionary.keys()) + elif all(list(map(lambda x: not np.isscalar(x), dictionary.keys()))): + QNs = list(map(np.asarray, dictionary.keys())) + else: + raise TypeError( + "TensorIndex.fromdict(cls,dictionary,flow,label=None): quantum numbers have inconsistent types" + ) + + return cls(QNs, list(dictionary.values()), flow, label) + + def __init__(self, quantumnumbers, dimensions, flow, label=None): + if __debug__: + if len(quantumnumbers) != len(dimensions): + raise ValueError( + "TensorIndex.__init__: len(quantumnumbers)!=len(dimensions)") + + try: + unique = dict(zip(quantumnumbers, dimensions)) + except TypeError: + unique = dict(zip(map(tuple, quantumnumbers), dimensions)) + + if __debug__: + if len(unique) != len(quantumnumbers): + warnings.warn( + "in TensorIndex.__init__: found some duplicate quantum numbers; duplicates have been removed" + ) + + if __debug__: + try: + mask = np.asarray(list(map(len, unique.keys()))) == len( + list(unique.keys())[0]) + if not all(mask): + raise ValueError( + "in TensorIndex.__init__: found quantum number keys of differing length {0}\n all quantum number have to have identical length" + .format(list(map(len, unique.keys())))) + except TypeError: + if not all(list(map(np.isscalar, unique.keys()))): + raise TypeError( + "in TensorIndex.__init__: 
found quantum number keys of mixed type. all quantum numbers have to be either integers or iterables" + ) + self._data = np.array( + list(zip(map(np.asarray, unique.keys()), dimensions)), dtype=object) + + self._flow = flow + self.label = label + + def __getitem__(self, n): + return self._data[n[0], n[1]] + + def Q(self, n): + return self._data[n, 0] + + def D(self, n): + return self._data[n, 1] + + def __len__(self): + return self._data.shape[0] + + def setflow(self, val): + if val == 0: + raise ValueError( + "TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only" + ) + self._flow = np.sign(val) + return self + + def rename(self, label): + self.label = label + return self + + @property + def flow(self): + return self._flow + + @flow.setter + def flow(self, val): + if val == 0: + raise ValueError( + "TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only" + ) + self._flow = np.sign(val) + + @property + def shape(self): + return self._data.shape + + @property + def DataFrame(self): + return pd.DataFrame.from_records(data=self._data, columns=['qn', 'D']) + + def __str__(self): + print('') + print('TensorIndex, label={0}, flow={1}'.format(self.label, self.flow)) + print(self.DataFrame) + return '' diff --git a/tensornetwork/block_tensor/.#block_tensor.py b/tensornetwork/block_tensor/.#block_tensor.py new file mode 120000 index 000000000..be400a111 --- /dev/null +++ b/tensornetwork/block_tensor/.#block_tensor.py @@ -0,0 +1 @@ +martin@Mister-Pickle.local.14868 \ No newline at end of file diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py new file mode 100644 index 000000000..b070c1ec4 --- /dev/null +++ b/tensornetwork/block_tensor/block_tensor.py @@ -0,0 +1,130 @@ +import collections +import numpy as np +import operator +import warnings +import os +import sys +#import qutilities as qutils +#import utils as cutils +import functools as fct 
+import copy + + +class AbelianIndex: + """ + An index object for creation of abelian, block-sparse tensors + `AbelianIndex` is a storage class for storing abelian quantum numbers + of a tensor index. `AbelianIndex` is a wrapper for a python `dict` + mapping quantum numbers to integers (the dimension of the block) + """ + + @classmethod + def fromlist(cls, quantumnumbers, dimensions, flow, label=None): + if all(map(np.isscalar, quantumnumbers)): + QNs = list(quantumnumbers) + elif all(list(map(lambda x: not np.isscalar(x), quantumnumbers))): + QNs = list(map(np.asarray, quantumnumbers)) + else: + raise TypeError( + "TensorIndex.fromlist(cls,dictionary,flow,label=None): quantum numbers have inconsistent types" + ) + return cls(QNs, dimensions, flow, label) + + @classmethod + def fromdict(cls, dictionary, flow, label=None): + if all(map(np.isscalar, dictionary.keys())): + QNs = list(dictionary.keys()) + elif all(list(map(lambda x: not np.isscalar(x), dictionary.keys()))): + QNs = list(map(np.asarray, dictionary.keys())) + else: + raise TypeError( + "TensorIndex.fromdict(cls,dictionary,flow,label=None): quantum numbers have inconsistent types" + ) + + return cls(QNs, list(dictionary.values()), flow, label) + + def __init__(self, quantumnumbers, dimensions, flow, label=None): + if __debug__: + if len(quantumnumbers) != len(dimensions): + raise ValueError( + "TensorIndex.__init__: len(quantumnumbers)!=len(dimensions)") + + try: + unique = dict(zip(quantumnumbers, dimensions)) + except TypeError: + unique = dict(zip(map(tuple, quantumnumbers), dimensions)) + + if __debug__: + if len(unique) != len(quantumnumbers): + warnings.warn( + "in TensorIndex.__init__: found some duplicate quantum numbers; duplicates have been removed" + ) + + if __debug__: + try: + mask = np.asarray(list(map(len, unique.keys()))) == len( + list(unique.keys())[0]) + if not all(mask): + raise ValueError( + "in TensorIndex.__init__: found quantum number keys of differing length {0}\n all quantum 
number have to have identical length" + .format(list(map(len, unique.keys())))) + except TypeError: + if not all(list(map(np.isscalar, unique.keys()))): + raise TypeError( + "in TensorIndex.__init__: found quantum number keys of mixed type. all quantum numbers have to be either integers or iterables" + ) + self._data = np.array( + list(zip(map(np.asarray, unique.keys()), dimensions)), dtype=object) + + self._flow = flow + self.label = label + + def __getitem__(self, n): + return self._data[n[0], n[1]] + + def Q(self, n): + return self._data[n, 0] + + def D(self, n): + return self._data[n, 1] + + def __len__(self): + return self._data.shape[0] + + def setflow(self, val): + if val == 0: + raise ValueError( + "TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only" + ) + self._flow = np.sign(val) + return self + + def rename(self, label): + self.label = label + return self + + @property + def flow(self): + return self._flow + + @flow.setter + def flow(self, val): + if val == 0: + raise ValueError( + "TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only" + ) + self._flow = np.sign(val) + + @property + def shape(self): + return self._data.shape + + @property + def DataFrame(self): + return pd.DataFrame.from_records(data=self._data, columns=['qn', 'D']) + + def __str__(self): + print('') + print('TensorIndex, label={0}, flow={1}'.format(self.label, self.flow)) + print(self.DataFrame) + return '' diff --git a/tensornetwork/block_tensor/block_tensor.py~ b/tensornetwork/block_tensor/block_tensor.py~ new file mode 100644 index 000000000..90e848755 --- /dev/null +++ b/tensornetwork/block_tensor/block_tensor.py~ @@ -0,0 +1,95 @@ +class TensorIndex(object): + @classmethod + def fromlist(cls,quantumnumbers,dimensions,flow,label=None): + if all(map(np.isscalar,quantumnumbers)): + QNs=list(quantumnumbers) + elif all(list(map(lambda x: not np.isscalar(x),quantumnumbers))): + 
QNs=list(map(np.asarray,quantumnumbers)) + else: + raise TypeError("TensorIndex.fromlist(cls,dictionary,flow,label=None): quantum numbers have inconsistent types") + return cls(QNs,dimensions,flow,label) + + @classmethod + def fromdict(cls,dictionary,flow,label=None): + if all(map(np.isscalar,dictionary.keys())): + QNs=list(dictionary.keys()) + elif all(list(map(lambda x: not np.isscalar(x),dictionary.keys()))): + QNs=list(map(np.asarray,dictionary.keys())) + else: + raise TypeError("TensorIndex.fromdict(cls,dictionary,flow,label=None): quantum numbers have inconsistent types") + + return cls(QNs,list(dictionary.values()),flow,label) + + def __init__(self,quantumnumbers,dimensions,flow,label=None): + if __debug__: + if len(quantumnumbers)!=len(dimensions): + raise ValueError("TensorIndex.__init__: len(quantumnumbers)!=len(dimensions)") + + try: + unique=dict(zip(quantumnumbers,dimensions)) + except TypeError: + unique=dict(zip(map(tuple,quantumnumbers),dimensions)) + + + if __debug__: + if len(unique)!=len(quantumnumbers): + warnings.warn("in TensorIndex.__init__: found some duplicate quantum numbers; duplicates have been removed") + + if __debug__: + try: + mask=np.asarray(list(map(len,unique.keys())))==len(list(unique.keys())[0]) + if not all(mask): + raise ValueError("in TensorIndex.__init__: found quantum number keys of differing length {0}\n all quantum number have to have identical length".format(list(map(len,unique.keys())))) + except TypeError: + if not all(list(map(np.isscalar,unique.keys()))): + raise TypeError("in TensorIndex.__init__: found quantum number keys of mixed type. 
all quantum numbers have to be either integers or iterables") + self._data=np.array(list(zip(map(np.asarray,unique.keys()),dimensions)),dtype=object) + + self._flow=flow + self.label=label + + def __getitem__(self,n): + return self._data[n[0],n[1]] + + def Q(self,n): + return self._data[n,0] + + def D(self,n): + return self._data[n,1] + + def __len__(self): + return self._data.shape[0] + + def setflow(self,val): + if val==0: + raise ValueError("TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only") + self._flow=np.sign(val) + return self + + def rename(self,label): + self.label=label + return self + + @property + def flow(self): + return self._flow + + @flow.setter + def flow(self,val): + if val==0: + raise ValueError("TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only") + self._flow=np.sign(val) + + @property + def shape(self): + return self._data.shape + + @property + def DataFrame(self): + return pd.DataFrame.from_records(data=self._data,columns=['qn','D']) + + def __str__(self): + print('') + print('TensorIndex, label={0}, flow={1}'.format(self.label,self.flow)) + print(self.DataFrame) + return '' From 2910b27316180b720445693676f5b62067e75388 Mon Sep 17 00:00:00 2001 From: Martin Date: Tue, 22 Oct 2019 13:12:13 -0400 Subject: [PATCH 002/212] removed files --- tensornetwork/block_tensor/#block_tensor.py# | 131 ------------------- tensornetwork/block_tensor/block_tensor.py~ | 95 -------------- 2 files changed, 226 deletions(-) delete mode 100644 tensornetwork/block_tensor/#block_tensor.py# delete mode 100644 tensornetwork/block_tensor/block_tensor.py~ diff --git a/tensornetwork/block_tensor/#block_tensor.py# b/tensornetwork/block_tensor/#block_tensor.py# deleted file mode 100644 index 64356f38e..000000000 --- a/tensornetwork/block_tensor/#block_tensor.py# +++ /dev/null @@ -1,131 +0,0 @@ -import collections -import numpy as np -import operator -import warnings -import os -import 
sys -#import qutilities as qutils -#import utils as cutils -import functools as fct -import copy - - -class AbelianIndex: - """ - An index object for creation of abelian, block-sparse tensors - `AbelianIndex` is a storage class for storing abelian quantum numbers - of a tensor index. `AbelianIndex` is a wrapper for a python `dict` - mapping quantum numbers to integers (the dimension of the block) - - """ - - @classmethod - def fromlist(cls, quantumnumbers, dimensions, flow, label=None): - if all(map(np.isscalar, quantumnumbers)): - QNs = list(quantumnumbers) - elif all(list(map(lambda x: not np.isscalar(x), quantumnumbers))): - QNs = list(map(np.asarray, quantumnumbers)) - else: - raise TypeError( - "TensorIndex.fromlist(cls,dictionary,flow,label=None): quantum numbers have inconsistent types" - ) - return cls(QNs, dimensions, flow, label) - - @classmethod - def fromdict(cls, dictionary, flow, label=None): - if all(map(np.isscalar, dictionary.keys())): - QNs = list(dictionary.keys()) - elif all(list(map(lambda x: not np.isscalar(x), dictionary.keys()))): - QNs = list(map(np.asarray, dictionary.keys())) - else: - raise TypeError( - "TensorIndex.fromdict(cls,dictionary,flow,label=None): quantum numbers have inconsistent types" - ) - - return cls(QNs, list(dictionary.values()), flow, label) - - def __init__(self, quantumnumbers, dimensions, flow, label=None): - if __debug__: - if len(quantumnumbers) != len(dimensions): - raise ValueError( - "TensorIndex.__init__: len(quantumnumbers)!=len(dimensions)") - - try: - unique = dict(zip(quantumnumbers, dimensions)) - except TypeError: - unique = dict(zip(map(tuple, quantumnumbers), dimensions)) - - if __debug__: - if len(unique) != len(quantumnumbers): - warnings.warn( - "in TensorIndex.__init__: found some duplicate quantum numbers; duplicates have been removed" - ) - - if __debug__: - try: - mask = np.asarray(list(map(len, unique.keys()))) == len( - list(unique.keys())[0]) - if not all(mask): - raise ValueError( - "in 
TensorIndex.__init__: found quantum number keys of differing length {0}\n all quantum number have to have identical length" - .format(list(map(len, unique.keys())))) - except TypeError: - if not all(list(map(np.isscalar, unique.keys()))): - raise TypeError( - "in TensorIndex.__init__: found quantum number keys of mixed type. all quantum numbers have to be either integers or iterables" - ) - self._data = np.array( - list(zip(map(np.asarray, unique.keys()), dimensions)), dtype=object) - - self._flow = flow - self.label = label - - def __getitem__(self, n): - return self._data[n[0], n[1]] - - def Q(self, n): - return self._data[n, 0] - - def D(self, n): - return self._data[n, 1] - - def __len__(self): - return self._data.shape[0] - - def setflow(self, val): - if val == 0: - raise ValueError( - "TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only" - ) - self._flow = np.sign(val) - return self - - def rename(self, label): - self.label = label - return self - - @property - def flow(self): - return self._flow - - @flow.setter - def flow(self, val): - if val == 0: - raise ValueError( - "TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only" - ) - self._flow = np.sign(val) - - @property - def shape(self): - return self._data.shape - - @property - def DataFrame(self): - return pd.DataFrame.from_records(data=self._data, columns=['qn', 'D']) - - def __str__(self): - print('') - print('TensorIndex, label={0}, flow={1}'.format(self.label, self.flow)) - print(self.DataFrame) - return '' diff --git a/tensornetwork/block_tensor/block_tensor.py~ b/tensornetwork/block_tensor/block_tensor.py~ deleted file mode 100644 index 90e848755..000000000 --- a/tensornetwork/block_tensor/block_tensor.py~ +++ /dev/null @@ -1,95 +0,0 @@ -class TensorIndex(object): - @classmethod - def fromlist(cls,quantumnumbers,dimensions,flow,label=None): - if all(map(np.isscalar,quantumnumbers)): - QNs=list(quantumnumbers) - elif 
all(list(map(lambda x: not np.isscalar(x),quantumnumbers))): - QNs=list(map(np.asarray,quantumnumbers)) - else: - raise TypeError("TensorIndex.fromlist(cls,dictionary,flow,label=None): quantum numbers have inconsistent types") - return cls(QNs,dimensions,flow,label) - - @classmethod - def fromdict(cls,dictionary,flow,label=None): - if all(map(np.isscalar,dictionary.keys())): - QNs=list(dictionary.keys()) - elif all(list(map(lambda x: not np.isscalar(x),dictionary.keys()))): - QNs=list(map(np.asarray,dictionary.keys())) - else: - raise TypeError("TensorIndex.fromdict(cls,dictionary,flow,label=None): quantum numbers have inconsistent types") - - return cls(QNs,list(dictionary.values()),flow,label) - - def __init__(self,quantumnumbers,dimensions,flow,label=None): - if __debug__: - if len(quantumnumbers)!=len(dimensions): - raise ValueError("TensorIndex.__init__: len(quantumnumbers)!=len(dimensions)") - - try: - unique=dict(zip(quantumnumbers,dimensions)) - except TypeError: - unique=dict(zip(map(tuple,quantumnumbers),dimensions)) - - - if __debug__: - if len(unique)!=len(quantumnumbers): - warnings.warn("in TensorIndex.__init__: found some duplicate quantum numbers; duplicates have been removed") - - if __debug__: - try: - mask=np.asarray(list(map(len,unique.keys())))==len(list(unique.keys())[0]) - if not all(mask): - raise ValueError("in TensorIndex.__init__: found quantum number keys of differing length {0}\n all quantum number have to have identical length".format(list(map(len,unique.keys())))) - except TypeError: - if not all(list(map(np.isscalar,unique.keys()))): - raise TypeError("in TensorIndex.__init__: found quantum number keys of mixed type. 
all quantum numbers have to be either integers or iterables") - self._data=np.array(list(zip(map(np.asarray,unique.keys()),dimensions)),dtype=object) - - self._flow=flow - self.label=label - - def __getitem__(self,n): - return self._data[n[0],n[1]] - - def Q(self,n): - return self._data[n,0] - - def D(self,n): - return self._data[n,1] - - def __len__(self): - return self._data.shape[0] - - def setflow(self,val): - if val==0: - raise ValueError("TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only") - self._flow=np.sign(val) - return self - - def rename(self,label): - self.label=label - return self - - @property - def flow(self): - return self._flow - - @flow.setter - def flow(self,val): - if val==0: - raise ValueError("TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only") - self._flow=np.sign(val) - - @property - def shape(self): - return self._data.shape - - @property - def DataFrame(self): - return pd.DataFrame.from_records(data=self._data,columns=['qn','D']) - - def __str__(self): - print('') - print('TensorIndex, label={0}, flow={1}'.format(self.label,self.flow)) - print(self.DataFrame) - return '' From 46f1e10144675c4c123bd6b9fc48deeb0c07bc8a Mon Sep 17 00:00:00 2001 From: Martin Date: Fri, 25 Oct 2019 08:51:48 -0400 Subject: [PATCH 003/212] working on AbelianIndex --- tensornetwork/block_tensor/block_tensor.py | 79 ++++++++-------------- 1 file changed, 30 insertions(+), 49 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index b070c1ec4..f99bef8f8 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -8,6 +8,7 @@ #import utils as cutils import functools as fct import copy +from typing import Iterable, Optional, Text class AbelianIndex: @@ -15,68 +16,50 @@ class AbelianIndex: An index object for creation of abelian, block-sparse tensors `AbelianIndex` is a 
storage class for storing abelian quantum numbers of a tensor index. `AbelianIndex` is a wrapper for a python `dict` - mapping quantum numbers to integers (the dimension of the block) + mapping quantum numbers to integers (the dimension of the block). + `AbelianIndex` can have a `flow` denoting the "flow of charge". """ @classmethod - def fromlist(cls, quantumnumbers, dimensions, flow, label=None): + def fromlist(cls, + quantumnumbers: Iterable, + dimensions: Iterable[int], + flow: int, + label: Optional[Text] = None): if all(map(np.isscalar, quantumnumbers)): QNs = list(quantumnumbers) elif all(list(map(lambda x: not np.isscalar(x), quantumnumbers))): - QNs = list(map(np.asarray, quantumnumbers)) + QNs = list(map(np.asarray, + quantumnumbers)) #turn quantum numbers into np.ndarray else: - raise TypeError( - "TensorIndex.fromlist(cls,dictionary,flow,label=None): quantum numbers have inconsistent types" - ) + raise TypeError("quantum numbers have inconsistent types") return cls(QNs, dimensions, flow, label) - @classmethod - def fromdict(cls, dictionary, flow, label=None): - if all(map(np.isscalar, dictionary.keys())): - QNs = list(dictionary.keys()) - elif all(list(map(lambda x: not np.isscalar(x), dictionary.keys()))): - QNs = list(map(np.asarray, dictionary.keys())) - else: - raise TypeError( - "TensorIndex.fromdict(cls,dictionary,flow,label=None): quantum numbers have inconsistent types" - ) - - return cls(QNs, list(dictionary.values()), flow, label) - - def __init__(self, quantumnumbers, dimensions, flow, label=None): - if __debug__: - if len(quantumnumbers) != len(dimensions): - raise ValueError( - "TensorIndex.__init__: len(quantumnumbers)!=len(dimensions)") - + def __init__(self, + quantumnumbers: Iterable, + dimensions: Iterable[int], + flow: int, + label: Optional[Text] = None): try: unique = dict(zip(quantumnumbers, dimensions)) except TypeError: unique = dict(zip(map(tuple, quantumnumbers), dimensions)) - - if __debug__: - if len(unique) != 
len(quantumnumbers): - warnings.warn( - "in TensorIndex.__init__: found some duplicate quantum numbers; duplicates have been removed" + if len(unique) != len(quantumnumbers): + warnings.warn("removing duplicate quantum numbers") + try: + lengths = np.asarray([len(k) for k in unique.keys()]) + if not all(lengths == lenghts[0]) + raise ValueError( + "quantum number have differing lengths") + except TypeError: + if not all(list(map(np.isscalar, unique.keys()))): + raise TypeError( + "quantum numbers have mixed types") ) - - if __debug__: - try: - mask = np.asarray(list(map(len, unique.keys()))) == len( - list(unique.keys())[0]) - if not all(mask): - raise ValueError( - "in TensorIndex.__init__: found quantum number keys of differing length {0}\n all quantum number have to have identical length" - .format(list(map(len, unique.keys())))) - except TypeError: - if not all(list(map(np.isscalar, unique.keys()))): - raise TypeError( - "in TensorIndex.__init__: found quantum number keys of mixed type. 
all quantum numbers have to be either integers or iterables" - ) - self._data = np.array( + self.data = np.array( list(zip(map(np.asarray, unique.keys()), dimensions)), dtype=object) - self._flow = flow + self.flow = flow self.label = label def __getitem__(self, n): @@ -96,12 +79,10 @@ def setflow(self, val): raise ValueError( "TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only" ) - self._flow = np.sign(val) - return self + self.flow = 1 if val > 0 else -1 def rename(self, label): self.label = label - return self @property def flow(self): From 91f32a684ec62d2f0a5577422caffaaf4ee1631d Mon Sep 17 00:00:00 2001 From: Martin Date: Fri, 29 Nov 2019 09:19:03 -0500 Subject: [PATCH 004/212] working in block sparisty --- tensornetwork/block_tensor/.#block_tensor.py | 1 - tensornetwork/block_tensor/block_tensor.py | 374 ++++++++++++++----- 2 files changed, 276 insertions(+), 99 deletions(-) delete mode 120000 tensornetwork/block_tensor/.#block_tensor.py diff --git a/tensornetwork/block_tensor/.#block_tensor.py b/tensornetwork/block_tensor/.#block_tensor.py deleted file mode 120000 index be400a111..000000000 --- a/tensornetwork/block_tensor/.#block_tensor.py +++ /dev/null @@ -1 +0,0 @@ -martin@Mister-Pickle.local.14868 \ No newline at end of file diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index f99bef8f8..57cb611b4 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -1,111 +1,289 @@ -import collections +# Copyright 2019 The TensorNetwork Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +from tensornetwork.network_components import Node, contract, contract_between +# pylint: disable=line-too-long +from tensornetwork.backends import backend_factory + import numpy as np -import operator -import warnings -import os -import sys -#import qutilities as qutils -#import utils as cutils -import functools as fct -import copy -from typing import Iterable, Optional, Text - - -class AbelianIndex: +import itertools +from typing import List, Union, Any, Tuple, Type, Optional +Tensor = Any + + +def check_flows(flows) -> None: + if (set(flows) != {1}) and (set(flows) != {-1}) and (set(flows) != {-1, 1}): + raise ValueError( + "flows = {} contains values different from 1 and -1".format(flows)) + + if set(flows) == {1}: + raise ValueError("flows = {} has no outflowing index".format(flows)) + if set(flows) == {-1}: + raise ValueError("flows = {} has no inflowing index".format(flows)) + + +def fuse_quantum_numbers(q1: Union[List, np.ndarray], + q2: Union[List, np.ndarray]) -> np.ndarray: """ - An index object for creation of abelian, block-sparse tensors - `AbelianIndex` is a storage class for storing abelian quantum numbers - of a tensor index. `AbelianIndex` is a wrapper for a python `dict` - mapping quantum numbers to integers (the dimension of the block). - `AbelianIndex` can have a `flow` denoting the "flow of charge". + Fuse quantumm numbers `q1` with `q2` by simple addition (valid + for U(1) charges). 
`q1` and `q2` are typically two consecutive + elements of `BlockSparseTensor.quantum_numbers`. + Given `q1 = [0,1,2]` and `q2 = [10,100]`, this returns + `[10, 11, 12, 100, 101, 102]`. + When using column-major ordering of indices in `BlockSparseTensor`, + the position of q1 should be "to the left" of the position of q2. + Args: + q1: Iterable of integers + q2: Iterable of integers + Returns: + np.ndarray: The result of fusing `q1` with `q2`. """ + return np.reshape( + np.asarray(q2)[:, None] + np.asarray(q1)[None, :], + len(q1) * len(q2)) - @classmethod - def fromlist(cls, - quantumnumbers: Iterable, - dimensions: Iterable[int], - flow: int, - label: Optional[Text] = None): - if all(map(np.isscalar, quantumnumbers)): - QNs = list(quantumnumbers) - elif all(list(map(lambda x: not np.isscalar(x), quantumnumbers))): - QNs = list(map(np.asarray, - quantumnumbers)) #turn quantum numbers into np.ndarray - else: - raise TypeError("quantum numbers have inconsistent types") - return cls(QNs, dimensions, flow, label) - - def __init__(self, - quantumnumbers: Iterable, - dimensions: Iterable[int], - flow: int, - label: Optional[Text] = None): - try: - unique = dict(zip(quantumnumbers, dimensions)) - except TypeError: - unique = dict(zip(map(tuple, quantumnumbers), dimensions)) - if len(unique) != len(quantumnumbers): - warnings.warn("removing duplicate quantum numbers") - try: - lengths = np.asarray([len(k) for k in unique.keys()]) - if not all(lengths == lenghts[0]) - raise ValueError( - "quantum number have differing lengths") - except TypeError: - if not all(list(map(np.isscalar, unique.keys()))): - raise TypeError( - "quantum numbers have mixed types") - ) - self.data = np.array( - list(zip(map(np.asarray, unique.keys()), dimensions)), dtype=object) - - self.flow = flow - self.label = label - - def __getitem__(self, n): - return self._data[n[0], n[1]] - - def Q(self, n): - return self._data[n, 0] - - def D(self, n): - return self._data[n, 1] - - def __len__(self): - 
return self._data.shape[0] - - def setflow(self, val): - if val == 0: + +def reshape(symmetric_tensor: BlockSparseTensor, shape: Tuple[int]): + n = 0 + for s in shape: + dim = 1 + while dim != s: + dim *= symmetric_tensor.shape[n] + n += 1 + if dim > s: raise ValueError( - "TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only" - ) - self.flow = 1 if val > 0 else -1 + 'desired shape = {} is incompatible with the symmetric tensor shape = {}' + .format(shape, symmetric_tensor.shape)) - def rename(self, label): - self.label = label - @property - def flow(self): - return self._flow +def compute_num_nonzero(quantum_numbers: List[np.ndarray], + flows: List[Union[bool, int]]) -> int: + """ + Compute the number of non-zero elements, given the meta-data of + a symmetric tensor. + Args: + quantum_numbers: List of np.ndarray, one for each leg. + Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`. + The bond dimension `D[leg]` can vary on each leg, the number of + symmetries `Q` has to be the same for each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + Returns: + dict: Dictionary mapping a tuple of charges to a shape tuple. + Each element corresponds to a non-zero valued block of the tensor. + """ + + if len(quantum_numbers) == 1: + return len(quantum_numbers) + net_charges = flows[0] * quantum_numbers[0] + for i in range(1, len(flows)): + net_charges = np.reshape( + flows[i] * quantum_numbers[i][:, None] + net_charges[None, :], + len(quantum_numbers[i]) * len(net_charges)) + + return len(np.nonzero(net_charges == 0)[0]) + + +def compute_nonzero_block_shapes(quantum_numbers: List[np.ndarray], + flows: List[Union[bool, int]]) -> dict: + """ + Compute the blocks and their respective shapes of a symmetric tensor, + given its meta-data. 
+ Args: + quantum_numbers: List of np.ndarray, one for each leg. + Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`. + The bond dimension `D[leg]` can vary on each leg, the number of + symmetries `Q` has to be the same for each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + Returns: + dict: Dictionary mapping a tuple of charges to a shape tuple. + Each element corresponds to a non-zero valued block of the tensor. + """ + check_flows(flows) + degeneracies = [] + charges = [] + rank = len(quantum_numbers) + #find the unique quantum numbers and their degeneracy on each leg + for leg in range(rank): + c, d = np.unique(quantum_numbers[leg], return_counts=True) + charges.append(c) + degeneracies.append(dict(zip(c, d))) + + #find all possible combination of leg charges c0, c1, ... + #(with one charge per leg 0, 1, ...) + #such that sum([flows[0] * c0, flows[1] * c1, ...]) = 0 + charge_combinations = list( + itertools.product( + *[charges[leg] * flows[leg] for leg in range(len(charges))])) + net_charges = np.array([np.sum(c) for c in charge_combinations]) + zero_idxs = np.nonzero(net_charges == 0)[0] + charge_shape_dict = {} + for idx in zero_idxs: + charges = charge_combinations[idx] + shapes = [ + degeneracies[leg][flows[leg] * charges[leg]] for leg in range(rank) + ] + charge_shape_dict[charges] = shapes + return charge_shape_dict + + +def retrieve_non_zero_diagonal_blocks(data: np.ndarray, + quantum_numbers: List[np.ndarray], + flows: List[Union[bool, int]]) -> dict: + """ + Given the meta data and underlying data of a symmetric matrix, compute + all diagonal blocks and return them in a dict. + Args: + data: An np.ndarray of the data. The number of elements in `data` + has to match the number of non-zero elements defined by `quantum_numbers` + and `flows` + quantum_numbers: List of np.ndarray, one for each leg. 
+ Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`. + The bond dimension `D[leg]` can vary on each leg, the number of + symmetries `Q` has to be the same for each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + """ + if len(quantum_numbers) != 2: + raise ValueError("input has to be a two-dimensional symmetric matrix") + check_flows(flows) + if len(flows) != len(quantum_numbers): + raise ValueError("`len(flows)` is different from `len(quantum_numbers)`") + + row_charges = quantum_numbers[0] # a list of charges on each row + column_charges = quantum_numbers[1] # a list of charges on each column + # for each matrix column find the number of non-zero elements in it + # Note: the matrix is assumed to be symmetric, i.e. only elements where + # ingoing and outgoing charge are identical are non-zero + num_non_zero = [len(np.nonzero(row_charges == c)[0]) for c in column_charges] + + #get the unique charges + #Note: row and column unique charges are the same due to symmetry + unique_charges, row_dims = np.unique(row_charges, return_counts=True) + _, column_dims = np.unique(column_charges, return_counts=True) + + # get the degenaricies of each row and column charge + row_degeneracies = dict(zip(unique_charges, row_dims)) + column_degeneracies = dict(zip(unique_charges, column_dims)) + blocks = {} + for c in unique_charges: + start = 0 + idxs = [] + for column in range(len(column_charges)): + charge = column_charges[column] + if charge != c: + start += num_non_zero[column] + else: + idxs.extend(start + np.arange(num_non_zero[column])) - @flow.setter - def flow(self, val): - if val == 0: + blocks[c] = np.reshape(data[idxs], + (row_degeneracies[c], column_degeneracies[c])) + return blocks + + +class BlockSparseTensor: + """ + Minimal class implementation of block sparsity. 
+ The class currently onluy supports a single U(1) symmetry. + Currently only nump.ndarray is supported. + Attributes: + * self.data: A 1d np.ndarray storing the underlying + data of the tensor + * self.quantum_numbers: A list of `np.ndarray` of shape + (D, Q), where D is the bond dimension, and Q the number + of different symmetries (this is 1 for now). + * self.flows: A list of integers of length `k`. + `self.flows` determines the flows direction of charges + on each leg of the tensor. A value of `-1` denotes + outflowing charge, a value of `1` denotes inflowing + charge. + + The tensor data is stored in self.data, a 1d np.ndarray. + """ + + def __init__(self, data: np.ndarray, quantum_numbers: List[np.ndarray], + flows: List[Union[bool, int]]) -> None: + """ + Args: + data: An np.ndarray of the data. The number of elements in `data` + has to match the number of non-zero elements defined by `quantum_numbers` + and `flows` + quantum_numbers: List of np.ndarray, one for each leg. + Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`. + The bond dimension `D[leg]` can vary on each leg, the number of + symmetries `Q` has to be the same for each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. 
+ """ + block_dict = compute_nonzero_block_shapes(quantum_numbers, flows) + num_non_zero_elements = np.sum([np.prod(s) for s in block_dict.values()]) + + if num_non_zero_elements != len(data.flat): + raise ValueError("number of tensor elements defined " + "by `quantum_numbers` is different from" + " len(data)={}".format(len(data.flat))) + check_flows(flows) + if len(flows) != len(quantum_numbers): raise ValueError( - "TensorIndex.flow: trying to set TensorIndex._flow to 0, use positive or negative integers only" - ) - self._flow = np.sign(val) + "len(flows) = {} is different from len(quantum_numbers) = {}".format( + len(flows), len(quantum_numbers))) + self.data = np.asarray(data.flat) #do not copy data + self.flows = flows + self.quantum_numbers = quantum_numbers + + @classmethod + def randn(cls, + quantum_numbers: List[np.ndarray], + flows: List[Union[bool, int]], + dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": + """ + Initialize a random symmetric tensor from random normal distribution. + Args: + quantum_numbers: List of np.ndarray, one for each leg. + Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`. + The bond dimension `D[leg]` can vary on each leg, the number of + symmetries `Q` has to be the same for each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + dtype: An optional numpy dtype. 
The dtype of the tensor + Returns: + BlockSparseTensor + """ + num_non_zero_elements = compute_num_nonzero(quantum_numbers, flows) + backend = backend_factory.get_backend('numpy') + data = backend.randn((num_non_zero_elements,), dtype=dtype) + return cls(data=data, quantum_numbers=quantum_numbers, flows=flows) @property - def shape(self): - return self._data.shape + def shape(self) -> Tuple: + return tuple([np.shape(q)[0] for q in self.quantum_numbers]) @property - def DataFrame(self): - return pd.DataFrame.from_records(data=self._data, columns=['qn', 'D']) - - def __str__(self): - print('') - print('TensorIndex, label={0}, flow={1}'.format(self.label, self.flow)) - print(self.DataFrame) - return '' + def dtype(self) -> Type[np.number]: + return self.data.dtype From 58feabc58ac38fff39e0540d6ef7469e636452c6 Mon Sep 17 00:00:00 2001 From: Martin Date: Fri, 29 Nov 2019 22:08:20 -0500 Subject: [PATCH 005/212] added reshape and lots of other stuff --- tensornetwork/block_tensor/block_tensor.py | 215 +++++++++++---------- 1 file changed, 108 insertions(+), 107 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 57cb611b4..9d78479a8 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -19,7 +19,7 @@ from tensornetwork.network_components import Node, contract, contract_between # pylint: disable=line-too-long from tensornetwork.backends import backend_factory - +from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index import numpy as np import itertools from typing import List, Union, Any, Tuple, Type, Optional @@ -31,54 +31,15 @@ def check_flows(flows) -> None: raise ValueError( "flows = {} contains values different from 1 and -1".format(flows)) - if set(flows) == {1}: - raise ValueError("flows = {} has no outflowing index".format(flows)) - if set(flows) == {-1}: - raise ValueError("flows = {} has no inflowing 
index".format(flows)) - -def fuse_quantum_numbers(q1: Union[List, np.ndarray], - q2: Union[List, np.ndarray]) -> np.ndarray: - """ - Fuse quantumm numbers `q1` with `q2` by simple addition (valid - for U(1) charges). `q1` and `q2` are typically two consecutive - elements of `BlockSparseTensor.quantum_numbers`. - Given `q1 = [0,1,2]` and `q2 = [10,100]`, this returns - `[10, 11, 12, 100, 101, 102]`. - When using column-major ordering of indices in `BlockSparseTensor`, - the position of q1 should be "to the left" of the position of q2. - Args: - q1: Iterable of integers - q2: Iterable of integers - Returns: - np.ndarray: The result of fusing `q1` with `q2`. - """ - return np.reshape( - np.asarray(q2)[:, None] + np.asarray(q1)[None, :], - len(q1) * len(q2)) - - -def reshape(symmetric_tensor: BlockSparseTensor, shape: Tuple[int]): - n = 0 - for s in shape: - dim = 1 - while dim != s: - dim *= symmetric_tensor.shape[n] - n += 1 - if dim > s: - raise ValueError( - 'desired shape = {} is incompatible with the symmetric tensor shape = {}' - .format(shape, symmetric_tensor.shape)) - - -def compute_num_nonzero(quantum_numbers: List[np.ndarray], +def compute_num_nonzero(charges: List[np.ndarray], flows: List[Union[bool, int]]) -> int: """ Compute the number of non-zero elements, given the meta-data of a symmetric tensor. Args: - quantum_numbers: List of np.ndarray, one for each leg. - Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`. + charges: List of np.ndarray, one for each leg. + Each np.ndarray `charges[leg]` is of shape `(D[leg], Q)`. The bond dimension `D[leg]` can vary on each leg, the number of symmetries `Q` has to be the same for each leg. flows: A list of integers, one for each leg, @@ -90,25 +51,25 @@ def compute_num_nonzero(quantum_numbers: List[np.ndarray], Each element corresponds to a non-zero valued block of the tensor. 
""" - if len(quantum_numbers) == 1: - return len(quantum_numbers) - net_charges = flows[0] * quantum_numbers[0] + if len(charges) == 1: + return len(charges) + net_charges = flows[0] * charges[0] for i in range(1, len(flows)): net_charges = np.reshape( - flows[i] * quantum_numbers[i][:, None] + net_charges[None, :], - len(quantum_numbers[i]) * len(net_charges)) + flows[i] * charges[i][:, None] + net_charges[None, :], + len(charges[i]) * len(net_charges)) return len(np.nonzero(net_charges == 0)[0]) -def compute_nonzero_block_shapes(quantum_numbers: List[np.ndarray], +def compute_nonzero_block_shapes(charges: List[np.ndarray], flows: List[Union[bool, int]]) -> dict: """ Compute the blocks and their respective shapes of a symmetric tensor, given its meta-data. Args: - quantum_numbers: List of np.ndarray, one for each leg. - Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`. + charges: List of np.ndarray, one for each leg. + Each np.ndarray `charges[leg]` is of shape `(D[leg], Q)`. The bond dimension `D[leg]` can vary on each leg, the number of symmetries `Q` has to be the same for each leg. flows: A list of integers, one for each leg, @@ -121,44 +82,44 @@ def compute_nonzero_block_shapes(quantum_numbers: List[np.ndarray], """ check_flows(flows) degeneracies = [] - charges = [] - rank = len(quantum_numbers) + unique_charges = [] + rank = len(charges) #find the unique quantum numbers and their degeneracy on each leg for leg in range(rank): - c, d = np.unique(quantum_numbers[leg], return_counts=True) - charges.append(c) + c, d = np.unique(charges[leg], return_counts=True) + unique_charges.append(c) degeneracies.append(dict(zip(c, d))) #find all possible combination of leg charges c0, c1, ... #(with one charge per leg 0, 1, ...) 
#such that sum([flows[0] * c0, flows[1] * c1, ...]) = 0 charge_combinations = list( - itertools.product( - *[charges[leg] * flows[leg] for leg in range(len(charges))])) + itertools.product(*[ + unique_charges[leg] * flows[leg] + for leg in range(len(unique_charges)) + ])) net_charges = np.array([np.sum(c) for c in charge_combinations]) zero_idxs = np.nonzero(net_charges == 0)[0] charge_shape_dict = {} for idx in zero_idxs: - charges = charge_combinations[idx] - shapes = [ - degeneracies[leg][flows[leg] * charges[leg]] for leg in range(rank) - ] - charge_shape_dict[charges] = shapes + c = charge_combinations[idx] + shapes = [degeneracies[leg][flows[leg] * c[leg]] for leg in range(rank)] + charge_shape_dict[c] = shapes return charge_shape_dict def retrieve_non_zero_diagonal_blocks(data: np.ndarray, - quantum_numbers: List[np.ndarray], + charges: List[np.ndarray], flows: List[Union[bool, int]]) -> dict: """ Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. Args: data: An np.ndarray of the data. The number of elements in `data` - has to match the number of non-zero elements defined by `quantum_numbers` + has to match the number of non-zero elements defined by `charges` and `flows` - quantum_numbers: List of np.ndarray, one for each leg. - Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`. + charges: List of np.ndarray, one for each leg. + Each np.ndarray `charges[leg]` is of shape `(D[leg], Q)`. The bond dimension `D[leg]` can vary on each leg, the number of symmetries `Q` has to be the same for each leg. flows: A list of integers, one for each leg, @@ -166,14 +127,14 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, of the charges on each leg. `1` is inflowing, `-1` is outflowing charge. 
""" - if len(quantum_numbers) != 2: + if len(charges) != 2: raise ValueError("input has to be a two-dimensional symmetric matrix") check_flows(flows) - if len(flows) != len(quantum_numbers): - raise ValueError("`len(flows)` is different from `len(quantum_numbers)`") + if len(flows) != len(charges): + raise ValueError("`len(flows)` is different from `len(charges)`") - row_charges = quantum_numbers[0] # a list of charges on each row - column_charges = quantum_numbers[1] # a list of charges on each column + row_charges = charges[0] # a list of charges on each row + column_charges = charges[1] # a list of charges on each column # for each matrix column find the number of non-zero elements in it # Note: the matrix is assumed to be symmetric, i.e. only elements where # ingoing and outgoing charge are identical are non-zero @@ -211,7 +172,7 @@ class BlockSparseTensor: Attributes: * self.data: A 1d np.ndarray storing the underlying data of the tensor - * self.quantum_numbers: A list of `np.ndarray` of shape + * self.charges: A list of `np.ndarray` of shape (D, Q), where D is the bond dimension, and Q the number of different symmetries (this is 1 for now). * self.flows: A list of integers of length `k`. @@ -223,67 +184,107 @@ class BlockSparseTensor: The tensor data is stored in self.data, a 1d np.ndarray. """ - def __init__(self, data: np.ndarray, quantum_numbers: List[np.ndarray], - flows: List[Union[bool, int]]) -> None: + def __init__(self, data: np.ndarray, indices: List[Index]) -> None: """ Args: data: An np.ndarray of the data. The number of elements in `data` - has to match the number of non-zero elements defined by `quantum_numbers` + has to match the number of non-zero elements defined by `charges` and `flows` - quantum_numbers: List of np.ndarray, one for each leg. - Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`. - The bond dimension `D[leg]` can vary on each leg, the number of - symmetries `Q` has to be the same for each leg. 
- flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. + indices: List of `Index` objecst, one for each leg. """ - block_dict = compute_nonzero_block_shapes(quantum_numbers, flows) - num_non_zero_elements = np.sum([np.prod(s) for s in block_dict.values()]) + self.indices = indices + check_flows(self.flows) + num_non_zero_elements = compute_num_nonzero(self.charges, self.flows) if num_non_zero_elements != len(data.flat): raise ValueError("number of tensor elements defined " - "by `quantum_numbers` is different from" + "by `charges` is different from" " len(data)={}".format(len(data.flat))) - check_flows(flows) - if len(flows) != len(quantum_numbers): - raise ValueError( - "len(flows) = {} is different from len(quantum_numbers) = {}".format( - len(flows), len(quantum_numbers))) + self.data = np.asarray(data.flat) #do not copy data - self.flows = flows - self.quantum_numbers = quantum_numbers @classmethod - def randn(cls, - quantum_numbers: List[np.ndarray], - flows: List[Union[bool, int]], + def randn(cls, indices: List[Index], dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": """ Initialize a random symmetric tensor from random normal distribution. Args: - quantum_numbers: List of np.ndarray, one for each leg. - Each np.ndarray `quantum_numbers[leg]` is of shape `(D[leg], Q)`. - The bond dimension `D[leg]` can vary on each leg, the number of - symmetries `Q` has to be the same for each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. + indices: List of `Index` objecst, one for each leg. dtype: An optional numpy dtype. 
The dtype of the tensor Returns: BlockSparseTensor """ - num_non_zero_elements = compute_num_nonzero(quantum_numbers, flows) + charges = [i.charges for i in indices] + flows = [i.flow for i in indices] + num_non_zero_elements = compute_num_nonzero(charges, flows) backend = backend_factory.get_backend('numpy') data = backend.randn((num_non_zero_elements,), dtype=dtype) - return cls(data=data, quantum_numbers=quantum_numbers, flows=flows) + return cls(data=data, indices=indices) @property def shape(self) -> Tuple: - return tuple([np.shape(q)[0] for q in self.quantum_numbers]) + return tuple([i.dimension for i in self.indices]) @property def dtype(self) -> Type[np.number]: return self.data.dtype + + @property + def flows(self): + return [i.flow for i in self.indices] + + @property + def charges(self): + return [i.charges for i in self.indices] + + +def reshape(tensor: BlockSparseTensor, shape: Tuple[int]): + # a few simple checks + if np.prod(shape) != np.prod(tensor.shape): + raise ValueError("A tensor with {} elements cannot be " + "reshaped into a tensor with {} elements".format( + np.prod(tensor.shape), np.prod(shape))) + #copy indices + result = BlockSparseTensor( + data=tensor.data.copy(), indices=[i.copy() for i in tensor.indices]) + + for n in range(len(shape)): + if shape[n] > result.shape[n]: + while shape[n] > result.shape[n]: + #fuse indices + i1, i2 = result.indices.pop(n), result.indices.pop(n) + #note: the resulting flow is set to one since the flow + #is multiplied into the charges. As a result the tensor + #will then be invariant in any case. 
+ result.indices.insert(n, fuse_index_pair(i1, i2)) + if result.shape[n] > shape[n]: + elementary_indices = [] + for i in tensor.indices: + elementary_indices.extend(i.get_elementary_indices()) + raise ValueError("The shape {} is incompatible with the " + "elementary shape {} of the tensor.".format( + shape, + tuple( + [e.dimension for e in elementary_indices]))) + + elif shape[n] < result.shape[n]: + while shape[n] < result.shape[n]: + #split index at n + try: + i1, i2 = split_index(result.indices.pop(n)) + except ValueError: + elementary_indices = [] + for i in tensor.indices: + elementary_indices.extend(i.get_elementary_indices()) + raise ValueError("The shape {} is incompatible with the " + "elementary shape {} of the tensor.".format( + shape, + tuple( + [e.dimension for e in elementary_indices]))) + result.indices.insert(n, i1) + result.indices.insert(n + 1, i2) + if result.shape[n] < shape[n]: + raise ValueError( + "shape {} is incompatible with the elementary result shape".format( + shape)) + return result From 307f2dc4ed005eaf318661a166553b8ec1cb5d3f Mon Sep 17 00:00:00 2001 From: Martin Date: Fri, 29 Nov 2019 22:08:41 -0500 Subject: [PATCH 006/212] added Index, an index type for symmetric tensors --- tensornetwork/block_tensor/index.py | 177 ++++++++++++++++++++++++++++ 1 file changed, 177 insertions(+) create mode 100644 tensornetwork/block_tensor/index.py diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py new file mode 100644 index 000000000..d17203dfb --- /dev/null +++ b/tensornetwork/block_tensor/index.py @@ -0,0 +1,177 @@ +# Copyright 2019 The TensorNetwork Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +from tensornetwork.network_components import Node, contract, contract_between +# pylint: disable=line-too-long +from tensornetwork.backends import backend_factory + +import numpy as np +import copy +from typing import List, Union, Any, Optional, Tuple, Text + + +class Index: + """ + An index class to store indices of a symmetric tensor. + An index keeps track of all its childs by storing references + to them (i.e. it is a binary tree). + """ + + def __init__(self, + charges: Union[List, np.ndarray], + flow: int, + name: Optional[Text] = None, + left_child: Optional["Index"] = None, + right_child: Optional["Index"] = None): + self.charges = np.asarray(charges) + self.flow = flow + self.left_child = left_child + self.right_child = right_child + self.name = name if name else 'index' + + @property + def dimension(self): + return len(self.charges) + + def _copy_helper(self, index: "Index", copied_index: "Index") -> None: + """ + Helper function for copy + """ + if index.left_child != None: + left_copy = Index( + charges=index.left_child.charges.copy(), + flow=copy.copy(index.left_child.flow), + name=index.left_child.name) + copied_index.left_child = left_copy + self._copy_helper(index.left_child, left_copy) + if index.right_child != None: + right_copy = Index( + charges=index.right_child.charges.copy(), + flow=copy.copy(index.right_child.flow), + name=index.right_child.name) + copied_index.right_child = right_copy + 
self._copy_helper(index.right_child, right_copy) + + def copy(self): + """ + Returns: + Index: A deep copy of `Index`. Note that all children of + `Index` are copied as well. + """ + index_copy = Index( + charges=self.charges.copy(), flow=copy.copy(self.flow), name=self.name) + + self._copy_helper(self, index_copy) + return index_copy + + def _leave_helper(self, index: "Index", leave_list: List) -> None: + if index.left_child: + self._leave_helper(index.left_child, leave_list) + if index.right_child: + self._leave_helper(index.right_child, leave_list) + if (index.left_child is None) and (index.right_child is None): + leave_list.append(index) + + def get_elementary_indices(self) -> List: + """ + Returns: + List: A list containing the elementary indices (the leaves) + of `Index`. + """ + leave_list = [] + self._leave_helper(self, leave_list) + return leave_list + + +def fuse_charges(q1: Union[List, np.ndarray], flow1: int, + q2: Union[List, np.ndarray], flow2: int) -> np.ndarray: + """ + Fuse charges `q1` with charges `q2` by simple addition (valid + for U(1) charges). `q1` and `q2` typically belong to two consecutive + legs of `BlockSparseTensor`. + Given `q1 = [0,1,2]` and `q2 = [10,100]`, this returns + `[10, 11, 12, 100, 101, 102]`. + When using column-major ordering of indices in `BlockSparseTensor`, + the position of q1 should be "to the left" of the position of q2. + Args: + q1: Iterable of integers + flow1: Flow direction of charge `q1`. + q2: Iterable of integers + flow2: Flow direction of charge `q2`. + Returns: + np.ndarray: The result of fusing `q1` with `q2`. + """ + return np.reshape( + flow2 * np.asarray(q2)[:, None] + flow1 * np.asarray(q1)[None, :], + len(q1) * len(q2)) + + +def fuse_index_pair(left_index: Index, + right_index: Index, + flow: Optional[int] = 1) -> Index: + """ + Fuse two consecutive indices (legs) of a symmetric tensor. + Args: + left_index: A tensor Index. + right_index: A tensor Index. 
+ flow: An optional flow of the resulting `Index` object. + Returns: + Index: The result of fusing `index1` and `index2`. + """ + #Fuse the charges of the two indices + if left_index is right_index: + raise ValueError( + "index1 and index2 are the same object. Can only fuse distinct objects") + + fused_charges = fuse_charges(left_index.charges, left_index.flow, + right_index.charges, right_index.flow) + return Index( + charges=fused_charges, + flow=flow, + left_child=left_index, + right_child=right_index) + + +def fuse_indices(indices: List[Index], flow: Optional[int] = 1) -> Index: + """ + Fuse a list of indices (legs) of a symmetric tensor. + Args: + indices: A list of tensor Index objects + flow: An optional flow of the resulting `Index` object. + Returns: + Index: The result of fusing `indices`. + """ + + index = indices[0] + for n in range(1, len(indices)): + index = fuse_index_pair(index, indices[n], flow=flow) + return index + + +def split_index(index: Index) -> Tuple[Index, Index]: + """ + Split an index (leg) of a symmetric tensor into two legs. + Args: + index: A tensor Index. + Returns: + Tuple[Index, Index]: The result of splitting `index`. 
+ """ + if (not index.left_child) or (not index.right_child): + raise ValueError("cannot split an elementary index") + + return index.left_child, index.right_child From 1ebbc7faa6868723e49c2cac88fa9239a4a7b5a2 Mon Sep 17 00:00:00 2001 From: Martin Date: Fri, 29 Nov 2019 22:28:53 -0500 Subject: [PATCH 007/212] added small tutorial --- tensornetwork/block_tensor/tutorial.py | 44 ++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 tensornetwork/block_tensor/tutorial.py diff --git a/tensornetwork/block_tensor/tutorial.py b/tensornetwork/block_tensor/tutorial.py new file mode 100644 index 000000000..0cb0c5ede --- /dev/null +++ b/tensornetwork/block_tensor/tutorial.py @@ -0,0 +1,44 @@ +# Copyright 2019 The TensorNetwork Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import tensornetwork as tn +import numpy as np +import tensornetwork.block_tensor.block_tensor as BT +import tensornetwork.block_tensor.index as IDX + +B = 4 # possible charges on each leg can be between [0,B) +########################################################## +##### Generate a rank 4 symmetrix tensor ####### +########################################################## + +# generate random charges on each leg of the tensor +D1, D2, D3, D4 = 4, 6, 8, 10 #bond dimensions on each leg +q1 = np.random.randint(0, B, D1) +q2 = np.random.randint(0, B, D2) +q3 = np.random.randint(0, B, D3) +q4 = np.random.randint(0, B, D4) + +# generate Index objects for each leg. neccessary for initialization of +# BlockSparseTensor +i1 = IDX.Index(charges=q1, flow=1) +i2 = IDX.Index(charges=q2, flow=-1) +i3 = IDX.Index(charges=q3, flow=1) +i4 = IDX.Index(charges=q4, flow=-1) + +# initialize a random symmetric tensor +A = BT.BlockSparseTensor.randn(indices=[i1, i2, i3, i4], dtype=np.complex128) +B = BT.reshape(A, (4, 48, 10)) From 1eb3d6f63fa65267482f0f6c07c697a22c50f8c6 Mon Sep 17 00:00:00 2001 From: Martin Date: Fri, 29 Nov 2019 22:29:56 -0500 Subject: [PATCH 008/212] added docstring --- tensornetwork/block_tensor/block_tensor.py | 34 +++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 9d78479a8..3ac3691c3 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -239,6 +239,39 @@ def charges(self): def reshape(tensor: BlockSparseTensor, shape: Tuple[int]): + """ + Reshape `tensor` into `shape`. + `reshape` works essentially the same as the dense version, with the + notable exception that the tensor can only be reshaped into a form + compatible with its elementary indices. 
The elementary indices are + the indices at the leaves of the `Index` objects `tensors.indices`. + For example, while the following reshaping is possible for regular + dense numpy tensor, + ``` + A = np.random.rand(6,6,6) + np.reshape(A, (2,3,6,6)) + ``` + the same code for BlockSparseTensor + ``` + q1 = np.random.randint(0,10,6) + q2 = np.random.randint(0,10,6) + q3 = np.random.randint(0,10,6) + i1 = Index(charges=q1,flow=1) + i2 = Index(charges=q2,flow=-1) + i3 = Index(charges=q3,flow=1) + A=BlockSparseTensor.randn(indices=[i1,i2,i3]) + print(A.shape) #prints (6,6,6) + reshape(A, (2,3,6,6)) #raises ValueError + ``` + raises a `ValueError` since (2,3,6,6) + is incompatible with the elementary shape (6,6,6) of the tensor. + + Args: + tensor: A symmetric tensor. + shape: The new shape. + Returns: + BlockSparseTensor: A new tensor reshaped into `shape` + """ # a few simple checks if np.prod(shape) != np.prod(tensor.shape): raise ValueError("A tensor with {} elements cannot be " @@ -266,7 +299,6 @@ def reshape(tensor: BlockSparseTensor, shape: Tuple[int]): shape, tuple( [e.dimension for e in elementary_indices]))) - elif shape[n] < result.shape[n]: while shape[n] < result.shape[n]: #split index at n From d25d8aa72e3502922805633e6b5b1bd3a50585c0 Mon Sep 17 00:00:00 2001 From: Martin Date: Fri, 29 Nov 2019 23:23:11 -0500 Subject: [PATCH 009/212] fixed bug in retrieve_diagonal_blocks --- tensornetwork/block_tensor/block_tensor.py | 172 +++++++++++++-------- 1 file changed, 108 insertions(+), 64 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 3ac3691c3..55e78858e 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -47,8 +47,7 @@ def compute_num_nonzero(charges: List[np.ndarray], of the charges on each leg. `1` is inflowing, `-1` is outflowing charge. Returns: - dict: Dictionary mapping a tuple of charges to a shape tuple. 
- Each element corresponds to a non-zero valued block of the tensor. + int: The number of non-zero elements. """ if len(charges) == 1: @@ -127,48 +126,51 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, of the charges on each leg. `1` is inflowing, `-1` is outflowing charge. """ + if len(charges) != 2: raise ValueError("input has to be a two-dimensional symmetric matrix") check_flows(flows) if len(flows) != len(charges): raise ValueError("`len(flows)` is different from `len(charges)`") - row_charges = charges[0] # a list of charges on each row - column_charges = charges[1] # a list of charges on each column + row_charges = flows[0] * charges[0] # a list of charges on each row + column_charges = flows[1] * charges[1] # a list of charges on each column # for each matrix column find the number of non-zero elements in it # Note: the matrix is assumed to be symmetric, i.e. only elements where # ingoing and outgoing charge are identical are non-zero - num_non_zero = [len(np.nonzero(row_charges == c)[0]) for c in column_charges] - + num_non_zero = [ + len(np.nonzero((row_charges + c) == 0)[0]) for c in column_charges + ] #get the unique charges - #Note: row and column unique charges are the same due to symmetry - unique_charges, row_dims = np.unique(row_charges, return_counts=True) - _, column_dims = np.unique(column_charges, return_counts=True) + unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) + unique_column_charges, column_dims = np.unique( + column_charges, return_counts=True) - # get the degenaricies of each row and column charge - row_degeneracies = dict(zip(unique_charges, row_dims)) - column_degeneracies = dict(zip(unique_charges, column_dims)) + # get the degeneracies of each row and column charge + row_degeneracies = dict(zip(unique_row_charges, row_dims)) + column_degeneracies = dict(zip(unique_column_charges, column_dims)) blocks = {} - for c in unique_charges: + for c in unique_row_charges: start = 0 idxs = [] for column in 
range(len(column_charges)): charge = column_charges[column] - if charge != c: + if (charge + c) != 0: start += num_non_zero[column] else: idxs.extend(start + np.arange(num_non_zero[column])) - - blocks[c] = np.reshape(data[idxs], - (row_degeneracies[c], column_degeneracies[c])) + if idxs: + blocks[c] = np.reshape(data[idxs], + (row_degeneracies[c], column_degeneracies[-c])) return blocks class BlockSparseTensor: """ Minimal class implementation of block sparsity. - The class currently onluy supports a single U(1) symmetry. - Currently only nump.ndarray is supported. + The class design follows Glen's proposal (Design 0). + The class currently only supports a single U(1) symmetry + and only nump.ndarray. Attributes: * self.data: A 1d np.ndarray storing the underlying data of the tensor @@ -221,6 +223,10 @@ def randn(cls, indices: List[Index], data = backend.randn((num_non_zero_elements,), dtype=dtype) return cls(data=data, indices=indices) + @property + def rank(self): + return len(self.indices) + @property def shape(self) -> Tuple: return tuple([i.dimension for i in self.indices]) @@ -237,6 +243,88 @@ def flows(self): def charges(self): return [i.charges for i in self.indices] + def reshape(self, shape): + """ + Reshape `tensor` into `shape` in place. + `BlockSparseTensor.reshape` works essentially the same as the dense + version, with the notable exception that the tensor can only be + reshaped into a form compatible with its elementary indices. + The elementary indices are the indices at the leaves of the `Index` + objects `tensors.indices`. 
+ For example, while the following reshaping is possible for regular + dense numpy tensor, + ``` + A = np.random.rand(6,6,6) + np.reshape(A, (2,3,6,6)) + ``` + the same code for BlockSparseTensor + ``` + q1 = np.random.randint(0,10,6) + q2 = np.random.randint(0,10,6) + q3 = np.random.randint(0,10,6) + i1 = Index(charges=q1,flow=1) + i2 = Index(charges=q2,flow=-1) + i3 = Index(charges=q3,flow=1) + A=BlockSparseTensor.randn(indices=[i1,i2,i3]) + print(A.shape) #prints (6,6,6) + A.reshape((2,3,6,6)) #raises ValueError + ``` + raises a `ValueError` since (2,3,6,6) + is incompatible with the elementary shape (6,6,6) of the tensor. + + Args: + tensor: A symmetric tensor. + shape: The new shape. + Returns: + BlockSparseTensor: A new tensor reshaped into `shape` + """ + + # a few simple checks + if np.prod(shape) != np.prod(self.shape): + raise ValueError("A tensor with {} elements cannot be " + "reshaped into a tensor with {} elements".format( + np.prod(self.shape), np.prod(shape))) + + def raise_error(): + elementary_indices = [] + for i in self.indices: + elementary_indices.extend(i.get_elementary_indices()) + raise ValueError("The shape {} is incompatible with the " + "elementary shape {} of the tensor.".format( + shape, + tuple([e.dimension for e in elementary_indices]))) + + for n in range(len(shape)): + if shape[n] > self.shape[n]: + while shape[n] > self.shape[n]: + #fuse indices + i1, i2 = self.indices.pop(n), self.indices.pop(n) + #note: the resulting flow is set to one since the flow + #is multiplied into the charges. As a result the tensor + #will then be invariant in any case. 
+ self.indices.insert(n, fuse_index_pair(i1, i2)) + if self.shape[n] > shape[n]: + raise_error() + elif shape[n] < self.shape[n]: + while shape[n] < self.shape[n]: + #split index at n + try: + i1, i2 = split_index(self.indices.pop(n)) + except ValueError: + raise_error() + self.indices.insert(n, i1) + self.indices.insert(n + 1, i2) + if self.shape[n] < shape[n]: + raise_error() + + def get_diagonal_blocks(self): + if self.rank != 2: + raise ValueError( + "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" + .format(self.rank)) + return retrieve_non_zero_diagonal_blocks( + data=self.data, charges=self.charges, flows=self.flows) + def reshape(tensor: BlockSparseTensor, shape: Tuple[int]): """ @@ -272,51 +360,7 @@ def reshape(tensor: BlockSparseTensor, shape: Tuple[int]): Returns: BlockSparseTensor: A new tensor reshaped into `shape` """ - # a few simple checks - if np.prod(shape) != np.prod(tensor.shape): - raise ValueError("A tensor with {} elements cannot be " - "reshaped into a tensor with {} elements".format( - np.prod(tensor.shape), np.prod(shape))) - #copy indices result = BlockSparseTensor( data=tensor.data.copy(), indices=[i.copy() for i in tensor.indices]) - - for n in range(len(shape)): - if shape[n] > result.shape[n]: - while shape[n] > result.shape[n]: - #fuse indices - i1, i2 = result.indices.pop(n), result.indices.pop(n) - #note: the resulting flow is set to one since the flow - #is multiplied into the charges. As a result the tensor - #will then be invariant in any case. 
- result.indices.insert(n, fuse_index_pair(i1, i2)) - if result.shape[n] > shape[n]: - elementary_indices = [] - for i in tensor.indices: - elementary_indices.extend(i.get_elementary_indices()) - raise ValueError("The shape {} is incompatible with the " - "elementary shape {} of the tensor.".format( - shape, - tuple( - [e.dimension for e in elementary_indices]))) - elif shape[n] < result.shape[n]: - while shape[n] < result.shape[n]: - #split index at n - try: - i1, i2 = split_index(result.indices.pop(n)) - except ValueError: - elementary_indices = [] - for i in tensor.indices: - elementary_indices.extend(i.get_elementary_indices()) - raise ValueError("The shape {} is incompatible with the " - "elementary shape {} of the tensor.".format( - shape, - tuple( - [e.dimension for e in elementary_indices]))) - result.indices.insert(n, i1) - result.indices.insert(n + 1, i2) - if result.shape[n] < shape[n]: - raise ValueError( - "shape {} is incompatible with the elementary result shape".format( - shape)) + result.reshape(shape) return result From ae8cda65f1e050dff2ddc7399f5dfb5574a5c169 Mon Sep 17 00:00:00 2001 From: Martin Date: Fri, 29 Nov 2019 23:31:28 -0500 Subject: [PATCH 010/212] TODO added --- tensornetwork/block_tensor/block_tensor.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 55e78858e..510710901 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -49,11 +49,13 @@ def compute_num_nonzero(charges: List[np.ndarray], Returns: int: The number of non-zero elements. 
""" + #TODO: this is not very efficient for large bond dimensions if len(charges) == 1: return len(charges) net_charges = flows[0] * charges[0] for i in range(1, len(flows)): + print(len(net_charges)) net_charges = np.reshape( flows[i] * charges[i][:, None] + net_charges[None, :], len(charges[i]) * len(net_charges)) From bbac9c4e75ebedd41e3841b7674dd82e56f4d134 Mon Sep 17 00:00:00 2001 From: Martin Date: Fri, 29 Nov 2019 23:52:44 -0500 Subject: [PATCH 011/212] improved initialization a bit --- tensornetwork/block_tensor/block_tensor.py | 34 ++++++++++++++++++---- 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 510710901..e9a9e560f 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -50,15 +50,37 @@ def compute_num_nonzero(charges: List[np.ndarray], int: The number of non-zero elements. """ #TODO: this is not very efficient for large bond dimensions - if len(charges) == 1: return len(charges) - net_charges = flows[0] * charges[0] - for i in range(1, len(flows)): - print(len(net_charges)) + + neg_flows = np.nonzero(np.asarray(flows) == -1)[0] + pos_flows = np.nonzero(np.asarray(flows) == 1)[0] + neg_max = 0 + neg_min = 0 + for i in neg_flows: + neg_max += np.max(charges[i]) + neg_min += np.min(charges[i]) + + pos_max = 0 + pos_min = 0 + for i in pos_flows: + pos_max += np.max(charges[i]) + pos_min += np.min(charges[i]) + + net_charges = charges[pos_flows[0]] + net_charges = net_charges[net_charges <= neg_max] + for i in range(1, len(pos_flows)): + net_charges = np.reshape( + charges[pos_flows[i]][:, None] + net_charges[None, :], + len(charges[pos_flows[i]]) * len(net_charges)) + net_charges = net_charges[net_charges <= neg_max] + net_charges = net_charges[net_charges >= neg_min] + + for i in range(len(neg_flows)): net_charges = np.reshape( - flows[i] * charges[i][:, None] + net_charges[None, :], - 
len(charges[i]) * len(net_charges)) + -1 * charges[neg_flows[i]][:, None] + net_charges[None, :], + len(charges[neg_flows[i]]) * len(net_charges)) + net_charges = net_charges[net_charges <= neg_max] return len(np.nonzero(net_charges == 0)[0]) From db828c7140bc774f68858a50394d8c2adcff0a61 Mon Sep 17 00:00:00 2001 From: Martin Date: Sun, 1 Dec 2019 14:16:21 -0500 Subject: [PATCH 012/212] more efficient initialization --- tensornetwork/block_tensor/block_tensor.py | 78 ++++++++++++---------- 1 file changed, 42 insertions(+), 36 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index e9a9e560f..0817119d6 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -16,10 +16,11 @@ from __future__ import division from __future__ import print_function import numpy as np -from tensornetwork.network_components import Node, contract, contract_between # pylint: disable=line-too-long +from tensornetwork.network_components import Node, contract, contract_between from tensornetwork.backends import backend_factory -from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index +# pylint: disable=line-too-long +from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges import numpy as np import itertools from typing import List, Union, Any, Tuple, Type, Optional @@ -38,8 +39,9 @@ def compute_num_nonzero(charges: List[np.ndarray], Compute the number of non-zero elements, given the meta-data of a symmetric tensor. Args: - charges: List of np.ndarray, one for each leg. - Each np.ndarray `charges[leg]` is of shape `(D[leg], Q)`. + charges: List of np.ndarray, one for each leg of the + underlying tensor. Each np.ndarray `charges[leg]` + is of shape `(D[leg], Q)`. The bond dimension `D[leg]` can vary on each leg, the number of symmetries `Q` has to be the same for each leg. 
flows: A list of integers, one for each leg, @@ -51,38 +53,42 @@ def compute_num_nonzero(charges: List[np.ndarray], """ #TODO: this is not very efficient for large bond dimensions if len(charges) == 1: - return len(charges) - - neg_flows = np.nonzero(np.asarray(flows) == -1)[0] - pos_flows = np.nonzero(np.asarray(flows) == 1)[0] - neg_max = 0 - neg_min = 0 - for i in neg_flows: - neg_max += np.max(charges[i]) - neg_min += np.min(charges[i]) - - pos_max = 0 - pos_min = 0 - for i in pos_flows: - pos_max += np.max(charges[i]) - pos_min += np.min(charges[i]) - - net_charges = charges[pos_flows[0]] - net_charges = net_charges[net_charges <= neg_max] - for i in range(1, len(pos_flows)): - net_charges = np.reshape( - charges[pos_flows[i]][:, None] + net_charges[None, :], - len(charges[pos_flows[i]]) * len(net_charges)) - net_charges = net_charges[net_charges <= neg_max] - net_charges = net_charges[net_charges >= neg_min] - - for i in range(len(neg_flows)): - net_charges = np.reshape( - -1 * charges[neg_flows[i]][:, None] + net_charges[None, :], - len(charges[neg_flows[i]]) * len(net_charges)) - net_charges = net_charges[net_charges <= neg_max] - - return len(np.nonzero(net_charges == 0)[0]) + return len(np.nonzero(charges == 0)[0]) + #get unique charges and their degeneracies on each leg + charge_degeneracies = [ + np.unique(charge, return_counts=True) for charge in charges + ] + accumulated_charges, accumulated_degeneracies = charge_degeneracies[0] + #multiply the flow into the charges of first leg + accumulated_charges *= flows[0] + for n in range(1, len(charge_degeneracies)): + #list of unique charges and list of their degeneracies + #on the next unfused leg of the tensor + leg_charge, leg_degeneracies = charge_degeneracies[n] + + #fuse the unique charges + #Note: entries in `fused_charges` are not unique anymore. 
+ #flow1 = 1 because the flow of leg 0 has already been + #mulitplied above + fused_charges = fuse_charges( + q1=accumulated_charges, flow1=1, q2=leg_charge, flow2=flows[n]) + #compute the degeneracies of `fused_charges` charges + #fused_degeneracies = np.kron(leg_degeneracies, accumulated_degeneracies) + fused_degeneracies = np.kron(leg_degeneracies, accumulated_degeneracies) + #compute the new degeneracies resulting of fusing the vectors of unique charges + #`accumulated_charges` and `leg_charge_2` + accumulated_charges = np.unique(fused_charges) + accumulated_degeneracies = [] + for n in range(len(accumulated_charges)): + accumulated_degeneracies.append( + np.sum(fused_degeneracies[fused_charges == accumulated_charges[n]])) + + accumulated_degeneracies = np.asarray(accumulated_degeneracies) + if len(np.nonzero(accumulated_charges == 0)[0]) == 0: + raise ValueError( + "given leg-charges `charges` and flows `flows` are incompatible " + "with a symmetric tensor") + return np.sum(accumulated_degeneracies[accumulated_charges == 0]) def compute_nonzero_block_shapes(charges: List[np.ndarray], From 99204f741520ccc8ff9d662684afeec96c074580 Mon Sep 17 00:00:00 2001 From: Martin Date: Sun, 1 Dec 2019 14:35:03 -0500 Subject: [PATCH 013/212] just formatting --- tensornetwork/backends/numpy/numpy_backend.py | 25 +++++++++---------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/tensornetwork/backends/numpy/numpy_backend.py b/tensornetwork/backends/numpy/numpy_backend.py index 7d0527b83..0246d32eb 100644 --- a/tensornetwork/backends/numpy/numpy_backend.py +++ b/tensornetwork/backends/numpy/numpy_backend.py @@ -43,9 +43,8 @@ def svd_decomposition(self, max_singular_values: Optional[int] = None, max_truncation_error: Optional[float] = None ) -> Tuple[Tensor, Tensor, Tensor, Tensor]: - return decompositions.svd_decomposition(self.np, tensor, split_axis, - max_singular_values, - max_truncation_error) + return decompositions.svd_decomposition( + self.np, tensor, 
split_axis, max_singular_values, max_truncation_error) def qr_decomposition( self, @@ -224,16 +223,16 @@ def eigs(self, U = U.astype(dtype) return list(eta), [U[:, n] for n in range(numeig)] - def eigsh_lanczos(self, - A: Callable, - initial_state: Optional[Tensor] = None, - num_krylov_vecs: Optional[int] = 200, - numeig: Optional[int] = 1, - tol: Optional[float] = 1E-8, - delta: Optional[float] = 1E-8, - ndiag: Optional[int] = 20, - reorthogonalize: Optional[bool] = False - ) -> Tuple[List, List]: + def eigsh_lanczos( + self, + A: Callable, + initial_state: Optional[Tensor] = None, + num_krylov_vecs: Optional[int] = 200, + numeig: Optional[int] = 1, + tol: Optional[float] = 1E-8, + delta: Optional[float] = 1E-8, + ndiag: Optional[int] = 20, + reorthogonalize: Optional[bool] = False) -> Tuple[List, List]: """ Lanczos method for finding the lowest eigenvector-eigenvalue pairs of a linear operator `A`. If no `initial_state` is provided From 73a9628d82e361a2691902516c9cf3ff81dc5128 Mon Sep 17 00:00:00 2001 From: Martin Date: Sun, 1 Dec 2019 14:35:14 -0500 Subject: [PATCH 014/212] added random --- tensornetwork/block_tensor/block_tensor.py | 32 ++++++++++++++++++++-- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 0817119d6..089cd42ff 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -20,7 +20,7 @@ from tensornetwork.network_components import Node, contract, contract_between from tensornetwork.backends import backend_factory # pylint: disable=line-too-long -from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges +from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies import numpy as np import itertools from typing import List, Union, Any, Tuple, Type, Optional @@ -73,8 +73,8 @@ def compute_num_nonzero(charges: 
List[np.ndarray], fused_charges = fuse_charges( q1=accumulated_charges, flow1=1, q2=leg_charge, flow2=flows[n]) #compute the degeneracies of `fused_charges` charges - #fused_degeneracies = np.kron(leg_degeneracies, accumulated_degeneracies) - fused_degeneracies = np.kron(leg_degeneracies, accumulated_degeneracies) + fused_degeneracies = fuse_degeneracies(accumulated_degeneracies, + leg_degeneracies) #compute the new degeneracies resulting of fusing the vectors of unique charges #`accumulated_charges` and `leg_charge_2` accumulated_charges = np.unique(fused_charges) @@ -253,6 +253,32 @@ def randn(cls, indices: List[Index], data = backend.randn((num_non_zero_elements,), dtype=dtype) return cls(data=data, indices=indices) + @classmethod + def random(cls, indices: List[Index], + dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": + """ + Initialize a random symmetric tensor from random normal distribution. + Args: + indices: List of `Index` objecst, one for each leg. + dtype: An optional numpy dtype. 
The dtype of the tensor + Returns: + BlockSparseTensor + """ + charges = [i.charges for i in indices] + flows = [i.flow for i in indices] + num_non_zero_elements = compute_num_nonzero(charges, flows) + dtype = dtype if dtype is not None else self.np.float64 + + def init_random(): + if ((np.dtype(dtype) is np.dtype(np.complex128)) or + (np.dtype(dtype) is np.dtype(np.complex64))): + return np.random.rand(num_non_zero_elements).astype( + dtype) - 0.5 + 1j * ( + np.random.rand(num_non_zero_elements).astype(dtype) - 0.5) + return np.random.randn(num_non_zero_elements).astype(dtype) - 0.5 + + return cls(data=init_random(), indices=indices) + @property def rank(self): return len(self.indices) From efa64a49b439efd599e854a0c2f613c4a4258935 Mon Sep 17 00:00:00 2001 From: Martin Date: Sun, 1 Dec 2019 14:35:46 -0500 Subject: [PATCH 015/212] added fuse_degeneracies --- tensornetwork/block_tensor/index.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index d17203dfb..327734123 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -121,6 +121,27 @@ def fuse_charges(q1: Union[List, np.ndarray], flow1: int, len(q1) * len(q2)) +def fuse_degeneracies(degen1: Union[List, np.ndarray], + degen2: Union[List, np.ndarray]) -> np.ndarray: + """ + Fuse degeneracies `degen1` and `degen2` of two leg-charges + by simple kronecker product. `degen1` and `degen2` typically belong to two + consecutive legs of `BlockSparseTensor`. + Given `q1 = [0,1,2]` and `q2 = [10,100]`, this returns + `[10, 11, 12, 100, 101, 102]`. + When using column-major ordering of indices in `BlockSparseTensor`, + the position of q1 should be "to the left" of the position of q2. + Args: + q1: Iterable of integers + flow1: Flow direction of charge `q1`. + q2: Iterable of integers + flow2: Flow direction of charge `q2`. + Returns: + np.ndarray: The result of fusing `q1` with `q2`. 
+ """ + return np.kron(degen2, degen1) + + def fuse_index_pair(left_index: Index, right_index: Index, flow: Optional[int] = 1) -> Index: From 76191627e3ee7cfaf53a9702a8f61d5cdc24151a Mon Sep 17 00:00:00 2001 From: Martin Date: Sun, 1 Dec 2019 14:57:27 -0500 Subject: [PATCH 016/212] fixed bug in reshape --- tensornetwork/block_tensor/block_tensor.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 089cd42ff..c54efc950 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -341,14 +341,23 @@ def reshape(self, shape): "reshaped into a tensor with {} elements".format( np.prod(self.shape), np.prod(shape))) + #keep a copy of the old indices for the case where reshaping fails + #FIXME: this is pretty hacky! + index_copy = [i.copy() for i in self.indices] + def raise_error(): + #if this error is raised `shape` is incompatible + #with the elementary indices. We have to reset them + #to the original. 
+ self.indices = index_copy elementary_indices = [] for i in self.indices: elementary_indices.extend(i.get_elementary_indices()) - raise ValueError("The shape {} is incompatible with the " - "elementary shape {} of the tensor.".format( - shape, - tuple([e.dimension for e in elementary_indices]))) + print(elementary_indices) + raise ValueError("The shape {} is incompatible with the " + "elementary shape {} of the tensor.".format( + shape, + tuple([e.dimension for e in elementary_indices]))) for n in range(len(shape)): if shape[n] > self.shape[n]: From 2be30a9be5c6b2d187864609aa24ccb3c752b92c Mon Sep 17 00:00:00 2001 From: Martin Date: Sun, 1 Dec 2019 15:45:25 -0500 Subject: [PATCH 017/212] dosctring, typing --- tensornetwork/block_tensor/block_tensor.py | 61 +++++++++++++++++++--- 1 file changed, 55 insertions(+), 6 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index c54efc950..05cf647aa 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -23,7 +23,8 @@ from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies import numpy as np import itertools -from typing import List, Union, Any, Tuple, Type, Optional +import time +from typing import List, Union, Any, Tuple, Type, Optional, Dict Tensor = Any @@ -92,7 +93,7 @@ def compute_num_nonzero(charges: List[np.ndarray], def compute_nonzero_block_shapes(charges: List[np.ndarray], - flows: List[Union[bool, int]]) -> dict: + flows: List[Union[bool, int]]) -> Dict: """ Compute the blocks and their respective shapes of a symmetric tensor, given its meta-data. 
@@ -139,10 +140,11 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray], def retrieve_non_zero_diagonal_blocks(data: np.ndarray, charges: List[np.ndarray], - flows: List[Union[bool, int]]) -> dict: + flows: List[Union[bool, int]]) -> Dict: """ Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. + !!!!!!!!! This is currently very slow!!!!!!!!!!!! Args: data: An np.ndarray of the data. The number of elements in `data` has to match the number of non-zero elements defined by `charges` @@ -156,6 +158,37 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, of the charges on each leg. `1` is inflowing, `-1` is outflowing charge. """ + #TODO: this is currently way too slow!!!! + #Run the following benchmark for testing (typical MPS use case) + #retrieving the blocks is ~ 10 times as slow as mulitplying all of them + + # D=4000 + # B=10 + # q1 = np.random.randint(0,B,D) + # q2 = np.asarray([0,1]) + # q3 = np.random.randint(0,B,D) + # i1 = Index(charges=q1,flow=1) + # i2 = Index(charges=q2,flow=1) + # i3 = Index(charges=q3,flow=-1) + # indices=[i1,i2,i3] + # A=BT.BlockSparseTensor.random(indices=indices, dtype=np.complex128) + # ts = [] + # A.reshape((D*2, D)) + # def multiply_blocks(blocks): + # for b in blocks.values(): + # np.dot(b.T, b) + # t1s=[] + # t2s=[] + # for n in range(10): + # print(n) + # t1 = time.time() + # b = A.get_diagonal_blocks() + # t1s.append(time.time() - t1) + # t1 = time.time() + # multiply_blocks(b) + # t2s.append(time.time() - t1) + # print('average retrieval time', np.average(t1s)) + # print('average multiplication time',np.average(t2s)) if len(charges) != 2: raise ValueError("input has to be a two-dimensional symmetric matrix") @@ -180,9 +213,12 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, row_degeneracies = dict(zip(unique_row_charges, row_dims)) column_degeneracies = dict(zip(unique_column_charges, column_dims)) blocks = {} + for c in 
unique_row_charges: start = 0 idxs = [] + #TODO: this for loop can be replaced with something + #more sophisticated (i.e. using numpy lookups and sums) for column in range(len(column_charges)): charge = column_charges[column] if (charge + c) != 0: @@ -190,7 +226,7 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, else: idxs.extend(start + np.arange(num_non_zero[column])) if idxs: - blocks[c] = np.reshape(data[idxs], + blocks[c] = np.reshape(data[np.asarray(idxs)], (row_degeneracies[c], column_degeneracies[-c])) return blocks @@ -299,6 +335,13 @@ def flows(self): def charges(self): return [i.charges for i in self.indices] + def transpose(self, order): + """ + Transpose the tensor into the new order `order` + + """ + raise NotImplementedError('transpose is not implemented!!') + def reshape(self, shape): """ Reshape `tensor` into `shape` in place. @@ -382,7 +425,13 @@ def raise_error(): if self.shape[n] < shape[n]: raise_error() - def get_diagonal_blocks(self): + def get_diagonal_blocks(self) -> Dict: + """ + Obtain the diagonal blocks of symmetric matrix. + BlockSparseTensor has to be a matrix. + Returns: + dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) + """ if self.rank != 2: raise ValueError( "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" @@ -391,7 +440,7 @@ def get_diagonal_blocks(self): data=self.data, charges=self.charges, flows=self.flows) -def reshape(tensor: BlockSparseTensor, shape: Tuple[int]): +def reshape(tensor: BlockSparseTensor, shape: Tuple[int]) -> BlockSparseTensor: """ Reshape `tensor` into `shape`. 
`reshape` works essentially the same as the dense version, with the From 742824f1c2a63211f66df5444ce6a453eba1ce66 Mon Sep 17 00:00:00 2001 From: Martin Date: Sun, 1 Dec 2019 15:57:49 -0500 Subject: [PATCH 018/212] removed TODO --- tensornetwork/block_tensor/block_tensor.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 05cf647aa..7b83a2778 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -52,7 +52,6 @@ def compute_num_nonzero(charges: List[np.ndarray], Returns: int: The number of non-zero elements. """ - #TODO: this is not very efficient for large bond dimensions if len(charges) == 1: return len(np.nonzero(charges == 0)[0]) #get unique charges and their degeneracies on each leg From 2e6c3957b30a68494e4f23f64de75667468e11d4 Mon Sep 17 00:00:00 2001 From: Martin Date: Sun, 1 Dec 2019 15:58:35 -0500 Subject: [PATCH 019/212] removed confusing code line --- tensornetwork/block_tensor/block_tensor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 7b83a2778..6dacc7330 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -88,7 +88,7 @@ def compute_num_nonzero(charges: List[np.ndarray], raise ValueError( "given leg-charges `charges` and flows `flows` are incompatible " "with a symmetric tensor") - return np.sum(accumulated_degeneracies[accumulated_charges == 0]) + return accumulated_degeneracies[accumulated_charges == 0] def compute_nonzero_block_shapes(charges: List[np.ndarray], From ab13d4a24573eca1eab7e6f997aaf3cc27844278 Mon Sep 17 00:00:00 2001 From: Martin Date: Sun, 1 Dec 2019 15:59:14 -0500 Subject: [PATCH 020/212] bug removed --- tensornetwork/block_tensor/block_tensor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 6dacc7330..b768a918e 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -88,7 +88,7 @@ def compute_num_nonzero(charges: List[np.ndarray], raise ValueError( "given leg-charges `charges` and flows `flows` are incompatible " "with a symmetric tensor") - return accumulated_degeneracies[accumulated_charges == 0] + return accumulated_degeneracies[accumulated_charges == 0][0] def compute_nonzero_block_shapes(charges: List[np.ndarray], From d375b1d6744d6e1fb5b25f804ef62fe030538954 Mon Sep 17 00:00:00 2001 From: Martin Date: Sun, 1 Dec 2019 16:41:13 -0500 Subject: [PATCH 021/212] comment --- tensornetwork/block_tensor/block_tensor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index b768a918e..154ae4993 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -159,7 +159,7 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, """ #TODO: this is currently way too slow!!!! 
#Run the following benchmark for testing (typical MPS use case) - #retrieving the blocks is ~ 10 times as slow as mulitplying all of them + #retrieving the blocks is ~ 10 times as slow as multiplying them # D=4000 # B=10 From 2727cd07797768b065b2d7e83960be7d36030fcc Mon Sep 17 00:00:00 2001 From: Martin Date: Mon, 2 Dec 2019 09:21:15 -0500 Subject: [PATCH 022/212] added __mul__ to Index --- tensornetwork/block_tensor/index.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index 327734123..1549a422e 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -97,6 +97,15 @@ def get_elementary_indices(self) -> List: self._leave_helper(self, leave_list) return leave_list + def __mul__(self, index: "Index") -> "Index": + """ + Merge `index` and self into a single larger index. + The flow of the resulting index is set to 1. + Flows of `self` and `index` are multiplied into + the charges upon fusing. 
+ """ + return fuse_index_pair(self, index) + def fuse_charges(q1: Union[List, np.ndarray], flow1: int, q2: Union[List, np.ndarray], flow2: int) -> np.ndarray: From 283e36478b8cec4c037c0c6d40d47e8b5eb7ed14 Mon Sep 17 00:00:00 2001 From: Martin Date: Mon, 2 Dec 2019 09:21:39 -0500 Subject: [PATCH 023/212] added sparse_shape and updated reshape to accept both int and Index lists --- tensornetwork/block_tensor/block_tensor.py | 79 ++++++++++++++++------ 1 file changed, 60 insertions(+), 19 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 154ae4993..225dacc45 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -24,7 +24,7 @@ import numpy as np import itertools import time -from typing import List, Union, Any, Tuple, Type, Optional, Dict +from typing import List, Union, Any, Tuple, Type, Optional, Dict, Iterable Tensor = Any @@ -170,8 +170,7 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, # i2 = Index(charges=q2,flow=1) # i3 = Index(charges=q3,flow=-1) # indices=[i1,i2,i3] - # A=BT.BlockSparseTensor.random(indices=indices, dtype=np.complex128) - # ts = [] + # A = BlockSparseTensor.random(indices=indices, dtype=np.complex128) # A.reshape((D*2, D)) # def multiply_blocks(blocks): # for b in blocks.values(): @@ -235,7 +234,7 @@ class BlockSparseTensor: Minimal class implementation of block sparsity. The class design follows Glen's proposal (Design 0). The class currently only supports a single U(1) symmetry - and only nump.ndarray. + and only numpy.ndarray. Attributes: * self.data: A 1d np.ndarray storing the underlying data of the tensor @@ -318,8 +317,40 @@ def init_random(): def rank(self): return len(self.indices) + #TODO: we should consider to switch the names + #`BlockSparseTensor.sparse_shape` and `BlockSparseTensor.shape`, + #i.e. have `BlockSparseTensor.shape`return the sparse shape of the tensor. 
+ #This may be more convenient for building tensor-type and backend + #agnostic code. For example, in MPS code we essentially never + #explicitly set a shape to a certain value (apart from initialization). + #That is, code like this + #``` + #tensor = np.random.rand(10,10,10) + #``` + #is never used. Rather one inquires shapes of tensors and + #multiplies them to get new shapes: + #``` + #new_tensor = reshape(tensor, [tensor.shape[0]*tensor.shape[1], tensor.shape[2]]) + #``` + #Thduis the return type of `BlockSparseTensor.shape` is never inspected explicitly + #(apart from debugging). + @property + def sparse_shape(self) -> Tuple: + """ + The sparse shape of the tensor. + Returns a copy of self.indices. Note that copying + can be relatively expensive for deeply nested indices. + Returns: + Tuple: A tuple of `Index` objects. + """ + + return tuple([i.copy() for i in self.indices]) + @property def shape(self) -> Tuple: + """ + The dense shape of the tensor. + """ return tuple([i.dimension for i in self.indices]) @property @@ -339,9 +370,10 @@ def transpose(self, order): Transpose the tensor into the new order `order` """ + raise NotImplementedError('transpose is not implemented!!') - def reshape(self, shape): + def reshape(self, shape: Union[Iterable[Index], Iterable[int]]) -> None: """ Reshape `tensor` into `shape` in place. `BlockSparseTensor.reshape` works essentially the same as the dense @@ -372,16 +404,23 @@ def reshape(self, shape): Args: tensor: A symmetric tensor. - shape: The new shape. + shape: The new shape. Can either be a list of `Index` + or a list of `int`. 
Returns: BlockSparseTensor: A new tensor reshaped into `shape` """ - + dense_shape = [] + for s in shape: + if isinstance(s, Index): + dense_shape.append(s.dimension) + else: + dense_shape.append(s) # a few simple checks - if np.prod(shape) != np.prod(self.shape): + + if np.prod(dense_shape) != np.prod(self.shape): raise ValueError("A tensor with {} elements cannot be " "reshaped into a tensor with {} elements".format( - np.prod(self.shape), np.prod(shape))) + np.prod(self.shape), np.prod(dense_shape))) #keep a copy of the old indices for the case where reshaping fails #FIXME: this is pretty hacky! @@ -398,22 +437,22 @@ def raise_error(): print(elementary_indices) raise ValueError("The shape {} is incompatible with the " "elementary shape {} of the tensor.".format( - shape, + dense_shape, tuple([e.dimension for e in elementary_indices]))) - for n in range(len(shape)): - if shape[n] > self.shape[n]: - while shape[n] > self.shape[n]: + for n in range(len(dense_shape)): + if dense_shape[n] > self.shape[n]: + while dense_shape[n] > self.shape[n]: #fuse indices i1, i2 = self.indices.pop(n), self.indices.pop(n) #note: the resulting flow is set to one since the flow #is multiplied into the charges. As a result the tensor #will then be invariant in any case. 
self.indices.insert(n, fuse_index_pair(i1, i2)) - if self.shape[n] > shape[n]: + if self.shape[n] > dense_shape[n]: raise_error() - elif shape[n] < self.shape[n]: - while shape[n] < self.shape[n]: + elif dense_shape[n] < self.shape[n]: + while dense_shape[n] < self.shape[n]: #split index at n try: i1, i2 = split_index(self.indices.pop(n)) @@ -421,7 +460,7 @@ def raise_error(): raise_error() self.indices.insert(n, i1) self.indices.insert(n + 1, i2) - if self.shape[n] < shape[n]: + if self.shape[n] < dense_shape[n]: raise_error() def get_diagonal_blocks(self) -> Dict: @@ -439,7 +478,8 @@ def get_diagonal_blocks(self) -> Dict: data=self.data, charges=self.charges, flows=self.flows) -def reshape(tensor: BlockSparseTensor, shape: Tuple[int]) -> BlockSparseTensor: +def reshape(tensor: BlockSparseTensor, + shape: Union[Iterable[Index], Iterable[int]]) -> BlockSparseTensor: """ Reshape `tensor` into `shape`. `reshape` works essentially the same as the dense version, with the @@ -469,7 +509,8 @@ def reshape(tensor: BlockSparseTensor, shape: Tuple[int]) -> BlockSparseTensor: Args: tensor: A symmetric tensor. - shape: The new shape. + shape: The new shape. Can either be a list of `Index` + or a list of `int`. 
Returns: BlockSparseTensor: A new tensor reshaped into `shape` """ From 7328ad406561e72359d89dd04521583b9fb360dc Mon Sep 17 00:00:00 2001 From: Martin Date: Mon, 2 Dec 2019 09:27:19 -0500 Subject: [PATCH 024/212] more in tutorial --- tensornetwork/block_tensor/tutorial.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/tutorial.py b/tensornetwork/block_tensor/tutorial.py index 0cb0c5ede..fe824ee87 100644 --- a/tensornetwork/block_tensor/tutorial.py +++ b/tensornetwork/block_tensor/tutorial.py @@ -41,4 +41,17 @@ # initialize a random symmetric tensor A = BT.BlockSparseTensor.randn(indices=[i1, i2, i3, i4], dtype=np.complex128) -B = BT.reshape(A, (4, 48, 10)) +B = BT.reshape(A, (4, 48, 10)) #creates a new tensor (copy) +shape_A = A.shape #returns the dense shape of A +A.reshape([shape_A[0] * shape_A[1], shape_A[2], + shape_A[3]]) #in place reshaping +A.reshape(shape_A) #reshape back into original shape + +sparse_shape = A.sparse_shape #returns a copy of `A.indices`. Each `Index` object is copied + +new_sparse_shape = [ + sparse_shape[0] * sparse_shape[1], sparse_shape[2], sparse_shape[3] +] +B = BT.reshape(A, new_sparse_shape) #return a copy +A.reshape(new_sparse_shape) #in place reshaping +A.reshape(sparse_shape) #bring A back into original shape From e5b614772e53253448c224bd2ea9b36429724bff Mon Sep 17 00:00:00 2001 From: Martin Date: Mon, 2 Dec 2019 09:28:06 -0500 Subject: [PATCH 025/212] comment --- tensornetwork/block_tensor/tutorial.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/tutorial.py b/tensornetwork/block_tensor/tutorial.py index fe824ee87..01e5eabf0 100644 --- a/tensornetwork/block_tensor/tutorial.py +++ b/tensornetwork/block_tensor/tutorial.py @@ -47,7 +47,7 @@ shape_A[3]]) #in place reshaping A.reshape(shape_A) #reshape back into original shape -sparse_shape = A.sparse_shape #returns a copy of `A.indices`. 
Each `Index` object is copied +sparse_shape = A.sparse_shape #returns a deep copy of `A.indices`. new_sparse_shape = [ sparse_shape[0] * sparse_shape[1], sparse_shape[2], sparse_shape[3] From eb91c7942a29dfa11fc5b62b5a8872354196f479 Mon Sep 17 00:00:00 2001 From: "martin.ganahl@gmail.com" Date: Mon, 2 Dec 2019 14:05:11 -0500 Subject: [PATCH 026/212] added new test function --- tensornetwork/block_tensor/block_tensor.py | 81 ++++++++++++++++++---- 1 file changed, 68 insertions(+), 13 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 225dacc45..7c6c2b499 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -40,11 +40,10 @@ def compute_num_nonzero(charges: List[np.ndarray], Compute the number of non-zero elements, given the meta-data of a symmetric tensor. Args: - charges: List of np.ndarray, one for each leg of the + charges: List of np.ndarray of int, one for each leg of the underlying tensor. Each np.ndarray `charges[leg]` - is of shape `(D[leg], Q)`. - The bond dimension `D[leg]` can vary on each leg, the number of - symmetries `Q` has to be the same for each leg. + is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. flows: A list of integers, one for each leg, with values `1` or `-1`, denoting the flow direction of the charges on each leg. `1` is inflowing, `-1` is outflowing @@ -98,9 +97,8 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray], given its meta-data. Args: charges: List of np.ndarray, one for each leg. - Each np.ndarray `charges[leg]` is of shape `(D[leg], Q)`. - The bond dimension `D[leg]` can vary on each leg, the number of - symmetries `Q` has to be the same for each leg. + Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. 
flows: A list of integers, one for each leg, with values `1` or `-1`, denoting the flow direction of the charges on each leg. `1` is inflowing, `-1` is outflowing @@ -149,9 +147,8 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, has to match the number of non-zero elements defined by `charges` and `flows` charges: List of np.ndarray, one for each leg. - Each np.ndarray `charges[leg]` is of shape `(D[leg], Q)`. - The bond dimension `D[leg]` can vary on each leg, the number of - symmetries `Q` has to be the same for each leg. + Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. flows: A list of integers, one for each leg, with values `1` or `-1`, denoting the flow direction of the charges on each leg. `1` is inflowing, `-1` is outflowing @@ -229,6 +226,64 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, return blocks +def retrieve_non_zero_diagonal_blocks_test(data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]] + ) -> Dict: + """ + Testing function, does the same as `retrieve_non_zero_diagonal_blocks`, + but should be faster + """ + + if len(charges) != 2: + raise ValueError("input has to be a two-dimensional symmetric matrix") + check_flows(flows) + if len(flows) != len(charges): + raise ValueError("`len(flows)` is different from `len(charges)`") + + #a 1d array of the net charges. 
+ net_charges = fuse_charges( + q1=charges[0], flow1=flows[0], q2=charges[1], flow2=flows[1]) + #a 1d array containing row charges added with zero column charges + #used to find the positions of the unique charges + tmp = fuse_charges( + q1=charges[0], + flow1=flows[0], + q2=np.zeros(charges[1].shape[0], dtype=charges[1].dtype), + flow2=1) + unique_charges = np.unique(charges[0] * flows[0]) + symmetric_indices = net_charges == 0 + charge_lookup = tmp[symmetric_indices] + blocks = {} + for c in unique_charges: + blocks[c] = data[charge_lookup == c] + return blocks + + +def compute_mapping_table(charges: List[np.ndarray], + flows: List[Union[bool, int]]) -> int: + """ + Compute a mapping table mapping the linear positions of the non-zero + elements to their multi-index label. + Args: + charges: List of np.ndarray of int, one for each leg of the + underlying tensor. Each np.ndarray `charges[leg]` + is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + Returns: + np.ndarray: An (N, r) np.ndarray of dtype np.int16, + with `N` the number of non-zero elements, and `r` + the rank of the tensor. + """ + tables = np.meshgrid([np.arange(c.shape[0]) for c in charges], indexing='ij') + tables = tables[::-1] #reverse the order + pass + + class BlockSparseTensor: """ Minimal class implementation of block sparsity. @@ -239,8 +294,9 @@ class BlockSparseTensor: * self.data: A 1d np.ndarray storing the underlying data of the tensor * self.charges: A list of `np.ndarray` of shape - (D, Q), where D is the bond dimension, and Q the number - of different symmetries (this is 1 for now). + (D,), where D is the bond dimension. Once we go beyond + a single U(1) symmetry, this has to be updated. + * self.flows: A list of integers of length `k`. 
`self.flows` determines the flows direction of charges on each leg of the tensor. A value of `-1` denotes @@ -368,7 +424,6 @@ def charges(self): def transpose(self, order): """ Transpose the tensor into the new order `order` - """ raise NotImplementedError('transpose is not implemented!!') From a544dbc719d10a85cd6646fd3aad5821a4dccf55 Mon Sep 17 00:00:00 2001 From: Martin Date: Mon, 2 Dec 2019 15:15:15 -0500 Subject: [PATCH 027/212] testing function hacking --- tensornetwork/block_tensor/block_tensor.py | 40 +++++++++++++--------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 7c6c2b499..2df966a77 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -226,13 +226,11 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, return blocks -def retrieve_non_zero_diagonal_blocks_test(data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]] - ) -> Dict: +def retrieve_non_zero_diagonal_blocks_test( + data: np.ndarray, charges: List[np.ndarray], + flows: List[Union[bool, int]]) -> Dict: """ - Testing function, does the same as `retrieve_non_zero_diagonal_blocks`, - but should be faster + Testing function, does the same as `retrieve_non_zero_diagonal_blocks`. """ if len(charges) != 2: @@ -241,22 +239,32 @@ def retrieve_non_zero_diagonal_blocks_test(data: np.ndarray, if len(flows) != len(charges): raise ValueError("`len(flows)` is different from `len(charges)`") + #get the unique charges + unique_row_charges, row_dims = np.unique( + flows[0] * charges[0], return_counts=True) + unique_column_charges, column_dims = np.unique( + flows[1] * charges[1], return_counts=True) + #a 1d array of the net charges. 
net_charges = fuse_charges( q1=charges[0], flow1=flows[0], q2=charges[1], flow2=flows[1]) #a 1d array containing row charges added with zero column charges - #used to find the positions of the unique charges - tmp = fuse_charges( - q1=charges[0], - flow1=flows[0], - q2=np.zeros(charges[1].shape[0], dtype=charges[1].dtype), - flow2=1) - unique_charges = np.unique(charges[0] * flows[0]) + #used to find the indices of in data corresponding to a given charge + #(see below) + tmp = np.tile(charges[0] * flows[0], len(charges[1])) + symmetric_indices = net_charges == 0 charge_lookup = tmp[symmetric_indices] + + row_degeneracies = dict(zip(unique_row_charges, row_dims)) + column_degeneracies = dict(zip(unique_column_charges, column_dims)) blocks = {} - for c in unique_charges: - blocks[c] = data[charge_lookup == c] + + common_charges = np.intersect1d(unique_row_charges, -unique_column_charges) + for c in common_charges: + blocks[c] = np.reshape(data[charge_lookup == c], + (row_degeneracies[c], column_degeneracies[-c])) + return blocks @@ -281,7 +289,7 @@ def compute_mapping_table(charges: List[np.ndarray], """ tables = np.meshgrid([np.arange(c.shape[0]) for c in charges], indexing='ij') tables = tables[::-1] #reverse the order - pass + raise NotImplementedError() class BlockSparseTensor: From 0457cca404c6a40ec3db7e52022139204bd67bca Mon Sep 17 00:00:00 2001 From: Martin Date: Mon, 2 Dec 2019 15:16:30 -0500 Subject: [PATCH 028/212] docstring --- tensornetwork/block_tensor/block_tensor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 2df966a77..4de358f2c 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -231,6 +231,7 @@ def retrieve_non_zero_diagonal_blocks_test( flows: List[Union[bool, int]]) -> Dict: """ Testing function, does the same as `retrieve_non_zero_diagonal_blocks`. 
+ This is very slow for high rank tensors with many blocks """ if len(charges) != 2: From 95958a740d387693bb65766f606c4ce6839bbd8e Mon Sep 17 00:00:00 2001 From: Martin Date: Tue, 3 Dec 2019 14:33:58 -0500 Subject: [PATCH 029/212] small speed up --- tensornetwork/block_tensor/block_tensor.py | 33 +++++++++++++--------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 4de358f2c..73e1063b0 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -193,33 +193,36 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, row_charges = flows[0] * charges[0] # a list of charges on each row column_charges = flows[1] * charges[1] # a list of charges on each column - # for each matrix column find the number of non-zero elements in it - # Note: the matrix is assumed to be symmetric, i.e. only elements where - # ingoing and outgoing charge are identical are non-zero - num_non_zero = [ - len(np.nonzero((row_charges + c) == 0)[0]) for c in column_charges - ] + #get the unique charges unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) unique_column_charges, column_dims = np.unique( column_charges, return_counts=True) + common_charges = np.intersect1d(flows[0] * unique_row_charges, + flows[1] * unique_column_charges) + + # for each matrix column find the number of non-zero elements in it + # Note: the matrix is assumed to be symmetric, i.e. 
only elements where + # ingoing and outgoing charge are identical are non-zero # get the degeneracies of each row and column charge row_degeneracies = dict(zip(unique_row_charges, row_dims)) column_degeneracies = dict(zip(unique_column_charges, column_dims)) blocks = {} - - for c in unique_row_charges: + #TODO: the nested loops could probably be easily moved to cython + for c in common_charges: start = 0 idxs = [] #TODO: this for loop can be replaced with something #more sophisticated (i.e. using numpy lookups and sums) for column in range(len(column_charges)): charge = column_charges[column] + if charge not in common_charges: + continue if (charge + c) != 0: - start += num_non_zero[column] + start += row_degeneracies[c] else: - idxs.extend(start + np.arange(num_non_zero[column])) + idxs.extend(start + np.arange(row_degeneracies[c])) if idxs: blocks[c] = np.reshape(data[np.asarray(idxs)], (row_degeneracies[c], column_degeneracies[-c])) @@ -230,10 +233,12 @@ def retrieve_non_zero_diagonal_blocks_test( data: np.ndarray, charges: List[np.ndarray], flows: List[Union[bool, int]]) -> Dict: """ - Testing function, does the same as `retrieve_non_zero_diagonal_blocks`. - This is very slow for high rank tensors with many blocks + For testing purposes. Produces the same output as `retrieve_non_zero_diagonal_blocks`, + but computes it in a different way. + This is currently very slow for high rank tensors with many blocks, but can be faster than + `retrieve_non_zero_diagonal_blocks` in certain other cases. + It's pretty memory heavy too. """ - if len(charges) != 2: raise ValueError("input has to be a two-dimensional symmetric matrix") check_flows(flows) @@ -247,11 +252,13 @@ def retrieve_non_zero_diagonal_blocks_test( flows[1] * charges[1], return_counts=True) #a 1d array of the net charges. 
+ #this can use a lot of memory net_charges = fuse_charges( q1=charges[0], flow1=flows[0], q2=charges[1], flow2=flows[1]) #a 1d array containing row charges added with zero column charges #used to find the indices of in data corresponding to a given charge #(see below) + #this can be very large tmp = np.tile(charges[0] * flows[0], len(charges[1])) symmetric_indices = net_charges == 0 From ac3d980dcfdd74da0fb1fe0499afaaa29fe0a9dc Mon Sep 17 00:00:00 2001 From: Cutter Coryell <14116109+coryell@users.noreply.github.com> Date: Tue, 3 Dec 2019 12:34:50 -0800 Subject: [PATCH 030/212] Remove gui directory (migrated to another repo) (#399) --- gui/README.md | 5 - gui/css/index.css | 78 ------- gui/index.html | 39 ---- gui/js/app.js | 46 ---- gui/js/edge.js | 88 ------- gui/js/initialState.js | 78 ------- gui/js/mixins.js | 80 ------- gui/js/node.js | 344 --------------------------- gui/js/output.js | 84 ------- gui/js/toolbar.js | 519 ----------------------------------------- gui/js/workspace.js | 182 --------------- 11 files changed, 1543 deletions(-) delete mode 100644 gui/README.md delete mode 100644 gui/css/index.css delete mode 100644 gui/index.html delete mode 100644 gui/js/app.js delete mode 100644 gui/js/edge.js delete mode 100644 gui/js/initialState.js delete mode 100644 gui/js/mixins.js delete mode 100644 gui/js/node.js delete mode 100644 gui/js/output.js delete mode 100644 gui/js/toolbar.js delete mode 100644 gui/js/workspace.js diff --git a/gui/README.md b/gui/README.md deleted file mode 100644 index 45c410a62..000000000 --- a/gui/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# TensorNetwork GUI - -⚠️ **UNDER CONSTRUCTION** 🏗️ - -A graphical interface for defining tensor networks. Compiles to TensorNetwork Python code. 
diff --git a/gui/css/index.css b/gui/css/index.css deleted file mode 100644 index 1dcce637d..000000000 --- a/gui/css/index.css +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2019 The TensorNetwork Authors -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -.app { - display: flex; - flex-direction: row; - font: normal 15px sans-serif; -} - -svg.workspace { - float: left; - background-color: #f9f9f9; -} - -svg.workspace .drag-selector { - stroke: #fff; - stroke-width: 2; - fill: rgba(200, 200, 200, 0.5); -} - -a.export { - position: absolute; -} - -svg text { - user-select: none; - -moz-user-select: none; - -ms-user-select: none; - -webkit-user-select: none; -} - -.toolbar { - width: 300px; - background-color: #fff; - box-shadow: 0 1px 3px rgba(0,0,0,0.1), 0 1px 2px rgba(0,0,0,0.2); -} - -section { - padding: 10px 20px; - border-bottom: 1px solid #ddd; -} - -.tensor-creator .svg-container { - height: 200px; -} - -.delete { - text-align: right; - float: right; - color: darkred; -} - -.button-holder { - padding: 20px 0; -} - -.code-output { - position: absolute; - top: 600px; - width: 900px; - padding: 10px; -} - -label { - padding: 10px; -} \ No newline at end of file diff --git a/gui/index.html b/gui/index.html deleted file mode 100644 index f235bcc6a..000000000 --- a/gui/index.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - - - - - - - - - - - - - - - TensorNetwork GUI - - - - -
- - - - - - - - - - - - - - diff --git a/gui/js/app.js b/gui/js/app.js deleted file mode 100644 index 71c8dc85b..000000000 --- a/gui/js/app.js +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2019 The TensorNetwork Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -let app = new Vue({ - el: '#app', - data: { - state: initialState // now state object is reactive, whereas initialState is not - }, - methods: { - exportSVG: function(event) { - event.preventDefault(); - let serializer = new XMLSerializer(); - let workspace = document.getElementById('workspace'); - let blob = new Blob([serializer.serializeToString(workspace)], {type:"image/svg+xml;charset=utf-8"}); - let url = URL.createObjectURL(blob); - let link = document.createElement('a'); - link.href = url; - link.download = "export.svg"; - document.body.appendChild(link); - link.click(); - document.body.removeChild(link); - } - }, - template: ` -
-
- - Export SVG - -
- -
- - ` -}); diff --git a/gui/js/edge.js b/gui/js/edge.js deleted file mode 100644 index 1e41f6189..000000000 --- a/gui/js/edge.js +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2019 The TensorNetwork Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -Vue.component( - 'edge', - { - mixins: [mixinGet, mixinGeometry], - props: { - edge: Array, - state: Object - }, - computed: { - node1: function() { - return this.getNode(this.edge[0][0]); - }, - node2: function() { - return this.getNode(this.edge[1][0]); - }, - angle1: function() { - return this.node1.axes[this.edge[0][1]].angle; - }, - angle2: function() { - return this.node2.axes[this.edge[1][1]].angle; - }, - x1: function() { - return this.node1.position.x + this.getAxisPoints(this.node1.axes[this.edge[0][1]].position, this.angle1, this.node1.rotation).x2; - }, - y1: function() { - return this.node1.position.y + this.getAxisPoints(this.node1.axes[this.edge[0][1]].position, this.angle1, this.node1.rotation).y2; - }, - x2: function() { - return this.node2.position.x + this.getAxisPoints(this.node2.axes[this.edge[1][1]].position, this.angle2, this.node2.rotation).x2; - }, - y2: function() { - return this.node2.position.y + this.getAxisPoints(this.node2.axes[this.edge[1][1]].position, this.angle2, this.node2.rotation).y2; - } - }, - template: ` - - - - {{edge[2]}} - - - ` - } -); - -Vue.component( - 'proto-edge', - { - mixins: [mixinGeometry], - props: { - x: Number, - y: Number, - node: Object, - axis: 
Number, - }, - computed: { - angle: function() { - return this.node.axes[this.axis].angle; - }, - x0: function() { - return this.node.position.x + this.getAxisPoints(this.node.axes[this.axis].position, this.angle, this.node.rotation).x2; - }, - y0: function() { - return this.node.position.y + this.getAxisPoints(this.node.axes[this.axis].position, this.angle, this.node.rotation).y2; - } - }, - template: ` - - ` - } -); diff --git a/gui/js/initialState.js b/gui/js/initialState.js deleted file mode 100644 index 1bca1533b..000000000 --- a/gui/js/initialState.js +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2019 The TensorNetwork Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -let initialState = { - renderLaTeX: true, - selectedNodes: [], - draggingNode: false, - nodes: [ - { - name: 't1', - displayName: 't_1', - size: [1, 1], - axes: [ - {name: null, angle: 0, position: [0, 0]}, - {name: null, angle: Math.PI / 2, position: [0, 0]}, - {name: null, angle: Math.PI, position: [0, 0]}, - ], - position: {x: 200, y: 300}, - rotation: 0, - hue: 30 - }, - { - name: 't2', - displayName: 't_2', - size: [1, 1], - axes: [ - {name: null, angle: 0, position: [0, 0]}, - {name: null, angle: Math.PI / 2, position: [0, 0]}, - {name: null, angle: Math.PI, position: [0, 0]}, - ], - position: {x: 367, y: 300}, - rotation: 0, - hue: 30 - }, - { - name: 't3', - displayName: 't_3', - size: [1, 1], - axes: [ - {name: null, angle: 0, position: [0, 0]}, - {name: null, angle: Math.PI / 2, position: [0, 0]}, - {name: null, angle: Math.PI, position: [0, 0]}, - ], - position: {x: 533, y: 300}, - rotation: 0, - hue: 30 - }, - { - name: 't4', - displayName: 't_4', - size: [1, 1], - axes: [ - {name: null, angle: 0, position: [0, 0]}, - {name: null, angle: Math.PI / 2, position: [0, 0]}, - {name: null, angle: Math.PI, position: [0, 0]}, - ], - position: {x: 700, y: 300}, - rotation: 0, - hue: 30 - } - ], - edges: [ - [['t1', 0], ['t2', 2], null], - [['t2', 0], ['t3', 2], null], - [['t3', 0], ['t4', 2], null], - ] -}; diff --git a/gui/js/mixins.js b/gui/js/mixins.js deleted file mode 100644 index 8bf36ecd0..000000000 --- a/gui/js/mixins.js +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2019 The TensorNetwork Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -let mixinGet = { - methods: { - getNeighborsOf: function(name) { - let neighbors = []; - let edges = this.state.edges; - for (let i = 0; i < edges.length; i++) { - let edge = edges[i]; - if (edge[0][0] === name) { - neighbors.push({ - axis: edge[0][1], - neighbor: edge[1], - edgeName: edge[2] - }); - } - if (edge[1][0] === name) { - neighbors.push({ - axis: edge[1][1], - neighbor: edge[0], - edgeName: edge[2] - }); - } - } - return neighbors; - }, - getNode: function(name) { - for (let i = 0; i < this.state.nodes.length; i++) { - if (this.state.nodes[i].name === name) { - return this.state.nodes[i]; - } - } - return null; - }, - getAxis: function(address) { - let [nodeName, axisIndex] = address; - let node = this.getNode(nodeName); - return node.axes[axisIndex]; - }, - } -}; - -let mixinGeometry = { - data: function() { - return { - axisLength: 50, - baseNodeWidth: 50, - nodeCornerRadius: 10, - axisLabelRadius: 1.2 - } - }, - methods: { - getAxisPoints: function (position, angle, rotation) { - let x0 = position[0] * this.baseNodeWidth; - let y0 = position[1] * this.baseNodeWidth; - let x1 = Math.cos(rotation) * x0 - Math.sin(rotation) * y0; - let y1 = Math.sin(rotation) * x0 + Math.cos(rotation) * y0; - let x2 = x1 + this.axisLength * Math.cos(angle + rotation); - let y2 = y1 + this.axisLength * Math.sin(angle + rotation); - return { - x1: x1, - y1: y1, - x2: x2, - y2: y2 - } - }, - } -}; \ No newline at end of file diff --git a/gui/js/node.js b/gui/js/node.js deleted file mode 100644 index 588eae7a6..000000000 --- a/gui/js/node.js +++ /dev/null @@ -1,344 +0,0 @@ -// Copyright 2019 The TensorNetwork Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -let mixinNode = { - props: { - node: Object, - state: Object, - }, - methods: { - neighborAt: function(axis) { - for (let i = 0; i < this.neighbors.length; i++) { - if (this.neighbors[i].axis === axis) { - return this.neighbors[i].neighbor; - } - } - return null; - }, - edgeNameAt: function(axis) { - for (let i = 0; i < this.neighbors.length; i++) { - if (this.neighbors[i].axis === axis) { - return this.neighbors[i].edgeName; - } - } - return null; - } - }, - computed: { - neighbors: function() { - return this.getNeighborsOf(this.node.name); - } - }, -}; - -Vue.component( - 'node', - { - mixins: [mixinGet, mixinGeometry, mixinNode], - props: { - disableDragging: { - type: Boolean, - default: false - }, - shadow: { - type: Boolean, - default: false - } - }, - data: function() { - return { - mouse: { - x: null, - y: null - }, - render: true, - labelOpacity: 1 - } - }, - mounted: function() { - window.MathJax.typeset(); - }, - watch: { - 'node.displayName': function() { - if (!this.renderLaTeX) { - return; - } - // Ugly race condition to make sure MathJax renders, and old renders are discarded - this.render = false; - this.labelOpacity = 0; - if (this.renderTimeout) { - clearTimeout(this.renderTimeout); - } - let t = this; - this.renderTimeout = setTimeout(function() { - t.render = true; - }, 50); - this.renderTimeout = setTimeout(function() { - window.MathJax.typeset(); - t.labelOpacity = 1; - }, 100); - } - }, - methods: { - onMouseDown: function(event) { - event.stopPropagation(); - if (this.disableDragging) { - return; - } - - if 
(!this.state.selectedNodes.includes(this.node)) { - if (event.shiftKey) { - this.state.selectedNodes.push(this.node); - } - else { - this.state.selectedNodes = [this.node]; - } - } - else { - if (event.shiftKey) { - let t = this; - this.state.selectedNodes = this.state.selectedNodes.filter(function(node) { - return node !== t.node; - }); - } - } - - document.addEventListener('mousemove', this.onMouseMove); - document.addEventListener('mouseup', this.onMouseUp); - this.state.draggingNode = true; - - this.mouse.x = event.pageX; - this.mouse.y = event.pageY; - }, - onMouseMove: function(event) { - let dx = event.pageX - this.mouse.x; - let dy = event.pageY - this.mouse.y; - this.mouse.x = event.pageX; - this.mouse.y = event.pageY; - this.state.selectedNodes.forEach(function(node) { - node.position.x += dx; - node.position.y += dy; - }); - }, - onMouseUp: function() { - document.removeEventListener('mousemove', this.onMouseMove); - document.removeEventListener('mouseup', this.onMouseUp); - - this.state.draggingNode = false; - - let workspace = document.getElementById('workspace').getBoundingClientRect(); - let t = this; - this.state.selectedNodes.forEach(function(node) { - if (node.position.x < t.baseNodeWidth / 2) { - node.position.x = t.baseNodeWidth / 2; - } - if (node.position.y < t.baseNodeWidth / 2) { - node.position.y = t.baseNodeWidth / 2; - } - if (node.position.x > workspace.width - t.baseNodeWidth / 2) { - node.position.x = workspace.width - t.baseNodeWidth / 2; - } - if (node.position.y > workspace.height - t.baseNodeWidth / 2) { - node.position.y = workspace.height - t.baseNodeWidth / 2; - } - }); - }, - onAxisMouseDown: function(axis) { - this.$emit('axismousedown', axis); - }, - onAxisMouseUp: function(axis) { - this.$emit('axismouseup', axis); - } - }, - computed: { - nodeWidth: function() { - return this.baseNodeWidth * Math.min(this.node.size[0], 3); - }, - nodeHeight: function() { - return this.baseNodeWidth * Math.min(this.node.size[1], 3); - }, - 
translation: function() { - return 'translate(' + this.node.position.x + ' ' + this.node.position.y + ')'; - }, - rotation: function() { - return 'rotate(' + (this.node.rotation * 180 / Math.PI) + ')'; - }, - brightness: function() { - if (this.state.selectedNodes.includes(this.node)) { - return 50; - } - else { - return 80; - } - }, - style: function() { - if (this.shadow) { - return 'fill: #ddd'; - } - else { - return 'fill: hsl(' + this.node.hue + ', 80%, ' + this.brightness + '%);'; - } - }, - renderLaTeX: function() { - return this.state.renderLaTeX && window.MathJax; - }, - label: function() { - return '\\(\\displaystyle{' + this.node.displayName + '}\\)'; - }, - labelStyle: function() { - return 'opacity: ' + this.labelOpacity + ';'; - } - }, - created: function() { - if (this.node.hue == null) { - this.node.hue = Math.random() * 360; - } - }, - template: ` - - - - - {{node.name}} - - -
- {{label}} -
-
-
- ` - } -); - -Vue.component( - 'axis', - { - mixins: [mixinGet, mixinGeometry, mixinNode], - props: { - node: Object, - index: Number, - state: Object, - shadow: { - type: Boolean, - default: false - }, - }, - data: function() { - return { - dragging: false, - highlighted: false - } - }, - methods: { - onMouseDown: function(event) { - event.stopPropagation(); - this.$emit('axismousedown'); - this.dragging = true; - document.addEventListener('mouseup', this.onDragEnd); - }, - onMouseUp: function() { - this.$emit('axismouseup'); - }, - onDragEnd: function() { - this.dragging = false; - document.removeEventListener('mouseup', this.onDragEnd); - }, - onMouseEnter: function() { - if (this.state.draggingNode) { - return; - } - if (this.neighborAt(this.index) != null) { - return; // don't highlight an axis that is occupied - } - if (this.dragging) { - return; // don't highlight self if self is being dragged - } - this.highlighted = true; - }, - onMouseLeave: function() { - this.highlighted = false; - }, - }, - computed: { - axisPoints: function() { - return this.getAxisPoints(this.node.axes[this.index].position, this.node.axes[this.index].angle, - this.node.rotation) - }, - x1: function() { - return this.axisPoints.x1; - }, - y1: function() { - return this.axisPoints.y1; - }, - x2: function() { - return this.axisPoints.x2; - }, - y2: function() { - return this.axisPoints.y2; - }, - brightness: function() { - return this.highlighted ? 50 : 80; - }, - stroke: function() { - if (this.shadow) { - return this.highlighted ? '#bbb' : '#ddd'; - } - else { - return 'hsl(' + this.node.hue + ', 80%, ' + this.brightness + '%)'; - } - }, - }, - template: ` - - - - {{index}} - - {{node.axes[index].name}} - - - ` - } -); - -Vue.component( - 'node-description', - { - mixins: [mixinGet, mixinNode], - template: ` -

Node {{node.name}} has {{node.axes.length}} axes: -

    -
  • - Axis {{i}} ({{axisName}}) - is connected to axis {{neighborAt(i)[1]}} - ({{getAxis(neighborAt(i))}}) - of node {{getNode(neighborAt(i)[0]).name}} - by edge "{{edgeNameAt(i)}}" - - is free -
  • -
-

- ` - } -); diff --git a/gui/js/output.js b/gui/js/output.js deleted file mode 100644 index 6347db6d1..000000000 --- a/gui/js/output.js +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2019 The TensorNetwork Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -Vue.component( - 'code-output', - { - props: { - state: Object - }, - computed: { - outputCode: function() { - let code = `import numpy as np\nimport tensornetwork as tn\n`; - - code += `\n# Node definitions\n`; - code += `# TODO: replace np.zeros with actual values\n\n`; - - for (let i = 0; i < this.state.nodes.length; i++) { - let node = this.state.nodes[i]; - let values = this.placeholderValues(node); - let axes = this.axisNames(node); - code += `${node.name} = tn.Node(${values}, name="${node.name}"${axes})\n`; - } - - code += `\n# Edge definitions\n\n`; - - for (let i = 0; i < this.state.edges.length; i++) { - let edge = this.state.edges[i]; - let name = this.edgeName(edge); - code += `tn.connect(${edge[0][0]}[${edge[0][1]}], ${edge[1][0]}[${edge[1][1]}]${name})\n`; - } - - return code; - } - }, - methods: { - placeholderValues: function(node) { - let code = `np.zeros((`; - for (let i = 0; i < node.axes.length; i++) { - code += `0, `; - } - code += `))`; - return code; - }, - axisNames: function(node) { - let code = `, axis_names=[`; - let willOutput = false; - for (let i = 0; i < node.axes.length; i++) { - let axis = node.axes[i].name; - if (axis) { - willOutput = true; - code += `"${axis}", ` 
- } - else { - code += `None, ` - } - } - code += `]`; - return willOutput ? code : ``; - }, - edgeName: function(edge) { - let name = edge[2]; - return name ? `, name="${name}"` : ``; - } - }, - template: ` -
-

TensorNetwork Output

-
{{outputCode}}
-
- ` - } -); - diff --git a/gui/js/toolbar.js b/gui/js/toolbar.js deleted file mode 100644 index 823ffe150..000000000 --- a/gui/js/toolbar.js +++ /dev/null @@ -1,519 +0,0 @@ -// Copyright 2019 The TensorNetwork Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -Vue.component( - 'toolbar', - { - props: { - state: Object - }, - data: function() { - return { - copyNodeName: '', - } - }, - methods: { - deselectNode: function() { - this.state.selectedNodes = []; - }, - deleteNode: function(event) { - event.preventDefault(); - let selectedName = this.state.selectedNodes[0].name; - - this.state.edges = this.state.edges.filter(function(edge) { - if (edge[0][0] === selectedName || edge[1][0] === selectedName) { - return false; - } - else { - return true; - } - }); - this.state.nodes = this.state.nodes.filter(function(node) { - return node.name !== selectedName; - }); - this.selectedNodes = []; - }, - copyNode: function(event) { - event.preventDefault(); - let workspace = document.getElementById('workspace').getBoundingClientRect(); - - let node = JSON.parse(JSON.stringify(this.node)); - node.name = this.copyNodeName; - node.position = {x: workspace.width / 2, y: workspace.height / 2}; - - this.state.nodes.push(node); - this.state.selectedNodes = [node]; - this.copyNodeName = ''; - }, - rotate: function(angle) { - this.node.rotation += angle; - } - }, - computed: { - node: function() { - return this.state.selectedNodes[0]; - }, - copyNodeDisabled: function() { 
- return this.nameTaken || this.copyNodeName == null || this.copyNodeName === ''; - }, - nameTaken: function() { - for (let i = 0; i < this.state.nodes.length; i++) { - if (this.copyNodeName === this.state.nodes[i].name) { - return true; - } - } - return false; - } - }, - template: ` -
-
- -
-

Selecting nodes

-

Click a node to select it for editing.

-

Drag-select or shift-click multiple nodes to drag as a group and adjust alignment and - spacing.

-
-
-
-
-
- -
-
-
-
- delete -

Node: {{node.name}}

-
-

Set LaTeX Label

- -

Copy Node

-
- - -
-

Rotate

- - -
- - -
-
- -
-
- ` - } -); - -Vue.component( - 'tensor-creator', - { - mixins: [mixinGeometry], - props: { - state: Object - }, - data: function() { - return { - size1: 1, - size2: 1, - hue: 0, - node: {}, - width: 250, - height: 250, - dragSelector: { - dragging: false, - startX: null, - startY: null, - endX: null, - endY: null - }, - }; - }, - created: function() { - this.reset(); - }, - watch: { - size1: function() { - this.reset(); - }, - size2: function() { - this.reset(); - }, - hue: function() { - this.node.hue = parseFloat(this.hue); - } - }, - methods: { - reset: function () { - this.node = JSON.parse(JSON.stringify(this.nodeInitial)); - }, - createNode: function (event) { - event.preventDefault(); - let workspace = document.getElementById('workspace').getBoundingClientRect(); - - this.node.position = {x: workspace.width / 2, y: workspace.height / 2}; - - this.state.nodes.push(this.node); - this.reset(); - }, - onShadowAxisMouseDown: function (node, axis) { - let candidateAxis = this.nodeShadow.axes[axis]; - for (let j = 0; j < this.node.axes.length; j++) { - let existingAxis = this.node.axes[j]; - if (candidateAxis.angle === existingAxis.angle - && candidateAxis.position[0] === existingAxis.position[0] - && candidateAxis.position[1] === existingAxis.position[1]) { - return; - } - } - this.node.axes.push(JSON.parse(JSON.stringify(candidateAxis))); - }, - onNodeAxisMouseDown: function (node, axis) { - this.node.axes.splice(axis, 1); - }, - axes: function (size1, size2) { - let makeAxis = function (direction, position) { - return {name: null, angle: direction * Math.PI / 4, position: position}; - }; - let output = []; - - let x = function(n) { - let x_end = Math.min((size1 - 1) / 2, 1); - return size1 !== 1 ? (-x_end * (size1 - 1 - n) + x_end * n) / (size1 - 1) : 0; // Avoid div by 0 - }; - let y = function(m) { - let y_end = Math.min((size2 - 1) / 2, 1); - return size2 !== 1 ? 
(-y_end * (size2 - 1 - m) + y_end * m) / (size2 - 1) : 0; - }; - - let n = 0; - let m = 0; - output.push(makeAxis(5, [x(n), y(m)])); - - for (n = 0; n < size1; n++) { - output.push(makeAxis(6, [x(n), y(m)])); - } - n = size1 - 1; - - output.push(makeAxis(7, [x(n), y(m)])); - - for (m = 0; m < size2; m++) { - output.push(makeAxis(0, [x(n), y(m)])); - } - m = size2 - 1; - - output.push(makeAxis(1, [x(n), y(m)])); - - for (n = size1 - 1; n >= 0; n--) { - output.push(makeAxis(2, [x(n), y(m)])) - } - n = 0; - - output.push(makeAxis(3, [x(n), y(m)])); - - for (m = size2 - 1; m >= 0; m--) { - output.push(makeAxis(4, [x(n), y(m)])); - } - - return output; - }, - onMouseDown: function (event) { - document.addEventListener('mousemove', this.onMouseMove); - document.addEventListener('mouseup', this.onMouseUp); - - let workspace = document.getElementById('tensor-creator-workspace').getBoundingClientRect(); - - this.dragSelector.dragging = true; - this.dragSelector.startX = event.pageX - workspace.left; - this.dragSelector.startY = event.pageY - workspace.top; - this.dragSelector.endX = event.pageX - workspace.left; - this.dragSelector.endY = event.pageY - workspace.top; - }, - onMouseMove: function (event) { - let workspace = document.getElementById('tensor-creator-workspace').getBoundingClientRect(); - - this.dragSelector.endX = event.pageX - workspace.left; - this.dragSelector.endY = event.pageY - workspace.top; - }, - onMouseUp: function () { - document.removeEventListener('mousemove', this.onMouseMove); - document.removeEventListener('mouseup', this.onMouseUp); - - this.dragSelector.dragging = false; - - let x1 = this.dragSelector.startX; - let x2 = this.dragSelector.endX; - let y1 = this.dragSelector.startY; - let y2 = this.dragSelector.endY; - - for (let i = 0; i < this.nodeShadow.axes.length; i++) { - let axis = this.nodeShadow.axes[i]; - let duplicate = false; - for (let j = 0; j < this.node.axes.length; j++) { - let existingAxis = this.node.axes[j]; - if (axis.angle 
=== existingAxis.angle && axis.position[0] === existingAxis.position[0] - && axis.position[1] === existingAxis.position[1]) { - duplicate = true; - break - } - } - if (duplicate) { - continue; - } - let axisPoints = this.getAxisPoints(axis.position, axis.angle, 0); - let axisX = this.nodeShadow.position.x + axisPoints.x2; - let axisY = this.nodeShadow.position.y + axisPoints.y2; - if ((x1 <= axisX && axisX <= x2) || (x2 <= axisX && axisX <= x1)) { - if ((y1 <= axisY && axisY <= y2) || (y2 <= axisY && axisY <= y1)) { - this.node.axes.push(JSON.parse(JSON.stringify(axis))); - } - } - } - } - }, - computed: { - createNodeDisabled: function() { - return this.nameTaken || this.node.name == null || this.node.name === ''; - }, - nameTaken: function() { - for (let i = 0; i < this.state.nodes.length; i++) { - if (this.node.name === this.state.nodes[i].name) { - return true; - } - } - return false; - }, - nodeInitial: function() { - return { - name: "", - size: [parseFloat(this.size1), parseFloat(this.size2)], - axes: [], - position: {x: 125, y: 125}, - rotation: 0, - hue: parseFloat(this.hue) - }; - }, - nodeShadow: function() { - return { - name: "", - size: [parseFloat(this.size1), parseFloat(this.size2)], - axes: this.axes(parseFloat(this.size1), parseFloat(this.size2)), - position: {x: 125, y: 125}, - rotation: 0, - hue: null - }; - }, - renderLaTeX: function() { - return this.state.renderLaTeX && window.MathJax; - - } - }, - template: ` -
-

Create New Node

-

Click on an axis to add or remove it.

-
- - - - - -
- - - - - - - -
- - -
-
-
- - - -
-
-
- - ` - } -); - -Vue.component( - 'toolbar-edge-section', - { - props: { - state: Object - }, - methods: { - deleteEdge: function(event, edge) { - event.preventDefault(); - this.state.edges = this.state.edges.filter(function(candidate) { - return candidate !== edge; - }); - } - }, - computed: { - node: function() { - return this.state.selectedNodes[0]; - } - }, - template: ` -
-

Edges

-
-
-
- delete -

{{edge[0][0]}}[{{edge[0][1]}}] to {{edge[1][0]}}[{{edge[1][1]}}]

-
- - -
-
-
- ` - } -); - -Vue.component( - 'toolbar-axis-section', - { - props: { - state: Object - }, - computed: { - node: function() { - return this.state.selectedNodes[0]; - } - }, - template: ` -
-

Axes

-
-
-

{{node.name}}[{{index}}]

-
- - -
-
- ` - } -); - -Vue.component( - 'toolbar-multinode-section', - { - props: { - state: Object - }, - data: function() { - return { - alignmentY: null, - alignmentX: null, - spacingY: null, - spacingX: null - } - }, - created: function() { - this.alignmentY = this.state.selectedNodes[0].position.y; - this.alignmentX = this.state.selectedNodes[0].position.x; - this.spacingY = this.state.selectedNodes[1].position.y - this.state.selectedNodes[0].position.y; - this.spacingX = this.state.selectedNodes[1].position.x - this.state.selectedNodes[0].position.x; - }, - methods: { - alignVertically: function(event) { - event.preventDefault(); - for (let i = 0; i < this.state.selectedNodes.length; i++) { - this.state.selectedNodes[i].position.y = parseFloat(this.alignmentY); - } - }, - alignHorizontally: function(event) { - event.preventDefault(); - for (let i = 0; i < this.state.selectedNodes.length; i++) { - this.state.selectedNodes[i].position.x = parseFloat(this.alignmentX); - } - }, - spaceVertically: function(event) { - event.preventDefault(); - let baseline = this.state.selectedNodes[0].position.y; - for (let i = 1; i < this.state.selectedNodes.length; i++) { - this.state.selectedNodes[i].position.y = baseline + i * parseFloat(this.spacingY); - } - }, - spaceHorizontally: function(event) { - event.preventDefault(); - let baseline = this.state.selectedNodes[0].position.x; - for (let i = 1; i < this.state.selectedNodes.length; i++) { - this.state.selectedNodes[i].position.x = baseline + i * parseFloat(this.spacingX); - } - }, - disabledFor: function(length) { - return length == null || length == "" || isNaN(parseFloat(length)); - } - }, - template: ` -
-
-

Multiple Nodes

-
-

{{node.name}} - x: {{node.position.x}}, y: {{node.position.y}}

-
- Shift-click a node in the workspace to deselect it. -
-
-

Align Vertically

-
- - -
-
-
-

Align Horizontally

-
- - -
-
-
-

Space Vertically

-
- - -
-
-
-

Space Horizontally

-
- - -
-
-
- ` - } -); diff --git a/gui/js/workspace.js b/gui/js/workspace.js deleted file mode 100644 index dea17bbca..000000000 --- a/gui/js/workspace.js +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2019 The TensorNetwork Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -Vue.component( - 'workspace', - { - props: { - state: Object - }, - data: function() { - return { - width: 900, - height: 600, - dragSelector: { - dragging: false, - startX: null, - startY: null, - endX: null, - endY: null - }, - protoEdge: { - x: null, - y: null, - node: null, - axis: null, - dragging: false - } - }; - }, - methods: { - onMouseDown: function(event) { - this.state.selectedNodes = []; - - document.addEventListener('mousemove', this.onMouseMove); - document.addEventListener('mouseup', this.onMouseUp); - - let workspace = document.getElementById('workspace').getBoundingClientRect(); - - this.dragSelector.dragging = true; - this.dragSelector.startX = event.pageX - workspace.left; - this.dragSelector.startY = event.pageY - workspace.top; - this.dragSelector.endX = event.pageX - workspace.left; - this.dragSelector.endY = event.pageY - workspace.top; - }, - onMouseMove: function(event) { - let workspace = document.getElementById('workspace').getBoundingClientRect(); - - this.dragSelector.endX = event.pageX - workspace.left; - this.dragSelector.endY = event.pageY - workspace.top; - }, - onMouseUp: function() { - document.removeEventListener('mousemove', this.onMouseMove); - 
document.removeEventListener('mouseup', this.onMouseUp); - - this.dragSelector.dragging = false; - - let x1 = this.dragSelector.startX; - let x2 = this.dragSelector.endX; - let y1 = this.dragSelector.startY; - let y2 = this.dragSelector.endY; - - this.state.selectedNodes = []; - let selected = this.state.selectedNodes; - this.state.nodes.forEach(function(node) { - let x = node.position.x; - let y = node.position.y; - if ((x1 <= x && x <= x2) || (x2 <= x && x <= x1)) { - if ((y1 <= y && y <= y2) || (y2 <= y && y <= y1)) { - selected.push(node); - } - } - }); - this.state.selectedNodes.sort(function(node1, node2) { - let distance1 = (node1.position.x - x1) ** 2 + (node1.position.y - y1) ** 2; - let distance2 = (node2.position.x - x1) ** 2 + (node2.position.y - y1) ** 2; - return distance1 - distance2; - }) - }, - onAxisMouseDown: function(node, axis) { - if (this.axisOccupied(node, axis)) { - return; - } - document.addEventListener('mousemove', this.dragAxis); - document.addEventListener('mouseup', this.releaseAxisDrag); - this.protoEdge.node = node; - this.protoEdge.axis = axis; - }, - dragAxis: function(event) { - let workspace = document.getElementById('workspace').getBoundingClientRect(); - this.protoEdge.dragging = true; - this.protoEdge.x = event.clientX - workspace.left; - this.protoEdge.y = event.clientY - workspace.top; - }, - releaseAxisDrag: function() { - document.removeEventListener('mousemove', this.dragAxis); - document.removeEventListener('mouseup', this.releaseAxisDrag); - this.protoEdge.dragging = false; - this.protoEdge.node = null; - this.protoEdge.axis = null; - }, - onAxisMouseUp: function(node, axis) { - if (this.protoEdge.dragging) { - if (this.axisOccupied(node, axis)) { - return; - } - if (this.protoEdge.node.name === node.name - && this.protoEdge.axis === axis) { - return; // don't allow connection of an axis to itself - } - this.state.edges.push([ - [this.protoEdge.node.name, this.protoEdge.axis], - [node.name, axis], - null - ]) - } - }, 
- axisOccupied: function(node, axis) { - for (let i = 0; i < this.state.edges.length; i++) { - let edge = this.state.edges[i]; - if ((node.name === edge[0][0] && axis === edge[0][1]) - || (node.name === edge[1][0] && axis === edge[1][1])) { - return true; - } - } - return false; - } - }, - template: ` - - - - - - - ` - } -); - -Vue.component( - 'drag-selector', - { - props: { - startX: Number, - startY: Number, - endX: Number, - endY: Number, - }, - computed: { - x: function() { - return Math.min(this.startX, this.endX); - }, - y: function() { - return Math.min(this.startY, this.endY); - }, - width: function() { - return Math.abs(this.startX - this.endX); - }, - height: function() { - return Math.abs(this.startY - this.endY); - } - }, - template: ` - - ` - } -); From 5d2d2bad0a0162fd6f3350776a923e6559d23e4c Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 6 Dec 2019 21:53:08 -0500 Subject: [PATCH 031/212] a slightly more elegant code --- tensornetwork/block_tensor/block_tensor.py | 35 +++++++++++----------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 73e1063b0..d51c73dad 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -198,8 +198,7 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) unique_column_charges, column_dims = np.unique( column_charges, return_counts=True) - common_charges = np.intersect1d(flows[0] * unique_row_charges, - flows[1] * unique_column_charges) + common_charges = np.intersect1d(unique_row_charges, -unique_column_charges) # for each matrix column find the number of non-zero elements in it # Note: the matrix is assumed to be symmetric, i.e. 
only elements where @@ -209,23 +208,23 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, row_degeneracies = dict(zip(unique_row_charges, row_dims)) column_degeneracies = dict(zip(unique_column_charges, column_dims)) blocks = {} - #TODO: the nested loops could probably be easily moved to cython - for c in common_charges: - start = 0 - idxs = [] + + number_of_seen_elements = 0 + idxs = {c: [] for c in common_charges} + for column in range(len(column_charges)): #TODO: this for loop can be replaced with something - #more sophisticated (i.e. using numpy lookups and sums) - for column in range(len(column_charges)): - charge = column_charges[column] - if charge not in common_charges: - continue - if (charge + c) != 0: - start += row_degeneracies[c] - else: - idxs.extend(start + np.arange(row_degeneracies[c])) - if idxs: - blocks[c] = np.reshape(data[np.asarray(idxs)], - (row_degeneracies[c], column_degeneracies[-c])) + #more sophisticated (if.e. using numpy lookups and sums) + charge = column_charges[column] + if -charge not in common_charges: + continue + + idxs[-charge].extend(number_of_seen_elements + + np.arange(row_degeneracies[-charge])) + number_of_seen_elements += row_degeneracies[-charge] + + for c, idx in idxs.items(): + blocks[c] = np.reshape(data[np.asarray(idx)], + (row_degeneracies[c], column_degeneracies[-c])) return blocks From 04eadf377be532ee50dd6751748f9cf0e0e38666 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 6 Dec 2019 22:14:44 -0500 Subject: [PATCH 032/212] use one more np function --- tensornetwork/block_tensor/block_tensor.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index d51c73dad..803c9ce01 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -198,7 +198,9 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, unique_row_charges, row_dims = 
np.unique(row_charges, return_counts=True) unique_column_charges, column_dims = np.unique( column_charges, return_counts=True) - common_charges = np.intersect1d(unique_row_charges, -unique_column_charges) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + #common_charges = np.intersect1d(row_charges, -column_charges) # for each matrix column find the number of non-zero elements in it # Note: the matrix is assumed to be symmetric, i.e. only elements where @@ -211,13 +213,9 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, number_of_seen_elements = 0 idxs = {c: [] for c in common_charges} - for column in range(len(column_charges)): - #TODO: this for loop can be replaced with something - #more sophisticated (if.e. using numpy lookups and sums) - charge = column_charges[column] - if -charge not in common_charges: - continue - + mask = np.isin(column_charges, -common_charges) + #TODO: move this for loop to cython + for charge in column_charges[mask]: idxs[-charge].extend(number_of_seen_elements + np.arange(row_degeneracies[-charge])) number_of_seen_elements += row_degeneracies[-charge] From 2ea5674e426bec8d753fe1551e9f2facb642b0a1 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 6 Dec 2019 22:30:30 -0500 Subject: [PATCH 033/212] removed some crazy slow code --- tensornetwork/block_tensor/block_tensor.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 803c9ce01..a23928545 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -216,12 +216,14 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, mask = np.isin(column_charges, -common_charges) #TODO: move this for loop to cython for charge in column_charges[mask]: - idxs[-charge].extend(number_of_seen_elements + - np.arange(row_degeneracies[-charge])) + idxs[-charge].append( + 
np.arange(number_of_seen_elements, + row_degeneracies[-charge] + number_of_seen_elements)) number_of_seen_elements += row_degeneracies[-charge] for c, idx in idxs.items(): - blocks[c] = np.reshape(data[np.asarray(idx)], + indexes = np.concatenate(idx) + blocks[c] = np.reshape(data[indexes], (row_degeneracies[c], column_degeneracies[-c])) return blocks From 5d8c86ad75d1b9bbe4ee994620af121193ae48aa Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 6 Dec 2019 22:32:45 -0500 Subject: [PATCH 034/212] faster code --- tensornetwork/block_tensor/block_tensor.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index a23928545..a35a8941a 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -214,7 +214,6 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, number_of_seen_elements = 0 idxs = {c: [] for c in common_charges} mask = np.isin(column_charges, -common_charges) - #TODO: move this for loop to cython for charge in column_charges[mask]: idxs[-charge].append( np.arange(number_of_seen_elements, From 4eae410ae5bbc8bac2ca26f44c929c3ff50d765b Mon Sep 17 00:00:00 2001 From: Chase Roberts Date: Sun, 8 Dec 2019 23:38:49 -0800 Subject: [PATCH 035/212] Update README.md (#404) --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 18e0b50f5..d83fcaa59 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,11 @@ pip3 install tensornetwork For details about the TensorNetwork API, see the [reference documentation.](https://tensornetwork.readthedocs.io) -We also have a basic [tutorial colab](https://colab.research.google.com/drive/1Fp9DolkPT-P_Dkg_s9PLbTOKSq64EVSu) for a more "hands-on" example. 
+## Tutorials + +[Basic API tutorial](https://colab.research.google.com/drive/1Fp9DolkPT-P_Dkg_s9PLbTOKSq64EVSu) + +[Tensor Networks inside Neural Networks using Keras](https://colab.research.google.com/drive/1JUh84N5sbfQYk6HWowWCGl0IZ1idQi6z) ## Basic Example Here, we build a simple 2 node contraction. From 04c8573cdbc652669f8fc13c95f6399877162595 Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 9 Dec 2019 13:49:41 -0500 Subject: [PATCH 036/212] add return_data --- tensornetwork/block_tensor/block_tensor.py | 45 ++++++++++++++++++---- 1 file changed, 38 insertions(+), 7 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index a35a8941a..0e393f05f 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -135,13 +135,14 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray], return charge_shape_dict -def retrieve_non_zero_diagonal_blocks(data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]]) -> Dict: +def retrieve_non_zero_diagonal_blocks( + data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: """ Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. - !!!!!!!!! This is currently very slow!!!!!!!!!!!! Args: data: An np.ndarray of the data. The number of elements in `data` has to match the number of non-zero elements defined by `charges` @@ -153,6 +154,16 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, with values `1` or `-1`, denoting the flow direction of the charges on each leg. `1` is inflowing, `-1` is outflowing charge. + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. 
+ If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. + Returns: + dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray + or a python list of locations and shapes, depending on the value of `return_data`. """ #TODO: this is currently way too slow!!!! #Run the following benchmark for testing (typical MPS use case) @@ -209,7 +220,6 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, # get the degeneracies of each row and column charge row_degeneracies = dict(zip(unique_row_charges, row_dims)) column_degeneracies = dict(zip(unique_column_charges, column_dims)) - blocks = {} number_of_seen_elements = 0 idxs = {c: [] for c in common_charges} @@ -220,10 +230,22 @@ def retrieve_non_zero_diagonal_blocks(data: np.ndarray, row_degeneracies[-charge] + number_of_seen_elements)) number_of_seen_elements += row_degeneracies[-charge] + blocks = {} + if not return_data: + for c, idx in idxs.items(): + num_elements = np.sum([len(t) for t in idx]) + indexes = np.empty(num_elements, dtype=np.int64) + np.concatenate(idx, out=indexes) + blocks[c] = [indexes, (row_degeneracies[c], column_degeneracies[-c])] + return blocks + for c, idx in idxs.items(): - indexes = np.concatenate(idx) + num_elements = np.sum([len(t) for t in idx]) + indexes = np.empty(num_elements, dtype=np.int64) + np.concatenate(idx, out=indexes) blocks[c] = np.reshape(data[indexes], (row_degeneracies[c], column_degeneracies[-c])) + return blocks @@ -532,12 +554,21 @@ def raise_error(): if self.shape[n] < dense_shape[n]: raise_error() - def get_diagonal_blocks(self) -> Dict: + def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: """ Obtain the diagonal blocks of symmetric 
matrix. BlockSparseTensor has to be a matrix. + Args: + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. Returns: dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) + """ if self.rank != 2: raise ValueError( From 33d1a40e9d678416e0f5dba9c89e833929602ced Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 9 Dec 2019 13:56:36 -0500 Subject: [PATCH 037/212] doc --- tensornetwork/block_tensor/block_tensor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 0e393f05f..e5392b7a8 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -161,6 +161,7 @@ def retrieve_non_zero_diagonal_blocks( containing the locations of the tensor elements within A.data, i.e. `A.data[locations]` contains the elements belonging to the tensor with quantum numbers `(q,q). `shape` is the shape of the corresponding array. + Returns: dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray or a python list of locations and shapes, depending on the value of `return_data`. 
From fb1978a9d50cb9cc91abc62e465cc56827a49eef Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 9 Dec 2019 14:12:11 -0500 Subject: [PATCH 038/212] bug fix --- tensornetwork/block_tensor/block_tensor.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index e5392b7a8..1ad942293 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -576,7 +576,10 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" .format(self.rank)) return retrieve_non_zero_diagonal_blocks( - data=self.data, charges=self.charges, flows=self.flows) + data=self.data, + charges=self.charges, + flows=self.flows, + return_data=return_data) def reshape(tensor: BlockSparseTensor, From 0d4a6258a780e9034de19ac35886bc5bd59cffdf Mon Sep 17 00:00:00 2001 From: mganahl Date: Tue, 10 Dec 2019 23:01:14 -0500 Subject: [PATCH 039/212] a little faster --- tensornetwork/block_tensor/block_tensor.py | 80 +++++++++++++++++----- 1 file changed, 63 insertions(+), 17 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 1ad942293..053a118dd 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -207,11 +207,18 @@ def retrieve_non_zero_diagonal_blocks( column_charges = flows[1] * charges[1] # a list of charges on each column #get the unique charges + t1 = time.time() unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) + # print('finding unique row charges', time.time() - t1) + # t1 = time.time() unique_column_charges, column_dims = np.unique( column_charges, return_counts=True) + # print('finding unique column charges', time.time() - t1) + # t1 = time.time() common_charges = np.intersect1d( unique_row_charges, 
-unique_column_charges, assume_unique=True) + # print('finding unique intersections', time.time() - t1) + # t1 = time.time() #common_charges = np.intersect1d(row_charges, -column_charges) # for each matrix column find the number of non-zero elements in it @@ -223,31 +230,70 @@ def retrieve_non_zero_diagonal_blocks( column_degeneracies = dict(zip(unique_column_charges, column_dims)) number_of_seen_elements = 0 - idxs = {c: [] for c in common_charges} + #idxs = {c: [] for c in common_charges} + idxs = { + c: np.empty( + row_degeneracies[c] * column_degeneracies[-c], dtype=np.int64) + for c in common_charges + } + idxs_stops = {c: 0 for c in common_charges} + t1 = time.time() mask = np.isin(column_charges, -common_charges) - for charge in column_charges[mask]: - idxs[-charge].append( - np.arange(number_of_seen_elements, - row_degeneracies[-charge] + number_of_seen_elements)) + masked_charges = column_charges[mask] + print('finding mask', time.time() - t1) + # print(len(column_charges), len(masked_charges)) + t1 = time.time() + elements = {c: np.arange(row_degeneracies[c]) for c in common_charges} + for charge in masked_charges: + # idxs[-charge].append((number_of_seen_elements, + # row_degeneracies[-charge] + number_of_seen_elements)) + + idxs[-charge][ + idxs_stops[-charge]:idxs_stops[-charge] + + row_degeneracies[-charge]] = number_of_seen_elements + elements[-charge] + + # np.arange( + # number_of_seen_elements, + # row_degeneracies[-charge] + number_of_seen_elements) + number_of_seen_elements += row_degeneracies[-charge] + idxs_stops[-charge] += row_degeneracies[-charge] + print('getting start and stop', time.time() - t1) + # t1 = time.time() + # for charge in masked_charges: + # tmp = np.arange(number_of_seen_elements, + # row_degeneracies[-charge] + number_of_seen_elements) + # number_of_seen_elements += row_degeneracies[-charge] + # print('running the partial loop', time.time() - t1) + + 
####################################################################################### + #looks like this takes pretty long for rectangular matrices where shape[1] >> shape[0] + #it's mostly np.arange that causes the overhead. + # t1 = time.time() + # for charge in masked_charges: + # idxs[-charge].append( + # np.arange(number_of_seen_elements, + # row_degeneracies[-charge] + number_of_seen_elements)) + # number_of_seen_elements += row_degeneracies[-charge] + # print('running the full loop', time.time() - t1) + ####################################################################################### blocks = {} if not return_data: for c, idx in idxs.items(): - num_elements = np.sum([len(t) for t in idx]) - indexes = np.empty(num_elements, dtype=np.int64) - np.concatenate(idx, out=indexes) - blocks[c] = [indexes, (row_degeneracies[c], column_degeneracies[-c])] + #num_elements = np.sum([len(t) for t in idx]) + #indexes = np.empty(num_elements, dtype=np.int64) + #np.concatenate(idx, out=indexes) + blocks[c] = [idx, (row_degeneracies[c], column_degeneracies[-c])] return blocks - for c, idx in idxs.items(): - num_elements = np.sum([len(t) for t in idx]) - indexes = np.empty(num_elements, dtype=np.int64) - np.concatenate(idx, out=indexes) - blocks[c] = np.reshape(data[indexes], - (row_degeneracies[c], column_degeneracies[-c])) - - return blocks + # for c, idx in idxs.items(): + # num_elements = np.sum([len(t) for t in idx]) + # indexes = np.empty(num_elements, dtype=np.int64) + # np.concatenate(idx, out=indexes) + # blocks[c] = np.reshape(data[indexes], + # (row_degeneracies[c], column_degeneracies[-c])) + #return blocks def retrieve_non_zero_diagonal_blocks_test( From 82a4148401cd852255e7debc6f76f4be0b2008e0 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 11 Dec 2019 00:05:16 -0500 Subject: [PATCH 040/212] substantial speedup --- tensornetwork/block_tensor/block_tensor.py | 117 +++++++++++++++++++-- 1 file changed, 110 insertions(+), 7 deletions(-) diff --git 
a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 053a118dd..0bf4ceb62 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -206,6 +206,109 @@ def retrieve_non_zero_diagonal_blocks( row_charges = flows[0] * charges[0] # a list of charges on each row column_charges = flows[1] * charges[1] # a list of charges on each column + #get the unique charges + unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) + unique_column_charges, column_dims = np.unique( + column_charges, return_counts=True) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + + row_degeneracies = dict(zip(unique_row_charges, row_dims)) + column_degeneracies = dict(zip(unique_column_charges, column_dims)) + + mask = np.isin(column_charges, -common_charges) + masked_charges = column_charges[mask] + degeneracy_vector = np.empty(len(masked_charges), dtype=np.int64) + masks = {} + for c in common_charges: + mask = masked_charges == -c + masks[c] = mask + degeneracy_vector[mask] = row_degeneracies[c] + summed_degeneracies = np.cumsum(degeneracy_vector) + blocks = {} + + for c in common_charges: + a = np.expand_dims(summed_degeneracies[masks[c]] - row_degeneracies[c], 0) + b = np.expand_dims(np.arange(row_degeneracies[c]), 1) + if not return_data: + blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])] + else: + blocks[c] = np.reshape(data[a + b], + (row_degeneracies[c], column_degeneracies[-c])) + return blocks + + +def retrieve_non_zero_diagonal_blocks_bkp( + data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: + """ + Given the meta data and underlying data of a symmetric matrix, compute + all diagonal blocks and return them in a dict. + Args: + data: An np.ndarray of the data. 
The number of elements in `data` + has to match the number of non-zero elements defined by `charges` + and `flows` + charges: List of np.ndarray, one for each leg. + Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. + + Returns: + dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray + or a python list of locations and shapes, depending on the value of `return_data`. + """ + #TODO: this is currently way too slow!!!! 
+ #Run the following benchmark for testing (typical MPS use case) + #retrieving the blocks is ~ 10 times as slow as multiplying them + + # D=4000 + # B=10 + # q1 = np.random.randint(0,B,D) + # q2 = np.asarray([0,1]) + # q3 = np.random.randint(0,B,D) + # i1 = Index(charges=q1,flow=1) + # i2 = Index(charges=q2,flow=1) + # i3 = Index(charges=q3,flow=-1) + # indices=[i1,i2,i3] + # A = BlockSparseTensor.random(indices=indices, dtype=np.complex128) + # A.reshape((D*2, D)) + # def multiply_blocks(blocks): + # for b in blocks.values(): + # np.dot(b.T, b) + # t1s=[] + # t2s=[] + # for n in range(10): + # print(n) + # t1 = time.time() + # b = A.get_diagonal_blocks() + # t1s.append(time.time() - t1) + # t1 = time.time() + # multiply_blocks(b) + # t2s.append(time.time() - t1) + # print('average retrieval time', np.average(t1s)) + # print('average multiplication time',np.average(t2s)) + + if len(charges) != 2: + raise ValueError("input has to be a two-dimensional symmetric matrix") + check_flows(flows) + if len(flows) != len(charges): + raise ValueError("`len(flows)` is different from `len(charges)`") + + row_charges = flows[0] * charges[0] # a list of charges on each row + column_charges = flows[1] * charges[1] # a list of charges on each column + #get the unique charges t1 = time.time() unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) @@ -287,13 +390,13 @@ def retrieve_non_zero_diagonal_blocks( blocks[c] = [idx, (row_degeneracies[c], column_degeneracies[-c])] return blocks - # for c, idx in idxs.items(): - # num_elements = np.sum([len(t) for t in idx]) - # indexes = np.empty(num_elements, dtype=np.int64) - # np.concatenate(idx, out=indexes) - # blocks[c] = np.reshape(data[indexes], - # (row_degeneracies[c], column_degeneracies[-c])) - #return blocks + for c, idx in idxs.items(): + num_elements = np.sum([len(t) for t in idx]) + indexes = np.empty(num_elements, dtype=np.int64) + np.concatenate(idx, out=indexes) + blocks[c] = np.reshape(data[indexes], + 
(row_degeneracies[c], column_degeneracies[-c])) + return blocks def retrieve_non_zero_diagonal_blocks_test( From 7bd7be72eefea3007b88b9a8273b9661ac8feecf Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 11 Dec 2019 00:05:40 -0500 Subject: [PATCH 041/212] renaming --- tensornetwork/block_tensor/block_tensor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 0bf4ceb62..c39fa38e7 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -238,7 +238,7 @@ def retrieve_non_zero_diagonal_blocks( return blocks -def retrieve_non_zero_diagonal_blocks_bkp( +def retrieve_non_zero_diagonal_blocks_deprecated( data: np.ndarray, charges: List[np.ndarray], flows: List[Union[bool, int]], From d9c094b3a46d410266c7ba40abded46b8bfcac62 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 11 Dec 2019 00:14:26 -0500 Subject: [PATCH 042/212] removed todo --- tensornetwork/block_tensor/block_tensor.py | 31 ---------------------- 1 file changed, 31 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index c39fa38e7..8849f2b30 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -166,37 +166,6 @@ def retrieve_non_zero_diagonal_blocks( dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray or a python list of locations and shapes, depending on the value of `return_data`. """ - #TODO: this is currently way too slow!!!! 
- #Run the following benchmark for testing (typical MPS use case) - #retrieving the blocks is ~ 10 times as slow as multiplying them - - # D=4000 - # B=10 - # q1 = np.random.randint(0,B,D) - # q2 = np.asarray([0,1]) - # q3 = np.random.randint(0,B,D) - # i1 = Index(charges=q1,flow=1) - # i2 = Index(charges=q2,flow=1) - # i3 = Index(charges=q3,flow=-1) - # indices=[i1,i2,i3] - # A = BlockSparseTensor.random(indices=indices, dtype=np.complex128) - # A.reshape((D*2, D)) - # def multiply_blocks(blocks): - # for b in blocks.values(): - # np.dot(b.T, b) - # t1s=[] - # t2s=[] - # for n in range(10): - # print(n) - # t1 = time.time() - # b = A.get_diagonal_blocks() - # t1s.append(time.time() - t1) - # t1 = time.time() - # multiply_blocks(b) - # t2s.append(time.time() - t1) - # print('average retrieval time', np.average(t1s)) - # print('average multiplication time',np.average(t2s)) - if len(charges) != 2: raise ValueError("input has to be a two-dimensional symmetric matrix") check_flows(flows) From 06c3f3cad6bd2537aac8fbf192e85f8576fe6ba8 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 11 Dec 2019 00:17:24 -0500 Subject: [PATCH 043/212] some comments --- tensornetwork/block_tensor/block_tensor.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 8849f2b30..b3d773078 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -172,6 +172,7 @@ def retrieve_non_zero_diagonal_blocks( if len(flows) != len(charges): raise ValueError("`len(flows)` is different from `len(charges)`") + #we multiply the flows into the charges row_charges = flows[0] * charges[0] # a list of charges on each row column_charges = flows[1] * charges[1] # a list of charges on each column @@ -179,14 +180,20 @@ def retrieve_non_zero_diagonal_blocks( unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) unique_column_charges, column_dims = 
np.unique( column_charges, return_counts=True) + #get the charges common to rows and columns (only those matter) common_charges = np.intersect1d( unique_row_charges, -unique_column_charges, assume_unique=True) + #convenience container for obtaining the degeneracies of each + #charge row_degeneracies = dict(zip(unique_row_charges, row_dims)) column_degeneracies = dict(zip(unique_column_charges, column_dims)) + # we only care about common charges mask = np.isin(column_charges, -common_charges) masked_charges = column_charges[mask] + + #some numpy magic to get the index locations of the blocks degeneracy_vector = np.empty(len(masked_charges), dtype=np.int64) masks = {} for c in common_charges: From 426fd1a2d3b9270d63accf512eb6bd33a40654f0 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 11 Dec 2019 10:30:19 -0500 Subject: [PATCH 044/212] comments --- tensornetwork/block_tensor/block_tensor.py | 36 +++++++++++++++++----- 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index b3d773078..0e751fcfc 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -184,27 +184,47 @@ def retrieve_non_zero_diagonal_blocks( common_charges = np.intersect1d( unique_row_charges, -unique_column_charges, assume_unique=True) - #convenience container for obtaining the degeneracies of each - #charge + #convenience container for storing the degeneracies of each + #row and column charge row_degeneracies = dict(zip(unique_row_charges, row_dims)) column_degeneracies = dict(zip(unique_column_charges, column_dims)) - # we only care about common charges + # we only care about charges common to row and columns mask = np.isin(column_charges, -common_charges) - masked_charges = column_charges[mask] + relevant_column_charges = column_charges[mask] #some numpy magic to get the index locations of the blocks - degeneracy_vector = 
np.empty(len(masked_charges), dtype=np.int64) + #we generate a vector of `len(relevant_column_charges) which, + #for each charge `c` in `relevant_column_charges` holds the + #row-degeneracy of charge `c` + degeneracy_vector = np.empty(len(relevant_column_charges), dtype=np.int64) + #for each charge `c` in `common_charges` we generate a boolean mask + #for indexing the positions where `relevant_column_charges` has a value of `c`. masks = {} for c in common_charges: - mask = masked_charges == -c + mask = relevant_column_charges == -c masks[c] = mask degeneracy_vector[mask] = row_degeneracies[c] - summed_degeneracies = np.cumsum(degeneracy_vector) + + # the result of the cumulative sum is a vector containing + # the stop positions of the non-zero values of each column + # within the data vector. + # E.g. for `relevant_column_charges` = [0,1,0,0,3], and + # row_degeneracies[0] = 10 + # row_degeneracies[1] = 20 + # row_degeneracies[3] = 30 + # we have + # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] + # The starting positions of consecutive elements (in column-major order) in + # each column with charge `c=0` within the data vector are then simply obtained using + # masks[0] = [True, False, True, True, False] + # and `stop_positions[masks[0]] - row_degeneracies[0]` + stop_positions = np.cumsum(degeneracy_vector) blocks = {} for c in common_charges: - a = np.expand_dims(summed_degeneracies[masks[c]] - row_degeneracies[c], 0) + #numpy broadcasting is substantially faster than kron! 
+ a = np.expand_dims(stop_positions[masks[c]] - row_degeneracies[c], 0) b = np.expand_dims(np.arange(row_degeneracies[c]), 1) if not return_data: blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])] From 7f3e148c215b372b7ad60d2c5eae922a1239c529 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 11 Dec 2019 13:39:36 -0500 Subject: [PATCH 045/212] fixed some bug in reshape --- tensornetwork/block_tensor/block_tensor.py | 227 +++++++-------------- 1 file changed, 69 insertions(+), 158 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 0e751fcfc..8c06f5c83 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -135,7 +135,7 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray], return charge_shape_dict -def retrieve_non_zero_diagonal_blocks( +def retrieve_non_zero_diagonal_blocks_deprecated( data: np.ndarray, charges: List[np.ndarray], flows: List[Union[bool, int]], @@ -143,6 +143,8 @@ def retrieve_non_zero_diagonal_blocks( """ Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. + This is a deprecated version which in general performs worse than the + current main implementation. Args: data: An np.ndarray of the data. The number of elements in `data` has to match the number of non-zero elements defined by `charges` @@ -234,7 +236,7 @@ def retrieve_non_zero_diagonal_blocks( return blocks -def retrieve_non_zero_diagonal_blocks_deprecated( +def retrieve_non_zero_diagonal_blocks( data: np.ndarray, charges: List[np.ndarray], flows: List[Union[bool, int]], @@ -265,180 +267,51 @@ def retrieve_non_zero_diagonal_blocks_deprecated( dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray or a python list of locations and shapes, depending on the value of `return_data`. """ - #TODO: this is currently way too slow!!!! 
- #Run the following benchmark for testing (typical MPS use case) - #retrieving the blocks is ~ 10 times as slow as multiplying them - - # D=4000 - # B=10 - # q1 = np.random.randint(0,B,D) - # q2 = np.asarray([0,1]) - # q3 = np.random.randint(0,B,D) - # i1 = Index(charges=q1,flow=1) - # i2 = Index(charges=q2,flow=1) - # i3 = Index(charges=q3,flow=-1) - # indices=[i1,i2,i3] - # A = BlockSparseTensor.random(indices=indices, dtype=np.complex128) - # A.reshape((D*2, D)) - # def multiply_blocks(blocks): - # for b in blocks.values(): - # np.dot(b.T, b) - # t1s=[] - # t2s=[] - # for n in range(10): - # print(n) - # t1 = time.time() - # b = A.get_diagonal_blocks() - # t1s.append(time.time() - t1) - # t1 = time.time() - # multiply_blocks(b) - # t2s.append(time.time() - t1) - # print('average retrieval time', np.average(t1s)) - # print('average multiplication time',np.average(t2s)) - if len(charges) != 2: raise ValueError("input has to be a two-dimensional symmetric matrix") check_flows(flows) if len(flows) != len(charges): raise ValueError("`len(flows)` is different from `len(charges)`") + #we multiply the flows into the charges row_charges = flows[0] * charges[0] # a list of charges on each row column_charges = flows[1] * charges[1] # a list of charges on each column - #get the unique charges - t1 = time.time() - unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) - # print('finding unique row charges', time.time() - t1) - # t1 = time.time() - unique_column_charges, column_dims = np.unique( - column_charges, return_counts=True) - # print('finding unique column charges', time.time() - t1) - # t1 = time.time() - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - # print('finding unique intersections', time.time() - t1) - # t1 = time.time() - #common_charges = np.intersect1d(row_charges, -column_charges) + # we only care about charges common to rows and columns + common_charges = 
np.unique(np.intersect1d(row_charges, -column_charges)) + row_charges = row_charges[np.isin(row_charges, common_charges)] + column_charges = column_charges[np.isin(column_charges, -common_charges)] - # for each matrix column find the number of non-zero elements in it - # Note: the matrix is assumed to be symmetric, i.e. only elements where - # ingoing and outgoing charge are identical are non-zero + #get the unique charges + unique_row_charges, row_locations, row_dims = np.unique( + row_charges, return_inverse=True, return_counts=True) + unique_column_charges, column_locations, column_dims = np.unique( + column_charges, return_inverse=True, return_counts=True) - # get the degeneracies of each row and column charge + #convenience container for storing the degeneracies of each + #row and column charge row_degeneracies = dict(zip(unique_row_charges, row_dims)) column_degeneracies = dict(zip(unique_column_charges, column_dims)) - number_of_seen_elements = 0 - #idxs = {c: [] for c in common_charges} - idxs = { - c: np.empty( - row_degeneracies[c] * column_degeneracies[-c], dtype=np.int64) - for c in common_charges - } - idxs_stops = {c: 0 for c in common_charges} - t1 = time.time() - mask = np.isin(column_charges, -common_charges) - masked_charges = column_charges[mask] - print('finding mask', time.time() - t1) - # print(len(column_charges), len(masked_charges)) - t1 = time.time() - elements = {c: np.arange(row_degeneracies[c]) for c in common_charges} - for charge in masked_charges: - # idxs[-charge].append((number_of_seen_elements, - # row_degeneracies[-charge] + number_of_seen_elements)) - - idxs[-charge][ - idxs_stops[-charge]:idxs_stops[-charge] + - row_degeneracies[-charge]] = number_of_seen_elements + elements[-charge] - - # np.arange( - # number_of_seen_elements, - # row_degeneracies[-charge] + number_of_seen_elements) - - number_of_seen_elements += row_degeneracies[-charge] - idxs_stops[-charge] += row_degeneracies[-charge] - print('getting start and stop', 
time.time() - t1) - # t1 = time.time() - # for charge in masked_charges: - # tmp = np.arange(number_of_seen_elements, - # row_degeneracies[-charge] + number_of_seen_elements) - # number_of_seen_elements += row_degeneracies[-charge] - # print('running the partial loop', time.time() - t1) - - ####################################################################################### - #looks like this takes pretty long for rectangular matrices where shape[1] >> shape[0] - #it's mostly np.arange that causes the overhead. - # t1 = time.time() - # for charge in masked_charges: - # idxs[-charge].append( - # np.arange(number_of_seen_elements, - # row_degeneracies[-charge] + number_of_seen_elements)) - # number_of_seen_elements += row_degeneracies[-charge] - # print('running the full loop', time.time() - t1) - ####################################################################################### - - blocks = {} - if not return_data: - for c, idx in idxs.items(): - #num_elements = np.sum([len(t) for t in idx]) - #indexes = np.empty(num_elements, dtype=np.int64) - #np.concatenate(idx, out=indexes) - blocks[c] = [idx, (row_degeneracies[c], column_degeneracies[-c])] - return blocks - - for c, idx in idxs.items(): - num_elements = np.sum([len(t) for t in idx]) - indexes = np.empty(num_elements, dtype=np.int64) - np.concatenate(idx, out=indexes) - blocks[c] = np.reshape(data[indexes], - (row_degeneracies[c], column_degeneracies[-c])) - return blocks - - -def retrieve_non_zero_diagonal_blocks_test( - data: np.ndarray, charges: List[np.ndarray], - flows: List[Union[bool, int]]) -> Dict: - """ - For testing purposes. Produces the same output as `retrieve_non_zero_diagonal_blocks`, - but computes it in a different way. - This is currently very slow for high rank tensors with many blocks, but can be faster than - `retrieve_non_zero_diagonal_blocks` in certain other cases. - It's pretty memory heavy too. 
- """ - if len(charges) != 2: - raise ValueError("input has to be a two-dimensional symmetric matrix") - check_flows(flows) - if len(flows) != len(charges): - raise ValueError("`len(flows)` is different from `len(charges)`") - - #get the unique charges - unique_row_charges, row_dims = np.unique( - flows[0] * charges[0], return_counts=True) - unique_column_charges, column_dims = np.unique( - flows[1] * charges[1], return_counts=True) - - #a 1d array of the net charges. - #this can use a lot of memory - net_charges = fuse_charges( - q1=charges[0], flow1=flows[0], q2=charges[1], flow2=flows[1]) - #a 1d array containing row charges added with zero column charges - #used to find the indices of in data corresponding to a given charge - #(see below) - #this can be very large - tmp = np.tile(charges[0] * flows[0], len(charges[1])) + #some numpy magic to get the index locations of the blocks + #we generate a vector of `len(relevant_column_charges) which, + #for each charge `c` in `relevant_column_charges` holds the + #row-degeneracy of charge `c` - symmetric_indices = net_charges == 0 - charge_lookup = tmp[symmetric_indices] + degeneracy_vector = row_dims[column_locations] + stop_positions = np.cumsum(degeneracy_vector) - row_degeneracies = dict(zip(unique_row_charges, row_dims)) - column_degeneracies = dict(zip(unique_column_charges, column_dims)) blocks = {} - - common_charges = np.intersect1d(unique_row_charges, -unique_column_charges) for c in common_charges: - blocks[c] = np.reshape(data[charge_lookup == c], - (row_degeneracies[c], column_degeneracies[-c])) - + #numpy broadcasting is substantially faster than kron! 
+ a = np.expand_dims( + stop_positions[column_locations == -c] - row_degeneracies[c], 0) + b = np.expand_dims(np.arange(row_degeneracies[c]), 1) + if not return_data: + blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])] + else: + blocks[c] = np.reshape(data[a + b], + (row_degeneracies[c], column_degeneracies[-c])) return blocks @@ -610,6 +483,16 @@ def transpose(self, order): raise NotImplementedError('transpose is not implemented!!') + def reset_shape(self) -> None: + """ + Bring the tensor back into its elementary shape. + """ + elementary_indices = [] + for i in self.indices: + elementary_indices.extend(i.get_elementary_indices()) + + self.indices = elementary_indices + def reshape(self, shape: Union[Iterable[Index], Iterable[int]]) -> None: """ Reshape `tensor` into `shape` in place. @@ -677,6 +560,7 @@ def raise_error(): dense_shape, tuple([e.dimension for e in elementary_indices]))) + self.reset_shape() for n in range(len(dense_shape)): if dense_shape[n] > self.shape[n]: while dense_shape[n] > self.shape[n]: @@ -726,6 +610,33 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: flows=self.flows, return_data=return_data) + def get_diagonal_blocks_deprecated( + self, return_data: Optional[bool] = True) -> Dict: + """ + Obtain the diagonal blocks of symmetric matrix. + BlockSparseTensor has to be a matrix. + Args: + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. 
+ Returns: + dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) + + """ + if self.rank != 2: + raise ValueError( + "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" + .format(self.rank)) + return retrieve_non_zero_diagonal_blocks_deprecated( + data=self.data, + charges=self.charges, + flows=self.flows, + return_data=return_data) + def reshape(tensor: BlockSparseTensor, shape: Union[Iterable[Index], Iterable[int]]) -> BlockSparseTensor: From 19c3fe8fc12d393aadd0ec92a6de6200feb25687 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 11 Dec 2019 13:41:31 -0500 Subject: [PATCH 046/212] comments --- tensornetwork/block_tensor/block_tensor.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 8c06f5c83..cf6bb8f67 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -547,20 +547,19 @@ def reshape(self, shape: Union[Iterable[Index], Iterable[int]]) -> None: index_copy = [i.copy() for i in self.indices] def raise_error(): - #if this error is raised `shape` is incompatible - #with the elementary indices. We have to reset them - #to the original. + #if this error is raised then `shape` is incompatible + #with the elementary indices. We then reset the shape + #to what is was before the call to `reshape`. 
self.indices = index_copy elementary_indices = [] for i in self.indices: elementary_indices.extend(i.get_elementary_indices()) - print(elementary_indices) raise ValueError("The shape {} is incompatible with the " "elementary shape {} of the tensor.".format( dense_shape, tuple([e.dimension for e in elementary_indices]))) - self.reset_shape() + self.reset_shape() #bring tensor back into its elementary shape for n in range(len(dense_shape)): if dense_shape[n] > self.shape[n]: while dense_shape[n] > self.shape[n]: From 5c8fd3e982aea8d9ca0df0f26c9369f4430fa894 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 11 Dec 2019 13:46:06 -0500 Subject: [PATCH 047/212] default value changed --- tensornetwork/block_tensor/block_tensor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index cf6bb8f67..cb2976a00 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -240,7 +240,7 @@ def retrieve_non_zero_diagonal_blocks( data: np.ndarray, charges: List[np.ndarray], flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: + return_data: Optional[bool] = False) -> Dict: """ Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. 
From 94c8c2cbe344d83387b6ce2b79bb063ecf48a86a Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 11 Dec 2019 22:09:09 -0500 Subject: [PATCH 048/212] fixed bug, old version is now faster again --- tensornetwork/block_tensor/block_tensor.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index cb2976a00..57d7d8607 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -135,7 +135,7 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray], return charge_shape_dict -def retrieve_non_zero_diagonal_blocks_deprecated( +def retrieve_non_zero_diagonal_blocks( data: np.ndarray, charges: List[np.ndarray], flows: List[Union[bool, int]], @@ -143,8 +143,6 @@ def retrieve_non_zero_diagonal_blocks_deprecated( """ Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. - This is a deprecated version which in general performs worse than the - current main implementation. Args: data: An np.ndarray of the data. The number of elements in `data` has to match the number of non-zero elements defined by `charges` @@ -236,7 +234,7 @@ def retrieve_non_zero_diagonal_blocks_deprecated( return blocks -def retrieve_non_zero_diagonal_blocks( +def retrieve_non_zero_diagonal_blocks_deprecated( data: np.ndarray, charges: List[np.ndarray], flows: List[Union[bool, int]], @@ -244,6 +242,9 @@ def retrieve_non_zero_diagonal_blocks( """ Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. + This is a deprecated version which in general performs worse than the + current main implementation. + Args: data: An np.ndarray of the data. 
The number of elements in `data` has to match the number of non-zero elements defined by `charges` @@ -287,7 +288,6 @@ def retrieve_non_zero_diagonal_blocks( row_charges, return_inverse=True, return_counts=True) unique_column_charges, column_locations, column_dims = np.unique( column_charges, return_inverse=True, return_counts=True) - #convenience container for storing the degeneracies of each #row and column charge row_degeneracies = dict(zip(unique_row_charges, row_dims)) @@ -300,12 +300,11 @@ def retrieve_non_zero_diagonal_blocks( degeneracy_vector = row_dims[column_locations] stop_positions = np.cumsum(degeneracy_vector) - blocks = {} for c in common_charges: #numpy broadcasting is substantially faster than kron! a = np.expand_dims( - stop_positions[column_locations == -c] - row_degeneracies[c], 0) + stop_positions[column_charges == -c] - row_degeneracies[c], 0) b = np.expand_dims(np.arange(row_degeneracies[c]), 1) if not return_data: blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])] From 7eec7f05fcb05b6cd4d3be21608a2f9c711e611e Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 11 Dec 2019 22:13:48 -0500 Subject: [PATCH 049/212] cleaned up reshape --- tensornetwork/block_tensor/block_tensor.py | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 57d7d8607..a4388ce79 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -571,16 +571,7 @@ def raise_error(): if self.shape[n] > dense_shape[n]: raise_error() elif dense_shape[n] < self.shape[n]: - while dense_shape[n] < self.shape[n]: - #split index at n - try: - i1, i2 = split_index(self.indices.pop(n)) - except ValueError: - raise_error() - self.indices.insert(n, i1) - self.indices.insert(n + 1, i2) - if self.shape[n] < dense_shape[n]: - raise_error() + raise_error() def get_diagonal_blocks(self, return_data: Optional[bool] = 
True) -> Dict: """ From c188ab9ccbb56a6e9be5bbe11f25cf2e1f7b02b2 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 11 Dec 2019 22:31:15 -0500 Subject: [PATCH 050/212] started adding tests --- tensornetwork/block_tensor/index_test.py | 46 ++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 tensornetwork/block_tensor/index_test.py diff --git a/tensornetwork/block_tensor/index_test.py b/tensornetwork/block_tensor/index_test.py new file mode 100644 index 000000000..ff331a36a --- /dev/null +++ b/tensornetwork/block_tensor/index_test.py @@ -0,0 +1,46 @@ +import numpy as np +# pylint: disable=line-too-long +from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies + + +def test_fuse_charges(): + q1 = np.asarray([0, 1]) + q2 = np.asarray([2, 3, 4]) + fused_charges = fuse_charges(q1, 1, q2, 1) + assert np.all(fused_charges == np.asarray([2, 3, 3, 4, 4, 5])) + fused_charges = fuse_charges(q1, 1, q2, -1) + assert np.all(fused_charges == np.asarray([-2, -1, -3, -2, -4, -3])) + + +def test_index_fusion_mul(): + D = 100 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 1 + q2 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 2 + i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + + i12 = i1 * i2 + assert i12.left_child is i1 + assert i12.right_child is i2 + assert np.all(i12.charges == fuse_charges(q1, 1, q2, 1)) + + +def test_index_fusion(): + D = 100 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 1 + q2 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 2 + i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + + i12 = fuse_index_pair(i1, i2) + assert 
i12.left_child is i1 + assert i12.right_child is i2 + assert np.all(i12.charges == fuse_charges(q1, 1, q2, 1)) From abdcd8edd906f5936fe817f9704b43e1b8bc465b Mon Sep 17 00:00:00 2001 From: Ashley Milsted Date: Wed, 11 Dec 2019 21:24:37 -0800 Subject: [PATCH 051/212] Quantum abstractions (#379) * Initial attempt at quantum classes. * .tensor() and QuantumIdentity. * check_hilberts -> check_spaces * Add some blurb. * Rename to Qu. * Finish Qu. * Fix matmul in case operators share network components. * Add some scalar methods. * Improve a docstring. * Redo scalars and make identity work using copy tensors. A QuOperator can now have a scalar component (disconnected scalar subnetwork). Also introduce `ignore_edges` for power users. * Remove obsolete parts of QuScalar. * Add contraction stuff. * Add from_tensor() constructors. * Doctstring. * Doc/comments fixes. * Add typing. * Remove some lint. * Fix a bug. * Add very simple constructor tests. * Default edge ordering for eval(). Better docstrings. * A bunch more tests. * tensor_prod -> tensor_product, outer_product * .is_scalar -> is_scalar() etc. * Improve docstrings on axis ordering. * Improve and fix scalar multiplication. * Kill outer_product(). * CopyNode needs a backend and dtype. * Fix __mul__ and add __rmul__. * More docstrings and add axis arguments to vector from_tensor()s. * Add backends to tests. * Delint. * CopyNode should not inflate its tensor just to tell you the dtype. * Correct two docstrings. * Improve some tests. Particulary, test identity some more, since we now try to be efficient with CopyNode identity tensors. * Treat CopyNode identity tensors efficiently. Also rename shape -> space. * Add support for copying CopyNodes. * Propagate output edges properly. * Test that CopyNodes are propagated. Also do a CopyNode sandwich test. * Improve typing. Also more shape -> space. 
--- tensornetwork/network_components.py | 5 + tensornetwork/network_operations.py | 33 +- tensornetwork/quantum/quantum.py | 641 ++++++++++++++++++++++++++ tensornetwork/quantum/quantum_test.py | 201 ++++++++ 4 files changed, 865 insertions(+), 15 deletions(-) create mode 100644 tensornetwork/quantum/quantum.py create mode 100644 tensornetwork/quantum/quantum_test.py diff --git a/tensornetwork/network_components.py b/tensornetwork/network_components.py index 1c9fa5c45..f8d713f58 100644 --- a/tensornetwork/network_components.py +++ b/tensornetwork/network_components.py @@ -626,6 +626,11 @@ def __init__(self, backend=backend_obj, shape=(dimension,) * rank) + @property + def dtype(self): + # Override so we don't construct the dense tensor when asked for the dtype! + return self.copy_node_dtype + def get_tensor(self) -> Tensor: return self.tensor diff --git a/tensornetwork/network_operations.py b/tensornetwork/network_operations.py index 0d93f45f4..fa718a430 100644 --- a/tensornetwork/network_operations.py +++ b/tensornetwork/network_operations.py @@ -125,23 +125,29 @@ def copy(nodes: Iterable[BaseNode], node_dict: A dictionary mapping the nodes to their copies. edge_dict: A dictionary mapping the edges to their copies. 
""" - #TODO: add support for copying CopyTensor - if conjugate: - node_dict = { - node: Node( + node_dict = {} + for node in nodes: + if isinstance(node, CopyNode): + node_dict[node] = CopyNode( + node.rank, + node.dimension, + name=node.name, + axis_names=node.axis_names, + backend=node.backend, + dtype=node.dtype) + else: + if conjugate: + node_dict[node] = Node( node.backend.conj(node.tensor), name=node.name, axis_names=node.axis_names, - backend=node.backend) for node in nodes - } - else: - node_dict = { - node: Node( + backend=node.backend) + else: + node_dict[node] = Node( node.tensor, name=node.name, axis_names=node.axis_names, - backend=node.backend) for node in nodes - } + backend=node.backend) edge_dict = {} for edge in get_all_edges(nodes): node1 = edge.node1 @@ -184,9 +190,6 @@ def remove_node(node: BaseNode) -> Tuple[Dict[Text, Edge], Dict[int, Edge]]: the newly broken edges. disconnected_edges_by_axis: A Dictionary mapping `node`'s axis numbers to the newly broken edges. - - Raises: - ValueError: If the node isn't in the network. """ disconnected_edges_by_name = {} disconnected_edges_by_axis = {} @@ -607,7 +610,7 @@ def reachable(inputs: Union[BaseNode, Iterable[BaseNode], Edge, Iterable[Edge]] Args: inputs: A `BaseNode`/`Edge` or collection of `BaseNodes`/`Edges` Returns: - A list of `BaseNode` objects that can be reached from `node` + A set of `BaseNode` objects that can be reached from `node` via connected edges. Raises: ValueError: If an unknown value for `strategy` is passed. diff --git a/tensornetwork/quantum/quantum.py b/tensornetwork/quantum/quantum.py new file mode 100644 index 000000000..33b1313d5 --- /dev/null +++ b/tensornetwork/quantum/quantum.py @@ -0,0 +1,641 @@ +# Copyright 2019 The TensorNetwork Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Abstractions for quantum vectors and operators. + +Quantum mechanics involves a lot of linear algebra on vector spaces that often +have a preferred tensor-product factorization. Tensor networks are a natural +way to represent vectors and operators (matrices) involving these spaces. Hence +we provide some simple abstractions to ease linear algebra operations in which +the vectors and operators are represented by tensor networks. +""" +from typing import Any, Union, Callable, Optional, Sequence, Collection, Text +from typing import Tuple, Set, List, Type +import numpy as np +from tensornetwork.network_components import BaseNode, Node, Edge, connect +from tensornetwork.network_components import CopyNode +from tensornetwork.network_operations import get_all_nodes, copy, reachable +from tensornetwork.network_operations import get_subgraph_dangling, remove_node +from tensornetwork.contractors import greedy +Tensor = Any + + +def quantum_constructor(out_edges: Sequence[Edge], in_edges: Sequence[Edge], + ref_nodes: Optional[Collection[BaseNode]] = None, + ignore_edges: Optional[Collection[Edge]] = None +) -> "QuOperator": + """Constructs an appropriately specialized QuOperator. + + If there are no edges, creates a QuScalar. If the are only output (input) + edges, creates a QuVector (QuAdjointVector). Otherwise creates a + QuOperator. + + Args: + out_edges: output edges. + in_edges: in edges. + ref_nodes: reference nodes for the tensor network (needed if there is a + scalar component). 
+ ignore_edges: edges to ignore when checking the dimensionality of the + tensor network. + Returns: + The object. + """ + if len(out_edges) == 0 and len(in_edges) == 0: + return QuScalar(ref_nodes, ignore_edges) + if len(out_edges) == 0: + return QuAdjointVector(in_edges, ref_nodes, ignore_edges) + if len(in_edges) == 0: + return QuVector(out_edges, ref_nodes, ignore_edges) + return QuOperator(out_edges, in_edges, ref_nodes, ignore_edges) + + +def identity(space: Sequence[int], backend: Optional[Text] = None, + dtype: Type[np.number] = np.float64) -> "QuOperator": + """Construct a `QuOperator` representing the identity on a given space. + + Internally, this is done by constructing `CopyNode`s for each edge, with + dimension according to `space`. + + Args: + space: A sequence of integers for the dimensions of the tensor product + factors of the space (the edges in the tensor network). + backend: Optionally specify the backend to use for computations. + dtype: The data type (for conversion to dense). + Returns: + The desired identity operator. + """ + nodes = [CopyNode(2, d, backend=backend, dtype=dtype) for d in space] + out_edges = [n[0] for n in nodes] + in_edges = [n[1] for n in nodes] + return quantum_constructor(out_edges, in_edges) + + +def check_spaces(edges_1: Sequence[Edge], edges_2: Sequence[Edge]) -> None: + """Check the vector spaces represented by two lists of edges are compatible. + + The number of edges must be the same and the dimensions of each pair of edges + must match. Otherwise, an exception is raised. + + Args: + edges_1: List of edges representing a many-body Hilbert space. + edges_2: List of edges representing a many-body Hilbert space. 
+ """ + if len(edges_1) != len(edges_2): + raise ValueError("Hilbert-space mismatch: Cannot connect {} subsystems " + "with {} subsystems.".format(len(edges_1), len(edges_2))) + + for (i, (e1, e2)) in enumerate(zip(edges_1, edges_2)): + if e1.dimension != e2.dimension: + raise ValueError("Hilbert-space mismatch on subsystems {}: Input " + "dimension {} != output dimension {}.".format( + i, e1.dimension, e2.dimension)) + + +def eliminate_identities(nodes: Collection[BaseNode]) -> Tuple[dict, dict]: + """Eliminates any connected CopyNodes that are identity matrices. + + This will modify the network represented by `nodes`. + Only identities that are connected to other nodes are eliminated. + + Args: + nodes: Collection of nodes to search. + Returns: + nodes_dict: Dictionary mapping remaining Nodes to any replacements. + dangling_edges_dict: Dictionary specifying all dangling-edge replacements. + """ + nodes_dict = {} + dangling_edges_dict = {} + for n in nodes: + if isinstance(n, CopyNode) and n.get_rank() == 2 and not ( + n[0].is_dangling() and n[1].is_dangling()): + old_edges = [n[0], n[1]] + _, new_edges = remove_node(n) + if 0 in new_edges and 1 in new_edges: + e = connect(new_edges[0], new_edges[1]) + elif 0 in new_edges: # 1 was dangling + dangling_edges_dict[old_edges[1]] = new_edges[0] + elif 1 in new_edges: # 0 was dangling + dangling_edges_dict[old_edges[0]] = new_edges[1] + else: + # Trace of identity, so replace with a scalar node! + d = n.get_dimension(0) + # NOTE: Assume CopyNodes have numpy dtypes. + nodes_dict[n] = Node(np.array(d, dtype=n.dtype), backend=n.backend) + else: + for e in n.get_all_dangling(): + dangling_edges_dict[e] = e + nodes_dict[n] = n + + return nodes_dict, dangling_edges_dict + + +class QuOperator(): + """Represents a linear operator via a tensor network. + + To interpret a tensor network as a linear operator, some of the dangling + edges must be designated as `out_edges` (output edges) and the rest as + `in_edges` (input edges). 
+ + Considered as a matrix, the `out_edges` represent the row index and the + `in_edges` represent the column index. + + The (right) action of the operator on another then consists of connecting + the `in_edges` of the first operator to the `out_edges` of the second. + + Can be used to do simple linear algebra with tensor networks. + """ + __array_priority__ = 100.0 # for correct __rmul__ with scalar ndarrays + + def __init__(self, out_edges: Sequence[Edge], in_edges: Sequence[Edge], + ref_nodes: Optional[Collection[BaseNode]] = None, + ignore_edges: Optional[Collection[Edge]] = None) -> None: + """Creates a new `QuOperator` from a tensor network. + + This encapsulates an existing tensor network, interpreting it as a linear + operator. + + The network is checked for consistency: All dangling edges must either be + in `out_edges`, `in_edges`, or `ignore_edges`. + + Args: + out_edges: The edges of the network to be used as the output edges. + in_edges: The edges of the network to be used as the input edges. + ref_nodes: Nodes used to refer to parts of the tensor network that are + not connected to any input or output edges (for example: a scalar + factor). + ignore_edges: Optional collection of dangling edges to ignore when + performing consistency checks. + """ + # TODO: Decide whether the user must also supply all nodes involved. + # This would enable extra error checking and is probably clearer + # than `ref_nodes`. + if len(in_edges) == 0 and len(out_edges) == 0 and not ref_nodes: + raise ValueError("At least one reference node is required to specify a " + "scalar. 
None provided!") + self.out_edges = list(out_edges) + self.in_edges = list(in_edges) + self.ignore_edges = set(ignore_edges) if ignore_edges else set() + self.ref_nodes = set(ref_nodes) if ref_nodes else set() + self.check_network() + + @classmethod + def from_tensor(cls, tensor: Tensor, out_axes: Sequence[int], + in_axes: Sequence[int], backend: Optional[Text] = None + ) -> "QuOperator": + """Construct a `QuOperator` directly from a single tensor. + + This first wraps the tensor in a `Node`, then constructs the `QuOperator` + from that `Node`. + + Args: + tensor: The tensor. + out_axes: The axis indices of `tensor` to use as `out_edges`. + in_axes: The axis indices of `tensor` to use as `in_edges`. + backend: Optionally specify the backend to use for computations. + Returns: + The new operator. + """ + n = Node(tensor, backend=backend) + out_edges = [n[i] for i in out_axes] + in_edges = [n[i] for i in in_axes] + return cls(out_edges, in_edges, set([n])) + + @property + def nodes(self) -> Set[BaseNode]: + """All tensor-network nodes involved in the operator. + """ + return reachable( + get_all_nodes(self.out_edges + self.in_edges) | self.ref_nodes) + + @property + def in_space(self) -> List[int]: + return [e.dimension for e in self.in_edges] + + @property + def out_space(self) -> List[int]: + return [e.dimension for e in self.out_edges] + + def is_scalar(self) -> bool: + return len(self.out_edges) == 0 and len(self.in_edges) == 0 + + def is_vector(self) -> bool: + return len(self.out_edges) > 0 and len(self.in_edges) == 0 + + def is_adjoint_vector(self) -> bool: + return len(self.out_edges) == 0 and len(self.in_edges) > 0 + + def check_network(self) -> None: + """Check that the network has the expected dimensionality. + + This checks that all input and output edges are dangling and that there + are no other dangling edges (except any specified in `ignore_edges`). + If not, an exception is raised. 
+ """ + for (i, e) in enumerate(self.out_edges): + if not e.is_dangling(): + raise ValueError("Output edge {} is not dangling!".format(i)) + for (i, e) in enumerate(self.in_edges): + if not e.is_dangling(): + raise ValueError("Input edge {} is not dangling!".format(i)) + for e in self.ignore_edges: + if not e.is_dangling(): + raise ValueError("ignore_edges contains non-dangling edge: {}".format( + str(e))) + + known_edges = set(self.in_edges) | set(self.out_edges) | self.ignore_edges + all_dangling_edges = get_subgraph_dangling(self.nodes) + if known_edges != all_dangling_edges: + raise ValueError("The network includes unexpected dangling edges (that " + "are not members of ignore_edges).") + + def adjoint(self) -> "QuOperator": + """The adjoint of the operator. + + This creates a new `QuOperator` with complex-conjugate copies of all + tensors in the network and with the input and output edges switched. + """ + nodes_dict, edge_dict = copy(self.nodes, True) + out_edges = [edge_dict[e] for e in self.in_edges] + in_edges = [edge_dict[e] for e in self.out_edges] + ref_nodes = [nodes_dict[n] for n in self.ref_nodes] + ignore_edges = [edge_dict[e] for e in self.ignore_edges] + return quantum_constructor( + out_edges, in_edges, ref_nodes, ignore_edges) + + def trace(self) -> "QuOperator": + """The trace of the operator. + """ + return self.partial_trace(range(len(self.in_edges))) + + def norm(self) -> "QuOperator": + """The norm of the operator. + This is the 2-norm (also known as the Frobenius or Hilbert-Schmidt norm). + """ + return (self.adjoint() @ self).trace() + + def partial_trace(self, subsystems_to_trace_out: Collection[int] + ) -> "QuOperator": + """The partial trace of the operator. + + Subsystems to trace out are supplied as indices, so that dangling edges + are connected to eachother as: + `out_edges[i] ^ in_edges[i] for i in subsystems_to_trace_out` + + This does not modify the original network. 
The original ordering of the + remaining subsystems is maintained. + + Args: + subsystems_to_trace_out: Indices of subsystems to trace out. + Returns: + A new QuOperator or QuScalar representing the result. + """ + out_edges_trace = [self.out_edges[i] for i in subsystems_to_trace_out] + in_edges_trace = [self.in_edges[i] for i in subsystems_to_trace_out] + + check_spaces(in_edges_trace, out_edges_trace) + + nodes_dict, edge_dict = copy(self.nodes, False) + for (e1, e2) in zip(out_edges_trace, in_edges_trace): + edge_dict[e1] = edge_dict[e1] ^ edge_dict[e2] + + # get leftover edges in the original order + out_edges_trace = set(out_edges_trace) + in_edges_trace = set(in_edges_trace) + out_edges = [edge_dict[e] for e in self.out_edges + if e not in out_edges_trace] + in_edges = [edge_dict[e] for e in self.in_edges + if e not in in_edges_trace] + ref_nodes = [n for _, n in nodes_dict.items()] + ignore_edges = [edge_dict[e] for e in self.ignore_edges] + + return quantum_constructor(out_edges, in_edges, ref_nodes, ignore_edges) + + def __matmul__(self, other: "QuOperator") -> "QuOperator": + """The action of this operator on another. + + Given `QuOperator`s `A` and `B`, produces a new `QuOperator` for `A @ B`, + where `A @ B` means: "the action of A, as a linear operator, on B". + + Under the hood, this produces copies of the tensor networks defining `A` + and `B` and then connects the copies by hooking up the `in_edges` of + `A.copy()` to the `out_edges` of `B.copy()`. + """ + check_spaces(self.in_edges, other.out_edges) + + # Copy all nodes involved in the two operators. + # We must do this separately for self and other, in case self and other + # are defined via the same network components (e.g. if self === other). 
+ nodes_dict1, edges_dict1 = copy(self.nodes, False) + nodes_dict2, edges_dict2 = copy(other.nodes, False) + + # connect edges to create network for the result + for (e1, e2) in zip(self.in_edges, other.out_edges): + _ = edges_dict1[e1] ^ edges_dict2[e2] + + in_edges = [edges_dict2[e] for e in other.in_edges] + out_edges = [edges_dict1[e] for e in self.out_edges] + ref_nodes = ([n for _, n in nodes_dict1.items()] + + [n for _, n in nodes_dict2.items()]) + ignore_edges = ([edges_dict1[e] for e in self.ignore_edges] + + [edges_dict2[e] for e in other.ignore_edges]) + + return quantum_constructor(out_edges, in_edges, ref_nodes, ignore_edges) + + def __mul__(self, other: Union["QuOperator", BaseNode, Tensor] + ) -> "QuOperator": + """Scalar multiplication of operators. + + Given two operators `A` and `B`, one of the which is a scalar (it has no + input or output edges), `A * B` produces a new operator representing the + scalar multiplication of `A` and `B`. + + For convenience, one of `A` or `B` may be a number or scalar-valued tensor + or `Node` (it will automatically be wrapped in a `QuScalar`). + + Note: This is a special case of `tensor_product()`. + """ + if not isinstance(other, QuOperator): + if isinstance(other, BaseNode): + node = other + else: + node = Node(other, backend=self.nodes.pop().backend) + if node.shape: + raise ValueError("Cannot perform elementwise multiplication by a " + "non-scalar tensor.") + other = QuScalar([node]) + + if self.is_scalar() or other.is_scalar(): + return self.tensor_product(other) + + raise ValueError("Elementwise multiplication is only supported if at " + "least one of the arguments is a scalar.") + + def __rmul__(self, other: Union["QuOperator", BaseNode, Tensor]) -> "QuOperator": + """Scalar multiplication of operators. See `.__mul__()`. + """ + return self.__mul__(other) + + def tensor_product(self, other: "QuOperator") -> "QuOperator": + """Tensor product with another operator. 
+ + Given two operators `A` and `B`, produces a new operator `AB` representing + `A` ⊗ `B`. The `out_edges` (`in_edges`) of `AB` is simply the + concatenation of the `out_edges` (`in_edges`) of `A.copy()` with that of + `B.copy()`: + + `new_out_edges = [*out_edges_A_copy, *out_edges_B_copy]` + `new_in_edges = [*in_edges_A_copy, *in_edges_B_copy]` + + Args: + other: The other operator (`B`). + Returns: + The result (`AB`). + """ + nodes_dict1, edges_dict1 = copy(self.nodes, False) + nodes_dict2, edges_dict2 = copy(other.nodes, False) + + in_edges = ([edges_dict1[e] for e in self.in_edges] + + [edges_dict2[e] for e in other.in_edges]) + out_edges = ([edges_dict1[e] for e in self.out_edges] + + [edges_dict2[e] for e in other.out_edges]) + ref_nodes = ([n for _, n in nodes_dict1.items()] + + [n for _, n in nodes_dict2.items()]) + ignore_edges = ([edges_dict1[e] for e in self.ignore_edges] + + [edges_dict2[e] for e in other.ignore_edges]) + + return quantum_constructor(out_edges, in_edges, ref_nodes, ignore_edges) + + def contract(self, contractor: Callable = greedy, + final_edge_order: Optional[Sequence[Edge]] = None + ) -> "QuOperator": + """Contract the tensor network in place. + + This modifies the tensor network representation of the operator (or vector, + or scalar), reducing it to a single tensor, without changing the value. + + Args: + contractor: A function that performs the contraction. Defaults to + `greedy`, which uses the greedy algorithm from `opt_einsum` to + determine a contraction order. + final_edge_order: Manually specify the axis ordering of the final tensor. + Returns: + The present object. 
+ """ + nodes_dict, dangling_edges_dict = eliminate_identities(self.nodes) + self.in_edges = [dangling_edges_dict[e] for e in self.in_edges] + self.out_edges = [dangling_edges_dict[e] for e in self.out_edges] + self.ignore_edges = set(dangling_edges_dict[e] for e in self.ignore_edges) + self.ref_nodes = set( + nodes_dict[n] for n in self.ref_nodes if n in nodes_dict) + self.check_network() + + if final_edge_order: + final_edge_order = [dangling_edges_dict[e] for e in final_edge_order] + self.ref_nodes = set( + [contractor(self.nodes, output_edge_order=final_edge_order)]) + else: + self.ref_nodes = set([contractor(self.nodes, ignore_edge_order=True)]) + return self + + def eval(self, contractor: Callable = greedy, + final_edge_order: Optional[Sequence[Edge]] = None) -> Tensor: + """Contracts the tensor network in place and returns the final tensor. + + Note that this modifies the tensor network representing the operator. + + The default ordering for the axes of the final tensor is: + `*out_edges, *in_edges`. + + If there are any "ignored" edges, their axes come first: + `*ignored_edges, *out_edges, *in_edges`. + + Args: + contractor: A function that performs the contraction. Defaults to + `greedy`, which uses the greedy algorithm from `opt_einsum` to + determine a contraction order. + final_edge_order: Manually specify the axis ordering of the final tensor. + The default ordering is determined by `out_edges` and `in_edges` (see + above). + Returns: + The final tensor representing the operator. + """ + if not final_edge_order: + final_edge_order = (list(self.ignore_edges) + self.out_edges + + self.in_edges) + self.contract(contractor, final_edge_order) + nodes = self.nodes + if len(nodes) != 1: + raise ValueError("Node count '{}' > 1 after contraction!".format( + len(nodes))) + return list(nodes)[0].tensor + + +class QuVector(QuOperator): + """Represents a (column) vector via a tensor network. 
+ """ + def __init__(self, subsystem_edges: Sequence[Edge], + ref_nodes: Optional[Collection[BaseNode]] = None, + ignore_edges: Optional[Collection[Edge]] = None) -> None: + """Constructs a new `QuVector` from a tensor network. + + This encapsulates an existing tensor network, interpreting it as a (column) + vector. + + Args: + subsystem_edges: The edges of the network to be used as the output edges. + ref_nodes: Nodes used to refer to parts of the tensor network that are + not connected to any input or output edges (for example: a scalar + factor). + ignore_edges: Optional collection of edges to ignore when performing + consistency checks. + """ + super().__init__(subsystem_edges, [], ref_nodes, ignore_edges) + + @classmethod + def from_tensor(cls, tensor: Tensor, + subsystem_axes: Optional[Sequence[int]] = None, + backend: Optional[Text] = None) -> "QuVector": + """Construct a `QuVector` directly from a single tensor. + + This first wraps the tensor in a `Node`, then constructs the `QuVector` + from that `Node`. + + Args: + tensor: The tensor. + subsystem_axes: Sequence of integer indices specifying the order in which + to interpret the axes as subsystems (output edges). If not specified, + the axes are taken in ascending order. + backend: Optionally specify the backend to use for computations. + Returns: + The new operator. 
+ """ + n = Node(tensor, backend=backend) + if subsystem_axes is not None: + subsystem_edges = [n[i] for i in subsystem_axes] + else: + subsystem_edges = n.get_all_edges() + return cls(subsystem_edges) + + @property + def subsystem_edges(self) -> List[Edge]: + return self.out_edges + + @property + def space(self) -> List[int]: + return self.out_space + + def projector(self) -> "QuOperator": + return self @ self.adjoint() + + def reduced_density(self, subsystems_to_trace_out: Collection[int] + ) -> "QuOperator": + rho = self.projector() + return rho.partial_trace(subsystems_to_trace_out) + + +class QuAdjointVector(QuOperator): + """Represents an adjoint (row) vector via a tensor network. + """ + def __init__(self, subsystem_edges: Sequence[Edge], + ref_nodes: Optional[Collection[BaseNode]] = None, + ignore_edges: Optional[Collection[Edge]] = None) -> None: + """Constructs a new `QuAdjointVector` from a tensor network. + + This encapsulates an existing tensor network, interpreting it as an adjoint + vector (row vector). + + Args: + subsystem_edges: The edges of the network to be used as the input edges. + ref_nodes: Nodes used to refer to parts of the tensor network that are + not connected to any input or output edges (for example: a scalar + factor). + ignore_edges: Optional collection of edges to ignore when performing + consistency checks. + """ + super().__init__([], subsystem_edges, ref_nodes, ignore_edges) + + @classmethod + def from_tensor(cls, tensor: Tensor, + subsystem_axes: Optional[Sequence[int]] = None, + backend: Optional[Text] = None) -> "QuAdjointVector": + """Construct a `QuAdjointVector` directly from a single tensor. + + This first wraps the tensor in a `Node`, then constructs the + `QuAdjointVector` from that `Node`. + + Args: + tensor: The tensor. + subsystem_axes: Sequence of integer indices specifying the order in which + to interpret the axes as subsystems (input edges). If not specified, + the axes are taken in ascending order. 
+ backend: Optionally specify the backend to use for computations. + Returns: + The new operator. + """ + n = Node(tensor, backend=backend) + if subsystem_axes is not None: + subsystem_edges = [n[i] for i in subsystem_axes] + else: + subsystem_edges = n.get_all_edges() + return cls(subsystem_edges) + + @property + def subsystem_edges(self) -> List[Edge]: + return self.in_edges + + @property + def space(self) -> List[int]: + return self.in_space + + def projector(self) -> "QuOperator": + return self.adjoint() @ self + + def reduced_density(self, subsystems_to_trace_out: Collection[int] + ) -> "QuOperator": + rho = self.projector() + return rho.partial_trace(subsystems_to_trace_out) + + +class QuScalar(QuOperator): + """Represents a scalar via a tensor network. + """ + def __init__(self, ref_nodes: Collection[BaseNode], + ignore_edges: Optional[Collection[Edge]] = None) -> None: + """Constructs a new `QuScalar` from a tensor network. + + This encapsulates an existing tensor network, interpreting it as a scalar. + + Args: + ref_nodes: Nodes used to refer to the tensor network (need not be + exhaustive - one node from each disconnected subnetwork is sufficient). + ignore_edges: Optional collection of edges to ignore when performing + consistency checks. + """ + super().__init__([], [], ref_nodes, ignore_edges) + + @classmethod + def from_tensor(cls, tensor: Tensor, backend: Optional[Text] = None + ) -> "QuScalar": + """Construct a `QuScalar` directly from a single tensor. + + This first wraps the tensor in a `Node`, then constructs the + `QuScalar` from that `Node`. + + Args: + tensor: The tensor. + backend: Optionally specify the backend to use for computations. + Returns: + The new operator. 
+ """ + n = Node(tensor, backend=backend) + return cls(set([n])) diff --git a/tensornetwork/quantum/quantum_test.py b/tensornetwork/quantum/quantum_test.py new file mode 100644 index 000000000..4a9ae113d --- /dev/null +++ b/tensornetwork/quantum/quantum_test.py @@ -0,0 +1,201 @@ +# Copyright 2019 The TensorNetwork Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +import numpy as np +import tensornetwork as tn +import quantum as qu + + +def test_constructor(backend): + psi_tensor = np.random.rand(2, 2) + psi_node = tn.Node(psi_tensor, backend=backend) + + op = qu.quantum_constructor([psi_node[0]], [psi_node[1]]) + assert not op.is_scalar() + assert not op.is_vector() + assert not op.is_adjoint_vector() + assert len(op.out_edges) == 1 + assert len(op.in_edges) == 1 + assert op.out_edges[0] is psi_node[0] + assert op.in_edges[0] is psi_node[1] + + op = qu.quantum_constructor([psi_node[0], psi_node[1]], []) + assert not op.is_scalar() + assert op.is_vector() + assert not op.is_adjoint_vector() + assert len(op.out_edges) == 2 + assert len(op.in_edges) == 0 + assert op.out_edges[0] is psi_node[0] + assert op.out_edges[1] is psi_node[1] + + op = qu.quantum_constructor([], [psi_node[0], psi_node[1]]) + assert not op.is_scalar() + assert not op.is_vector() + assert op.is_adjoint_vector() + assert len(op.out_edges) == 0 + assert len(op.in_edges) == 2 + assert op.in_edges[0] is psi_node[0] + assert op.in_edges[1] is psi_node[1] + + with 
pytest.raises(ValueError): + op = qu.quantum_constructor([], [], [psi_node]) + + _ = psi_node[0] ^ psi_node[1] + op = qu.quantum_constructor([], [], [psi_node]) + assert op.is_scalar() + assert not op.is_vector() + assert not op.is_adjoint_vector() + assert len(op.out_edges) == 0 + assert len(op.in_edges) == 0 + + +def test_checks(backend): + node1 = tn.Node(np.random.rand(2, 2), backend=backend) + node2 = tn.Node(np.random.rand(2, 2), backend=backend) + _ = node1[1] ^ node2[0] + + # extra dangling edges must be explicitly ignored + with pytest.raises(ValueError): + _ = qu.QuVector([node1[0]]) + + # correctly ignore the extra edge + _ = qu.QuVector([node1[0]], ignore_edges=[node2[1]]) + + # in/out edges must be dangling + with pytest.raises(ValueError): + _ = qu.QuVector([node1[0], node1[1], node2[1]]) + + +def test_from_tensor(backend): + psi_tensor = np.random.rand(2, 2) + + op = qu.QuOperator.from_tensor(psi_tensor, [0], [1], backend=backend) + assert not op.is_scalar() + assert not op.is_vector() + assert not op.is_adjoint_vector() + np.testing.assert_almost_equal(op.eval(), psi_tensor) + + op = qu.QuVector.from_tensor(psi_tensor, [0, 1], backend=backend) + assert not op.is_scalar() + assert op.is_vector() + assert not op.is_adjoint_vector() + np.testing.assert_almost_equal(op.eval(), psi_tensor) + + op = qu.QuAdjointVector.from_tensor(psi_tensor, [0, 1], backend=backend) + assert not op.is_scalar() + assert not op.is_vector() + assert op.is_adjoint_vector() + np.testing.assert_almost_equal(op.eval(), psi_tensor) + + op = qu.QuScalar.from_tensor(1.0, backend=backend) + assert op.is_scalar() + assert not op.is_vector() + assert not op.is_adjoint_vector() + assert op.eval() == 1.0 + + +def test_identity(backend): + E = qu.identity((2, 3, 4), backend=backend) + for n in E.nodes: + assert isinstance(n, tn.CopyNode) + twentyfour = E.trace() + for n in twentyfour.nodes: + assert isinstance(n, tn.CopyNode) + assert twentyfour.eval() == 24 + + tensor = 
np.random.rand(2, 2) + psi = qu.QuVector.from_tensor(tensor, backend=backend) + E = qu.identity((2, 2), backend=backend) + np.testing.assert_allclose((E @ psi).eval(), psi.eval()) + + np.testing.assert_allclose( + (psi.adjoint() @ E @ psi).eval(), psi.norm().eval()) + + op = qu.QuOperator.from_tensor(tensor, [0], [1], backend=backend) + op_I = op.tensor_product(E) + op_times_4 = op_I.partial_trace([1, 2]) + np.testing.assert_allclose(op_times_4.eval(), 4 * op.eval()) + + +def test_tensor_product(backend): + psi = qu.QuVector.from_tensor(np.random.rand(2, 2), backend=backend) + psi_psi = psi.tensor_product(psi) + assert len(psi_psi.subsystem_edges) == 4 + np.testing.assert_almost_equal(psi_psi.norm().eval(), psi.norm().eval()**2) + + +def test_matmul(backend): + mat = np.random.rand(2, 2) + op = qu.QuOperator.from_tensor(mat, [0], [1], backend=backend) + res = (op @ op).eval() + np.testing.assert_allclose(res, mat @ mat) + + +def test_mul(backend): + mat = np.eye(2) + scal = np.float64(0.5) + op = qu.QuOperator.from_tensor(mat, [0], [1], backend=backend) + scal_op = qu.QuScalar.from_tensor(scal, backend=backend) + + res = (op * scal_op).eval() + np.testing.assert_allclose(res, mat * 0.5) + + res = (scal_op * op).eval() + np.testing.assert_allclose(res, mat * 0.5) + + res = (scal_op * scal_op).eval() + np.testing.assert_almost_equal(res, 0.25) + + res = (op * np.float64(0.5)).eval() + np.testing.assert_allclose(res, mat * 0.5) + + res = (np.float64(0.5) * op).eval() + np.testing.assert_allclose(res, mat * 0.5) + + with pytest.raises(ValueError): + _ = (op * op) + + with pytest.raises(ValueError): + _ = (op * mat) + +def test_expectations(backend): + if backend == 'pytorch': + psi_tensor = np.random.rand(2, 2, 2) + op_tensor = np.random.rand(2, 2) + else: + psi_tensor = np.random.rand(2, 2, 2) + 1.j * np.random.rand(2, 2, 2) + op_tensor = np.random.rand(2, 2) + 1.j * np.random.rand(2, 2) + + psi = qu.QuVector.from_tensor(psi_tensor, backend=backend) + op = 
qu.QuOperator.from_tensor(op_tensor, [0], [1], backend=backend) + + op_3 = op.tensor_product( + qu.identity((2, 2), backend=backend, dtype=psi_tensor.dtype)) + res1 = (psi.adjoint() @ op_3 @ psi).eval() + + rho_1 = psi.reduced_density([1, 2]) # trace out sites 2 and 3 + res2 = (op @ rho_1).trace().eval() + + np.testing.assert_almost_equal(res1, res2) + + +def test_projector(backend): + psi_tensor = np.random.rand(2, 2) + psi_tensor /= np.linalg.norm(psi_tensor) + psi = qu.QuVector.from_tensor(psi_tensor, backend=backend) + P = psi.projector() + np.testing.assert_allclose((P @ psi).eval(), psi_tensor) + + np.testing.assert_allclose((P @ P).eval(), P.eval()) From a8ab55ad1858c99d194924b6efb2373bd67a700e Mon Sep 17 00:00:00 2001 From: Ivan PANICO Date: Thu, 12 Dec 2019 10:47:14 -0800 Subject: [PATCH 052/212] adding random uniform initialization (#412) * adding random uniform initialization * fixes dumb pylint * couple of nit picks --- tensornetwork/backends/base_backend.py | 21 ++++++++ tensornetwork/backends/jax/jax_backend.py | 36 ++++++++++++++ .../backends/jax/jax_backend_test.py | 49 +++++++++++++++++++ tensornetwork/backends/numpy/numpy_backend.py | 18 +++++++ .../backends/numpy/numpy_backend_test.py | 48 ++++++++++++++++++ .../backends/pytorch/pytorch_backend.py | 10 ++++ .../backends/pytorch/pytorch_backend_test.py | 41 ++++++++++++++++ tensornetwork/backends/shell/shell_backend.py | 7 +++ .../backends/shell/shell_backend_test.py | 5 ++ .../backends/tensorflow/tensorflow_backend.py | 20 ++++++++ .../tensorflow/tensorflow_backend_test.py | 42 ++++++++++++++++ 11 files changed, 297 insertions(+) diff --git a/tensornetwork/backends/base_backend.py b/tensornetwork/backends/base_backend.py index 55b9f40ea..1d9d249b7 100644 --- a/tensornetwork/backends/base_backend.py +++ b/tensornetwork/backends/base_backend.py @@ -265,6 +265,27 @@ def randn(self, raise NotImplementedError("Backend '{}' has not implemented randn.".format( self.name)) + def random_uniform(self, + 
shape: Tuple[int, ...], + boundaries: Optional[Tuple[float, float]] = (0.0, 1.0), + dtype: Optional[Type[np.number]] = None, + seed: Optional[int] = None) -> Tensor: + """Return a random uniform matrix of dimension `dim`. + Depending on specific backends, `dim` has to be either an int + (numpy, torch, tensorflow) or a `ShapeType` object + (for block-sparse backends). Block-sparse + behavior is currently not supported + Args: + shape (int): The dimension of the returned matrix. + boundaries (tuple): The boundaries of the uniform distribution. + dtype: The dtype of the returned matrix. + seed: The seed for the random number generator + Returns: + Tensor : random uniform initialized tensor. + """ + raise NotImplementedError(("Backend '{}' has not implemented " + "random_uniform.").format(self.name)) + def conj(self, tensor: Tensor) -> Tensor: """ Return the complex conjugate of `tensor` diff --git a/tensornetwork/backends/jax/jax_backend.py b/tensornetwork/backends/jax/jax_backend.py index f9235391a..9773f6026 100644 --- a/tensornetwork/backends/jax/jax_backend.py +++ b/tensornetwork/backends/jax/jax_backend.py @@ -72,6 +72,42 @@ def cmplx_randn(complex_dtype, real_dtype): return self.jax.random.normal(key, shape).astype(dtype) + def random_uniform(self, + shape: Tuple[int, ...], + boundaries: Optional[Tuple[float, float]] = (0.0, 1.0), + dtype: Optional[np.dtype] = None, + seed: Optional[int] = None) -> Tensor: + if not seed: + seed = np.random.randint(0, 2**63) + key = self.jax.random.PRNGKey(seed) + + dtype = dtype if dtype is not None else np.dtype(np.float64) + + def cmplx_random_uniform(complex_dtype, real_dtype): + real_dtype = np.dtype(real_dtype) + complex_dtype = np.dtype(complex_dtype) + + key_2 = self.jax.random.PRNGKey(seed + 1) + + real_part = self.jax.random.uniform(key, shape, dtype=real_dtype, + minval=boundaries[0], + maxval=boundaries[1]) + complex_part = self.jax.random.uniform(key_2, shape, dtype=real_dtype, + minval=boundaries[0], + 
maxval=boundaries[1]) + unit = ( + np.complex64(1j) + if complex_dtype == np.dtype(np.complex64) else np.complex128(1j)) + return real_part + unit * complex_part + + if np.dtype(dtype) is np.dtype(self.np.complex128): + return cmplx_random_uniform(dtype, self.np.float64) + if np.dtype(dtype) is np.dtype(self.np.complex64): + return cmplx_random_uniform(dtype, self.np.float32) + + return self.jax.random.uniform(key, shape, minval=boundaries[0], + maxval=boundaries[1]).astype(dtype) + def eigs(self, A: Callable, initial_state: Optional[Tensor] = None, diff --git a/tensornetwork/backends/jax/jax_backend_test.py b/tensornetwork/backends/jax/jax_backend_test.py index fbed4eebe..08b21059d 100644 --- a/tensornetwork/backends/jax/jax_backend_test.py +++ b/tensornetwork/backends/jax/jax_backend_test.py @@ -161,6 +161,13 @@ def test_randn(dtype): assert a.shape == (4, 4) +@pytest.mark.parametrize("dtype", np_randn_dtypes) +def test_random_uniform(dtype): + backend = jax_backend.JaxBackend() + a = backend.random_uniform((4, 4), dtype=dtype) + assert a.shape == (4, 4) + + @pytest.mark.parametrize("dtype", [np.complex64, np.complex128]) def test_randn_non_zero_imag(dtype): backend = jax_backend.JaxBackend() @@ -168,6 +175,13 @@ def test_randn_non_zero_imag(dtype): assert np.linalg.norm(np.imag(a)) != 0.0 +@pytest.mark.parametrize("dtype", [np.complex64, np.complex128]) +def test_random_uniform_non_zero_imag(dtype): + backend = jax_backend.JaxBackend() + a = backend.random_uniform((4, 4), dtype=dtype) + assert np.linalg.norm(np.imag(a)) != 0.0 + + @pytest.mark.parametrize("dtype", np_dtypes) def test_eye_dtype(dtype): backend = jax_backend.JaxBackend() @@ -196,6 +210,13 @@ def test_randn_dtype(dtype): assert a.dtype == dtype +@pytest.mark.parametrize("dtype", np_randn_dtypes) +def test_random_uniform_dtype(dtype): + backend = jax_backend.JaxBackend() + a = backend.random_uniform((4, 4), dtype=dtype) + assert a.dtype == dtype + + @pytest.mark.parametrize("dtype", np_randn_dtypes) 
def test_randn_seed(dtype): backend = jax_backend.JaxBackend() @@ -204,6 +225,34 @@ def test_randn_seed(dtype): np.testing.assert_allclose(a, b) +@pytest.mark.parametrize("dtype", np_randn_dtypes) +def test_random_uniform_seed(dtype): + backend = jax_backend.JaxBackend() + a = backend.random_uniform((4, 4), seed=10, dtype=dtype) + b = backend.random_uniform((4, 4), seed=10, dtype=dtype) + np.testing.assert_allclose(a, b) + + +@pytest.mark.parametrize("dtype", np_randn_dtypes) +def test_random_uniform_boundaries(dtype): + lb = 1.2 + ub = 4.8 + backend = jax_backend.JaxBackend() + a = backend.random_uniform((4, 4), seed=10, dtype=dtype) + b = backend.random_uniform((4, 4), (lb, ub), seed=10, dtype=dtype) + assert((a >= 0).all() and (a <= 1).all() and + (b >= lb).all() and (b <= ub).all()) + + +def test_random_uniform_behavior(): + seed = 10 + key = jax.random.PRNGKey(seed) + backend = jax_backend.JaxBackend() + a = backend.random_uniform((4, 4), seed=seed) + b = jax.random.uniform(key, (4, 4)) + np.testing.assert_allclose(a, b) + + def test_conj(): backend = jax_backend.JaxBackend() real = np.random.rand(2, 2, 2) diff --git a/tensornetwork/backends/numpy/numpy_backend.py b/tensornetwork/backends/numpy/numpy_backend.py index 7d0527b83..e7c4e8ecd 100644 --- a/tensornetwork/backends/numpy/numpy_backend.py +++ b/tensornetwork/backends/numpy/numpy_backend.py @@ -132,6 +132,24 @@ def randn(self, dtype) + 1j * self.np.random.randn(*shape).astype(dtype) return self.np.random.randn(*shape).astype(dtype) + def random_uniform(self, + shape: Tuple[int, ...], + boundaries: Optional[Tuple[float, float]] = (0.0, 1.0), + dtype: Optional[numpy.dtype] = None, + seed: Optional[int] = None) -> Tensor: + + if seed: + self.np.random.seed(seed) + dtype = dtype if dtype is not None else self.np.float64 + if ((self.np.dtype(dtype) is self.np.dtype(self.np.complex128)) or + (self.np.dtype(dtype) is self.np.dtype(self.np.complex64))): + return self.np.random.uniform(boundaries[0], 
boundaries[1], shape).astype( + dtype) + 1j * self.np.random.uniform(boundaries[0], + boundaries[1], + shape).astype(dtype) + return self.np.random.uniform(boundaries[0], + boundaries[1], shape).astype(dtype) + def conj(self, tensor: Tensor) -> Tensor: return self.np.conj(tensor) diff --git a/tensornetwork/backends/numpy/numpy_backend_test.py b/tensornetwork/backends/numpy/numpy_backend_test.py index 55fe9edb6..49645d876 100644 --- a/tensornetwork/backends/numpy/numpy_backend_test.py +++ b/tensornetwork/backends/numpy/numpy_backend_test.py @@ -159,6 +159,13 @@ def test_randn(dtype): assert a.shape == (4, 4) +@pytest.mark.parametrize("dtype", np_dtypes) +def test_random_uniform(dtype): + backend = numpy_backend.NumPyBackend() + a = backend.random_uniform((4, 4), dtype=dtype, seed=10) + assert a.shape == (4, 4) + + @pytest.mark.parametrize("dtype", [np.complex64, np.complex128]) def test_randn_non_zero_imag(dtype): backend = numpy_backend.NumPyBackend() @@ -166,6 +173,13 @@ def test_randn_non_zero_imag(dtype): assert np.linalg.norm(np.imag(a)) != 0.0 +@pytest.mark.parametrize("dtype", [np.complex64, np.complex128]) +def test_random_uniform_non_zero_imag(dtype): + backend = numpy_backend.NumPyBackend() + a = backend.random_uniform((4, 4), dtype=dtype, seed=10) + assert np.linalg.norm(np.imag(a)) != 0.0 + + @pytest.mark.parametrize("dtype", np_dtypes) def test_eye_dtype(dtype): backend = numpy_backend.NumPyBackend() @@ -194,6 +208,13 @@ def test_randn_dtype(dtype): assert a.dtype == dtype +@pytest.mark.parametrize("dtype", np_dtypes) +def test_random_uniform_dtype(dtype): + backend = numpy_backend.NumPyBackend() + a = backend.random_uniform((4, 4), dtype=dtype, seed=10) + assert a.dtype == dtype + + @pytest.mark.parametrize("dtype", np_randn_dtypes) def test_randn_seed(dtype): backend = numpy_backend.NumPyBackend() @@ -202,6 +223,33 @@ def test_randn_seed(dtype): np.testing.assert_allclose(a, b) +@pytest.mark.parametrize("dtype", np_dtypes) +def 
test_random_uniform_seed(dtype): + backend = numpy_backend.NumPyBackend() + a = backend.random_uniform((4, 4), seed=10, dtype=dtype) + b = backend.random_uniform((4, 4), seed=10, dtype=dtype) + np.testing.assert_allclose(a, b) + + +@pytest.mark.parametrize("dtype", np_randn_dtypes) +def test_random_uniform_boundaries(dtype): + lb = 1.2 + ub = 4.8 + backend = numpy_backend.NumPyBackend() + a = backend.random_uniform((4, 4), seed=10, dtype=dtype) + b = backend.random_uniform((4, 4), (lb, ub), seed=10, dtype=dtype) + assert((a >= 0).all() and (a <= 1).all() and + (b >= lb).all() and (b <= ub).all()) + + +def test_random_uniform_behavior(): + backend = numpy_backend.NumPyBackend() + a = backend.random_uniform((4, 4), seed=10) + np.random.seed(10) + b = np.random.uniform(size=(4, 4)) + np.testing.assert_allclose(a, b) + + def test_conj(): backend = numpy_backend.NumPyBackend() real = np.random.rand(2, 2, 2) diff --git a/tensornetwork/backends/pytorch/pytorch_backend.py b/tensornetwork/backends/pytorch/pytorch_backend.py index 9a979b1c8..0caba598a 100644 --- a/tensornetwork/backends/pytorch/pytorch_backend.py +++ b/tensornetwork/backends/pytorch/pytorch_backend.py @@ -128,6 +128,16 @@ def randn(self, dtype = dtype if dtype is not None else self.torch.float64 return self.torch.randn(shape, dtype=dtype) + def random_uniform(self, + shape: Tuple[int, ...], + boundaries: Optional[Tuple[float, float]] = (0.0, 1.0), + dtype: Optional[Any] = None, + seed: Optional[int] = None) -> Tensor: + if seed: + self.torch.manual_seed(seed) + dtype = dtype if dtype is not None else self.torch.float64 + return self.torch.empty(shape, dtype=dtype).uniform_(*boundaries) + def conj(self, tensor: Tensor) -> Tensor: return tensor #pytorch does not support complex dtypes diff --git a/tensornetwork/backends/pytorch/pytorch_backend_test.py b/tensornetwork/backends/pytorch/pytorch_backend_test.py index 5e3ead3f9..ca0cd92f3 100644 --- a/tensornetwork/backends/pytorch/pytorch_backend_test.py +++ 
b/tensornetwork/backends/pytorch/pytorch_backend_test.py @@ -148,6 +148,13 @@ def test_randn(dtype): assert a.shape == (4, 4) +@pytest.mark.parametrize("dtype", torch_randn_dtypes) +def test_random_uniform(dtype): + backend = pytorch_backend.PyTorchBackend() + a = backend.random_uniform((4, 4), dtype=dtype) + assert a.shape == (4, 4) + + @pytest.mark.parametrize("dtype", torch_eye_dtypes) def test_eye_dtype(dtype): backend = pytorch_backend.PyTorchBackend() @@ -176,6 +183,13 @@ def test_randn_dtype(dtype): assert a.dtype == dtype +@pytest.mark.parametrize("dtype", torch_randn_dtypes) +def test_random_uniform_dtype(dtype): + backend = pytorch_backend.PyTorchBackend() + a = backend.random_uniform((4, 4), dtype=dtype) + assert a.dtype == dtype + + @pytest.mark.parametrize("dtype", torch_randn_dtypes) def test_randn_seed(dtype): backend = pytorch_backend.PyTorchBackend() @@ -184,6 +198,33 @@ def test_randn_seed(dtype): np.testing.assert_allclose(a, b) +@pytest.mark.parametrize("dtype", torch_randn_dtypes) +def test_random_uniform_seed(dtype): + backend = pytorch_backend.PyTorchBackend() + a = backend.random_uniform((4, 4), seed=10, dtype=dtype) + b = backend.random_uniform((4, 4), seed=10, dtype=dtype) + torch.allclose(a, b) + + +@pytest.mark.parametrize("dtype", torch_randn_dtypes) +def test_random_uniform_boundaries(dtype): + lb = 1.2 + ub = 4.8 + backend = pytorch_backend.PyTorchBackend() + a = backend.random_uniform((4, 4), seed=10, dtype=dtype) + b = backend.random_uniform((4, 4), (lb, ub), seed=10, dtype=dtype) + assert(torch.ge(a, 0).byte().all() and torch.le(a, 1).byte().all() and + torch.ge(b, lb).byte().all() and torch.le(b, ub).byte().all()) + + +def test_random_uniform_behavior(): + backend = pytorch_backend.PyTorchBackend() + a = backend.random_uniform((4, 4), seed=10) + torch.manual_seed(10) + b = torch.empty((4, 4), dtype=torch.float64).uniform_() + torch.allclose(a, b) + + def test_conj(): backend = pytorch_backend.PyTorchBackend() real = 
np.random.rand(2, 2, 2) diff --git a/tensornetwork/backends/shell/shell_backend.py b/tensornetwork/backends/shell/shell_backend.py index 4e73f638d..3365fae5e 100644 --- a/tensornetwork/backends/shell/shell_backend.py +++ b/tensornetwork/backends/shell/shell_backend.py @@ -207,6 +207,13 @@ def randn(self, seed: Optional[int] = None) -> Tensor: return ShellTensor(shape) + def random_uniform(self, + shape: Tuple[int, ...], + boundaries: Optional[Tuple[float, float]] = (0.0, 1.0), + dtype: Optional[Type[np.number]] = None, + seed: Optional[int] = None) -> Tensor: + return ShellTensor(shape) + def conj(self, tensor: Tensor) -> Tensor: return tensor diff --git a/tensornetwork/backends/shell/shell_backend_test.py b/tensornetwork/backends/shell/shell_backend_test.py index 45a713965..3974dc1f7 100644 --- a/tensornetwork/backends/shell/shell_backend_test.py +++ b/tensornetwork/backends/shell/shell_backend_test.py @@ -157,6 +157,11 @@ def test_randn(): assertBackendsAgree("randn", args) +def test_random_uniform(): + args = {"shape": (10, 4)} + assertBackendsAgree("random_uniform", args) + + def test_eigsh_lanczos_1(): backend = shell_backend.ShellBackend() D = 16 diff --git a/tensornetwork/backends/tensorflow/tensorflow_backend.py b/tensornetwork/backends/tensorflow/tensorflow_backend.py index 2602984ed..5f7cd1201 100644 --- a/tensornetwork/backends/tensorflow/tensorflow_backend.py +++ b/tensornetwork/backends/tensorflow/tensorflow_backend.py @@ -131,6 +131,26 @@ def randn(self, self.tf.random.normal(shape=shape, dtype=dtype.real_dtype)) return self.tf.random.normal(shape=shape, dtype=dtype) + def random_uniform(self, + shape: Tuple[int, ...], + boundaries: Optional[Tuple[float, float]] = (0.0, 1.0), + dtype: Optional[Type[np.number]] = None, + seed: Optional[int] = None) -> Tensor: + if seed: + self.tf.random.set_seed(seed) + + dtype = dtype if dtype is not None else self.tf.float64 + if (dtype is self.tf.complex128) or (dtype is self.tf.complex64): + return self.tf.complex( 
+ self.tf.random.uniform(shape=shape, minval=boundaries[0], + maxval=boundaries[1], dtype=dtype.real_dtype), + self.tf.random.uniform(shape=shape, minval=boundaries[0], + maxval=boundaries[1], dtype=dtype.real_dtype)) + self.tf.random.set_seed(10) + a = self.tf.random.uniform(shape=shape, minval=boundaries[0], + maxval=boundaries[1], dtype=dtype) + return a + def conj(self, tensor: Tensor) -> Tensor: return self.tf.math.conj(tensor) diff --git a/tensornetwork/backends/tensorflow/tensorflow_backend_test.py b/tensornetwork/backends/tensorflow/tensorflow_backend_test.py index 8df3fcce0..25110d66c 100644 --- a/tensornetwork/backends/tensorflow/tensorflow_backend_test.py +++ b/tensornetwork/backends/tensorflow/tensorflow_backend_test.py @@ -151,6 +151,13 @@ def test_randn(dtype): assert a.shape == (4, 4) +@pytest.mark.parametrize("dtype", tf_dtypes) +def test_random_uniform(dtype): + backend = tensorflow_backend.TensorFlowBackend() + a = backend.random_uniform((4, 4), dtype=dtype, seed=10) + assert a.shape == (4, 4) + + @pytest.mark.parametrize("dtype", [tf.complex64, tf.complex128]) def test_randn_non_zero_imag(dtype): backend = tensorflow_backend.TensorFlowBackend() @@ -158,6 +165,13 @@ def test_randn_non_zero_imag(dtype): assert tf.math.greater(tf.linalg.norm(tf.math.imag(a)), 0.0) +@pytest.mark.parametrize("dtype", [tf.complex64, tf.complex128]) +def test_random_uniform_non_zero_imag(dtype): + backend = tensorflow_backend.TensorFlowBackend() + a = backend.random_uniform((4, 4), dtype=dtype, seed=10) + assert tf.math.greater(tf.linalg.norm(tf.math.imag(a)), 0.0) + + @pytest.mark.parametrize("dtype", tf_dtypes) def test_eye_dtype(dtype): backend = tensorflow_backend.TensorFlowBackend() @@ -186,6 +200,13 @@ def test_randn_dtype(dtype): assert a.dtype == dtype +@pytest.mark.parametrize("dtype", tf_dtypes) +def test_random_uniform_dtype(dtype): + backend = tensorflow_backend.TensorFlowBackend() + a = backend.random_uniform((4, 4), dtype=dtype, seed=10) + assert a.dtype 
== dtype + + @pytest.mark.parametrize("dtype", tf_randn_dtypes) def test_randn_seed(dtype): backend = tensorflow_backend.TensorFlowBackend() @@ -194,6 +215,27 @@ def test_randn_seed(dtype): np.testing.assert_allclose(a, b) +@pytest.mark.parametrize("dtype", tf_dtypes) +def test_random_uniform_seed(dtype): + test = tf.test.TestCase() + backend = tensorflow_backend.TensorFlowBackend() + a = backend.random_uniform((4, 4), seed=10, dtype=dtype) + b = backend.random_uniform((4, 4), seed=10, dtype=dtype) + test.assertAllCloseAccordingToType(a, b) + + +@pytest.mark.parametrize("dtype", tf_randn_dtypes) +def test_random_uniform_boundaries(dtype): + test = tf.test.TestCase() + lb = 1.2 + ub = 4.8 + backend = tensorflow_backend.TensorFlowBackend() + a = backend.random_uniform((4, 4), seed=10, dtype=dtype) + b = backend.random_uniform((4, 4), (lb, ub), seed=10, dtype=dtype) + test.assertAllInRange(a, 0, 1) + test.assertAllInRange(b, lb, ub) + + def test_conj(): backend = tensorflow_backend.TensorFlowBackend() real = np.random.rand(2, 2, 2) From 46aeec135bbb0e024efcd77c50b2068628a20e18 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 13 Dec 2019 22:19:02 -0500 Subject: [PATCH 053/212] replace kron with broadcasting --- tensornetwork/block_tensor/index.py | 43 +++++++++++++++++++---------- 1 file changed, 28 insertions(+), 15 deletions(-) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index fc6b36cd8..326311ec1 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -36,15 +36,19 @@ def __init__(self, name: Optional[Text] = None, left_child: Optional["Index"] = None, right_child: Optional["Index"] = None): - self.charges = np.asarray(charges) + self._charges = np.asarray(charges) self.flow = flow self.left_child = left_child self.right_child = right_child self.name = name if name else 'index' + @property + def is_leave(self): + return (self.left_child is None) and (self.right_child is None) + 
@property def dimension(self): - return len(self.charges) + return np.prod([len(i.charges) for i in self.get_elementary_indices()]) def _copy_helper(self, index: "Index", copied_index: "Index") -> None: """ @@ -52,16 +56,17 @@ def _copy_helper(self, index: "Index", copied_index: "Index") -> None: """ if index.left_child != None: left_copy = Index( - charges=index.left_child.charges.copy(), + charges=copy.copy(index.left_child.charges), flow=copy.copy(index.left_child.flow), - name=index.left_child.name) + name=copy.copy(index.left_child.name)) + copied_index.left_child = left_copy self._copy_helper(index.left_child, left_copy) if index.right_child != None: right_copy = Index( - charges=index.right_child.charges.copy(), + charges=copy.copy(index.right_child.charges), flow=copy.copy(index.right_child.flow), - name=index.right_child.name) + name=copy.copy(index.right_child.name)) copied_index.right_child = right_copy self._copy_helper(index.right_child, right_copy) @@ -72,7 +77,7 @@ def copy(self): `Index` are copied as well. """ index_copy = Index( - charges=self.charges.copy(), flow=copy.copy(self.flow), name=self.name) + charges=self._charges.copy(), flow=copy.copy(self.flow), name=self.name) self._copy_helper(self, index_copy) return index_copy @@ -100,10 +105,20 @@ def __mul__(self, index: "Index") -> "Index": Merge `index` and self into a single larger index. The flow of the resulting index is set to 1. Flows of `self` and `index` are multiplied into - the charges upon fusing. 
+ the charges upon fusing.n """ return fuse_index_pair(self, index) + @property + def charges(self): + if self.is_leave: + return self._charges + fused_charges = fuse_charges(self.left_child.charges, self.left_child.flow, + self.right_child.charges, + self.right_child.flow) + + return fused_charges + def fuse_charges(q1: Union[List, np.ndarray], flow1: int, q2: Union[List, np.ndarray], flow2: int) -> np.ndarray: @@ -146,7 +161,8 @@ def fuse_degeneracies(degen1: Union[List, np.ndarray], Returns: np.ndarray: The result of fusing `q1` with `q2`. """ - return np.kron(degen2, degen1) + return np.reshape(degen2[:, None] * degen1[None, :], + len(degen1) * len(degen2)) def fuse_index_pair(left_index: Index, @@ -166,13 +182,10 @@ def fuse_index_pair(left_index: Index, raise ValueError( "index1 and index2 are the same object. Can only fuse distinct objects") - fused_charges = fuse_charges(left_index.charges, left_index.flow, - right_index.charges, right_index.flow) + # fused_charges = fuse_charges(left_index.charges, left_index.flow, + # right_index.charges, right_index.flow) return Index( - charges=fused_charges, - flow=flow, - left_child=left_index, - right_child=right_index) + charges=None, flow=flow, left_child=left_index, right_child=right_index) def fuse_indices(indices: List[Index], flow: Optional[int] = 1) -> Index: From 6844f2cc487ab3e9f913961935f5829ba2c56945 Mon Sep 17 00:00:00 2001 From: mganahl Date: Sun, 15 Dec 2019 14:55:41 -0500 Subject: [PATCH 054/212] column-major -> row-major --- tensornetwork/block_tensor/block_tensor.py | 114 ++++++++++++++++++++- 1 file changed, 110 insertions(+), 4 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index a4388ce79..db9bd393a 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -74,7 +74,7 @@ def compute_num_nonzero(charges: List[np.ndarray], #compute the degeneracies of `fused_charges` charges 
fused_degeneracies = fuse_degeneracies(accumulated_degeneracies, leg_degeneracies) - #compute the new degeneracies resulting of fusing the vectors of unique charges + #compute the new degeneracies resulting from fusing #`accumulated_charges` and `leg_charge_2` accumulated_charges = np.unique(fused_charges) accumulated_degeneracies = [] @@ -107,6 +107,7 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray], dict: Dictionary mapping a tuple of charges to a shape tuple. Each element corresponds to a non-zero valued block of the tensor. """ + #FIXME: this routine is slow check_flows(flows) degeneracies = [] unique_charges = [] @@ -189,6 +190,108 @@ def retrieve_non_zero_diagonal_blocks( row_degeneracies = dict(zip(unique_row_charges, row_dims)) column_degeneracies = dict(zip(unique_column_charges, column_dims)) + # we only care about charges common to row and columns + mask = np.isin(row_charges, common_charges) + relevant_row_charges = row_charges[mask] + + #some numpy magic to get the index locations of the blocks + #we generate a vector of `len(relevant_row_charges) which, + #for each charge `c` in `relevant_row_charges` holds the + #column-degeneracy of charge `c` + degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64) + #for each charge `c` in `common_charges` we generate a boolean mask + #for indexing the positions where `relevant_column_charges` has a value of `c`. + masks = {} + for c in common_charges: + mask = relevant_row_charges == c + masks[c] = mask + degeneracy_vector[mask] = column_degeneracies[-c] + + # the result of the cumulative sum is a vector containing + # the stop positions of the non-zero values of each row + # within the data vector. + # E.g. 
for `relevant_row_charges` = [0,1,0,0,3], and + # column_degeneracies[0] = 10 + # column_degeneracies[1] = 20 + # column_degeneracies[3] = 30 + # we have + # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] + # The starting positions of consecutive elements (in row-major order) in + # each row with charge `c=0` within the data vector are then simply obtained using + # masks[0] = [True, False, True, True, False] + # and `stop_positions[masks[0]] - column_degeneracies[0]` + stop_positions = np.cumsum(degeneracy_vector) + blocks = {} + + for c in common_charges: + #numpy broadcasting is substantially faster than kron! + a = np.expand_dims(stop_positions[masks[c]] - column_degeneracies[-c], 0) + b = np.expand_dims(np.arange(column_degeneracies[-c]), 1) + if not return_data: + blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])] + else: + blocks[c] = np.reshape(data[a + b], + (row_degeneracies[c], column_degeneracies[-c])) + return blocks + + +def retrieve_non_zero_diagonal_blocks_column_major( + data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: + """ + Deprecated + + Given the meta data and underlying data of a symmetric matrix, compute + all diagonal blocks and return them in a dict, assuming column-major + ordering. + Args: + data: An np.ndarray of the data. The number of elements in `data` + has to match the number of non-zero elements defined by `charges` + and `flows` + charges: List of np.ndarray, one for each leg. + Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. 
+ If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. + + Returns: + dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray + or a python list of locations and shapes, depending on the value of `return_data`. + """ + if len(charges) != 2: + raise ValueError("input has to be a two-dimensional symmetric matrix") + check_flows(flows) + if len(flows) != len(charges): + raise ValueError("`len(flows)` is different from `len(charges)`") + + #we multiply the flows into the charges + row_charges = flows[0] * charges[0] # a list of charges on each row + column_charges = flows[1] * charges[1] # a list of charges on each column + + #get the unique charges + unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) + unique_column_charges, column_dims = np.unique( + column_charges, return_counts=True) + #get the charges common to rows and columns (only those matter) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + + #convenience container for storing the degeneracies of each + #row and column charge + row_degeneracies = dict(zip(unique_row_charges, row_dims)) + column_degeneracies = dict(zip(unique_column_charges, column_dims)) + # we only care about charges common to row and columns mask = np.isin(column_charges, -common_charges) relevant_column_charges = column_charges[mask] @@ -240,6 +343,8 @@ def retrieve_non_zero_diagonal_blocks_deprecated( flows: List[Union[bool, int]], return_data: Optional[bool] = False) -> Dict: """ + Deprecated + Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. 
This is a deprecated version which in general performs worse than the @@ -298,14 +403,14 @@ def retrieve_non_zero_diagonal_blocks_deprecated( #for each charge `c` in `relevant_column_charges` holds the #row-degeneracy of charge `c` - degeneracy_vector = row_dims[column_locations] + degeneracy_vector = column_dims[row_locations] stop_positions = np.cumsum(degeneracy_vector) blocks = {} for c in common_charges: #numpy broadcasting is substantially faster than kron! a = np.expand_dims( - stop_positions[column_charges == -c] - row_degeneracies[c], 0) - b = np.expand_dims(np.arange(row_degeneracies[c]), 1) + stop_positions[row_charges == c] - column_degeneracies[-c], 0) + b = np.expand_dims(np.arange(column_degeneracies[-c]), 1) if not return_data: blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])] else: @@ -344,6 +449,7 @@ class BlockSparseTensor: The class design follows Glen's proposal (Design 0). The class currently only supports a single U(1) symmetry and only numpy.ndarray. + Attributes: * self.data: A 1d np.ndarray storing the underlying data of the tensor From 4f4ba935b3bd148c8d8c9d67da5c697c6d355aaa Mon Sep 17 00:00:00 2001 From: mganahl Date: Sun, 15 Dec 2019 14:58:04 -0500 Subject: [PATCH 055/212] documentation --- tensornetwork/block_tensor/block_tensor.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index db9bd393a..31c8298e6 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -554,8 +554,7 @@ def rank(self): def sparse_shape(self) -> Tuple: """ The sparse shape of the tensor. - Returns a copy of self.indices. Note that copying - can be relatively expensive for deeply nested indices. + Returns a copy of self.indices. Returns: Tuple: A tuple of `Index` objects. 
""" From d583e2b5dedaa7e0de2cadcafc0382c0de95ac6d Mon Sep 17 00:00:00 2001 From: mganahl Date: Tue, 17 Dec 2019 12:33:26 -0500 Subject: [PATCH 056/212] added function to compute unique charges and charge degeneracies Function avoids explicit full fusion of all legs, and instead only keeps track of the unique charges and their degeneracies upon fusion --- tensornetwork/block_tensor/block_tensor.py | 176 +++++++++++++++++---- 1 file changed, 147 insertions(+), 29 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 31c8298e6..8f3bbf023 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -34,11 +34,11 @@ def check_flows(flows) -> None: "flows = {} contains values different from 1 and -1".format(flows)) -def compute_num_nonzero(charges: List[np.ndarray], - flows: List[Union[bool, int]]) -> int: +def compute_fused_charge_degeneracies(charges: List[np.ndarray], + flows: List[Union[bool, int]]) -> Dict: """ - Compute the number of non-zero elements, given the meta-data of - a symmetric tensor. + For a list of charges, compute all possible fused charges resulting + from fusing `charges`, together with their respective degeneracyn Args: charges: List of np.ndarray of int, one for each leg of the underlying tensor. Each np.ndarray `charges[leg]` @@ -49,40 +49,64 @@ def compute_num_nonzero(charges: List[np.ndarray], of the charges on each leg. `1` is inflowing, `-1` is outflowing charge. Returns: - int: The number of non-zero elements. 
+ dict: Mapping fused charges (int) to degeneracies (int) """ if len(charges) == 1: - return len(np.nonzero(charges == 0)[0]) - #get unique charges and their degeneracies on each leg - charge_degeneracies = [ - np.unique(charge, return_counts=True) for charge in charges - ] - accumulated_charges, accumulated_degeneracies = charge_degeneracies[0] + return dict(zip(np.unique(charges[0], return_counts=True))) + + # get unique charges and their degeneracies on the first leg. + # We are fusing from "left" to "right". + accumulated_charges, accumulated_degeneracies = np.unique( + charges[0], return_counts=True) #multiply the flow into the charges of first leg accumulated_charges *= flows[0] - for n in range(1, len(charge_degeneracies)): + for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor - leg_charge, leg_degeneracies = charge_degeneracies[n] + leg_charges, leg_degeneracies = np.unique(charges[n], return_counts=True) #fuse the unique charges #Note: entries in `fused_charges` are not unique anymore. #flow1 = 1 because the flow of leg 0 has already been #mulitplied above fused_charges = fuse_charges( - q1=accumulated_charges, flow1=1, q2=leg_charge, flow2=flows[n]) + q1=accumulated_charges, flow1=1, q2=leg_charges, flow2=flows[n]) #compute the degeneracies of `fused_charges` charges + #`fused_degeneracies` is a list of degeneracies such that + # `fused_degeneracies[n]` is the degeneracy of of + # charge `c = fused_charges[n]`. 
fused_degeneracies = fuse_degeneracies(accumulated_degeneracies, leg_degeneracies) #compute the new degeneracies resulting from fusing - #`accumulated_charges` and `leg_charge_2` + #`accumulated_charges` and `leg_charges_2` accumulated_charges = np.unique(fused_charges) - accumulated_degeneracies = [] + accumulated_degeneracies = np.empty( + len(accumulated_charges), dtype=np.int64) for n in range(len(accumulated_charges)): - accumulated_degeneracies.append( - np.sum(fused_degeneracies[fused_charges == accumulated_charges[n]])) + accumulated_degeneracies[n] = np.sum( + fused_degeneracies[fused_charges == accumulated_charges[n]]) + return accumulated_charges, accumulated_degeneracies + - accumulated_degeneracies = np.asarray(accumulated_degeneracies) +def compute_num_nonzero(charges: List[np.ndarray], + flows: List[Union[bool, int]]) -> int: + """ + Compute the number of non-zero elements, given the meta-data of + a symmetric tensor. + Args: + charges: List of np.ndarray of int, one for each leg of the + underlying tensor. Each np.ndarray `charges[leg]` + is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + Returns: + int: The number of non-zero elements. 
+ """ + accumulated_charges, accumulated_degeneracies = compute_fused_charge_degeneracies( + charges, flows) if len(np.nonzero(accumulated_charges == 0)[0]) == 0: raise ValueError( "given leg-charges `charges` and flows `flows` are incompatible " @@ -235,6 +259,105 @@ def retrieve_non_zero_diagonal_blocks( return blocks +def retrieve_non_zero_diagonal_blocks_test_2( + data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: + """ + Given the meta data and underlying data of a symmetric matrix, compute + all diagonal blocks and return them in a dict. + Args: + data: An np.ndarray of the data. The number of elements in `data` + has to match the number of non-zero elements defined by `charges` + and `flows` + charges: List of np.ndarray, one for each leg. + Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. + + Returns: + dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray + or a python list of locations and shapes, depending on the value of `return_data`. 
+ """ + if len(charges) != 2: + raise ValueError("input has to be a two-dimensional symmetric matrix") + check_flows(flows) + if len(flows) != len(charges): + raise ValueError("`len(flows)` is different from `len(charges)`") + + #we multiply the flows into the charges + row_charges = flows[0] * charges[0] # a list of charges on each row + column_charges = flows[1] * charges[1] # a list of charges on each column + + #get the unique charges + unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) + unique_column_charges, column_dims = np.unique( + column_charges, return_counts=True) + #get the charges common to rows and columns (only those matter) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + + #convenience container for storing the degeneracies of each + #row and column charge + row_degeneracies = dict(zip(unique_row_charges, row_dims)) + column_degeneracies = dict(zip(unique_column_charges, column_dims)) + + # we only care about charges common to row and columns + mask = np.isin(row_charges, common_charges) + relevant_row_charges = row_charges[mask] + + #some numpy magic to get the index locations of the blocks + #we generate a vector of `len(relevant_row_charges) which, + #for each charge `c` in `relevant_row_charges` holds the + #column-degeneracy of charge `c` + degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64) + #for each charge `c` in `common_charges` we generate a boolean mask + #for indexing the positions where `relevant_column_charges` has a value of `c`. + masks = {} + for c in common_charges: + mask = relevant_row_charges == c + masks[c] = mask + degeneracy_vector[mask] = column_degeneracies[-c] + + # the result of the cumulative sum is a vector containing + # the stop positions of the non-zero values of each row + # within the data vector. + # E.g. 
for `relevant_row_charges` = [0,1,0,0,3], and + # column_degeneracies[0] = 10 + # column_degeneracies[1] = 20 + # column_degeneracies[3] = 30 + # we have + # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] + # The starting positions of consecutive elements (in row-major order) in + # each row with charge `c=0` within the data vector are then simply obtained using + # masks[0] = [True, False, True, True, False] + # and `stop_positions[masks[0]] - column_degeneracies[0]` + stop_positions = np.cumsum(degeneracy_vector) + blocks = {} + + for c in common_charges: + #numpy broadcasting is substantially faster than kron! + a = np.expand_dims(stop_positions[masks[c]] - column_degeneracies[-c], 0) + b = np.expand_dims(np.arange(column_degeneracies[-c]), 1) + if not return_data: + blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])] + else: + blocks[c] = np.reshape(data[a + b], + (row_degeneracies[c], column_degeneracies[-c])) + return blocks + + def retrieve_non_zero_diagonal_blocks_column_major( data: np.ndarray, charges: List[np.ndarray], @@ -438,6 +561,10 @@ def compute_mapping_table(charges: List[np.ndarray], with `N` the number of non-zero elements, and `r` the rank of the tensor. """ + # we are using row-major encoding, meaning that the last index + # is moving quickest when iterating through the linear data + # transposing is done taking, for each value of the indices i_0 to i_N-2 + # the junk i_N-1 that gives non-zero tables = np.meshgrid([np.arange(c.shape[0]) for c in charges], indexing='ij') tables = tables[::-1] #reverse the order raise NotImplementedError() @@ -551,23 +678,15 @@ def rank(self): #Thduis the return type of `BlockSparseTensor.shape` is never inspected explicitly #(apart from debugging). @property - def sparse_shape(self) -> Tuple: + def shape(self) -> Tuple: """ The sparse shape of the tensor. Returns a copy of self.indices. Returns: Tuple: A tuple of `Index` objects. 
""" - return tuple([i.copy() for i in self.indices]) - @property - def shape(self) -> Tuple: - """ - The dense shape of the tensor. - """ - return tuple([i.dimension for i in self.indices]) - @property def dtype(self) -> Type[np.number]: return self.data.dtype @@ -584,7 +703,6 @@ def transpose(self, order): """ Transpose the tensor into the new order `order` """ - raise NotImplementedError('transpose is not implemented!!') def reset_shape(self) -> None: From b9f45cbeb329fce90f642aad2e31f44ee25af632 Mon Sep 17 00:00:00 2001 From: mganahl Date: Tue, 17 Dec 2019 22:37:04 -0500 Subject: [PATCH 057/212] improved block finding, fixed bug in reshape re-intorduced BlockSparseTensor.dense_shape new method for fusing charges and degeneracies (faster for very rectangular matrices) --- tensornetwork/block_tensor/block_tensor.py | 125 ++++++++++++++++----- 1 file changed, 94 insertions(+), 31 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 8f3bbf023..d783a71e0 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -20,7 +20,7 @@ from tensornetwork.network_components import Node, contract, contract_between from tensornetwork.backends import backend_factory # pylint: disable=line-too-long -from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies +from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charge_pair, fuse_degeneracies, fuse_charges import numpy as np import itertools import time @@ -52,7 +52,7 @@ def compute_fused_charge_degeneracies(charges: List[np.ndarray], dict: Mapping fused charges (int) to degeneracies (int) """ if len(charges) == 1: - return dict(zip(np.unique(charges[0], return_counts=True))) + return np.unique(charges[0], return_counts=True) # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". 
@@ -69,7 +69,7 @@ def compute_fused_charge_degeneracies(charges: List[np.ndarray], #Note: entries in `fused_charges` are not unique anymore. #flow1 = 1 because the flow of leg 0 has already been #mulitplied above - fused_charges = fuse_charges( + fused_charges = fuse_charge_pair( q1=accumulated_charges, flow1=1, q2=leg_charges, flow2=flows[n]) #compute the degeneracies of `fused_charges` charges #`fused_degeneracies` is a list of degeneracies such that @@ -160,12 +160,14 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray], return charge_shape_dict -def retrieve_non_zero_diagonal_blocks( +def retrieve_non_zero_diagonal_blocks_old_version( data: np.ndarray, charges: List[np.ndarray], flows: List[Union[bool, int]], return_data: Optional[bool] = True) -> Dict: """ + Deprecated: this version is about 2 times slower (worst case) than the current used + implementation Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. Args: @@ -259,22 +261,35 @@ def retrieve_non_zero_diagonal_blocks( return blocks -def retrieve_non_zero_diagonal_blocks_test_2( +def retrieve_non_zero_diagonal_blocks( data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], + row_charges: List[Union[List, np.ndarray]], + column_charges: List[Union[List, np.ndarray]], + row_flows: List[Union[bool, int]], + column_flows: List[Union[bool, int]], return_data: Optional[bool] = True) -> Dict: """ Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. + `row_charges` and `column_charges` are lists of np.ndarray. The tensor + is viewed as a matrix with rows given by fusing `row_charges` and + columns given by fusing `column_charges`. Note that `column_charges` + are never explicitly fused (`row_charges` are). Args: data: An np.ndarray of the data. 
The number of elements in `data` has to match the number of non-zero elements defined by `charges` and `flows` - charges: List of np.ndarray, one for each leg. - Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. + row_charges: List of np.ndarray, one for each leg of the row-indices. + Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, + column_charges: List of np.ndarray, one for each leg of the column-indices. + Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + row_flows: A list of integers, one for each entry in `row_charges`. + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + column_flows: A list of integers, one for each entry in `column_charges`. with values `1` or `-1`, denoting the flow direction of the charges on each leg. `1` is inflowing, `-1` is outflowing charge. @@ -290,20 +305,25 @@ def retrieve_non_zero_diagonal_blocks_test_2( dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray or a python list of locations and shapes, depending on the value of `return_data`. 
""" - if len(charges) != 2: - raise ValueError("input has to be a two-dimensional symmetric matrix") + flows = row_flows.copy() + flows.extend(column_flows) check_flows(flows) - if len(flows) != len(charges): - raise ValueError("`len(flows)` is different from `len(charges)`") - - #we multiply the flows into the charges - row_charges = flows[0] * charges[0] # a list of charges on each row - column_charges = flows[1] * charges[1] # a list of charges on each column - - #get the unique charges - unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) - unique_column_charges, column_dims = np.unique( - column_charges, return_counts=True) + if len(flows) != (len(row_charges) + len(column_charges)): + raise ValueError( + "`len(flows)` is different from `len(row_charges) + len(column_charges)`" + ) + + #since we are using row-major we have to fuse the row charges anyway. + fused_row_charges = fuse_charges(row_charges, row_flows) + #get the unique row-charges + unique_row_charges, row_dims = np.unique( + fused_row_charges, return_counts=True) + + #get the unique column-charges + #we only care about their degeneracies, not their order; that's much faster + #to compute since we don't have to fuse all charges explicitly + unique_column_charges, column_dims = compute_fused_charge_degeneracies( + column_charges, column_flows) #get the charges common to rows and columns (only those matter) common_charges = np.intersect1d( unique_row_charges, -unique_column_charges, assume_unique=True) @@ -314,8 +334,8 @@ def retrieve_non_zero_diagonal_blocks_test_2( column_degeneracies = dict(zip(unique_column_charges, column_dims)) # we only care about charges common to row and columns - mask = np.isin(row_charges, common_charges) - relevant_row_charges = row_charges[mask] + mask = np.isin(fused_row_charges, common_charges) + relevant_row_charges = fused_row_charges[mask] #some numpy magic to get the index locations of the blocks #we generate a vector of 
`len(relevant_row_charges) which, @@ -677,6 +697,16 @@ def rank(self): #``` #Thduis the return type of `BlockSparseTensor.shape` is never inspected explicitly #(apart from debugging). + + @property + def dense_shape(self) -> Tuple: + """ + The dense shape of the tensor. + Returns: + Tuple: A tuple of `int`. + """ + return tuple([i.dimension for i in self.indices]) + @property def shape(self) -> Tuple: """ @@ -758,8 +788,7 @@ def reshape(self, shape: Union[Iterable[Index], Iterable[int]]) -> None: else: dense_shape.append(s) # a few simple checks - - if np.prod(dense_shape) != np.prod(self.shape): + if np.prod(dense_shape) != np.prod(self.dense_shape): raise ValueError("A tensor with {} elements cannot be " "reshaped into a tensor with {} elements".format( np.prod(self.shape), np.prod(dense_shape))) @@ -783,17 +812,17 @@ def raise_error(): self.reset_shape() #bring tensor back into its elementary shape for n in range(len(dense_shape)): - if dense_shape[n] > self.shape[n]: - while dense_shape[n] > self.shape[n]: + if dense_shape[n] > self.dense_shape[n]: + while dense_shape[n] > self.dense_shape[n]: #fuse indices i1, i2 = self.indices.pop(n), self.indices.pop(n) #note: the resulting flow is set to one since the flow #is multiplied into the charges. As a result the tensor #will then be invariant in any case. 
self.indices.insert(n, fuse_index_pair(i1, i2)) - if self.shape[n] > dense_shape[n]: + if self.dense_shape[n] > dense_shape[n]: raise_error() - elif dense_shape[n] < self.shape[n]: + elif dense_shape[n] < self.dense_shape[n]: raise_error() def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: @@ -816,7 +845,41 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: raise ValueError( "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" .format(self.rank)) + + row_indices = self.indices[0].get_elementary_indices() + column_indices = self.indices[1].get_elementary_indices() + return retrieve_non_zero_diagonal_blocks( + data=self.data, + row_charges=[i.charges for i in row_indices], + column_charges=[i.charges for i in column_indices], + row_flows=[i.flow for i in row_indices], + column_flows=[i.flow for i in column_indices], + return_data=return_data) + + def get_diagonal_blocks_old_version( + self, return_data: Optional[bool] = True) -> Dict: + """ + Obtain the diagonal blocks of symmetric matrix. + BlockSparseTensor has to be a matrix. + Args: + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. 
+ Returns: + dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) + + """ + if self.rank != 2: + raise ValueError( + "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" + .format(self.rank)) + + return retrieve_non_zero_diagonal_blocks_old_version( data=self.data, charges=self.charges, flows=self.flows, From 69309eb3853fd37fec342449540d4f9593f6c164 Mon Sep 17 00:00:00 2001 From: mganahl Date: Tue, 17 Dec 2019 22:37:45 -0500 Subject: [PATCH 058/212] fuse_charge_pair added fuse_charges added --- tensornetwork/block_tensor/index.py | 40 ++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 9 deletions(-) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index 326311ec1..ffb004d1e 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -42,6 +42,9 @@ def __init__(self, self.right_child = right_child self.name = name if name else 'index' + def __repr__(self): + return str(self.dimension) + @property def is_leave(self): return (self.left_child is None) and (self.right_child is None) @@ -113,22 +116,22 @@ def __mul__(self, index: "Index") -> "Index": def charges(self): if self.is_leave: return self._charges - fused_charges = fuse_charges(self.left_child.charges, self.left_child.flow, - self.right_child.charges, - self.right_child.flow) + fused_charges = fuse_charge_pair( + self.left_child.charges, self.left_child.flow, self.right_child.charges, + self.right_child.flow) return fused_charges -def fuse_charges(q1: Union[List, np.ndarray], flow1: int, - q2: Union[List, np.ndarray], flow2: int) -> np.ndarray: +def fuse_charge_pair(q1: Union[List, np.ndarray], flow1: int, + q2: Union[List, np.ndarray], flow2: int) -> np.ndarray: """ Fuse charges `q1` with charges `q2` by simple addition (valid for U(1) charges). `q1` and `q2` typically belong to two consecutive legs of `BlockSparseTensor`. 
Given `q1 = [0,1,2]` and `q2 = [10,100]`, this returns `[10, 11, 12, 100, 101, 102]`. - When using column-major ordering of indices in `BlockSparseTensor`, + When using row-major ordering of indices in `BlockSparseTensor`, the position of q1 should be "to the left" of the position of q2. Args: q1: Iterable of integers @@ -143,6 +146,27 @@ def fuse_charges(q1: Union[List, np.ndarray], flow1: int, len(q1) * len(q2)) +def fuse_charges(charges: List[Union[List, np.ndarray]], + flows: List[int]) -> np.ndarray: + """ + Fuse all `charges` by simple addition (valid + for U(1) charges). + Args: + chargs: A list of charges to be fused. + flows: A list of flows, one for each element in `charges`. + Returns: + np.ndarray: The result of fusing `charges`. + """ + if len(charges) == 1: + #nothing to do + return charges[0] + fused_charges = charges[0] * flows[0] + for n in range(1, len(charges)): + fused_charges = fuse_charge_pair( + q1=fused_charges, flow1=1, q2=charges[n], flow2=flows[n]) + return fused_charges + + def fuse_degeneracies(degen1: Union[List, np.ndarray], degen2: Union[List, np.ndarray]) -> np.ndarray: """ @@ -151,7 +175,7 @@ def fuse_degeneracies(degen1: Union[List, np.ndarray], consecutive legs of `BlockSparseTensor`. Given `q1 = [0,1,2]` and `q2 = [10,100]`, this returns `[10, 11, 12, 100, 101, 102]`. - When using column-major ordering of indices in `BlockSparseTensor`, + When using row-major ordering of indices in `BlockSparseTensor`, the position of q1 should be "to the left" of the position of q2. Args: q1: Iterable of integers @@ -182,8 +206,6 @@ def fuse_index_pair(left_index: Index, raise ValueError( "index1 and index2 are the same object. 
Can only fuse distinct objects") - # fused_charges = fuse_charges(left_index.charges, left_index.flow, - # right_index.charges, right_index.flow) return Index( charges=None, flow=flow, left_child=left_index, right_child=right_index) From 5026ed36073ccad1363d62752541c5599cb0e220 Mon Sep 17 00:00:00 2001 From: mganahl Date: Tue, 17 Dec 2019 22:44:16 -0500 Subject: [PATCH 059/212] use is_leave --- tensornetwork/block_tensor/index.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index ffb004d1e..96e8ba2d6 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -234,7 +234,7 @@ def split_index(index: Index) -> Tuple[Index, Index]: Returns: Tuple[Index, Index]: The result of splitting `index`. """ - if (not index.left_child) or (not index.right_child): + if index.is_leave: raise ValueError("cannot split an elementary index") return index.left_child, index.right_child From 8ada65d20d0755bf535ce8f22e203f079a5685a4 Mon Sep 17 00:00:00 2001 From: mganahl Date: Tue, 17 Dec 2019 22:44:24 -0500 Subject: [PATCH 060/212] new tests --- tensornetwork/block_tensor/index_test.py | 40 ++++++++++++++++++++---- 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/tensornetwork/block_tensor/index_test.py b/tensornetwork/block_tensor/index_test.py index ff331a36a..8cdda8720 100644 --- a/tensornetwork/block_tensor/index_test.py +++ b/tensornetwork/block_tensor/index_test.py @@ -1,14 +1,14 @@ import numpy as np # pylint: disable=line-too-long -from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies +from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies, fuse_charge_pair -def test_fuse_charges(): +def test_fuse_charge_pair(): q1 = np.asarray([0, 1]) q2 = np.asarray([2, 3, 4]) - fused_charges = fuse_charges(q1, 1, q2, 1) + fused_charges = 
fuse_charge_pair(q1, 1, q2, 1) assert np.all(fused_charges == np.asarray([2, 3, 3, 4, 4, 5])) - fused_charges = fuse_charges(q1, 1, q2, -1) + fused_charges = fuse_charge_pair(q1, 1, q2, -1) assert np.all(fused_charges == np.asarray([-2, -1, -3, -2, -4, -3])) @@ -26,7 +26,7 @@ def test_index_fusion_mul(): i12 = i1 * i2 assert i12.left_child is i1 assert i12.right_child is i2 - assert np.all(i12.charges == fuse_charges(q1, 1, q2, 1)) + assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) def test_index_fusion(): @@ -43,4 +43,32 @@ def test_index_fusion(): i12 = fuse_index_pair(i1, i2) assert i12.left_child is i1 assert i12.right_child is i2 - assert np.all(i12.charges == fuse_charges(q1, 1, q2, 1)) + assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) + + +def test_elementary_indices(): + D = 10 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 1 + q2 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 2 + i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + + i3 = Index(charges=q1, flow=1, name='index1') #index on leg 1 + i4 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + + i12 = i1 * i2 + i34 = i3 * i4 + elmt12 = i12.get_elementary_indices() + assert elmt12[0] is i1 + assert elmt12[1] is i2 + + i1234 = i12 * i34 + elmt1234 = i1234.get_elementary_indices() + assert elmt1234[0] is i1 + assert elmt1234[1] is i2 + assert elmt1234[2] is i3 + assert elmt1234[3] is i4 From 11ab7c0c0ec7b4bbfcf0657b0cdb4d95b3035b74 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 18 Dec 2019 08:38:32 -0500 Subject: [PATCH 061/212] removed TODO, BlockSparseTensor.shape returns ref instead of copy --- tensornetwork/block_tensor/block_tensor.py | 28 +++++++--------------- 1 file changed, 8 insertions(+), 20 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py 
b/tensornetwork/block_tensor/block_tensor.py index d783a71e0..515e4cdd1 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -680,24 +680,6 @@ def init_random(): def rank(self): return len(self.indices) - #TODO: we should consider to switch the names - #`BlockSparseTensor.sparse_shape` and `BlockSparseTensor.shape`, - #i.e. have `BlockSparseTensor.shape`return the sparse shape of the tensor. - #This may be more convenient for building tensor-type and backend - #agnostic code. For example, in MPS code we essentially never - #explicitly set a shape to a certain value (apart from initialization). - #That is, code like this - #``` - #tensor = np.random.rand(10,10,10) - #``` - #is never used. Rather one inquires shapes of tensors and - #multiplies them to get new shapes: - #``` - #new_tensor = reshape(tensor, [tensor.shape[0]*tensor.shape[1], tensor.shape[2]]) - #``` - #Thduis the return type of `BlockSparseTensor.shape` is never inspected explicitly - #(apart from debugging). - @property def dense_shape(self) -> Tuple: """ @@ -711,11 +693,10 @@ def dense_shape(self) -> Tuple: def shape(self) -> Tuple: """ The sparse shape of the tensor. - Returns a copy of self.indices. Returns: Tuple: A tuple of `Index` objects. """ - return tuple([i.copy() for i in self.indices]) + return tuple(self.indices) @property def dtype(self) -> Type[np.number]: @@ -829,6 +810,9 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: """ Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. + For matrices with shape[0] << shape[1], this routine avoids explicit fusion + of column charges. + Args: return_data: If `True`, the return dictionary maps quantum numbers `q` to actual `np.ndarray` with the data. This involves a copy of data. 
@@ -860,6 +844,8 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: def get_diagonal_blocks_old_version( self, return_data: Optional[bool] = True) -> Dict: """ + Deprecated + Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. Args: @@ -888,6 +874,8 @@ def get_diagonal_blocks_old_version( def get_diagonal_blocks_deprecated( self, return_data: Optional[bool] = True) -> Dict: """ + Deprecated + Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. Args: From 04c40283b98d8b31e1e0dac13a0cca6a3a3def84 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 18 Dec 2019 08:38:51 -0500 Subject: [PATCH 062/212] added tests --- tensornetwork/block_tensor/index_test.py | 38 ++++++++++++++++++------ 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/tensornetwork/block_tensor/index_test.py b/tensornetwork/block_tensor/index_test.py index 8cdda8720..780034133 100644 --- a/tensornetwork/block_tensor/index_test.py +++ b/tensornetwork/block_tensor/index_test.py @@ -50,15 +50,12 @@ def test_elementary_indices(): D = 10 B = 4 dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 1 - q2 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 2 - i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 - i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 - - i3 = Index(charges=q1, flow=1, name='index1') #index on leg 1 - i4 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + i1 = Index(charges=q1, flow=1, name='index1') + i2 = Index(charges=q2, flow=1, name='index2') + i3 = Index(charges=q1, flow=1, name='index3') + i4 = Index(charges=q2, flow=1, name='index4') i12 = i1 * i2 i34 = i3 * i4 @@ -72,3 +69,26 @@ def test_elementary_indices(): assert elmt1234[1] is i2 
assert elmt1234[2] is i3 assert elmt1234[3] is i4 + + +def test_copy(): + D = 10 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + i1 = Index(charges=q1, flow=1, name='index1') + i2 = Index(charges=q2, flow=1, name='index2') + i3 = Index(charges=q1, flow=-1, name='index3') + i4 = Index(charges=q2, flow=-1, name='index4') + + i12 = i1 * i2 + i34 = i3 * i4 + i1234 = i12 * i34 + i1234_copy = i1234.copy() + + elmt1234 = i1234_copy.get_elementary_indices() + assert elmt1234[0] is not i1 + assert elmt1234[1] is not i2 + assert elmt1234[2] is not i3 + assert elmt1234[3] is not i4 From e521d35783a65c957bccd23615e2b162a0b9ecae Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 18 Dec 2019 10:52:56 -0500 Subject: [PATCH 063/212] added tests --- tensornetwork/block_tensor/index_test.py | 93 ++++++++++++++++++++++-- 1 file changed, 87 insertions(+), 6 deletions(-) diff --git a/tensornetwork/block_tensor/index_test.py b/tensornetwork/block_tensor/index_test.py index 780034133..a67ddf7dc 100644 --- a/tensornetwork/block_tensor/index_test.py +++ b/tensornetwork/block_tensor/index_test.py @@ -1,6 +1,6 @@ import numpy as np # pylint: disable=line-too-long -from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies, fuse_charge_pair +from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies, fuse_charge_pair, fuse_indices def test_fuse_charge_pair(): @@ -12,8 +12,17 @@ def test_fuse_charge_pair(): assert np.all(fused_charges == np.asarray([-2, -1, -3, -2, -4, -3])) +def test_fuse_charges(): + q1 = np.asarray([0, 1]) + q2 = np.asarray([2, 3, 4]) + fused_charges = fuse_charges([q1, q2], flows=[1, 1]) + assert np.all(fused_charges == np.asarray([2, 3, 3, 4, 4, 5])) + fused_charges = fuse_charges([q1, q2], flows=[1, -1]) + assert np.all(fused_charges == np.asarray([-2, -1, 
-3, -2, -4, -3])) + + def test_index_fusion_mul(): - D = 100 + D = 10 B = 4 dtype = np.int16 q1 = np.random.randint(-B // 2, B // 2 + 1, @@ -29,8 +38,8 @@ def test_index_fusion_mul(): assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) -def test_index_fusion(): - D = 100 +def test_fuse_index_pair(): + D = 10 B = 4 dtype = np.int16 q1 = np.random.randint(-B // 2, B // 2 + 1, @@ -46,16 +55,60 @@ def test_index_fusion(): assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) +def test_fuse_indices(): + D = 10 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 1 + q2 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 2 + i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + + i12 = fuse_indices([i1, i2]) + assert i12.left_child is i1 + assert i12.right_child is i2 + assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) + + +def test_split_index(): + D = 10 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 1 + q2 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 2 + i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + + i12 = i1 * i2 + i1_, i2_ = split_index(i12) + assert i1 is i1_ + assert i2 is i2_ + np.testing.assert_allclose(q1, i1.charges) + np.testing.assert_allclose(q2, i2.charges) + np.testing.assert_allclose(q1, i1_.charges) + np.testing.assert_allclose(q2, i2_.charges) + assert i1_.name == 'index1' + assert i2_.name == 'index2' + assert i1_.flow == i1.flow + assert i2_.flow == i2.flow + + def test_elementary_indices(): D = 10 B = 4 dtype = np.int16 q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + q3 = np.random.randint(-B // 
2, B // 2 + 1, D).astype(dtype) + q4 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) i1 = Index(charges=q1, flow=1, name='index1') i2 = Index(charges=q2, flow=1, name='index2') - i3 = Index(charges=q1, flow=1, name='index3') - i4 = Index(charges=q2, flow=1, name='index4') + i3 = Index(charges=q3, flow=1, name='index3') + i4 = Index(charges=q4, flow=1, name='index4') i12 = i1 * i2 i34 = i3 * i4 @@ -69,6 +122,34 @@ def test_elementary_indices(): assert elmt1234[1] is i2 assert elmt1234[2] is i3 assert elmt1234[3] is i4 + assert elmt1234[0].name == 'index1' + assert elmt1234[1].name == 'index2' + assert elmt1234[2].name == 'index3' + assert elmt1234[3].name == 'index4' + assert elmt1234[0].flow == i1.flow + assert elmt1234[1].flow == i2.flow + assert elmt1234[2].flow == i3.flow + assert elmt1234[3].flow == i4.flow + + np.testing.assert_allclose(q1, i1.charges) + np.testing.assert_allclose(q2, i2.charges) + np.testing.assert_allclose(q3, i3.charges) + np.testing.assert_allclose(q4, i4.charges) + + +def test_leave(): + D = 10 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + i1 = Index(charges=q1, flow=1, name='index1') + i2 = Index(charges=q2, flow=1, name='index2') + assert i1.is_leave + assert i2.is_leave + + i12 = i1 * i2 + assert not i12.is_leave def test_copy(): From 3fec7ba38c12bd802c6d9adb3530d4220965fe05 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 18 Dec 2019 21:49:12 -0500 Subject: [PATCH 064/212] column-major -> row-major forgot to fix fusing order of charges and degeneracies --- tensornetwork/block_tensor/index.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index 96e8ba2d6..a737fa02e 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -130,7 +130,7 @@ def fuse_charge_pair(q1: 
Union[List, np.ndarray], flow1: int, for U(1) charges). `q1` and `q2` typically belong to two consecutive legs of `BlockSparseTensor`. Given `q1 = [0,1,2]` and `q2 = [10,100]`, this returns - `[10, 11, 12, 100, 101, 102]`. + `[10, 100, 11, 101, 12, 102]`. When using row-major ordering of indices in `BlockSparseTensor`, the position of q1 should be "to the left" of the position of q2. Args: @@ -142,7 +142,7 @@ def fuse_charge_pair(q1: Union[List, np.ndarray], flow1: int, np.ndarray: The result of fusing `q1` with `q2`. """ return np.reshape( - flow2 * np.asarray(q2)[:, None] + flow1 * np.asarray(q1)[None, :], + flow1 * np.asarray(q1)[:, None] + flow2 * np.asarray(q2)[None, :], len(q1) * len(q2)) @@ -173,19 +173,17 @@ def fuse_degeneracies(degen1: Union[List, np.ndarray], Fuse degeneracies `degen1` and `degen2` of two leg-charges by simple kronecker product. `degen1` and `degen2` typically belong to two consecutive legs of `BlockSparseTensor`. - Given `q1 = [0,1,2]` and `q2 = [10,100]`, this returns - `[10, 11, 12, 100, 101, 102]`. + Given `degen1 = [1, 2, 3]` and `degen2 = [10, 100]`, this returns + `[10, 100, 20, 200, 30, 300]`. When using row-major ordering of indices in `BlockSparseTensor`, - the position of q1 should be "to the left" of the position of q2. + the position of `degen1` should be "to the left" of the position of `degen2`. Args: - q1: Iterable of integers - flow1: Flow direction of charge `q1`. - q2: Iterable of integers - flow2: Flow direction of charge `q2`. + degen1: Iterable of integers + degen2: Iterable of integers Returns: - np.ndarray: The result of fusing `q1` with `q2`. + np.ndarray: The result of fusing `dege1` with `degen2`. 
""" - return np.reshape(degen2[:, None] * degen1[None, :], + return np.reshape(degen1[:, None] * degen2[None, :], len(degen1) * len(degen2)) From a6f91a6b3f5681040361e57907f09cc714910300 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 18 Dec 2019 21:49:25 -0500 Subject: [PATCH 065/212] fix broken tests --- tensornetwork/block_tensor/index_test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tensornetwork/block_tensor/index_test.py b/tensornetwork/block_tensor/index_test.py index a67ddf7dc..9d86e748e 100644 --- a/tensornetwork/block_tensor/index_test.py +++ b/tensornetwork/block_tensor/index_test.py @@ -7,18 +7,18 @@ def test_fuse_charge_pair(): q1 = np.asarray([0, 1]) q2 = np.asarray([2, 3, 4]) fused_charges = fuse_charge_pair(q1, 1, q2, 1) - assert np.all(fused_charges == np.asarray([2, 3, 3, 4, 4, 5])) + assert np.all(fused_charges == np.asarray([2, 3, 4, 3, 4, 5])) fused_charges = fuse_charge_pair(q1, 1, q2, -1) - assert np.all(fused_charges == np.asarray([-2, -1, -3, -2, -4, -3])) + assert np.all(fused_charges == np.asarray([-2, -3, -4, -1, -2, -3])) def test_fuse_charges(): q1 = np.asarray([0, 1]) q2 = np.asarray([2, 3, 4]) fused_charges = fuse_charges([q1, q2], flows=[1, 1]) - assert np.all(fused_charges == np.asarray([2, 3, 3, 4, 4, 5])) + assert np.all(fused_charges == np.asarray([2, 3, 4, 3, 4, 5])) fused_charges = fuse_charges([q1, q2], flows=[1, -1]) - assert np.all(fused_charges == np.asarray([-2, -1, -3, -2, -4, -3])) + assert np.all(fused_charges == np.asarray([-2, -3, -4, -1, -2, -3])) def test_index_fusion_mul(): From 7f4f3ce5a291105829ebdc2ac9f6ff0fd2eeb001 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 18 Dec 2019 21:52:08 -0500 Subject: [PATCH 066/212] test added --- tensornetwork/block_tensor/index_test.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tensornetwork/block_tensor/index_test.py b/tensornetwork/block_tensor/index_test.py index 9d86e748e..1bb2c37be 100644 --- 
a/tensornetwork/block_tensor/index_test.py +++ b/tensornetwork/block_tensor/index_test.py @@ -21,6 +21,13 @@ def test_fuse_charges(): assert np.all(fused_charges == np.asarray([-2, -3, -4, -1, -2, -3])) +def test_fuse_degeneracies(): + d1 = np.asarray([0, 1]) + d2 = np.asarray([2, 3, 4]) + fused_degeneracies = fuse_degeneracies(d1, d2) + np.testing.assert_allclose(fused_degeneracies, np.kron(d1, d2)) + + def test_index_fusion_mul(): D = 10 B = 4 From 86adb1b5520f69adaa78d4c23eebc3a400993ead Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 18 Dec 2019 22:03:01 -0500 Subject: [PATCH 067/212] mostly docstring --- tensornetwork/block_tensor/block_tensor.py | 120 ++++++++++----------- 1 file changed, 60 insertions(+), 60 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 515e4cdd1..7254a96c7 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -160,24 +160,35 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray], return charge_shape_dict -def retrieve_non_zero_diagonal_blocks_old_version( +def retrieve_non_zero_diagonal_blocks( data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], + row_charges: List[Union[List, np.ndarray]], + column_charges: List[Union[List, np.ndarray]], + row_flows: List[Union[bool, int]], + column_flows: List[Union[bool, int]], return_data: Optional[bool] = True) -> Dict: """ - Deprecated: this version is about 2 times slower (worst case) than the current used - implementation Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. + `row_charges` and `column_charges` are lists of np.ndarray. The tensor + is viewed as a matrix with rows given by fusing `row_charges` and + columns given by fusing `column_charges`. Note that `column_charges` + are never explicitly fused (`row_charges` are). Args: data: An np.ndarray of the data. 
The number of elements in `data` has to match the number of non-zero elements defined by `charges` and `flows` - charges: List of np.ndarray, one for each leg. - Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. + row_charges: List of np.ndarray, one for each leg of the row-indices. + Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, + column_charges: List of np.ndarray, one for each leg of the column-indices. + Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + row_flows: A list of integers, one for each entry in `row_charges`. + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + column_flows: A list of integers, one for each entry in `column_charges`. with values `1` or `-1`, denoting the flow direction of the charges on each leg. `1` is inflowing, `-1` is outflowing charge. @@ -193,20 +204,25 @@ def retrieve_non_zero_diagonal_blocks_old_version( dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray or a python list of locations and shapes, depending on the value of `return_data`. """ - if len(charges) != 2: - raise ValueError("input has to be a two-dimensional symmetric matrix") + flows = row_flows.copy() + flows.extend(column_flows) check_flows(flows) - if len(flows) != len(charges): - raise ValueError("`len(flows)` is different from `len(charges)`") + if len(flows) != (len(row_charges) + len(column_charges)): + raise ValueError( + "`len(flows)` is different from `len(row_charges) + len(column_charges)`" + ) - #we multiply the flows into the charges - row_charges = flows[0] * charges[0] # a list of charges on each row - column_charges = flows[1] * charges[1] # a list of charges on each column + #since we are using row-major we have to fuse the row charges anyway. 
+ fused_row_charges = fuse_charges(row_charges, row_flows) + #get the unique row-charges + unique_row_charges, row_dims = np.unique( + fused_row_charges, return_counts=True) - #get the unique charges - unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) - unique_column_charges, column_dims = np.unique( - column_charges, return_counts=True) + #get the unique column-charges + #we only care about their degeneracies, not their order; that's much faster + #to compute since we don't have to fuse all charges explicitly + unique_column_charges, column_dims = compute_fused_charge_degeneracies( + column_charges, column_flows) #get the charges common to rows and columns (only those matter) common_charges = np.intersect1d( unique_row_charges, -unique_column_charges, assume_unique=True) @@ -217,8 +233,8 @@ def retrieve_non_zero_diagonal_blocks_old_version( column_degeneracies = dict(zip(unique_column_charges, column_dims)) # we only care about charges common to row and columns - mask = np.isin(row_charges, common_charges) - relevant_row_charges = row_charges[mask] + mask = np.isin(fused_row_charges, common_charges) + relevant_row_charges = fused_row_charges[mask] #some numpy magic to get the index locations of the blocks #we generate a vector of `len(relevant_row_charges) which, @@ -261,35 +277,24 @@ def retrieve_non_zero_diagonal_blocks_old_version( return blocks -def retrieve_non_zero_diagonal_blocks( +def retrieve_non_zero_diagonal_blocks_old_version( data: np.ndarray, - row_charges: List[Union[List, np.ndarray]], - column_charges: List[Union[List, np.ndarray]], - row_flows: List[Union[bool, int]], - column_flows: List[Union[bool, int]], + charges: List[np.ndarray], + flows: List[Union[bool, int]], return_data: Optional[bool] = True) -> Dict: """ + Deprecated: this version is about 2 times slower (worst case) than the current used + implementation Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them 
in a dict. - `row_charges` and `column_charges` are lists of np.ndarray. The tensor - is viewed as a matrix with rows given by fusing `row_charges` and - columns given by fusing `column_charges`. Note that `column_charges` - are never explicitly fused (`row_charges` are). Args: data: An np.ndarray of the data. The number of elements in `data` has to match the number of non-zero elements defined by `charges` and `flows` - row_charges: List of np.ndarray, one for each leg of the row-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - column_charges: List of np.ndarray, one for each leg of the column-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. + charges: List of np.ndarray, one for each leg. + Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. The bond dimension `D[leg]` can vary on each leg. - row_flows: A list of integers, one for each entry in `row_charges`. - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - column_flows: A list of integers, one for each entry in `column_charges`. + flows: A list of integers, one for each leg, with values `1` or `-1`, denoting the flow direction of the charges on each leg. `1` is inflowing, `-1` is outflowing charge. @@ -305,25 +310,20 @@ def retrieve_non_zero_diagonal_blocks( dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray or a python list of locations and shapes, depending on the value of `return_data`. 
""" - flows = row_flows.copy() - flows.extend(column_flows) + if len(charges) != 2: + raise ValueError("input has to be a two-dimensional symmetric matrix") check_flows(flows) - if len(flows) != (len(row_charges) + len(column_charges)): - raise ValueError( - "`len(flows)` is different from `len(row_charges) + len(column_charges)`" - ) + if len(flows) != len(charges): + raise ValueError("`len(flows)` is different from `len(charges)`") - #since we are using row-major we have to fuse the row charges anyway. - fused_row_charges = fuse_charges(row_charges, row_flows) - #get the unique row-charges - unique_row_charges, row_dims = np.unique( - fused_row_charges, return_counts=True) + #we multiply the flows into the charges + row_charges = flows[0] * charges[0] # a list of charges on each row + column_charges = flows[1] * charges[1] # a list of charges on each column - #get the unique column-charges - #we only care about their degeneracies, not their order; that's much faster - #to compute since we don't have to fuse all charges explicitly - unique_column_charges, column_dims = compute_fused_charge_degeneracies( - column_charges, column_flows) + #get the unique charges + unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) + unique_column_charges, column_dims = np.unique( + column_charges, return_counts=True) #get the charges common to rows and columns (only those matter) common_charges = np.intersect1d( unique_row_charges, -unique_column_charges, assume_unique=True) @@ -334,8 +334,8 @@ def retrieve_non_zero_diagonal_blocks( column_degeneracies = dict(zip(unique_column_charges, column_dims)) # we only care about charges common to row and columns - mask = np.isin(fused_row_charges, common_charges) - relevant_row_charges = fused_row_charges[mask] + mask = np.isin(row_charges, common_charges) + relevant_row_charges = row_charges[mask] #some numpy magic to get the index locations of the blocks #we generate a vector of `len(relevant_row_charges) which, @@ 
-585,8 +585,8 @@ def compute_mapping_table(charges: List[np.ndarray], # is moving quickest when iterating through the linear data # transposing is done taking, for each value of the indices i_0 to i_N-2 # the junk i_N-1 that gives non-zero - tables = np.meshgrid([np.arange(c.shape[0]) for c in charges], indexing='ij') - tables = tables[::-1] #reverse the order + + #for example raise NotImplementedError() From 5657456c7ff942be3c67efbc8ebf1bc02dd95f7d Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 18 Dec 2019 22:03:14 -0500 Subject: [PATCH 068/212] docstring --- tensornetwork/block_tensor/index.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index a737fa02e..a299fa381 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -133,6 +133,7 @@ def fuse_charge_pair(q1: Union[List, np.ndarray], flow1: int, `[10, 100, 11, 101, 12, 102]`. When using row-major ordering of indices in `BlockSparseTensor`, the position of q1 should be "to the left" of the position of q2. + Args: q1: Iterable of integers flow1: Flow direction of charge `q1`. @@ -150,7 +151,9 @@ def fuse_charges(charges: List[Union[List, np.ndarray]], flows: List[int]) -> np.ndarray: """ Fuse all `charges` by simple addition (valid - for U(1) charges). + for U(1) charges). Charges are fused from "right to left", + in accordance with row-major order (see `fuse_charges_pair`). + Args: chargs: A list of charges to be fused. flows: A list of flows, one for each element in `charges`. 
From a89d97231e8227ebf21d7a40baf89168ea450ebe Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 20 Dec 2019 10:18:30 -0500 Subject: [PATCH 069/212] added map_to_integer --- tensornetwork/block_tensor/block_tensor.py | 389 +++++++++++++++------ 1 file changed, 275 insertions(+), 114 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 7254a96c7..218ab50f6 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -20,7 +20,7 @@ from tensornetwork.network_components import Node, contract, contract_between from tensornetwork.backends import backend_factory # pylint: disable=line-too-long -from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charge_pair, fuse_degeneracies, fuse_charges +from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charge_pair, fuse_degeneracies, fuse_charges, unfuse import numpy as np import itertools import time @@ -34,6 +34,31 @@ def check_flows(flows) -> None: "flows = {} contains values different from 1 and -1".format(flows)) +def map_to_integer(dims: Union[List, np.ndarray], + table: np.ndarray, + dtype: Optional[Type[np.number]] = np.int64): + """ + Map a `table` of integers of shape (N, r) bijectively into + an np.ndarray `integers` of length N of unique numbers. + The mapping is done using + ``` + `integers[n] = table[n,0] * np.prod(dims[1::]) + table[n,1] * np.prod(dims[2::]) + ... + table[n,r-1] * 1` + + Args: + dims: An iterable of integers. + table: An array of shape (N,r) of integers. + dtype: An optional dtype used for the conversion. + Care should be taken when choosing this to avoid overflow issues. + Returns: + np.ndarray: An array of integers. 
+ """ + converter_table = np.expand_dims( + np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))), 0) + tmp = table * converter_table + integers = np.sum(tmp, axis=1) + return integers + + def compute_fused_charge_degeneracies(charges: List[np.ndarray], flows: List[Union[bool, int]]) -> Dict: """ @@ -160,13 +185,12 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray], return charge_shape_dict -def retrieve_non_zero_diagonal_blocks( - data: np.ndarray, - row_charges: List[Union[List, np.ndarray]], - column_charges: List[Union[List, np.ndarray]], - row_flows: List[Union[bool, int]], - column_flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: +def find_diagonal_sparse_blocks(data: np.ndarray, + row_charges: List[Union[List, np.ndarray]], + column_charges: List[Union[List, np.ndarray]], + row_flows: List[Union[bool, int]], + column_flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: """ Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. @@ -196,7 +220,7 @@ def retrieve_non_zero_diagonal_blocks( actual `np.ndarray` with the data. This involves a copy of data. If `False`, the returned dict maps quantum numbers of a list [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within A.data, i.e. + containing the sparse locations of the tensor elements within A.data, i.e. `A.data[locations]` contains the elements belonging to the tensor with quantum numbers `(q,q). `shape` is the shape of the corresponding array. 
@@ -270,31 +294,47 @@ def retrieve_non_zero_diagonal_blocks( a = np.expand_dims(stop_positions[masks[c]] - column_degeneracies[-c], 0) b = np.expand_dims(np.arange(column_degeneracies[-c]), 1) if not return_data: - blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])] + blocks[c] = [ + np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), + (row_degeneracies[c], column_degeneracies[-c]) + ] else: - blocks[c] = np.reshape(data[a + b], - (row_degeneracies[c], column_degeneracies[-c])) + blocks[c] = np.reshape( + data[np.reshape(a + b, + row_degeneracies[c] * column_degeneracies[-c])], + (row_degeneracies[c], column_degeneracies[-c])) return blocks -def retrieve_non_zero_diagonal_blocks_old_version( +def find_diagonal_sparse_blocks_test( data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], + row_charges: List[Union[List, np.ndarray]], + column_charges: List[Union[List, np.ndarray]], + row_flows: List[Union[bool, int]], + column_flows: List[Union[bool, int]], return_data: Optional[bool] = True) -> Dict: """ - Deprecated: this version is about 2 times slower (worst case) than the current used - implementation Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. + `row_charges` and `column_charges` are lists of np.ndarray. The tensor + is viewed as a matrix with rows given by fusing `row_charges` and + columns given by fusing `column_charges`. Note that `column_charges` + are never explicitly fused (`row_charges` are). Args: data: An np.ndarray of the data. The number of elements in `data` has to match the number of non-zero elements defined by `charges` and `flows` - charges: List of np.ndarray, one for each leg. - Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. + row_charges: List of np.ndarray, one for each leg of the row-indices. + Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. The bond dimension `D[leg]` can vary on each leg. 
- flows: A list of integers, one for each leg, + column_charges: List of np.ndarray, one for each leg of the column-indices. + Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + row_flows: A list of integers, one for each entry in `row_charges`. + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + column_flows: A list of integers, one for each entry in `column_charges`. with values `1` or `-1`, denoting the flow direction of the charges on each leg. `1` is inflowing, `-1` is outflowing charge. @@ -302,7 +342,7 @@ def retrieve_non_zero_diagonal_blocks_old_version( actual `np.ndarray` with the data. This involves a copy of data. If `False`, the returned dict maps quantum numbers of a list [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within A.data, i.e. + containing the sparse locations of the tensor elements within A.data, i.e. `A.data[locations]` contains the elements belonging to the tensor with quantum numbers `(q,q). `shape` is the shape of the corresponding array. @@ -310,20 +350,25 @@ def retrieve_non_zero_diagonal_blocks_old_version( dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray or a python list of locations and shapes, depending on the value of `return_data`. 
""" - if len(charges) != 2: - raise ValueError("input has to be a two-dimensional symmetric matrix") + flows = row_flows.copy() + flows.extend(column_flows) check_flows(flows) - if len(flows) != len(charges): - raise ValueError("`len(flows)` is different from `len(charges)`") + if len(flows) != (len(row_charges) + len(column_charges)): + raise ValueError( + "`len(flows)` is different from `len(row_charges) + len(column_charges)`" + ) - #we multiply the flows into the charges - row_charges = flows[0] * charges[0] # a list of charges on each row - column_charges = flows[1] * charges[1] # a list of charges on each column + #since we are using row-major we have to fuse the row charges anyway. + fused_row_charges = fuse_charges(row_charges, row_flows) + #get the unique row-charges + unique_row_charges, row_dims = np.unique( + fused_row_charges, return_counts=True) - #get the unique charges - unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) - unique_column_charges, column_dims = np.unique( - column_charges, return_counts=True) + #get the unique column-charges + #we only care about their degeneracies, not their order; that's much faster + #to compute since we don't have to fuse all charges explicitly + unique_column_charges, column_dims = compute_fused_charge_degeneracies( + column_charges, column_flows) #get the charges common to rows and columns (only those matter) common_charges = np.intersect1d( unique_row_charges, -unique_column_charges, assume_unique=True) @@ -334,8 +379,8 @@ def retrieve_non_zero_diagonal_blocks_old_version( column_degeneracies = dict(zip(unique_column_charges, column_dims)) # we only care about charges common to row and columns - mask = np.isin(row_charges, common_charges) - relevant_row_charges = row_charges[mask] + mask = np.isin(fused_row_charges, common_charges) + relevant_row_charges = fused_row_charges[mask] #some numpy magic to get the index locations of the blocks #we generate a vector of `len(relevant_row_charges) 
which, @@ -371,24 +416,28 @@ def retrieve_non_zero_diagonal_blocks_old_version( a = np.expand_dims(stop_positions[masks[c]] - column_degeneracies[-c], 0) b = np.expand_dims(np.arange(column_degeneracies[-c]), 1) if not return_data: - blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])] + blocks[c] = [ + np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), + (row_degeneracies[c], column_degeneracies[-c]) + ] else: - blocks[c] = np.reshape(data[a + b], - (row_degeneracies[c], column_degeneracies[-c])) + blocks[c] = np.reshape( + data[np.reshape(a + b, + row_degeneracies[c] * column_degeneracies[-c])], + (row_degeneracies[c], column_degeneracies[-c])) return blocks -def retrieve_non_zero_diagonal_blocks_column_major( +def find_diagonal_sparse_blocks_old_version( data: np.ndarray, charges: List[np.ndarray], flows: List[Union[bool, int]], return_data: Optional[bool] = True) -> Dict: """ - Deprecated - + Deprecated: this version is about 2 times slower (worst case) than the current used + implementation Given the meta data and underlying data of a symmetric matrix, compute - all diagonal blocks and return them in a dict, assuming column-major - ordering. + all diagonal blocks and return them in a dict. Args: data: An np.ndarray of the data. 
The number of elements in `data` has to match the number of non-zero elements defined by `charges` @@ -436,63 +485,66 @@ def retrieve_non_zero_diagonal_blocks_column_major( column_degeneracies = dict(zip(unique_column_charges, column_dims)) # we only care about charges common to row and columns - mask = np.isin(column_charges, -common_charges) - relevant_column_charges = column_charges[mask] + mask = np.isin(row_charges, common_charges) + relevant_row_charges = row_charges[mask] #some numpy magic to get the index locations of the blocks - #we generate a vector of `len(relevant_column_charges) which, - #for each charge `c` in `relevant_column_charges` holds the - #row-degeneracy of charge `c` - degeneracy_vector = np.empty(len(relevant_column_charges), dtype=np.int64) + #we generate a vector of `len(relevant_row_charges) which, + #for each charge `c` in `relevant_row_charges` holds the + #column-degeneracy of charge `c` + degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64) #for each charge `c` in `common_charges` we generate a boolean mask #for indexing the positions where `relevant_column_charges` has a value of `c`. masks = {} for c in common_charges: - mask = relevant_column_charges == -c + mask = relevant_row_charges == c masks[c] = mask - degeneracy_vector[mask] = row_degeneracies[c] + degeneracy_vector[mask] = column_degeneracies[-c] # the result of the cumulative sum is a vector containing - # the stop positions of the non-zero values of each column + # the stop positions of the non-zero values of each row # within the data vector. - # E.g. for `relevant_column_charges` = [0,1,0,0,3], and - # row_degeneracies[0] = 10 - # row_degeneracies[1] = 20 - # row_degeneracies[3] = 30 + # E.g. 
for `relevant_row_charges` = [0,1,0,0,3], and + # column_degeneracies[0] = 10 + # column_degeneracies[1] = 20 + # column_degeneracies[3] = 30 # we have # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] - # The starting positions of consecutive elements (in column-major order) in - # each column with charge `c=0` within the data vector are then simply obtained using + # The starting positions of consecutive elements (in row-major order) in + # each row with charge `c=0` within the data vector are then simply obtained using # masks[0] = [True, False, True, True, False] - # and `stop_positions[masks[0]] - row_degeneracies[0]` + # and `stop_positions[masks[0]] - column_degeneracies[0]` stop_positions = np.cumsum(degeneracy_vector) blocks = {} for c in common_charges: #numpy broadcasting is substantially faster than kron! - a = np.expand_dims(stop_positions[masks[c]] - row_degeneracies[c], 0) - b = np.expand_dims(np.arange(row_degeneracies[c]), 1) + a = np.expand_dims(stop_positions[masks[c]] - column_degeneracies[-c], 0) + b = np.expand_dims(np.arange(column_degeneracies[-c]), 1) if not return_data: - blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])] + blocks[c] = [ + np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), + (row_degeneracies[c], column_degeneracies[-c]) + ] else: - blocks[c] = np.reshape(data[a + b], - (row_degeneracies[c], column_degeneracies[-c])) + blocks[c] = np.reshape( + data[np.reshape(a + b, + row_degeneracies[c] * column_degeneracies[-c])], + (row_degeneracies[c], column_degeneracies[-c])) return blocks -def retrieve_non_zero_diagonal_blocks_deprecated( +def find_diagonal_sparse_blocks_column_major( data: np.ndarray, charges: List[np.ndarray], flows: List[Union[bool, int]], - return_data: Optional[bool] = False) -> Dict: + return_data: Optional[bool] = True) -> Dict: """ Deprecated Given the meta data and underlying data of a symmetric matrix, compute - all diagonal blocks and return them in a 
dict. - This is a deprecated version which in general performs worse than the - current main implementation. - + all diagonal blocks and return them in a dict, assuming column-major + ordering. Args: data: An np.ndarray of the data. The number of elements in `data` has to match the number of non-zero elements defined by `charges` @@ -526,44 +578,130 @@ def retrieve_non_zero_diagonal_blocks_deprecated( row_charges = flows[0] * charges[0] # a list of charges on each row column_charges = flows[1] * charges[1] # a list of charges on each column - # we only care about charges common to rows and columns - common_charges = np.unique(np.intersect1d(row_charges, -column_charges)) - row_charges = row_charges[np.isin(row_charges, common_charges)] - column_charges = column_charges[np.isin(column_charges, -common_charges)] - #get the unique charges - unique_row_charges, row_locations, row_dims = np.unique( - row_charges, return_inverse=True, return_counts=True) - unique_column_charges, column_locations, column_dims = np.unique( - column_charges, return_inverse=True, return_counts=True) + unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) + unique_column_charges, column_dims = np.unique( + column_charges, return_counts=True) + #get the charges common to rows and columns (only those matter) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + #convenience container for storing the degeneracies of each #row and column charge row_degeneracies = dict(zip(unique_row_charges, row_dims)) column_degeneracies = dict(zip(unique_column_charges, column_dims)) + # we only care about charges common to row and columns + mask = np.isin(column_charges, -common_charges) + relevant_column_charges = column_charges[mask] + #some numpy magic to get the index locations of the blocks #we generate a vector of `len(relevant_column_charges) which, #for each charge `c` in `relevant_column_charges` holds the #row-degeneracy of charge 
`c` + degeneracy_vector = np.empty(len(relevant_column_charges), dtype=np.int64) + #for each charge `c` in `common_charges` we generate a boolean mask + #for indexing the positions where `relevant_column_charges` has a value of `c`. + masks = {} + for c in common_charges: + mask = relevant_column_charges == -c + masks[c] = mask + degeneracy_vector[mask] = row_degeneracies[c] - degeneracy_vector = column_dims[row_locations] + # the result of the cumulative sum is a vector containing + # the stop positions of the non-zero values of each column + # within the data vector. + # E.g. for `relevant_column_charges` = [0,1,0,0,3], and + # row_degeneracies[0] = 10 + # row_degeneracies[1] = 20 + # row_degeneracies[3] = 30 + # we have + # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] + # The starting positions of consecutive elements (in column-major order) in + # each column with charge `c=0` within the data vector are then simply obtained using + # masks[0] = [True, False, True, True, False] + # and `stop_positions[masks[0]] - row_degeneracies[0]` stop_positions = np.cumsum(degeneracy_vector) blocks = {} + for c in common_charges: #numpy broadcasting is substantially faster than kron! 
-    a = np.expand_dims(
-        stop_positions[row_charges == c] - column_degeneracies[-c], 0)
-    b = np.expand_dims(np.arange(column_degeneracies[-c]), 1)
+    a = np.expand_dims(stop_positions[masks[c]] - row_degeneracies[c], 0)
+    b = np.expand_dims(np.arange(row_degeneracies[c]), 1)
     if not return_data:
-      blocks[c] = [a + b, (row_degeneracies[c], column_degeneracies[-c])]
+      blocks[c] = [
+          np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]),
+          (row_degeneracies[c], column_degeneracies[-c])
+      ]
     else:
-      blocks[c] = np.reshape(data[a + b],
-                             (row_degeneracies[c], column_degeneracies[-c]))
+      blocks[c] = np.reshape(
+          data[np.reshape(a + b,
+                          row_degeneracies[c] * column_degeneracies[-c])],
+          (row_degeneracies[c], column_degeneracies[-c]))
   return blocks
 
 
-def compute_mapping_table(charges: List[np.ndarray],
-                          flows: List[Union[bool, int]]) -> int:
+def find_dense_blocks(left_charges: np.ndarray, left_flow: int,
+                      right_charges: np.ndarray, right_flow: int,
+                      target_charge: int) -> Dict:
+  """
+  Find the dense locations of the blocks (i.e. the index-values within the DENSE tensor)
+  in the vector `fused_charges` resulting from fusing np.ndarrays
+  `left_charges` and `right_charges` that have a value of `target_charge`.
+  For example, given
+  ```
+  left_charges = [-2,0,1,0,0]
+  right_charges = [-1,0,2,1]
+  target_charge = 0
+  fused_charges = fuse_charges([left_charges, right_charges],[1,1])
+  print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1]
+  ```
+  we want to find all the different blocks
+  that fuse to `target_charge=0`, i.e. where `fused_charges==0`,
+  together with their corresponding index-values of the data in the dense array.
+  `find_dense_blocks` returns a dict mapping tuples `(left_charge, right_charge)`
+  to an array of integers.
+  For the above example, we get:
+  * for `left_charge` = -2 and `right_charge` = 2 we get an array [2]. Thus, `fused_charges[2]`
+    was obtained from fusing -2 and 2. 
+ * for `left_charge` = 0 and `right_charge` = 0 we get an array [5, 13, 17]. Thus, + `fused_charges[5,13,17]` were obtained from fusing 0 and 0. + * for `left_charge` = 1 and `right_charge` = -1 we get an array [8]. Thus, `fused_charges[8]` + was obtained from fusing 1 and -1. + Args: + left_charges: An np.ndarray of integer charges. + left_flow: The flow direction of the left charges. + right_charges: An np.ndarray of integer charges. + right_flow: The flow direction of the right charges. + target_charge: The target charge. + Returns: + dict: Mapping tuples of integers to np.ndarray of integers. + """ + check_flows([left_flow, right_flow]) + unique_left = np.unique(left_charges) + unique_right = np.unique(right_charges) + fused = fuse_charges([unique_left, unique_right], [left_flow, right_flow]) + left_inds, right_inds = unfuse( + np.nonzero(fused == target_charge)[0], len(unique_left), + len(unique_right)) + left_c = unique_left[left_inds] + right_c = unique_right[right_inds] + len_right_charges = len(right_charges) + linear_positions = {} + for left_charge, right_charge in zip(left_c, right_c): + left_positions = np.nonzero(left_charges == left_charge)[0] + left_offsets = np.expand_dims(left_positions * len_right_charges, 1) + right_offsets = np.expand_dims( + np.nonzero(right_charges == right_charge)[0], 0) + linear_positions[(left_charge, right_charge)] = np.reshape( + left_offsets + right_offsets, + left_offsets.shape[0] * right_offsets.shape[1]) + return linear_positions + + +def compute_block_table(charges: List[np.ndarray], + flows: List[Union[bool, int]], + target_charge: int) -> int: """ Compute a mapping table mapping the linear positions of the non-zero elements to their multi-index label. @@ -576,18 +714,39 @@ def compute_mapping_table(charges: List[np.ndarray], with values `1` or `-1`, denoting the flow direction of the charges on each leg. `1` is inflowing, `-1` is outflowing charge. 
+ target_charge: The total target charge of the blocks to be calculated. Returns: np.ndarray: An (N, r) np.ndarray of dtype np.int16, with `N` the number of non-zero elements, and `r` the rank of the tensor. """ - # we are using row-major encoding, meaning that the last index - # is moving quickest when iterating through the linear data - # transposing is done taking, for each value of the indices i_0 to i_N-2 - # the junk i_N-1 that gives non-zero + #find the best partition (the one where left and right dimensions are + #closest + dims = np.asarray([len(c) for c in charges]) + + # #all legs smaller or equal to `min_ind` are on the left side + # #of the partition. All others are on the right side. + # min_ind = np.argmin([ + # np.abs(np.prod(dims[0:n]) - np.prod(dims[n::])) + # for n in range(1, len(charges)) + # ]) + # fused_left_charges = fuse_charges(charges[0:min_ind + 1], + # flows[0:min_ind + 1]) + # fused_right_charges = fuse_charges(charges[min_ind + 1::], + # flows[min_ind + 1::]) + + fused_charges = fuse_charges(charges, flows) + nz_indices = np.nonzero(fused_charges == target_charge)[0] + + if len(nz_indices) == 0: + raise ValueError( + "`charges` do not add up to a total charge {}".format(target_charge)) - #for example - raise NotImplementedError() + index_locations = [] + for n in reversed(range(len(charges))): + nz_indices, right_indices = unfuse(nz_indices, np.prod(dims[0:n]), dims[n]) + index_locations.insert(0, right_indices) + return index_locations class BlockSparseTensor: @@ -806,7 +965,7 @@ def raise_error(): elif dense_shape[n] < self.dense_shape[n]: raise_error() - def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: + def get_diagonal_blocks_new(self, return_data: Optional[bool] = True) -> Dict: """ Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. 
@@ -830,24 +989,20 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" .format(self.rank)) - row_indices = self.indices[0].get_elementary_indices() - column_indices = self.indices[1].get_elementary_indices() - - return retrieve_non_zero_diagonal_blocks( - data=self.data, - row_charges=[i.charges for i in row_indices], - column_charges=[i.charges for i in column_indices], - row_flows=[i.flow for i in row_indices], - column_flows=[i.flow for i in column_indices], - return_data=return_data) + return find_dense_blocks( + left_charges=self.indices[0].charges, + left_flow=1, + right_charges=self.indices[1].charges, + right_flow=1, + target_charge=0) - def get_diagonal_blocks_old_version( - self, return_data: Optional[bool] = True) -> Dict: + def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: """ - Deprecated - Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. + For matrices with shape[0] << shape[1], this routine avoids explicit fusion + of column charges. + Args: return_data: If `True`, the return dictionary maps quantum numbers `q` to actual `np.ndarray` with the data. This involves a copy of data. 
@@ -865,16 +1020,21 @@ def get_diagonal_blocks_old_version( "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" .format(self.rank)) - return retrieve_non_zero_diagonal_blocks_old_version( + row_indices = self.indices[0].get_elementary_indices() + column_indices = self.indices[1].get_elementary_indices() + + return find_diagonal_sparse_blocks( data=self.data, - charges=self.charges, - flows=self.flows, + row_charges=[i.charges for i in row_indices], + column_charges=[i.charges for i in column_indices], + row_flows=[i.flow for i in row_indices], + column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated( + def get_diagonal_blocks_old_version( self, return_data: Optional[bool] = True) -> Dict: """ - Deprecated + Deprecated Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. @@ -894,7 +1054,8 @@ def get_diagonal_blocks_deprecated( raise ValueError( "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" .format(self.rank)) - return retrieve_non_zero_diagonal_blocks_deprecated( + + return find_diagonal_sparse_blocks_old_version( data=self.data, charges=self.charges, flows=self.flows, From 1faf8c1921e9021838e953793b064a2002e1d6f8 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 20 Dec 2019 10:18:37 -0500 Subject: [PATCH 070/212] test for map_to_integer --- .../block_tensor/block_tensor_test.py | 92 ++++++++++++++++++- 1 file changed, 90 insertions(+), 2 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_test.py b/tensornetwork/block_tensor/block_tensor_test.py index 5e63237a5..bc2067fec 100644 --- a/tensornetwork/block_tensor/block_tensor_test.py +++ b/tensornetwork/block_tensor/block_tensor_test.py @@ -1,8 +1,8 @@ import numpy as np import pytest # pylint: disable=line-too-long -from tensornetwork.block_tensor.block_tensor import BlockSparseTensor, compute_num_nonzero -from index import Index +from 
tensornetwork.block_tensor.block_tensor import BlockSparseTensor, compute_num_nonzero, compute_block_table, find_dense_blocks, map_to_integer +from index import Index, fuse_charges np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128] @@ -30,3 +30,91 @@ def test_block_sparse_init(dtype): assert A.indices[r].name == 'index{}'.format(r) assert A.dense_shape == tuple([D] * rank) assert len(A.data) == num_elements + + +def test_block_table(): + D = 30 #bond dimension + B = 4 #number of blocks + dtype = np.int16 #the dtype of the quantum numbers + rank = 4 + flows = np.asarray([1 for _ in range(rank)]) + flows[-2::] = -1 + charges = [ + np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + for _ in range(rank) + ] + indices = [ + Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) + for n in range(rank) + ] + num_non_zero = compute_num_nonzero([i.charges for i in indices], + [i.flow for i in indices]) + + inds = compute_block_table(charges=charges, flows=flows, target_charge=0) + total = flows[0] * charges[0][inds[0]] + flows[1] * charges[1][ + inds[1]] + flows[2] * charges[2][inds[2]] + flows[3] * charges[3][inds[3]] + assert len(total) == len(np.nonzero(total == 0)[0]) + assert len(total) == num_non_zero + + +def test_find_dense_blocks(): + left_charges = [-2, 0, 1, 0, 0] + right_charges = [-1, 0, 2, 1] + target_charge = 0 + fused_charges = fuse_charges([left_charges, right_charges], [1, 1]) + blocks = find_dense_blocks(left_charges, 1, right_charges, 1, target_charge) + np.testing.assert_allclose(blocks[(-2, 2)], [2]) + np.testing.assert_allclose(blocks[(0, 0)], [5, 13, 17]) + np.testing.assert_allclose(blocks[(1, -1)], [8]) + + +def test_find_dense_blocks_2(): + D = 40 #bond dimension + B = 4 #number of blocks + dtype = np.int16 #the dtype of the quantum numbers + rank = 4 + flows = np.asarray([1 for _ in range(rank)]) + flows[-2::] = -1 + charges = [ + np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + for _ in 
range(rank) + ] + indices = [ + Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) + for n in range(rank) + ] + n1 = compute_num_nonzero([i.charges for i in indices], + [i.flow for i in indices]) + row_charges = fuse_charges([indices[n].charges for n in range(rank // 2)], + [1 for _ in range(rank // 2)]) + column_charges = fuse_charges( + [indices[n].charges for n in range(rank // 2, rank)], + [1 for _ in range(rank // 2, rank)]) + + i01 = indices[0] * indices[1] + i23 = indices[2] * indices[3] + blocks = find_dense_blocks(i01.charges, 1, i23.charges, 1, 0) + assert sum([len(v) for v in blocks.values()]) == n1 + + tensor = BlockSparseTensor.random(indices=indices, dtype=np.float64) + tensor.reshape((D * D, D * D)) + blocks_2 = tensor.get_diagonal_blocks(return_data=False) + np.testing.assert_allclose([k[0] for k in blocks.keys()], + list(blocks_2.keys())) + for c in blocks.keys(): + assert np.prod(blocks_2[c[0]][1]) == len(blocks[c]) + + +def test_map_to_integer(): + dims = [4, 3, 2] + dim_prod = [6, 2, 1] + N = 10 + table = np.stack([np.random.randint(0, d, N) for d in dims], axis=1) + integers = map_to_integer(dims, table) + ints = [] + for n in range(N): + i = 0 + for d in range(len(dims)): + i += dim_prod[d] * table[n, d] + ints.append(i) + np.testing.assert_allclose(ints, integers) From 053dcbe6c2c2805a50c2e1e4c12b6857a083f0e6 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 20 Dec 2019 14:08:21 -0500 Subject: [PATCH 071/212] added functions to find sparse positions when fusing two charges --- tensornetwork/block_tensor/block_tensor.py | 226 ++++++++++++++++----- 1 file changed, 179 insertions(+), 47 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 218ab50f6..46d8396c1 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -28,12 +28,26 @@ Tensor = Any -def check_flows(flows) -> None: +def _check_flows(flows) -> None: if 
(set(flows) != {1}) and (set(flows) != {-1}) and (set(flows) != {-1, 1}): raise ValueError( "flows = {} contains values different from 1 and -1".format(flows)) +def _find_best_partition(charges, flows): + dims = np.asarray([len(c) for c in charges]) + min_ind = np.argmin([ + np.abs(np.prod(dims[0:n]) - np.prod(dims[n::])) + for n in range(1, len(charges)) + ]) + fused_left_charges = fuse_charges(charges[0:min_ind + 1], + flows[0:min_ind + 1]) + fused_right_charges = fuse_charges(charges[min_ind + 1::], + flows[min_ind + 1::]) + + return fused_left_charges, fused_right_charges + + def map_to_integer(dims: Union[List, np.ndarray], table: np.ndarray, dtype: Optional[Type[np.number]] = np.int64): @@ -157,7 +171,7 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray], Each element corresponds to a non-zero valued block of the tensor. """ #FIXME: this routine is slow - check_flows(flows) + _check_flows(flows) degeneracies = [] unique_charges = [] rank = len(charges) @@ -230,7 +244,7 @@ def find_diagonal_sparse_blocks(data: np.ndarray, """ flows = row_flows.copy() flows.extend(column_flows) - check_flows(flows) + _check_flows(flows) if len(flows) != (len(row_charges) + len(column_charges)): raise ValueError( "`len(flows)` is different from `len(row_charges) + len(column_charges)`" @@ -287,11 +301,12 @@ def find_diagonal_sparse_blocks(data: np.ndarray, # masks[0] = [True, False, True, True, False] # and `stop_positions[masks[0]] - column_degeneracies[0]` stop_positions = np.cumsum(degeneracy_vector) + start_positions = stop_positions - degeneracy_vector blocks = {} for c in common_charges: #numpy broadcasting is substantially faster than kron! 
- a = np.expand_dims(stop_positions[masks[c]] - column_degeneracies[-c], 0) + a = np.expand_dims(start_positions[masks[c]], 0) b = np.expand_dims(np.arange(column_degeneracies[-c]), 1) if not return_data: blocks[c] = [ @@ -352,17 +367,20 @@ def find_diagonal_sparse_blocks_test( """ flows = row_flows.copy() flows.extend(column_flows) - check_flows(flows) + _check_flows(flows) if len(flows) != (len(row_charges) + len(column_charges)): raise ValueError( "`len(flows)` is different from `len(row_charges) + len(column_charges)`" ) #since we are using row-major we have to fuse the row charges anyway. - fused_row_charges = fuse_charges(row_charges, row_flows) - #get the unique row-charges - unique_row_charges, row_dims = np.unique( - fused_row_charges, return_counts=True) + left_row_charges, right_row_charges = _find_best_partition( + row_charges, row_flows) + + unique_left = np.unique(left_row_charges) + unique_right = np.unique(right_row_charges) + unique_row_charges = np.unique( + fuse_charges(charges=[unique_left, unique_right], flows=[1, 1])) #get the unique column-charges #we only care about their degeneracies, not their order; that's much faster @@ -374,26 +392,28 @@ def find_diagonal_sparse_blocks_test( unique_row_charges, -unique_column_charges, assume_unique=True) #convenience container for storing the degeneracies of each - #row and column charge - row_degeneracies = dict(zip(unique_row_charges, row_dims)) + #column charge column_degeneracies = dict(zip(unique_column_charges, column_dims)) - - # we only care about charges common to row and columns - mask = np.isin(fused_row_charges, common_charges) - relevant_row_charges = fused_row_charges[mask] + row_locations = {} + row_locations = find_sparse_positions( + left_charges=left_row_charges, + left_flow=1, + right_charges=right_row_charges, + right_flow=1, + target_charges=common_charges) #some numpy magic to get the index locations of the blocks #we generate a vector of `len(relevant_row_charges) which, #for each 
charge `c` in `relevant_row_charges` holds the #column-degeneracy of charge `c` - degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64) + + degeneracy_vector = np.empty( + np.sum([len(v) for v in row_locations.values()]), dtype=np.int64) #for each charge `c` in `common_charges` we generate a boolean mask #for indexing the positions where `relevant_column_charges` has a value of `c`. masks = {} for c in common_charges: - mask = relevant_row_charges == c - masks[c] = mask - degeneracy_vector[mask] = column_degeneracies[-c] + degeneracy_vector[row_locations[c]] = column_degeneracies[-c] # the result of the cumulative sum is a vector containing # the stop positions of the non-zero values of each row @@ -409,22 +429,24 @@ def find_diagonal_sparse_blocks_test( # masks[0] = [True, False, True, True, False] # and `stop_positions[masks[0]] - column_degeneracies[0]` stop_positions = np.cumsum(degeneracy_vector) + start_positions = stop_positions - degeneracy_vector blocks = {} for c in common_charges: #numpy broadcasting is substantially faster than kron! 
- a = np.expand_dims(stop_positions[masks[c]] - column_degeneracies[-c], 0) + a = np.expand_dims(start_positions[row_locations[c]], 0) b = np.expand_dims(np.arange(column_degeneracies[-c]), 1) if not return_data: blocks[c] = [ - np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), - (row_degeneracies[c], column_degeneracies[-c]) + np.reshape(a + b, + len(row_locations[c]) * column_degeneracies[-c]), + (len(row_locations[c]), column_degeneracies[-c]) ] else: blocks[c] = np.reshape( data[np.reshape(a + b, - row_degeneracies[c] * column_degeneracies[-c])], - (row_degeneracies[c], column_degeneracies[-c])) + len(row_locations[c]) * column_degeneracies[-c])], + (len(row_locations[c]), column_degeneracies[-c])) return blocks @@ -463,7 +485,7 @@ def find_diagonal_sparse_blocks_old_version( """ if len(charges) != 2: raise ValueError("input has to be a two-dimensional symmetric matrix") - check_flows(flows) + _check_flows(flows) if len(flows) != len(charges): raise ValueError("`len(flows)` is different from `len(charges)`") @@ -570,7 +592,7 @@ def find_diagonal_sparse_blocks_column_major( """ if len(charges) != 2: raise ValueError("input has to be a two-dimensional symmetric matrix") - check_flows(flows) + _check_flows(flows) if len(flows) != len(charges): raise ValueError("`len(flows)` is different from `len(charges)`") @@ -641,13 +663,13 @@ def find_diagonal_sparse_blocks_column_major( return blocks -def find_dense_blocks(left_charges: np.ndarray, left_flow: int, - right_charges: np.ndarray, right_flow: int, - target_charge: int) -> Dict: +def find_dense_positions(left_charges: np.ndarray, left_flow: int, + right_charges: np.ndarray, right_flow: int, + target_charge: int) -> Dict: """ - Find the dense locations of the blocks xs(i.e. the index-values within the DENSE tensor) - in the vector `fused_charges` resulting from fusing np.ndarrays - `left_charges` and `right_charges` that have a value of `target_charge`. + Find the dense locations of elements (i.e. 
the index-values within the DENSE tensor) +  in the vector `fused_charges` (resulting from fusing np.ndarrays +  `left_charges` and `right_charges`) that have a value of `target_charge`.    For example, given    ```    left_charges = [-2,0,1,0,0]    right_charges = [-1,0,2,1] @@ -677,7 +699,7 @@ def find_dense_blocks(left_charges: np.ndarray, left_flow: int,    Returns:      dict: Mapping tuples of integers to np.ndarray of integers.    """ -  check_flows([left_flow, right_flow]) +  _check_flows([left_flow, right_flow])    unique_left = np.unique(left_charges)    unique_right = np.unique(right_charges)    fused = fuse_charges([unique_left, unique_right], [left_flow, right_flow]) @@ -699,12 +721,112 @@ def find_dense_blocks(left_charges: np.ndarray, left_flow: int,    return linear_positions  -def compute_block_table(charges: List[np.ndarray], -                        flows: List[Union[bool, int]], -                        target_charge: int) -> int: +def find_sparse_positions(left_charges: np.ndarray, left_flow: int, +                          right_charges: np.ndarray, right_flow: int, +                          target_charges: Union[List[int], np.ndarray]) -> Dict:    """ -  Compute a mapping table mapping the linear positions of the non-zero -  elements to their multi-index label. +  Find the sparse locations of elements (i.e. the index-values within the SPARSE tensor) +  in the vector `fused_charges` (resulting from fusing np.ndarrays +  `left_charges` and `right_charges`) that have a value of `target_charge`, +  assuming that all elements different from `target_charges` are `0`. +  For example, given +  ``` +  left_charges = [-2,0,1,0,0] +  right_charges = [-1,0,2,1] +  target_charges = [0,1] +  fused_charges = fuse_charges([left_charges, right_charges],[1,1]) +  print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] +  ```                      0    1      2    3        4   5      6    7   8 +  we want to find all the different blocks +  that fuse to `target_charges=[0,1]`, i.e. 
where `fused_charges==0` or `1`, +  together with their corresponding sparse index-values of the data in the sparse array, +  assuming that all elements in `fused_charges` different from `target_charges` are 0. + +  `find_sparse_positions` returns a dict mapping integers `target_charge` +  to an array of integers denoting the sparse locations of elements within +  `fused_charges`. +  For the above example, we get: +  * `target_charge=0`: [0,1,3,5,7] +  * `target_charge=1`: [2,4,6,8] +  Args: +    left_charges: An np.ndarray of integer charges. +    left_flow: The flow direction of the left charges. +    right_charges: An np.ndarray of integer charges. +    right_flow: The flow direction of the right charges. +    target_charges: The target charges. +  Returns: +    dict: Mapping integers to np.ndarray of integers. +  """ +  #FIXME: this is probably still not optimal + +  _check_flows([left_flow, right_flow]) +  target_charges = np.unique(target_charges) +  unique_left = np.unique(left_charges) +  unique_right = np.unique(right_charges) +  fused = fuse_charges([unique_left, unique_right], [left_flow, right_flow]) + +  #compute all unique charges that can add up to +  #target_charges +  left_inds, right_inds = [], [] +  for target_charge in target_charges: +    li, ri = unfuse( +        np.nonzero(fused == target_charge)[0], len(unique_left), +        len(unique_right)) +    left_inds.append(li) +    right_inds.append(ri) + +  #compute the relevant unique left and right charges +  unique_left_charges = unique_left[np.unique(np.concatenate(left_inds))] +  unique_right_charges = unique_right[np.unique(np.concatenate(right_inds))] + +  relevant_left_charges = left_charges[np.isin(left_charges, +                                               unique_left_charges)] +  relevant_right_charges = right_charges[np.isin(right_charges, +                                                 unique_right_charges)] +  unique_right_charges, right_dims = np.unique( +      relevant_right_charges, return_counts=True) +  right_degeneracies = dict(zip(unique_right_charges, right_dims)) +  degeneracy_vector = np.empty(len(relevant_left_charges), dtype=np.int64) + 
total_row_degeneracies = {} + right_indices = {} + for left_charge in unique_left_charges: + total_degeneracy = np.sum(right_dims[np.isin( + left_flow * left_charge + right_flow * unique_right_charges, + target_charges)]) + tmp_relevant_right_charges = relevant_right_charges[np.isin( + relevant_right_charges, + (target_charges - left_flow * left_charge) * right_flow)] + + for target_charge in target_charges: + right_indices[(left_charge, target_charge)] = np.nonzero( + tmp_relevant_right_charges == + (target_charge - left_flow * left_charge) * right_flow)[0] + + degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy + + stop_positions = np.cumsum(degeneracy_vector) + start_positions = stop_positions - degeneracy_vector + blocks = {t: [] for t in target_charges} + for left_charge in unique_left_charges: + a = np.expand_dims(start_positions[relevant_left_charges == left_charge], 0) + for target_charge in target_charges: + ri = right_indices[(left_charge, target_charge)] + if len(ri) != 0: + b = np.expand_dims(ri, 1) + tmp = a + b + blocks[target_charge].append(np.reshape(tmp, np.prod(tmp.shape))) + out = {} + for target_charge in target_charges: + out[target_charge] = np.concatenate(blocks[target_charge]) + return out + + +def compute_dense_to_sparse_table(charges: List[np.ndarray], + flows: List[Union[bool, int]], + target_charge: int) -> int: + """ + Compute a table mapping multi-index positions to the linear positions + within the sparse data container. Args: charges: List of np.ndarray of int, one for each leg of the underlying tensor. Each np.ndarray `charges[leg]` @@ -781,7 +903,7 @@ def __init__(self, data: np.ndarray, indices: List[Index]) -> None: indices: List of `Index` objecst, one for each leg. 
""" self.indices = indices - check_flows(self.flows) + _check_flows(self.flows) num_non_zero_elements = compute_num_nonzero(self.charges, self.flows) if num_non_zero_elements != len(data.flat): @@ -964,8 +1086,14 @@ def raise_error(): raise_error() elif dense_shape[n] < self.dense_shape[n]: raise_error() - - def get_diagonal_blocks_new(self, return_data: Optional[bool] = True) -> Dict: + #at this point the first len(dense_shape) indices of the tensor + #match the `dense_shape`. + while len(dense_shape) < len(self.indices): + i2, i1 = self.indices.pop(), self.indices.pop() + self.indices.append(fuse_index_pair(i1, i2)) + + def get_diagonal_blocks_test(self, + return_data: Optional[bool] = True) -> Dict: """ Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. @@ -989,12 +1117,16 @@ def get_diagonal_blocks_new(self, return_data: Optional[bool] = True) -> Dict: "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" .format(self.rank)) - return find_dense_blocks( - left_charges=self.indices[0].charges, - left_flow=1, - right_charges=self.indices[1].charges, - right_flow=1, - target_charge=0) + row_indices = self.indices[0].get_elementary_indices() + column_indices = self.indices[1].get_elementary_indices() + + return find_diagonal_sparse_blocks_test( + data=self.data, + row_charges=[i.charges for i in row_indices], + column_charges=[i.charges for i in column_indices], + row_flows=[i.flow for i in row_indices], + column_flows=[i.flow for i in column_indices], + return_data=return_data) def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: """ From 3076ff0ba457a7dd699e02b27361306c61c2583d Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 20 Dec 2019 14:26:31 -0500 Subject: [PATCH 072/212] renaming of routines --- tensornetwork/block_tensor/block_tensor.py | 108 ++++++++++++--------- 1 file changed, 62 insertions(+), 46 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py 
b/tensornetwork/block_tensor/block_tensor.py index 46d8396c1..0efac6a91 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -35,6 +35,10 @@ def _check_flows(flows) -> None: def _find_best_partition(charges, flows): + if len(charges) == 1: + raise ValueError( + '_expecting `charges` with a length of at least 2, got `len(charges)={}`' + .format(len(charges))) dims = np.asarray([len(c) for c in charges]) min_ind = np.argmin([ np.abs(np.prod(dims[0:n]) - np.prod(dims[n::])) @@ -199,12 +203,13 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray], return charge_shape_dict -def find_diagonal_sparse_blocks(data: np.ndarray, - row_charges: List[Union[List, np.ndarray]], - column_charges: List[Union[List, np.ndarray]], - row_flows: List[Union[bool, int]], - column_flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: +def find_diagonal_sparse_blocks_version_1( + data: np.ndarray, + row_charges: List[Union[List, np.ndarray]], + column_charges: List[Union[List, np.ndarray]], + row_flows: List[Union[bool, int]], + column_flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: """ Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. 
@@ -321,13 +326,12 @@ def find_diagonal_sparse_blocks(data: np.ndarray, return blocks -def find_diagonal_sparse_blocks_test( - data: np.ndarray, - row_charges: List[Union[List, np.ndarray]], - column_charges: List[Union[List, np.ndarray]], - row_flows: List[Union[bool, int]], - column_flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: +def find_diagonal_sparse_blocks(data: np.ndarray, + row_charges: List[Union[List, np.ndarray]], + column_charges: List[Union[List, np.ndarray]], + row_flows: List[Union[bool, int]], + column_flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: """ Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. @@ -373,40 +377,52 @@ def find_diagonal_sparse_blocks_test( "`len(flows)` is different from `len(row_charges) + len(column_charges)`" ) - #since we are using row-major we have to fuse the row charges anyway. - left_row_charges, right_row_charges = _find_best_partition( - row_charges, row_flows) - - unique_left = np.unique(left_row_charges) - unique_right = np.unique(right_row_charges) - unique_row_charges = np.unique( - fuse_charges(charges=[unique_left, unique_right], flows=[1, 1])) - #get the unique column-charges #we only care about their degeneracies, not their order; that's much faster #to compute since we don't have to fuse all charges explicitly unique_column_charges, column_dims = compute_fused_charge_degeneracies( column_charges, column_flows) - #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - #convenience container for storing the degeneracies of each #column charge column_degeneracies = dict(zip(unique_column_charges, column_dims)) - row_locations = {} - row_locations = find_sparse_positions( - left_charges=left_row_charges, - left_flow=1, - right_charges=right_row_charges, - right_flow=1, - 
target_charges=common_charges) - #some numpy magic to get the index locations of the blocks - #we generate a vector of `len(relevant_row_charges) which, - #for each charge `c` in `relevant_row_charges` holds the - #column-degeneracy of charge `c` + if len(row_charges) > 1: + left_row_charges, right_row_charges = _find_best_partition( + row_charges, row_flows) + unique_left = np.unique(left_row_charges) + unique_right = np.unique(right_row_charges) + unique_row_charges = np.unique( + fuse_charges(charges=[unique_left, unique_right], flows=[1, 1])) + + #get the charges common to rows and columns (only those matter) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + + row_locations = {} + row_locations = find_sparse_positions( + left_charges=left_row_charges, + left_flow=1, + right_charges=right_row_charges, + right_flow=1, + target_charges=common_charges) + elif len(row_charges) == 1: + fused_row_charges = fuse_charges(row_charges, row_flows) + + #get the unique row-charges + unique_row_charges, row_dims = np.unique( + fused_row_charges, return_counts=True) + #get the charges common to rows and columns (only those matter) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + relevant_fused_row_charges = fused_row_charges[np.isin( + fused_row_charges, common_charges)] + row_locations = {} + for c in common_charges: + row_locations[c] = np.nonzero(relevant_fused_row_charges == c)[0] + else: + raise ValueError('Found an empty sequence for `row_charges`') + #some numpy magic to get the index locations of the blocks degeneracy_vector = np.empty( np.sum([len(v) for v in row_locations.values()]), dtype=np.int64) #for each charge `c` in `common_charges` we generate a boolean mask @@ -450,7 +466,7 @@ def find_diagonal_sparse_blocks_test( return blocks -def find_diagonal_sparse_blocks_old_version( +def find_diagonal_sparse_blocks_version_0( data: np.ndarray, charges: 
List[np.ndarray], flows: List[Union[bool, int]], @@ -1092,8 +1108,7 @@ def raise_error(): i2, i1 = self.indices.pop(), self.indices.pop() self.indices.append(fuse_index_pair(i1, i2)) - def get_diagonal_blocks_test(self, - return_data: Optional[bool] = True) -> Dict: + def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: """ Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. @@ -1120,7 +1135,7 @@ def get_diagonal_blocks_test(self, row_indices = self.indices[0].get_elementary_indices() column_indices = self.indices[1].get_elementary_indices() - return find_diagonal_sparse_blocks_test( + return find_diagonal_sparse_blocks( data=self.data, row_charges=[i.charges for i in row_indices], column_charges=[i.charges for i in column_indices], @@ -1128,7 +1143,8 @@ def get_diagonal_blocks_test(self, column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: + def get_diagonal_blocks_version_1(self, + return_data: Optional[bool] = True) -> Dict: """ Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. 
@@ -1155,7 +1171,7 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: row_indices = self.indices[0].get_elementary_indices() column_indices = self.indices[1].get_elementary_indices() - return find_diagonal_sparse_blocks( + return find_diagonal_sparse_blocks_version_1( data=self.data, row_charges=[i.charges for i in row_indices], column_charges=[i.charges for i in column_indices], @@ -1163,8 +1179,8 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_old_version( - self, return_data: Optional[bool] = True) -> Dict: + def get_diagonal_blocks_version_0(self, + return_data: Optional[bool] = True) -> Dict: """ Deprecated @@ -1187,7 +1203,7 @@ def get_diagonal_blocks_old_version( "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" .format(self.rank)) - return find_diagonal_sparse_blocks_old_version( + return find_diagonal_sparse_blocks_version_0( data=self.data, charges=self.charges, flows=self.flows, From 386d1779a34a0d7ebc285a656a049012f90d6df7 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 20 Dec 2019 14:26:51 -0500 Subject: [PATCH 073/212] added unfuse --- tensornetwork/block_tensor/index.py | 45 +++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index a299fa381..b4bba3cee 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -239,3 +239,48 @@ def split_index(index: Index) -> Tuple[Index, Index]: raise ValueError("cannot split an elementary index") return index.left_child, index.right_child + + +def unfuse(fused_indices: np.ndarray, len_left: int, + len_right: int) -> Tuple[np.ndarray, np.ndarray]: + """ + Given an np.ndarray `fused_indices` of integers denoting + index-positions of elements within a 1d array, `unfuse` + obtains the index-positions of the 
elements in the left and + right np.ndarrays `left`, `right` which, upon fusion, + are placed at the index-positions given by + `fused_indices` in the fused np.ndarray. + An example will help to illuminate this: + Given np.ndarrays `left`, `right` and the result + of their fusion (`fused`): + + ``` + left = [0,1,0,2] + right = [-1,3,-2] + fused = fuse_charges([left, right], flows=[1,1]) + print(fused) #[-1 3 -2 0 4 -1 -1 3 -2 1 5 0] + ``` + + we want to find which elements in `left` and `right` + fuse to a value of 0. In the above case, there are two + 0 in `fused`: one is obtained from fusing `left[1]` and + `right[0]`, the second one from fusing `left[3]` and `right[2]` + `unfuse` returns the index-positions of these values within + `left` and `right`, that is + + ``` + left_index_values, right_index_values = unfuse(np.nonzero(fused==0)[0], len(left), len(right)) + print(left_index_values) # [1,3] + print(right_index_values) # [0,2] + ``` + + Args: + fused_indices: A 1d np.ndarray of integers. + len_left: The length of the left np.ndarray. + len_right: The length of the right np.ndarray. 
+  Returns: +    (np.ndarray, np.ndarray) +  """ +  right = fused_indices % len_right +  left = (fused_indices - right) // len_right +  return left, right From f32edc3e1c0a99534a0e248421cef0de7115ec0b Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 20 Dec 2019 14:27:07 -0500 Subject: [PATCH 074/212] test unfuse --- tensornetwork/block_tensor/index_test.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/index_test.py b/tensornetwork/block_tensor/index_test.py index 1bb2c37be..0feb2eb15 100644 --- a/tensornetwork/block_tensor/index_test.py +++ b/tensornetwork/block_tensor/index_test.py @@ -1,6 +1,6 @@ import numpy as np # pylint: disable=line-too-long -from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies, fuse_charge_pair, fuse_indices +from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies, fuse_charge_pair, fuse_indices, unfuse  def test_fuse_charge_pair(): @@ -180,3 +180,17 @@ def test_copy():    assert elmt1234[1] is not i2    assert elmt1234[2] is not i3    assert elmt1234[3] is not i4 + + +def test_unfuse(): +  q1 = np.random.randint(-4, 5, 10).astype(np.int16) +  q2 = np.random.randint(-4, 5, 4).astype(np.int16) +  q3 = np.random.randint(-4, 5, 4).astype(np.int16) +  q12 = fuse_charges([q1, q2], [1, 1]) +  q123 = fuse_charges([q12, q3], [1, 1]) +  nz = np.nonzero(q123 == 0)[0] +  q12_inds, q3_inds = unfuse(nz, len(q12), len(q3)) + +  q1_inds, q2_inds = unfuse(q12_inds, len(q1), len(q2)) +  np.testing.assert_allclose(q1[q1_inds] + q2[q2_inds] + q3[q3_inds], +                             np.zeros(len(q1_inds), dtype=np.int16)) From 2048841b84fe10c13c080846da71799a299d9123 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 20 Dec 2019 15:24:09 -0500 Subject: [PATCH 075/212] fixed bug in the new routine for finding diagonal blocks --- tensornetwork/block_tensor/block_tensor.py | 26 ++++++++-------------- 1 file changed, 9 insertions(+), 17 
deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 0efac6a91..fd4e13f5d 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -278,7 +278,6 @@ def find_diagonal_sparse_blocks_version_1( # we only care about charges common to row and columns mask = np.isin(fused_row_charges, common_charges) relevant_row_charges = fused_row_charges[mask] - #some numpy magic to get the index locations of the blocks #we generate a vector of `len(relevant_row_charges) which, #for each charge `c` in `relevant_row_charges` holds the @@ -311,8 +310,8 @@ def find_diagonal_sparse_blocks_version_1( for c in common_charges: #numpy broadcasting is substantially faster than kron! - a = np.expand_dims(start_positions[masks[c]], 0) - b = np.expand_dims(np.arange(column_degeneracies[-c]), 1) + a = np.expand_dims(start_positions[masks[c]], 1) + b = np.expand_dims(np.arange(column_degeneracies[-c]), 0) if not return_data: blocks[c] = [ np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), @@ -398,7 +397,6 @@ def find_diagonal_sparse_blocks(data: np.ndarray, common_charges = np.intersect1d( unique_row_charges, -unique_column_charges, assume_unique=True) - row_locations = {} row_locations = find_sparse_positions( left_charges=left_row_charges, left_flow=1, @@ -421,7 +419,6 @@ def find_diagonal_sparse_blocks(data: np.ndarray, row_locations[c] = np.nonzero(relevant_fused_row_charges == c)[0] else: raise ValueError('Found an empty sequence for `row_charges`') - #some numpy magic to get the index locations of the blocks degeneracy_vector = np.empty( np.sum([len(v) for v in row_locations.values()]), dtype=np.int64) @@ -450,19 +447,14 @@ def find_diagonal_sparse_blocks(data: np.ndarray, for c in common_charges: #numpy broadcasting is substantially faster than kron! 
- a = np.expand_dims(start_positions[row_locations[c]], 0) - b = np.expand_dims(np.arange(column_degeneracies[-c]), 1) + a = np.expand_dims(start_positions[np.sort(row_locations[c])], 1) + b = np.expand_dims(np.arange(column_degeneracies[-c]), 0) + inds = np.reshape(a + b, len(row_locations[c]) * column_degeneracies[-c]) if not return_data: - blocks[c] = [ - np.reshape(a + b, - len(row_locations[c]) * column_degeneracies[-c]), - (len(row_locations[c]), column_degeneracies[-c]) - ] + blocks[c] = [inds, (len(row_locations[c]), column_degeneracies[-c])] else: - blocks[c] = np.reshape( - data[np.reshape(a + b, - len(row_locations[c]) * column_degeneracies[-c])], - (len(row_locations[c]), column_degeneracies[-c])) + blocks[c] = np.reshape(data[inds], + (len(row_locations[c]), column_degeneracies[-c])) return blocks @@ -830,7 +822,7 @@ def find_sparse_positions(left_charges: np.ndarray, left_flow: int, if len(ri) != 0: b = np.expand_dims(ri, 1) tmp = a + b - blocks[target_charge].append(np.reshape(tmp, np.prod(tmp.shape))) + blocks[target_charge].append(np.reshape(tmp, np.prod(tmp.shape))) out = {} for target_charge in target_charges: out[target_charge] = np.concatenate(blocks[target_charge]) From 0c4f557d58d8d975a8c5578c462ae04908f5282b Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 20 Dec 2019 15:24:15 -0500 Subject: [PATCH 076/212] test added --- .../block_tensor/block_tensor_test.py | 49 ++++++++++++++++--- 1 file changed, 43 insertions(+), 6 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_test.py b/tensornetwork/block_tensor/block_tensor_test.py index bc2067fec..e862d811e 100644 --- a/tensornetwork/block_tensor/block_tensor_test.py +++ b/tensornetwork/block_tensor/block_tensor_test.py @@ -1,7 +1,7 @@ import numpy as np import pytest # pylint: disable=line-too-long -from tensornetwork.block_tensor.block_tensor import BlockSparseTensor, compute_num_nonzero, compute_block_table, find_dense_blocks, map_to_integer +from 
tensornetwork.block_tensor.block_tensor import BlockSparseTensor, compute_num_nonzero, compute_dense_to_sparse_table, find_sparse_positions, find_dense_positions, map_to_integer from index import Index, fuse_charges np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128] @@ -50,25 +50,27 @@ def test_block_table(): num_non_zero = compute_num_nonzero([i.charges for i in indices], [i.flow for i in indices]) - inds = compute_block_table(charges=charges, flows=flows, target_charge=0) + inds = compute_dense_to_sparse_table( + charges=charges, flows=flows, target_charge=0) total = flows[0] * charges[0][inds[0]] + flows[1] * charges[1][ inds[1]] + flows[2] * charges[2][inds[2]] + flows[3] * charges[3][inds[3]] assert len(total) == len(np.nonzero(total == 0)[0]) assert len(total) == num_non_zero -def test_find_dense_blocks(): +def test_find_dense_positions(): left_charges = [-2, 0, 1, 0, 0] right_charges = [-1, 0, 2, 1] target_charge = 0 fused_charges = fuse_charges([left_charges, right_charges], [1, 1]) - blocks = find_dense_blocks(left_charges, 1, right_charges, 1, target_charge) + blocks = find_dense_positions(left_charges, 1, right_charges, 1, + target_charge) np.testing.assert_allclose(blocks[(-2, 2)], [2]) np.testing.assert_allclose(blocks[(0, 0)], [5, 13, 17]) np.testing.assert_allclose(blocks[(1, -1)], [8]) -def test_find_dense_blocks_2(): +def test_find_dense_positions_2(): D = 40 #bond dimension B = 4 #number of blocks dtype = np.int16 #the dtype of the quantum numbers @@ -93,7 +95,7 @@ def test_find_dense_blocks_2(): i01 = indices[0] * indices[1] i23 = indices[2] * indices[3] - blocks = find_dense_blocks(i01.charges, 1, i23.charges, 1, 0) + blocks = find_dense_positions(i01.charges, 1, i23.charges, 1, 0) assert sum([len(v) for v in blocks.values()]) == n1 tensor = BlockSparseTensor.random(indices=indices, dtype=np.float64) @@ -105,6 +107,41 @@ def test_find_dense_blocks_2(): assert np.prod(blocks_2[c[0]][1]) == len(blocks[c]) +def 
test_find_sparse_positions(): + D = 40 #bond dimension + B = 4 #number of blocks + dtype = np.int16 #the dtype of the quantum numbers + rank = 4 + flows = np.asarray([1 for _ in range(rank)]) + flows[-2::] = -1 + charges = [ + np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + for _ in range(rank) + ] + indices = [ + Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) + for n in range(rank) + ] + n1 = compute_num_nonzero([i.charges for i in indices], + [i.flow for i in indices]) + row_charges = fuse_charges([indices[n].charges for n in range(rank // 2)], + [1 for _ in range(rank // 2)]) + column_charges = fuse_charges( + [indices[n].charges for n in range(rank // 2, rank)], + [1 for _ in range(rank // 2, rank)]) + + i01 = indices[0] * indices[1] + i23 = indices[2] * indices[3] + unique_row_charges = np.unique(i01.charges) + unique_column_charges = np.unique(i23.charges) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + blocks = find_sparse_positions( + i01.charges, 1, i23.charges, 1, target_charges=[0]) + assert sum([len(v) for v in blocks.values()]) == n1 + np.testing.assert_allclose(np.sort(blocks[0]), np.arange(n1)) + + def test_map_to_integer(): dims = [4, 3, 2] dim_prod = [6, 2, 1] From 69afbb6a20260d339c869178e34bedd15bfa6bdd Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 20 Dec 2019 15:26:08 -0500 Subject: [PATCH 077/212] docstring --- tensornetwork/block_tensor/block_tensor.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index fd4e13f5d..ba11e2965 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -211,6 +211,10 @@ def find_diagonal_sparse_blocks_version_1( column_flows: List[Union[bool, int]], return_data: Optional[bool] = True) -> Dict: """ + Deprecated + + This version is slow for matrices with shape[0] >> shape[1], but fast 
otherwise. + Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. `row_charges` and `column_charges` are lists of np.ndarray. The tensor From 9981237543b8bdb6f54f5ca7e24a5592d2307466 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 20 Dec 2019 15:50:12 -0500 Subject: [PATCH 078/212] added tests --- .../block_tensor/block_tensor_test.py | 53 +++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/tensornetwork/block_tensor/block_tensor_test.py b/tensornetwork/block_tensor/block_tensor_test.py index e862d811e..580ff3f10 100644 --- a/tensornetwork/block_tensor/block_tensor_test.py +++ b/tensornetwork/block_tensor/block_tensor_test.py @@ -142,6 +142,35 @@ def test_find_sparse_positions(): np.testing.assert_allclose(np.sort(blocks[0]), np.arange(n1)) +def test_find_sparse_positions_2(): + D = 40 #bond dimension + B = 4 #number of blocks + dtype = np.int16 #the dtype of the quantum numbers + flows = [1, -1] + + rank = len(flows) + charges = [ + np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + for _ in range(rank) + ] + indices = [ + Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) + for n in range(rank) + ] + i1, i2 = indices + common_charges = np.intersect1d(i1.charges, i2.charges) + row_locations = find_sparse_positions( + left_charges=i1.charges, + left_flow=flows[0], + right_charges=i2.charges, + right_flow=flows[1], + target_charges=common_charges) + fused = (i1 * i2).charges + relevant = fused[np.isin(fused, common_charges)] + for k, v in row_locations.items(): + np.testing.assert_allclose(np.nonzero(relevant == k)[0], np.sort(v)) + + def test_map_to_integer(): dims = [4, 3, 2] dim_prod = [6, 2, 1] @@ -155,3 +184,27 @@ def test_map_to_integer(): i += dim_prod[d] * table[n, d] ints.append(i) np.testing.assert_allclose(ints, integers) + + +def test_ge_diagonal_blocks(): + D = 40 #bond dimension + B = 4 #number of blocks + dtype = np.int16 #the dtype of the quantum 
numbers + rank = 4 + flows = np.asarray([1 for _ in range(rank)]) + flows[-2::] = -1 + charges = [ + np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + for _ in range(rank) + ] + indices = [ + Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) + for n in range(rank) + ] + common_charges = np.intersect1d(indices[0].charges, indices[1].charges) + row_locations = find_sparse_positions( + left_charges=indices[0].charges, + left_flow=1, + right_charges=indices[1].charges, + right_flow=1, + target_charges=common_charges) From 08b9a150e5bc3cbbecf7bb21804209d56554caec Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 20 Dec 2019 20:39:29 -0500 Subject: [PATCH 079/212] renaming --- tensornetwork/block_tensor/block_tensor.py | 36 +++++++++++++++++----- 1 file changed, 29 insertions(+), 7 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index ba11e2965..a531aa616 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -787,19 +787,23 @@ def find_sparse_positions(left_charges: np.ndarray, left_flow: int, left_inds.append(li) right_inds.append(ri) - #compute the relevant unique left and right charges + #now compute the relevant unique left and right charges unique_left_charges = unique_left[np.unique(np.concatenate(left_inds))] unique_right_charges = unique_right[np.unique(np.concatenate(right_inds))] + #only keep those charges that are relevant relevant_left_charges = left_charges[np.isin(left_charges, unique_left_charges)] relevant_right_charges = right_charges[np.isin(right_charges, unique_right_charges)] + unique_right_charges, right_dims = np.unique( relevant_right_charges, return_counts=True) right_degeneracies = dict(zip(unique_right_charges, right_dims)) + #generate a degeneracy vector which for each value r in relevant_right_charges + #holds the corresponding number of non-zero elements `relevant_right_charges` + #that can add up 
to `target_charges`. degeneracy_vector = np.empty(len(relevant_left_charges), dtype=np.int64) - total_row_degeneracies = {} right_indices = {} for left_charge in unique_left_charges: total_degeneracy = np.sum(right_dims[np.isin( @@ -833,12 +837,30 @@ def find_sparse_positions(left_charges: np.ndarray, left_flow: int, return out -def compute_dense_to_sparse_table(charges: List[np.ndarray], - flows: List[Union[bool, int]], - target_charge: int) -> int: +def compute_dense_to_sparse_mapping(charges: List[np.ndarray], + flows: List[Union[bool, int]], + target_charge: int) -> int: """ - Compute a table mapping multi-index positions to the linear positions - within the sparse data container. + Compute the mapping from multi-index positions to the linear positions + within the sparse data container, given the meta-data of a symmetric tensor. + This function returns a list of np.ndarray `index_positions`, with + `len(index_positions)=len(charges)` (equal to the rank of the tensor). + When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. + ` + multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) + ` + with `r` the rank of the tensor and `N` the number of non-zero elements of + the symmetric tensor, then the element at position `n` within the linear + data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], + i.e. `data[n]` has the multi-index `multi_indices[n,:]`, and the total charges + can for example be obtained using + ``` + index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) + total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) + for n in range(len(charges)): + total_charges += flows[n]*charges[n][index_positions[n]] + np.testing.assert_allclose(total_charges, 0) + ``` Args: charges: List of np.ndarray of int, one for each leg of the underlying tensor. 
Each np.ndarray `charges[leg]` From 4a7d0b6c7e9c168a5a84e70b939de08166c416b9 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 20 Dec 2019 20:39:37 -0500 Subject: [PATCH 080/212] tests --- .../block_tensor/block_tensor_test.py | 21 ++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_test.py b/tensornetwork/block_tensor/block_tensor_test.py index 580ff3f10..c6d6b4de4 100644 --- a/tensornetwork/block_tensor/block_tensor_test.py +++ b/tensornetwork/block_tensor/block_tensor_test.py @@ -1,7 +1,7 @@ import numpy as np import pytest # pylint: disable=line-too-long -from tensornetwork.block_tensor.block_tensor import BlockSparseTensor, compute_num_nonzero, compute_dense_to_sparse_table, find_sparse_positions, find_dense_positions, map_to_integer +from tensornetwork.block_tensor.block_tensor import BlockSparseTensor, compute_num_nonzero, compute_dense_to_sparse_mapping, find_sparse_positions, find_dense_positions, map_to_integer from index import Index, fuse_charges np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128] @@ -32,7 +32,7 @@ def test_block_sparse_init(dtype): assert len(A.data) == num_elements -def test_block_table(): +def test_dense_to_sparse_table(): D = 30 #bond dimension B = 4 #number of blocks dtype = np.int16 #the dtype of the quantum numbers @@ -43,18 +43,15 @@ def test_block_table(): np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) for _ in range(rank) ] - indices = [ - Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) - for n in range(rank) - ] - num_non_zero = compute_num_nonzero([i.charges for i in indices], - [i.flow for i in indices]) + num_non_zero = compute_num_nonzero(charges, flows) - inds = compute_dense_to_sparse_table( + inds = compute_dense_to_sparse_mapping( charges=charges, flows=flows, target_charge=0) - total = flows[0] * charges[0][inds[0]] + flows[1] * charges[1][ - inds[1]] + flows[2] * charges[2][inds[2]] + flows[3] 
* charges[3][inds[3]] - assert len(total) == len(np.nonzero(total == 0)[0]) + total = np.zeros(len(inds[0]), dtype=np.int16) + for n in range(len(charges)): + total += flows[n] * charges[n][inds[n]] + + np.testing.assert_allclose(total, 0) assert len(total) == num_non_zero From 00c279ebcee04ab43a77a7afb91d4774866968ea Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 23 Dec 2019 19:59:24 -0500 Subject: [PATCH 081/212] transpose added, map_to_integer removed (used np.unravel_index and np.ravel_multi_index instead) --- tensornetwork/block_tensor/block_tensor.py | 194 ++++++++++++++++----- 1 file changed, 153 insertions(+), 41 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index a531aa616..5284b9ffd 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -49,32 +49,7 @@ def _find_best_partition(charges, flows): fused_right_charges = fuse_charges(charges[min_ind + 1::], flows[min_ind + 1::]) - return fused_left_charges, fused_right_charges - - -def map_to_integer(dims: Union[List, np.ndarray], - table: np.ndarray, - dtype: Optional[Type[np.number]] = np.int64): - """ - Map a `table` of integers of shape (N, r) bijectively into - an np.ndarray `integers` of length N of unique numbers. - The mapping is done using - ``` - `integers[n] = table[n,0] * np.prod(dims[1::]) + table[n,1] * np.prod(dims[2::]) + ... + table[n,r-1] * 1` - - Args: - dims: An iterable of integers. - table: An array of shape (N,r) of integers. - dtype: An optional dtype used for the conversion. - Care should be taken when choosing this to avoid overflow issues. - Returns: - np.ndarray: An array of integers. 
- """ - converter_table = np.expand_dims( - np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))), 0) - tmp = table * converter_table - integers = np.sum(tmp, axis=1) - return integers + return fused_left_charges, fused_right_charges, min_ind + 1 def compute_fused_charge_degeneracies(charges: List[np.ndarray], @@ -390,7 +365,7 @@ def find_diagonal_sparse_blocks(data: np.ndarray, column_degeneracies = dict(zip(unique_column_charges, column_dims)) if len(row_charges) > 1: - left_row_charges, right_row_charges = _find_best_partition( + left_row_charges, right_row_charges, _ = _find_best_partition( row_charges, row_flows) unique_left = np.unique(left_row_charges) unique_right = np.unique(right_row_charges) @@ -879,21 +854,9 @@ def compute_dense_to_sparse_mapping(charges: List[np.ndarray], #find the best partition (the one where left and right dimensions are #closest dims = np.asarray([len(c) for c in charges]) - - # #all legs smaller or equal to `min_ind` are on the left side - # #of the partition. All others are on the right side. - # min_ind = np.argmin([ - # np.abs(np.prod(dims[0:n]) - np.prod(dims[n::])) - # for n in range(1, len(charges)) - # ]) - # fused_left_charges = fuse_charges(charges[0:min_ind + 1], - # flows[0:min_ind + 1]) - # fused_right_charges = fuse_charges(charges[min_ind + 1::], - # flows[min_ind + 1::]) - + t1 = time.time() fused_charges = fuse_charges(charges, flows) nz_indices = np.nonzero(fused_charges == target_charge)[0] - if len(nz_indices) == 0: raise ValueError( "`charges` do not add up to a total charge {}".format(target_charge)) @@ -905,6 +868,145 @@ def compute_dense_to_sparse_mapping(charges: List[np.ndarray], return index_locations +def compute_dense_to_sparse_mapping_2(charges: List[np.ndarray], + flows: List[Union[bool, int]], + target_charge: int) -> int: + """ + Compute the mapping from multi-index positions to the linear positions + within the sparse data container, given the meta-data of a symmetric tensor. 
+ This function returns a list of np.ndarray `index_positions`, with + `len(index_positions)=len(charges)` (equal to the rank of the tensor). + When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. + ` + multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) + ` + with `r` the rank of the tensor and `N` the number of non-zero elements of + the symmetric tensor, then the element at position `n` within the linear + data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], + i.e. `data[n]` has the multi-index `multi_indices[n,:]`, and the total charges + can for example be obtained using + ``` + index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) + total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) + for n in range(len(charges)): + total_charges += flows[n]*charges[n][index_positions[n]] + np.testing.assert_allclose(total_charges, 0) + ``` + Args: + charges: List of np.ndarray of int, one for each leg of the + underlying tensor. Each np.ndarray `charges[leg]` + is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + target_charge: The total target charge of the blocks to be calculated. + Returns: + np.ndarray: An (N, r) np.ndarray of dtype np.int16, + with `N` the number of non-zero elements, and `r` + the rank of the tensor. 
+ """ + #find the best partition (the one where left and right dimensions are + #closest + dims = np.asarray([len(c) for c in charges]) + + #note: left_charges and right_charges have been fused from RIGHT to LEFT + left_charges, right_charges, partition = _find_best_partition(charges, flows) + t1 = time.time() + blocks = find_dense_positions( + left_charges, 1, right_charges, 1, target_charge=target_charge) + + #value elements of `blocks` are already sorted + first_elements = sorted([(k, v[0]) for k, v in blocks.items()], + key=lambda x: x[1]) + + nz_indices = np.concatenate([blocks[t[0]] for t in first_elements]) + + if len(nz_indices) == 0: + raise ValueError( + "`charges` do not add up to a total charge {}".format(target_charge)) + t1 = time.time() + nz_left_indices, nz_right_indices = unfuse(nz_indices, len(left_charges), + len(right_charges)) + index_locations = [] + #first unfuse left charges + for n in range(partition): + t1 = time.time() + indices, nz_left_indices = unfuse(nz_left_indices, dims[n], + np.prod(dims[n + 1:partition])) + index_locations.append(indices) + + for n in range(partition, len(dims)): + indices, nz_right_indices = unfuse(nz_right_indices, dims[n], + np.prod(dims[n + 1::])) + index_locations.append(indices) + + return index_locations + + +def compute_dense_to_sparse_mapping_3(charges: List[np.ndarray], + flows: List[Union[bool, int]], + target_charge: int) -> int: + """ + Compute the mapping from multi-index positions to the linear positions + within the sparse data container, given the meta-data of a symmetric tensor. + This function returns a list of np.ndarray `index_positions`, with + `len(index_positions)=len(charges)` (equal to the rank of the tensor). + When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. 
+ ` + multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) + ` + with `r` the rank of the tensor and `N` the number of non-zero elements of + the symmetric tensor, then the element at position `n` within the linear + data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], + i.e. `data[n]` has the multi-index `multi_indices[n,:]`, and the total charges + can for example be obtained using + ``` + index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) + total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) + for n in range(len(charges)): + total_charges += flows[n]*charges[n][index_positions[n]] + np.testing.assert_allclose(total_charges, 0) + ``` + Args: + charges: List of np.ndarray of int, one for each leg of the + underlying tensor. Each np.ndarray `charges[leg]` + is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + target_charge: The total target charge of the blocks to be calculated. + Returns: + np.ndarray: An (N, r) np.ndarray of dtype np.int16, + with `N` the number of non-zero elements, and `r` + the rank of the tensor. 
+ """ + #find the best partition (the one where left and right dimensions are + #closest + dims = np.asarray([len(c) for c in charges]) + + #note: left_charges and right_charges have been fused from RIGHT to LEFT + left_charges, right_charges, partition = _find_best_partition(charges, flows) + t1 = time.time() + blocks = find_dense_positions( + left_charges, 1, right_charges, 1, target_charge=target_charge) + + #value elements of `blocks` are already sorted + first_elements = sorted([(k, v[0]) for k, v in blocks.items()], + key=lambda x: x[1]) + + nz_indices = np.concatenate([blocks[t[0]] for t in first_elements]) + + if len(nz_indices) == 0: + raise ValueError( + "`charges` do not add up to a total charge {}".format(target_charge)) + t1 = time.time() + return np.unravel_index(nz_indices, dims) + + class BlockSparseTensor: """ Minimal class implementation of block sparsity. @@ -1029,7 +1131,17 @@ def transpose(self, order): """ Transpose the tensor into the new order `order` """ - raise NotImplementedError('transpose is not implemented!!') + dims = [len(c) for c in self.charges] + left_charges, right_charges, _ = _find_best_partition( + self.charges, self.flows) + blocks = find_dense_positions( + left_charges, 1, right_charges, 1, target_charge=0) + nz_indices = np.sort(np.concatenate(list(blocks.values()))) + + multi_indices = np.unravel_index(nz_indices, dims) + transposed_linear_positions = np.ravel_multi_index( + [multi_indices[p] for p in order], dims=[dims[p] for p in order]) + self.data = self.data[np.argsort(transposed_linear_positions)] def reset_shape(self) -> None: """ From 21fe6361a1ce88d09356c0ca52fbc8faaab005f6 Mon Sep 17 00:00:00 2001 From: mganahl Date: Thu, 26 Dec 2019 21:20:12 -0500 Subject: [PATCH 082/212] find_dense_positions made faster --- tensornetwork/block_tensor/block_tensor.py | 404 +++++++++++++-------- 1 file changed, 255 insertions(+), 149 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py 
b/tensornetwork/block_tensor/block_tensor.py index 5284b9ffd..f48dacfbc 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -52,6 +52,31 @@ def _find_best_partition(charges, flows): return fused_left_charges, fused_right_charges, min_ind + 1 +def map_to_integer(dims: Union[List, np.ndarray], + table: np.ndarray, + dtype: Optional[Type[np.number]] = np.int64): + """ + Map a `table` of integers of shape (N, r) bijectively into + an np.ndarray `integers` of length N of unique numbers. + The mapping is done using + ``` + `integers[n] = table[n,0] * np.prod(dims[1::]) + table[n,1] * np.prod(dims[2::]) + ... + table[n,r-1] * 1` + + Args: + dims: An iterable of integers. + table: An array of shape (N,r) of integers. + dtype: An optional dtype used for the conversion. + Care should be taken when choosing this to avoid overflow issues. + Returns: + np.ndarray: An array of integers. + """ + converter_table = np.expand_dims( + np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))), 0) + tmp = table * converter_table + integers = np.sum(tmp, axis=1) + return integers + + def compute_fused_charge_degeneracies(charges: List[np.ndarray], flows: List[Union[bool, int]]) -> Dict: """ @@ -178,18 +203,13 @@ def compute_nonzero_block_shapes(charges: List[np.ndarray], return charge_shape_dict -def find_diagonal_sparse_blocks_version_1( - data: np.ndarray, - row_charges: List[Union[List, np.ndarray]], - column_charges: List[Union[List, np.ndarray]], - row_flows: List[Union[bool, int]], - column_flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: +def find_diagonal_sparse_blocks(data: np.ndarray, + row_charges: List[Union[List, np.ndarray]], + column_charges: List[Union[List, np.ndarray]], + row_flows: List[Union[bool, int]], + column_flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: """ - Deprecated - - This version is slow for matrices with shape[0] >> shape[1], but fast 
otherwise. - Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. `row_charges` and `column_charges` are lists of np.ndarray. The tensor @@ -234,41 +254,57 @@ def find_diagonal_sparse_blocks_version_1( "`len(flows)` is different from `len(row_charges) + len(column_charges)`" ) - #since we are using row-major we have to fuse the row charges anyway. - fused_row_charges = fuse_charges(row_charges, row_flows) - #get the unique row-charges - unique_row_charges, row_dims = np.unique( - fused_row_charges, return_counts=True) - #get the unique column-charges #we only care about their degeneracies, not their order; that's much faster #to compute since we don't have to fuse all charges explicitly unique_column_charges, column_dims = compute_fused_charge_degeneracies( column_charges, column_flows) - #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - #convenience container for storing the degeneracies of each - #row and column charge - row_degeneracies = dict(zip(unique_row_charges, row_dims)) + #column charge column_degeneracies = dict(zip(unique_column_charges, column_dims)) - # we only care about charges common to row and columns - mask = np.isin(fused_row_charges, common_charges) - relevant_row_charges = fused_row_charges[mask] + if len(row_charges) > 1: + left_row_charges, right_row_charges, _ = _find_best_partition( + row_charges, row_flows) + unique_left = np.unique(left_row_charges) + unique_right = np.unique(right_row_charges) + unique_row_charges = np.unique( + fuse_charges(charges=[unique_left, unique_right], flows=[1, 1])) + + #get the charges common to rows and columns (only those matter) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + + row_locations = find_sparse_positions( + left_charges=left_row_charges, + left_flow=1, + 
right_charges=right_row_charges, + right_flow=1, + target_charges=common_charges) + elif len(row_charges) == 1: + fused_row_charges = fuse_charges(row_charges, row_flows) + + #get the unique row-charges + unique_row_charges, row_dims = np.unique( + fused_row_charges, return_counts=True) + #get the charges common to rows and columns (only those matter) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + relevant_fused_row_charges = fused_row_charges[np.isin( + fused_row_charges, common_charges)] + row_locations = {} + for c in common_charges: + row_locations[c] = np.nonzero(relevant_fused_row_charges == c)[0] + else: + raise ValueError('Found an empty sequence for `row_charges`') #some numpy magic to get the index locations of the blocks - #we generate a vector of `len(relevant_row_charges) which, - #for each charge `c` in `relevant_row_charges` holds the - #column-degeneracy of charge `c` - degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64) + degeneracy_vector = np.empty( + np.sum([len(v) for v in row_locations.values()]), dtype=np.int64) #for each charge `c` in `common_charges` we generate a boolean mask #for indexing the positions where `relevant_column_charges` has a value of `c`. masks = {} for c in common_charges: - mask = relevant_row_charges == c - masks[c] = mask - degeneracy_vector[mask] = column_degeneracies[-c] + degeneracy_vector[row_locations[c]] = column_degeneracies[-c] # the result of the cumulative sum is a vector containing # the stop positions of the non-zero values of each row @@ -289,28 +325,29 @@ def find_diagonal_sparse_blocks_version_1( for c in common_charges: #numpy broadcasting is substantially faster than kron! 
- a = np.expand_dims(start_positions[masks[c]], 1) + a = np.expand_dims(start_positions[np.sort(row_locations[c])], 1) b = np.expand_dims(np.arange(column_degeneracies[-c]), 0) + inds = np.reshape(a + b, len(row_locations[c]) * column_degeneracies[-c]) if not return_data: - blocks[c] = [ - np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), - (row_degeneracies[c], column_degeneracies[-c]) - ] + blocks[c] = [inds, (len(row_locations[c]), column_degeneracies[-c])] else: - blocks[c] = np.reshape( - data[np.reshape(a + b, - row_degeneracies[c] * column_degeneracies[-c])], - (row_degeneracies[c], column_degeneracies[-c])) + blocks[c] = np.reshape(data[inds], + (len(row_locations[c]), column_degeneracies[-c])) return blocks -def find_diagonal_sparse_blocks(data: np.ndarray, - row_charges: List[Union[List, np.ndarray]], - column_charges: List[Union[List, np.ndarray]], - row_flows: List[Union[bool, int]], - column_flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: +def find_diagonal_sparse_blocks_depreacated_1( + data: np.ndarray, + row_charges: List[Union[List, np.ndarray]], + column_charges: List[Union[List, np.ndarray]], + row_flows: List[Union[bool, int]], + column_flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: """ + Deprecated + + This version is slow for matrices with shape[0] >> shape[1], but fast otherwise. + Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. `row_charges` and `column_charges` are lists of np.ndarray. The tensor @@ -355,57 +392,41 @@ def find_diagonal_sparse_blocks(data: np.ndarray, "`len(flows)` is different from `len(row_charges) + len(column_charges)`" ) + #since we are using row-major we have to fuse the row charges anyway. 
+ fused_row_charges = fuse_charges(row_charges, row_flows) + #get the unique row-charges + unique_row_charges, row_dims = np.unique( + fused_row_charges, return_counts=True) + #get the unique column-charges #we only care about their degeneracies, not their order; that's much faster #to compute since we don't have to fuse all charges explicitly unique_column_charges, column_dims = compute_fused_charge_degeneracies( column_charges, column_flows) + #get the charges common to rows and columns (only those matter) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + #convenience container for storing the degeneracies of each - #column charge + #row and column charge + row_degeneracies = dict(zip(unique_row_charges, row_dims)) column_degeneracies = dict(zip(unique_column_charges, column_dims)) - if len(row_charges) > 1: - left_row_charges, right_row_charges, _ = _find_best_partition( - row_charges, row_flows) - unique_left = np.unique(left_row_charges) - unique_right = np.unique(right_row_charges) - unique_row_charges = np.unique( - fuse_charges(charges=[unique_left, unique_right], flows=[1, 1])) - - #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - - row_locations = find_sparse_positions( - left_charges=left_row_charges, - left_flow=1, - right_charges=right_row_charges, - right_flow=1, - target_charges=common_charges) - elif len(row_charges) == 1: - fused_row_charges = fuse_charges(row_charges, row_flows) - - #get the unique row-charges - unique_row_charges, row_dims = np.unique( - fused_row_charges, return_counts=True) - #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - relevant_fused_row_charges = fused_row_charges[np.isin( - fused_row_charges, common_charges)] - row_locations = {} - for c 
in common_charges: - row_locations[c] = np.nonzero(relevant_fused_row_charges == c)[0] - else: - raise ValueError('Found an empty sequence for `row_charges`') + # we only care about charges common to row and columns + mask = np.isin(fused_row_charges, common_charges) + relevant_row_charges = fused_row_charges[mask] #some numpy magic to get the index locations of the blocks - degeneracy_vector = np.empty( - np.sum([len(v) for v in row_locations.values()]), dtype=np.int64) + #we generate a vector of `len(relevant_row_charges) which, + #for each charge `c` in `relevant_row_charges` holds the + #column-degeneracy of charge `c` + degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64) #for each charge `c` in `common_charges` we generate a boolean mask #for indexing the positions where `relevant_column_charges` has a value of `c`. masks = {} for c in common_charges: - degeneracy_vector[row_locations[c]] = column_degeneracies[-c] + mask = relevant_row_charges == c + masks[c] = mask + degeneracy_vector[mask] = column_degeneracies[-c] # the result of the cumulative sum is a vector containing # the stop positions of the non-zero values of each row @@ -426,18 +447,22 @@ def find_diagonal_sparse_blocks(data: np.ndarray, for c in common_charges: #numpy broadcasting is substantially faster than kron! 
- a = np.expand_dims(start_positions[np.sort(row_locations[c])], 1) + a = np.expand_dims(start_positions[masks[c]], 1) b = np.expand_dims(np.arange(column_degeneracies[-c]), 0) - inds = np.reshape(a + b, len(row_locations[c]) * column_degeneracies[-c]) if not return_data: - blocks[c] = [inds, (len(row_locations[c]), column_degeneracies[-c])] + blocks[c] = [ + np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), + (row_degeneracies[c], column_degeneracies[-c]) + ] else: - blocks[c] = np.reshape(data[inds], - (len(row_locations[c]), column_degeneracies[-c])) + blocks[c] = np.reshape( + data[np.reshape(a + b, + row_degeneracies[c] * column_degeneracies[-c])], + (row_degeneracies[c], column_degeneracies[-c])) return blocks -def find_diagonal_sparse_blocks_version_0( +def find_diagonal_sparse_blocks_deprecated_0( data: np.ndarray, charges: List[np.ndarray], flows: List[Union[bool, int]], @@ -650,9 +675,9 @@ def find_diagonal_sparse_blocks_column_major( return blocks -def find_dense_positions(left_charges: np.ndarray, left_flow: int, - right_charges: np.ndarray, right_flow: int, - target_charge: int) -> Dict: +def find_dense_positions_deprecated(left_charges: np.ndarray, left_flow: int, + right_charges: np.ndarray, right_flow: int, + target_charge: int) -> Dict: """ Find the dense locations of elements (i.e. the index-values within the DENSE tensor) in the vector `fused_charges` (resulting from fusing np.ndarrays @@ -705,7 +730,66 @@ def find_dense_positions(left_charges: np.ndarray, left_flow: int, linear_positions[(left_charge, right_charge)] = np.reshape( left_offsets + right_offsets, left_offsets.shape[0] * right_offsets.shape[1]) - return linear_positions + return np.sort(np.concatenate(list(linear_positions.values()))) + + +def find_dense_positions(left_charges: np.ndarray, left_flow: int, + right_charges: np.ndarray, right_flow: int, + target_charge: int) -> Dict: + """ + Find the dense locations of elements (i.e. 
the index-values within the DENSE tensor) + in the vector `fused_charges` (resulting from fusing np.ndarrays + `left_charges` and `right_charges`) that have a value of `target_charge`. + For example, given + ``` + left_charges = [-2,0,1,0,0] + right_charges = [-1,0,2,1] + target_charge = 0 + fused_charges = fuse_charges([left_charges, right_charges],[1,1]) + print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] + ``` + we want to find the all different blocks + that fuse to `target_charge=0`, i.e. where `fused_charges==0`, + together with their corresponding index-values of the data in the dense array. + `find_dense_blocks` returns a dict mapping tuples `(left_charge, right_charge)` + to an array of integers. + For the above example, we get: + * for `left_charge` = -2 and `right_charge` = 2 we get an array [2]. Thus, `fused_charges[2]` + was obtained from fusing -2 and 2. + * for `left_charge` = 0 and `right_charge` = 0 we get an array [5, 13, 17]. Thus, + `fused_charges[5,13,17]` were obtained from fusing 0 and 0. + * for `left_charge` = 1 and `right_charge` = -1 we get an array [8]. Thus, `fused_charges[8]` + was obtained from fusing 1 and -1. + Args: + left_charges: An np.ndarray of integer charges. + left_flow: The flow direction of the left charges. + right_charges: An np.ndarray of integer charges. + right_flow: The flow direction of the right charges. + target_charge: The target charge. + Returns: + dict: Mapping tuples of integers to np.ndarray of integers. 
+ """ + _check_flows([left_flow, right_flow]) + unique_left, left_degeneracies = np.unique(left_charges, return_counts=True) + unique_right, right_degeneracies = np.unique( + right_charges, return_counts=True) + + common_charges = np.intersect1d( + unique_left, (target_charge - right_flow * unique_right) * left_flow, + assume_unique=True) + + right_locations = {} + for c in common_charges: + right_locations[(target_charge - left_flow * c) * right_flow] = np.nonzero( + right_charges == (target_charge - left_flow * c) * right_flow)[0] + + len_right_charges = len(right_charges) + indices = [] + for n in range(len(left_charges)): + c = left_charges[n] + indices.append(n * len_right_charges + right_locations[ + (target_charge - left_flow * c) * right_flow]) + return np.concatenate(indices) def find_sparse_positions(left_charges: np.ndarray, left_flow: int, @@ -816,19 +900,19 @@ def compute_dense_to_sparse_mapping(charges: List[np.ndarray], flows: List[Union[bool, int]], target_charge: int) -> int: """ - Compute the mapping from multi-index positions to the linear positions + Compute the mapping from multi-index positions to the linear positions within the sparse data container, given the meta-data of a symmetric tensor. - This function returns a list of np.ndarray `index_positions`, with + This function returns a list of np.ndarray `index_positions`, with `len(index_positions)=len(charges)` (equal to the rank of the tensor). When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. ` multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) ` - with `r` the rank of the tensor and `N` the number of non-zero elements of - the symmetric tensor, then the element at position `n` within the linear + with `r` the rank of the tensor and `N` the number of non-zero elements of + the symmetric tensor, then the element at position `n` within the linear data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], i.e. 
`data[n]` has the multi-index `multi_indices[n,:]`, and the total charges - can for example be obtained using + can for example be obtained using ``` index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) @@ -837,8 +921,8 @@ def compute_dense_to_sparse_mapping(charges: List[np.ndarray], np.testing.assert_allclose(total_charges, 0) ``` Args: - charges: List of np.ndarray of int, one for each leg of the - underlying tensor. Each np.ndarray `charges[leg]` + charges: List of np.ndarray of int, one for each leg of the + underlying tensor. Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. The bond dimension `D[leg]` can vary on each leg. flows: A list of integers, one for each leg, @@ -847,8 +931,8 @@ def compute_dense_to_sparse_mapping(charges: List[np.ndarray], charge. target_charge: The total target charge of the blocks to be calculated. Returns: - np.ndarray: An (N, r) np.ndarray of dtype np.int16, - with `N` the number of non-zero elements, and `r` + np.ndarray: An (N, r) np.ndarray of dtype np.int16, + with `N` the number of non-zero elements, and `r` the rank of the tensor. """ #find the best partition (the one where left and right dimensions are @@ -863,8 +947,10 @@ def compute_dense_to_sparse_mapping(charges: List[np.ndarray], index_locations = [] for n in reversed(range(len(charges))): + t1 = time.time() nz_indices, right_indices = unfuse(nz_indices, np.prod(dims[0:n]), dims[n]) index_locations.insert(0, right_indices) + print(time.time() - t1) return index_locations @@ -872,19 +958,19 @@ def compute_dense_to_sparse_mapping_2(charges: List[np.ndarray], flows: List[Union[bool, int]], target_charge: int) -> int: """ - Compute the mapping from multi-index positions to the linear positions + Compute the mapping from multi-index positions to the linear positions within the sparse data container, given the meta-data of a symmetric tensor. 
- This function returns a list of np.ndarray `index_positions`, with + This function returns a list of np.ndarray `index_positions`, with `len(index_positions)=len(charges)` (equal to the rank of the tensor). When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. ` multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) ` - with `r` the rank of the tensor and `N` the number of non-zero elements of - the symmetric tensor, then the element at position `n` within the linear + with `r` the rank of the tensor and `N` the number of non-zero elements of + the symmetric tensor, then the element at position `n` within the linear data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], i.e. `data[n]` has the multi-index `multi_indices[n,:]`, and the total charges - can for example be obtained using + can for example be obtained using ``` index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) @@ -893,8 +979,8 @@ def compute_dense_to_sparse_mapping_2(charges: List[np.ndarray], np.testing.assert_allclose(total_charges, 0) ``` Args: - charges: List of np.ndarray of int, one for each leg of the - underlying tensor. Each np.ndarray `charges[leg]` + charges: List of np.ndarray of int, one for each leg of the + underlying tensor. Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. The bond dimension `D[leg]` can vary on each leg. flows: A list of integers, one for each leg, @@ -903,8 +989,8 @@ def compute_dense_to_sparse_mapping_2(charges: List[np.ndarray], charge. target_charge: The total target charge of the blocks to be calculated. Returns: - np.ndarray: An (N, r) np.ndarray of dtype np.int16, - with `N` the number of non-zero elements, and `r` + np.ndarray: An (N, r) np.ndarray of dtype np.int16, + with `N` the number of non-zero elements, and `r` the rank of the tensor. 
""" #find the best partition (the one where left and right dimensions are @@ -914,21 +1000,16 @@ def compute_dense_to_sparse_mapping_2(charges: List[np.ndarray], #note: left_charges and right_charges have been fused from RIGHT to LEFT left_charges, right_charges, partition = _find_best_partition(charges, flows) t1 = time.time() - blocks = find_dense_positions( + nz_indices = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=target_charge) - - #value elements of `blocks` are already sorted - first_elements = sorted([(k, v[0]) for k, v in blocks.items()], - key=lambda x: x[1]) - - nz_indices = np.concatenate([blocks[t[0]] for t in first_elements]) - + print(time.time() - t1) if len(nz_indices) == 0: raise ValueError( "`charges` do not add up to a total charge {}".format(target_charge)) t1 = time.time() nz_left_indices, nz_right_indices = unfuse(nz_indices, len(left_charges), len(right_charges)) + print(time.time() - t1) index_locations = [] #first unfuse left charges for n in range(partition): @@ -936,11 +1017,13 @@ def compute_dense_to_sparse_mapping_2(charges: List[np.ndarray], indices, nz_left_indices = unfuse(nz_left_indices, dims[n], np.prod(dims[n + 1:partition])) index_locations.append(indices) - + print(time.time() - t1) for n in range(partition, len(dims)): + t1 = time.time() indices, nz_right_indices = unfuse(nz_right_indices, dims[n], np.prod(dims[n + 1::])) index_locations.append(indices) + print(time.time() - t1) return index_locations @@ -949,19 +1032,19 @@ def compute_dense_to_sparse_mapping_3(charges: List[np.ndarray], flows: List[Union[bool, int]], target_charge: int) -> int: """ - Compute the mapping from multi-index positions to the linear positions + Compute the mapping from multi-index positions to the linear positions within the sparse data container, given the meta-data of a symmetric tensor. 
- This function returns a list of np.ndarray `index_positions`, with + This function returns a list of np.ndarray `index_positions`, with `len(index_positions)=len(charges)` (equal to the rank of the tensor). When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. ` multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) ` - with `r` the rank of the tensor and `N` the number of non-zero elements of - the symmetric tensor, then the element at position `n` within the linear + with `r` the rank of the tensor and `N` the number of non-zero elements of + the symmetric tensor, then the element at position `n` within the linear data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], i.e. `data[n]` has the multi-index `multi_indices[n,:]`, and the total charges - can for example be obtained using + can for example be obtained using ``` index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) @@ -970,8 +1053,8 @@ def compute_dense_to_sparse_mapping_3(charges: List[np.ndarray], np.testing.assert_allclose(total_charges, 0) ``` Args: - charges: List of np.ndarray of int, one for each leg of the - underlying tensor. Each np.ndarray `charges[leg]` + charges: List of np.ndarray of int, one for each leg of the + underlying tensor. Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. The bond dimension `D[leg]` can vary on each leg. flows: A list of integers, one for each leg, @@ -980,8 +1063,8 @@ def compute_dense_to_sparse_mapping_3(charges: List[np.ndarray], charge. target_charge: The total target charge of the blocks to be calculated. Returns: - np.ndarray: An (N, r) np.ndarray of dtype np.int16, - with `N` the number of non-zero elements, and `r` + np.ndarray: An (N, r) np.ndarray of dtype np.int16, + with `N` the number of non-zero elements, and `r` the rank of the tensor. 
""" #find the best partition (the one where left and right dimensions are @@ -991,15 +1074,9 @@ def compute_dense_to_sparse_mapping_3(charges: List[np.ndarray], #note: left_charges and right_charges have been fused from RIGHT to LEFT left_charges, right_charges, partition = _find_best_partition(charges, flows) t1 = time.time() - blocks = find_dense_positions( + nz_indices = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=target_charge) - #value elements of `blocks` are already sorted - first_elements = sorted([(k, v[0]) for k, v in blocks.items()], - key=lambda x: x[1]) - - nz_indices = np.concatenate([blocks[t[0]] for t in first_elements]) - if len(nz_indices) == 0: raise ValueError( "`charges` do not add up to a total charge {}".format(target_charge)) @@ -1127,21 +1204,50 @@ def flows(self): def charges(self): return [i.charges for i in self.indices] - def transpose(self, order): + def transpose(self, + order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order` + Args: + order: The new order of indices. + Returns: + BlockSparseTensor: The transposed tensor. """ + if len(order) != self.rank: + raise ValueError( + "`len(order)={}` is different form `self.rank={}`".format( + len(order), self.rank)) + t1 = time.time() dims = [len(c) for c in self.charges] + + #find the best partition into left and right charges left_charges, right_charges, _ = _find_best_partition( self.charges, self.flows) - blocks = find_dense_positions( + #find the index-positions of the elements in the fusion + #of `left_charges` and `right_charges` that have `0` + #total charge (those are the only non-zero elements). 
+ t1 = time.time() + nz_indices = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=0) - nz_indices = np.sort(np.concatenate(list(blocks.values()))) + print("time for finding dense positions", time.time() - t1) + #use numpy to unravel the linear indices into multi-indices + t1 = time.time() multi_indices = np.unravel_index(nz_indices, dims) + print("time for unravelling", time.time() - t1) + #transposed the multi-indices and ravel the result back into + #a linear index. This will be now an unsorted array of int valuesn + t1 = time.time() transposed_linear_positions = np.ravel_multi_index( [multi_indices[p] for p in order], dims=[dims[p] for p in order]) - self.data = self.data[np.argsort(transposed_linear_positions)] + print("time for ravelling", time.time() - t1) + #argsort returns an array of integers that sorts `transposed_linear_positions`. + #this array can then be used to transpose the linear data. + print(time.time() - t1) + t1 = time.time() + inds = np.argsort(transposed_linear_positions) + print("argsort:", time.time() - t1) + #self.data = self.data[inds] def reset_shape(self) -> None: """ @@ -1273,8 +1379,8 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_version_1(self, - return_data: Optional[bool] = True) -> Dict: + def get_diagonal_blocks_deprecated_1( + self, return_data: Optional[bool] = True) -> Dict: """ Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. 
@@ -1301,7 +1407,7 @@ def get_diagonal_blocks_version_1(self, row_indices = self.indices[0].get_elementary_indices() column_indices = self.indices[1].get_elementary_indices() - return find_diagonal_sparse_blocks_version_1( + return find_diagonal_sparse_blocks_deprecated_1( data=self.data, row_charges=[i.charges for i in row_indices], column_charges=[i.charges for i in column_indices], @@ -1309,8 +1415,8 @@ def get_diagonal_blocks_version_1(self, column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_version_0(self, - return_data: Optional[bool] = True) -> Dict: + def get_diagonal_blocks_deprecated_0( + self, return_data: Optional[bool] = True) -> Dict: """ Deprecated @@ -1333,7 +1439,7 @@ def get_diagonal_blocks_version_0(self, "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" .format(self.rank)) - return find_diagonal_sparse_blocks_version_0( + return find_diagonal_sparse_blocks_deprecated_0( data=self.data, charges=self.charges, flows=self.flows, From 7c4b84931e6e0e2b7ea6e2b6a7238c93daedf81f Mon Sep 17 00:00:00 2001 From: mganahl Date: Thu, 26 Dec 2019 23:07:21 -0500 Subject: [PATCH 083/212] working on transpose --- tensornetwork/block_tensor/block_tensor.py | 55 +++++++----- .../block_tensor/block_tensor_test.py | 83 ++++++------------- tensornetwork/block_tensor/index.py | 4 +- 3 files changed, 61 insertions(+), 81 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index f48dacfbc..b87b8dd99 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -22,6 +22,7 @@ # pylint: disable=line-too-long from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charge_pair, fuse_degeneracies, fuse_charges, unfuse import numpy as np +import scipy as sp import itertools import time from typing import List, Union, Any, Tuple, Type, Optional, Dict, Iterable @@ -1213,41 
+1214,51 @@ def transpose(self, Returns: BlockSparseTensor: The transposed tensor. """ + #FIXME: this implementation uses scipy.sparse.csr_matrix to generate the + #lookup-table from dense to sparse indices. According to some quick + #testing, the final lookup is currently the bottleneck. + #FIXME: transpose currently shuffles data. This can in principle be postponed + #until `tensordot` or `find_diagonal_sparse_blocks` if len(order) != self.rank: raise ValueError( "`len(order)={}` is different form `self.rank={}`".format( len(order), self.rank)) - t1 = time.time() - dims = [len(c) for c in self.charges] + charges = self.charges #call only once in case some of the indices are merged indices + dims = [len(c) for c in charges] + strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) #find the best partition into left and right charges - left_charges, right_charges, _ = _find_best_partition( - self.charges, self.flows) + left_charges, right_charges, _ = _find_best_partition(charges, self.flows) #find the index-positions of the elements in the fusion #of `left_charges` and `right_charges` that have `0` #total charge (those are the only non-zero elements). t1 = time.time() - nz_indices = find_dense_positions( + linear_positions = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=0) - print("time for finding dense positions", time.time() - t1) - - #use numpy to unravel the linear indices into multi-indices - t1 = time.time() - multi_indices = np.unravel_index(nz_indices, dims) - print("time for unravelling", time.time() - t1) - #transposed the multi-indices and ravel the result back into - #a linear index. This will be now an unsorted array of int valuesn - t1 = time.time() - transposed_linear_positions = np.ravel_multi_index( - [multi_indices[p] for p in order], dims=[dims[p] for p in order]) - print("time for ravelling", time.time() - t1) - #argsort returns an array of integers that sorts `transposed_linear_positions`. 
- #this array can then be used to transpose the linear data. print(time.time() - t1) t1 = time.time() - inds = np.argsort(transposed_linear_positions) - print("argsort:", time.time() - t1) - #self.data = self.data[inds] + dense_to_sparse_table = sp.sparse.csr_matrix( + (np.arange(len(self.data)), (linear_positions, + np.zeros(len(self.data), dtype=np.int64)))) + print('creating table', time.time() - t1) + tr_charges = [charges[n] for n in order] + tr_flows = [self.flows[n] for n in order] + tr_strides = [strides[n] for n in order] + tr_dims = [dims[n] for n in order] + tr_left_charges, tr_right_charges, _ = _find_best_partition( + tr_charges, tr_flows) + + tr_dense_linear_positions = fuse_charges( + [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], + flows=[1] * len(tr_dims)) + tr_linear_positions = find_dense_positions(tr_left_charges, 1, + tr_right_charges, 1, 0) + t1 = time.time() + inds = np.squeeze( + dense_to_sparse_table[tr_dense_linear_positions[tr_linear_positions], 0] + .toarray()) + print('inds', time.time() - t1) + self.data = self.data[inds] def reset_shape(self) -> None: """ diff --git a/tensornetwork/block_tensor/block_tensor_test.py b/tensornetwork/block_tensor/block_tensor_test.py index c6d6b4de4..9f11bec6e 100644 --- a/tensornetwork/block_tensor/block_tensor_test.py +++ b/tensornetwork/block_tensor/block_tensor_test.py @@ -1,7 +1,7 @@ import numpy as np import pytest # pylint: disable=line-too-long -from tensornetwork.block_tensor.block_tensor import BlockSparseTensor, compute_num_nonzero, compute_dense_to_sparse_mapping, find_sparse_positions, find_dense_positions, map_to_integer +from tensornetwork.block_tensor.block_tensor import BlockSparseTensor, compute_num_nonzero, find_sparse_positions, find_dense_positions from index import Index, fuse_charges np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128] @@ -32,39 +32,15 @@ def test_block_sparse_init(dtype): assert len(A.data) == num_elements -def 
test_dense_to_sparse_table(): - D = 30 #bond dimension - B = 4 #number of blocks - dtype = np.int16 #the dtype of the quantum numbers - rank = 4 - flows = np.asarray([1 for _ in range(rank)]) - flows[-2::] = -1 - charges = [ - np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - for _ in range(rank) - ] - num_non_zero = compute_num_nonzero(charges, flows) - - inds = compute_dense_to_sparse_mapping( - charges=charges, flows=flows, target_charge=0) - total = np.zeros(len(inds[0]), dtype=np.int16) - for n in range(len(charges)): - total += flows[n] * charges[n][inds[n]] - - np.testing.assert_allclose(total, 0) - assert len(total) == num_non_zero - - def test_find_dense_positions(): - left_charges = [-2, 0, 1, 0, 0] - right_charges = [-1, 0, 2, 1] + left_charges = np.asarray([-2, 0, 1, 0, 0]).astype(np.int16) + right_charges = np.asarray([-1, 0, 2, 1]).astype(np.int16) target_charge = 0 fused_charges = fuse_charges([left_charges, right_charges], [1, 1]) - blocks = find_dense_positions(left_charges, 1, right_charges, 1, - target_charge) - np.testing.assert_allclose(blocks[(-2, 2)], [2]) - np.testing.assert_allclose(blocks[(0, 0)], [5, 13, 17]) - np.testing.assert_allclose(blocks[(1, -1)], [8]) + dense_positions = find_dense_positions(left_charges, 1, right_charges, 1, + target_charge) + np.testing.assert_allclose(dense_positions, + np.nonzero(fused_charges == target_charge)[0]) def test_find_dense_positions_2(): @@ -92,16 +68,8 @@ def test_find_dense_positions_2(): i01 = indices[0] * indices[1] i23 = indices[2] * indices[3] - blocks = find_dense_positions(i01.charges, 1, i23.charges, 1, 0) - assert sum([len(v) for v in blocks.values()]) == n1 - - tensor = BlockSparseTensor.random(indices=indices, dtype=np.float64) - tensor.reshape((D * D, D * D)) - blocks_2 = tensor.get_diagonal_blocks(return_data=False) - np.testing.assert_allclose([k[0] for k in blocks.keys()], - list(blocks_2.keys())) - for c in blocks.keys(): - assert np.prod(blocks_2[c[0]][1]) == 
len(blocks[c]) + positions = find_dense_positions(i01.charges, 1, i23.charges, 1, 0) + assert len(positions) == n1 def test_find_sparse_positions(): @@ -168,22 +136,7 @@ def test_find_sparse_positions_2(): np.testing.assert_allclose(np.nonzero(relevant == k)[0], np.sort(v)) -def test_map_to_integer(): - dims = [4, 3, 2] - dim_prod = [6, 2, 1] - N = 10 - table = np.stack([np.random.randint(0, d, N) for d in dims], axis=1) - integers = map_to_integer(dims, table) - ints = [] - for n in range(N): - i = 0 - for d in range(len(dims)): - i += dim_prod[d] * table[n, d] - ints.append(i) - np.testing.assert_allclose(ints, integers) - - -def test_ge_diagonal_blocks(): +def test_get_diagonal_blocks(): D = 40 #bond dimension B = 4 #number of blocks dtype = np.int16 #the dtype of the quantum numbers @@ -205,3 +158,19 @@ def test_ge_diagonal_blocks(): right_charges=indices[1].charges, right_flow=1, target_charges=common_charges) + + +def test_dense_transpose(): + Ds = [10, 11, 12] #bond dimension + rank = len(Ds) + flows = np.asarray([1 for _ in range(rank)]) + flows[-2::] = -1 + charges = [np.zeros(Ds[n], dtype=np.int16) for n in range(rank)] + indices = [ + Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) + for n in range(rank) + ] + A = BlockSparseTensor.random(indices=indices, dtype=np.float64) + B = np.transpose(np.reshape(A.data.copy(), Ds), (1, 0, 2)) + A.transpose((1, 0, 2)) + np.testing.assert_allclose(A.data, B.flat) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index b4bba3cee..d50143989 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -281,6 +281,6 @@ def unfuse(fused_indices: np.ndarray, len_left: int, Returns: (np.ndarry, np.ndarray) """ - right = fused_indices % len_right - left = (fused_indices - right) // len_right + right = np.mod(fused_indices, len_right) + left = np.floor_divide(fused_indices - right, len_right) return left, right From 
4d80da2ce3138370d242a56546deaa1265b422fe Mon Sep 17 00:00:00 2001 From: mganahl Date: Sun, 29 Dec 2019 21:19:45 -0500 Subject: [PATCH 084/212] transpose modified --- tensornetwork/block_tensor/block_tensor.py | 228 ++++++++++++++++++--- 1 file changed, 201 insertions(+), 27 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index b87b8dd99..f87c29947 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -16,6 +16,7 @@ from __future__ import division from __future__ import print_function import numpy as np +from tensornetwork.block_tensor.lookup import lookup # pylint: disable=line-too-long from tensornetwork.network_components import Node, contract, contract_between from tensornetwork.backends import backend_factory @@ -897,9 +898,9 @@ def find_sparse_positions(left_charges: np.ndarray, left_flow: int, return out -def compute_dense_to_sparse_mapping(charges: List[np.ndarray], - flows: List[Union[bool, int]], - target_charge: int) -> int: +def compute_dense_to_sparse_mapping_deprecated(charges: List[np.ndarray], + flows: List[Union[bool, int]], + target_charge: int) -> int: """ Compute the mapping from multi-index positions to the linear positions within the sparse data container, given the meta-data of a symmetric tensor. @@ -1029,9 +1030,9 @@ def compute_dense_to_sparse_mapping_2(charges: List[np.ndarray], return index_locations -def compute_dense_to_sparse_mapping_3(charges: List[np.ndarray], - flows: List[Union[bool, int]], - target_charge: int) -> int: +def compute_dense_to_sparse_mapping(charges: List[np.ndarray], + flows: List[Union[bool, int]], + target_charge: int) -> int: """ Compute the mapping from multi-index positions to the linear positions within the sparse data container, given the meta-data of a symmetric tensor. @@ -1064,24 +1065,21 @@ def compute_dense_to_sparse_mapping_3(charges: List[np.ndarray], charge. 
target_charge: The total target charge of the blocks to be calculated. Returns: - np.ndarray: An (N, r) np.ndarray of dtype np.int16, - with `N` the number of non-zero elements, and `r` - the rank of the tensor. + list of np.ndarray: A list of length `r`, with `r` the rank of the tensor. + Each element in the list is an N-dimensional np.ndarray of int, + with `N` the number of non-zero elements. """ #find the best partition (the one where left and right dimensions are #closest dims = np.asarray([len(c) for c in charges]) - #note: left_charges and right_charges have been fused from RIGHT to LEFT left_charges, right_charges, partition = _find_best_partition(charges, flows) - t1 = time.time() nz_indices = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=target_charge) if len(nz_indices) == 0: raise ValueError( "`charges` do not add up to a total charge {}".format(target_charge)) - t1 = time.time() return np.unravel_index(nz_indices, dims) @@ -1206,11 +1204,99 @@ def charges(self): return [i.charges for i in self.indices] def transpose(self, - order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": + order: Union[List[int], np.ndarray], + transposed_linear_positions: Optional[np.ndarray] = None + ) -> "BlockSparseTensor": """ - Transpose the tensor into the new order `order` + Transpose the tensor into the new order `order`. This routine currently shuffles + data. Args: order: The new order of indices. + transposed_linear_positions: An np.ndarray of int for reshuffling the data, + typically the output of a prior call to `transpose`. Passing `transposed_linear_positions` + can greatly speed up the transposition. + Returns: + BlockSparseTensor: The transposed tensor. + """ + #FIXME: this implementation uses scipy.sparse.csr_matrix to generate the + #lookup-table from dense to sparse indices. According to some quick + #testing, the final lookup is currently the bottleneck. + #FIXME: transpose currently shuffles data. 
This can in principle be postponed + #until `tensordot` or `find_diagonal_sparsenn_blocks`, at the cost of + #maintaining two lookup tables for sparse-to-dense positions and dense-to-sparse + #positions + if len(order) != self.rank: + raise ValueError( + "`len(order)={}` is different form `self.rank={}`".format( + len(order), self.rank)) + #transpose is the only function using self.dense_to_sparse_table + #so we can initialize it here. This will change if we are implementing + #lazy shuffling of data. In this case, `find_diagonal_sparse_blocks` + #also needs + + #we use elementary indices here because it is + #more efficient to get the fused charges using + #the best partition + if transposed_linear_positions is None: + elementary_indices = {} + flat_elementary_indices = [] + + for n in range(self.rank): + elementary_indices[n] = self.indices[n].get_elementary_indices() + flat_elementary_indices.extend(elementary_indices[n]) + flat_index_list = np.arange(len(flat_elementary_indices)) + cum_num_legs = np.append( + 0, np.cumsum([len(elementary_indices[n]) for n in range(self.rank)])) + flat_order = np.concatenate( + [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) + + flat_charges = [i.charges for i in flat_elementary_indices] + flat_flows = [i.flow for i in flat_elementary_indices] + flat_dims = [len(c) for c in flat_charges] + flat_strides = np.flip(np.append(1, np.cumprod(np.flip(flat_dims[1::])))) + if not hasattr(self, 'dense_to_sparse_table'): + #find the best partition into left and right charges + leftx_charges, right_charges, _ = _find_best_partition( + flat_charges, flat_flows) + #find the index-positions of the elements in the fusion + #of `left_charges` and `right_charges` that have `0` + #total charge (those are the only non-zero elements). 
+ linear_positions = find_dense_positions( + left_charges, 1, right_charges, 1, target_charge=0) + + self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( + len(self.data)), (linear_positions, + np.zeros(len(self.data), dtype=np.int64)))) + + flat_tr_charges = [flat_charges[n] for n in flat_order] + flat_tr_flows = [flat_flows[n] for n in flat_order] + flat_tr_strides = [flat_strides[n] for n in flat_order] + flat_tr_dims = [flat_dims[n] for n in flat_order] + + tr_left_charges, tr_right_charges, _ = _find_best_partition( + flat_tr_charges, flat_tr_flows) + #FIXME: this should be done without fully fusing the strides + tr_dense_linear_positions = fuse_charges([ + np.arange(flat_tr_dims[n]) * flat_tr_strides[n] + for n in range(len(flat_tr_dims)) + ], + flows=[1] * len(flat_tr_dims)) + tr_linear_positions = find_dense_positions(tr_left_charges, 1, + tr_right_charges, 1, 0) + + inds = np.squeeze(self.dense_to_sparse_table[ + tr_dense_linear_positions[tr_linear_positions], 0].toarray()) + else: + inds = transposed_linear_positions + self.data = self.data[inds] + return inds + + def transpose_intersect1d( + self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": + """ + Transpose the tensor into the new order `order` + Args: pp + order: The new order of indices. Returns: BlockSparseTensor: The transposed tensor. """ @@ -1219,6 +1305,52 @@ def transpose(self, #testing, the final lookup is currently the bottleneck. #FIXME: transpose currently shuffles data. 
This can in principle be postponed #until `tensordot` or `find_diagonal_sparse_blocks` + if len(order) != self.rank: + raise ValueError(len(order), self.rank) + charges = self.charges #call only once in case some of the indices are merged indices + dims = [len(c) for c in charges] + + strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) + #find the best partition into left and right charges + left_charges, right_charges, _ = _find_best_partition(charges, self.flows) + #find the index-positions of the elements in the fusion + #of `left_charges` and `right_charges` that have `0` + #total charge (those are the only non-zero elements). + linear_positions = find_dense_positions( + left_charges, 1, right_charges, 1, target_charge=0) + + tr_charges = [charges[n] for n in order] + tr_flows = [self.flows[n] for n in order] + tr_strides = [strides[n] for n in order] + tr_dims = [dims[n] for n in order] + tr_left_charges, tr_right_charges, _ = _find_best_partition( + tr_charges, tr_flows) + + tr_dense_linear_positions = fuse_charges( + [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], + flows=[1] * len(tr_dims)) + tr_linear_positions = find_dense_positions(tr_left_charges, 1, + tr_right_charges, 1, 0) + new_linear_positions = tr_dense_linear_positions[tr_linear_positions] + _, _, inds = np.intersect1d( + linear_positions, + new_linear_positions, + return_indices=True, + assume_unique=True) + self.data = self.data[inds] + + def transpose_lookup( + self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": + """ + Deprecated + + Transpose the tensor into the new order `order`. Uses a simple cython std::map + for the lookup + Args: + order: The new order of indices. + Returns: + BlockSparseTensor: The transposed tensor. 
+ """ if len(order) != self.rank: raise ValueError( "`len(order)={}` is different form `self.rank={}`".format( @@ -1232,43 +1364,85 @@ def transpose(self, #find the index-positions of the elements in the fusion #of `left_charges` and `right_charges` that have `0` #total charge (those are the only non-zero elements). - t1 = time.time() linear_positions = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=0) - print(time.time() - t1) - t1 = time.time() - dense_to_sparse_table = sp.sparse.csr_matrix( - (np.arange(len(self.data)), (linear_positions, - np.zeros(len(self.data), dtype=np.int64)))) - print('creating table', time.time() - t1) + tr_charges = [charges[n] for n in order] tr_flows = [self.flows[n] for n in order] tr_strides = [strides[n] for n in order] tr_dims = [dims[n] for n in order] tr_left_charges, tr_right_charges, _ = _find_best_partition( tr_charges, tr_flows) + #FIXME: this should be done without fully fusing the strides + tr_dense_linear_positions = fuse_charges( + [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], + flows=[1] * len(tr_dims)) + tr_linear_positions = find_dense_positions(tr_left_charges, 1, + tr_right_charges, 1, 0) + inds = lookup(linear_positions, + tr_dense_linear_positions[tr_linear_positions]) + self.data = self.data[inds] + def transpose_searchsorted( + self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": + """ + Deprecated: + + Transpose the tensor into the new order `order`. Uses `np.searchsorted` + for the lookup. + Args: + order: The new order of indices. + Returns: + BlockSparseTensor: The transposed tensor. 
+ """ + if len(order) != self.rank: + raise ValueError( + "`len(order)={}` is different form `self.rank={}`".format( + len(order), self.rank)) + charges = self.charges #call only once in case some of the indices are merged indices + dims = [len(c) for c in charges] + + strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) + #find the best partition into left and right charges + left_charges, right_charges, _ = _find_best_partition(charges, self.flows) + #find the index-positions of the elements in the fusion + #of `left_charges` and `right_charges` that have `0` + #total charge (those are the only non-zero elements). + linear_positions = find_dense_positions( + left_charges, 1, right_charges, 1, target_charge=0) + + tr_charges = [charges[n] for n in order] + tr_flows = [self.flows[n] for n in order] + tr_strides = [strides[n] for n in order] + tr_dims = [dims[n] for n in order] + tr_left_charges, tr_right_charges, _ = _find_best_partition( + tr_charges, tr_flows) + #FIXME: this should be done without fully fusing the strides tr_dense_linear_positions = fuse_charges( [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], flows=[1] * len(tr_dims)) tr_linear_positions = find_dense_positions(tr_left_charges, 1, tr_right_charges, 1, 0) - t1 = time.time() - inds = np.squeeze( - dense_to_sparse_table[tr_dense_linear_positions[tr_linear_positions], 0] - .toarray()) - print('inds', time.time() - t1) + + inds = np.searchsorted(linear_positions, + tr_dense_linear_positions[tr_linear_positions]) self.data = self.data[inds] def reset_shape(self) -> None: """ Bring the tensor back into its elementary shape. """ + self.indices = self.get_elementary_indices() + + def get_elementary_indices(self) -> List: + """ + Compute the elementary indices of the array. 
+ """ elementary_indices = [] for i in self.indices: elementary_indices.extend(i.get_elementary_indices()) - self.indices = elementary_indices + return elementary_indices def reshape(self, shape: Union[Iterable[Index], Iterable[int]]) -> None: """ From 1a2238f1aa4cec9a4316afb09ef2ed2573908ec1 Mon Sep 17 00:00:00 2001 From: mganahl Date: Sun, 29 Dec 2019 21:20:01 -0500 Subject: [PATCH 085/212] Index.name -> property --- tensornetwork/block_tensor/index.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index d50143989..5cb48ae3c 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -40,7 +40,7 @@ def __init__(self, self.flow = flow self.left_child = left_child self.right_child = right_child - self.name = name if name else 'index' + self._name = name def __repr__(self): return str(self.dimension) @@ -122,6 +122,14 @@ def charges(self): return fused_charges + @property + def name(self): + if self._name: + return self._name + if self.is_leave: + return self.name + return self.left_child.name + ' & ' + self.right_child.name + def fuse_charge_pair(q1: Union[List, np.ndarray], flow1: int, q2: Union[List, np.ndarray], flow2: int) -> np.ndarray: From 9b417b7a14af63cf22c2cb613a01ac7b75fe51ad Mon Sep 17 00:00:00 2001 From: mganahl Date: Tue, 31 Dec 2019 22:49:36 -0500 Subject: [PATCH 086/212] added charge types --- tensornetwork/block_tensor/index.py | 110 ++++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index 5cb48ae3c..63ef58f32 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -292,3 +292,113 @@ def unfuse(fused_indices: np.ndarray, len_left: int, right = np.mod(fused_indices, len_right) left = np.floor_divide(fused_indices - right, len_right) return left, right + + +class BaseCharge: + """ 
+ Base class for fundamental charges (i.e. for symmetries that + are not products of smaller groups) + """ + + def __init__(self, charges: np.ndarray) -> None: + if not isinstance(charges, np.ndarray): + raise TypeError("only np.ndarray allowed for argument `charges` " + "in BaseCharge.__init__(charges)") + + self.charges = charges + + def __add__(self, other: "BaseCharge"): + raise NotImplementedError("`__add__` is not implemented for `BaseCharge`") + + def __mul__(self, number: int) -> "U1Charge": + raise NotImplementedError("`__mul__` is not implemented for `BaseCharge`") + + def __rmul__(self, number: int) -> "U1Charge": + raise NotImplementedError("`__rmul__` is not implemented for `BaseCharge`") + + def __matmul__(self, other: "U1Charge") -> "Charge": + raise NotImplementedError( + "`__matmul__` is not implemented for `BaseCharge`") + + def __repr__(self): + return self.charges.__repr__() + + +class U1Charge(BaseCharge): + """ + A simple charge class for a single U1 symmetry. + """ + + def __init__(self, charges: np.ndarray) -> None: + super().__init__(charges) + + def __add__(self, other: "U1Charge") -> "U1Charge": + """ + Fuse the charges of `self` with charges of `other`, and + return a new `U1Charge` object holding the result. + Args: + other: A `U1Charge` object. + Returns: + U1Charge: The result of fusing `self` with `other`. 
+ """ + fused = np.reshape( + np.asarray(self.charges)[:, None] + np.asarray(other.charges)[None, :], + len(self.charges) * len(other.charges)) + + return U1Charge(charges=fused) + + def __mul__(self, number: int) -> "U1Charge": + return U1Charge(charges=self.charges * number) + + def __rmul__(self, number: int) -> "U1Charge": + return U1Charge(charges=self.charges * number) + + def __matmul__(self, other: "U1Charge") -> "Charge": + return Charge([self.charges, other.charges]) + + +class Charge: + + def __init__(self, charges: List[BaseCharge]) -> None: + if not isinstance(charges, list): + raise TypeError("only list allowed for argument `charges` " + "in BaseCharge.__init__(charges)") + if not np.all([len(c) == len(charges[0]) for c in charges]): + raise ValueError("not all charges have the same length. " + "Got lengths = {}".format([len(c) for c in charges])) + self.charges = charges + + def __add__(self, other: "Charge") -> "Charge": + """ + Fuse `self` with `other`. + Args: + other: A `Charge` object. + Returns: + Charge: The result of fusing `self` with `other`. + """ + return Charge([c1 + c2 for c1, c2 in zip(self.charges, other.charges)]) + + def __matmul__(self, other: Union["Charge", BaseCharge]) -> "Charge": + """ + Product of `self` with `other` (group product). + Args: + other: A `BaseCharge` or `Charge` object. + Returns: + Charge: The resulting charge of the product of `self` with `other`. 
+ + """ + if isinstance(other, BaseCharge): + return Charge(self.charges + [other.charges]) + elif isinstance(other, Charge): + return Charge(self.charges + other.charges) + + raise TypeError("datatype not understood") + + def __mul__(self, number: int) -> "Charge": + return Charge(charges=[c * number for c in self.charges]) + + def __rmul__(self, number: int) -> "Charge": + return Charge(charges=[c * number for c in self.charges]) + + def __repr__(self): + return self.charges.__repr__() From b267322bc3d13b86a2305a3ba1427ec46b4b0669 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 1 Jan 2020 15:34:55 -0500 Subject: [PATCH 087/212] adding tests --- tensornetwork/block_tensor/index_test.py | 394 +++++++++++++---------- 1 file changed, 227 insertions(+), 167 deletions(-) diff --git a/tensornetwork/block_tensor/index_test.py b/tensornetwork/block_tensor/index_test.py index 0feb2eb15..05b80a8a2 100644 --- a/tensornetwork/block_tensor/index_test.py +++ b/tensornetwork/block_tensor/index_test.py @@ -1,6 +1,21 @@ import numpy as np # pylint: disable=line-too-long -from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies, fuse_charge_pair, fuse_indices, unfuse +from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies, fuse_charge_pair, fuse_indices, unfuse, U1Charge, Charge, BaseCharge + + +def test_U1Charge_mul(): + q = U1Charge(np.asarray([0, 1])) + q2 = 2 * q + q3 = q * 2 + assert np.all(q2.charges == np.asarray([0, 2])) + assert np.all(q3.charges == np.asarray([0, 2])) + + +def test_U1Charge_add(): + q1 = U1Charge(np.asarray([0, 1])) + q2 = U1Charge(np.asarray([2, 3, 4])) + fused_charges = q1 + q2 + assert np.all(fused_charges.charges == np.asarray([2, 3, 4, 3, 4, 5])) def test_fuse_charge_pair(): @@ -12,6 +27,58 @@ def test_fuse_charge_pair(): assert np.all(fused_charges == np.asarray([-2, -3, -4, -1, -2, -3])) +def test_Charge_mul(): + q = 
Charge([U1Charge(np.asarray([0, 1])), U1Charge(np.asarray([-2, 3]))]) + expected = [np.asarray([0, 2]), np.asarray([-4, 6])] + q2 = 2 * q + q3 = q * 2 + for n in range(len(q.charges)): + np.testing.assert_allclose(expected[n], q2.charges[n].charges) + np.testing.assert_allclose(expected[n], q3.charges[n].charges) + + +def test_Charge_add(): + q1 = Charge([U1Charge(np.asarray([0, 1])), U1Charge(np.asarray([-2, 3]))]) + q2 = Charge([U1Charge(np.asarray([2, 3])), U1Charge(np.asarray([-1, 4]))]) + expected = [np.asarray([2, 3, 3, 4]), np.asarray([-3, 2, 2, 7])] + q12 = q1 + q2 + for n in range(len(q12.charges)): + np.testing.assert_allclose(expected[n], q12.charges[n].charges) + + +def test_Charge_product(): + expected = [np.asarray([0, 1]), np.asarray([2, 3])] + q1 = U1Charge(expected[0]) + q2 = U1Charge(expected[1]) + prod = q1 @ q2 + for n in range(len(prod.charges)): + np.testing.assert_allclose(prod.charges[n].charges, expected[n]) + + B = 4 + dtype = np.int16 + D = 10 + Q1 = Charge(charges=[ + U1Charge(charges=np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) + for _ in range(2) + ]) + Q2 = Charge(charges=[ + U1Charge(charges=np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) + for _ in range(2) + ]) + prod = Q1 @ Q2 + expected = Q1.charges + Q2.charges + for n in range(len(prod.charges)): + np.testing.assert_allclose(prod.charges[n].charges, expected[n].charges) + assert isinstance(prod.charges[n], BaseCharge) + + q1 = U1Charge(charges=np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) + prod = q1 @ Q2 + expected = [q1] + Q2.charges + for n in range(len(prod.charges)): + np.testing.assert_allclose(prod.charges[n].charges, expected[n].charges) + assert isinstance(prod.charges[n], BaseCharge) + + def test_fuse_charges(): q1 = np.asarray([0, 1]) q2 = np.asarray([2, 3, 4]) @@ -28,169 +95,162 @@ def test_fuse_degeneracies(): np.testing.assert_allclose(fused_degeneracies, np.kron(d1, d2)) -def test_index_fusion_mul(): - D = 10 - B = 4 - dtype = 
np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 1 - q2 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 2 - i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 - i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 - - i12 = i1 * i2 - assert i12.left_child is i1 - assert i12.right_child is i2 - assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) - - -def test_fuse_index_pair(): - D = 10 - B = 4 - dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 1 - q2 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 2 - i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 - i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 - - i12 = fuse_index_pair(i1, i2) - assert i12.left_child is i1 - assert i12.right_child is i2 - assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) - - -def test_fuse_indices(): - D = 10 - B = 4 - dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 1 - q2 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 2 - i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 - i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 - - i12 = fuse_indices([i1, i2]) - assert i12.left_child is i1 - assert i12.right_child is i2 - assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) - - -def test_split_index(): - D = 10 - B = 4 - dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 1 - q2 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 2 - i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 - i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 - - i12 = i1 * i2 - i1_, i2_ = split_index(i12) - assert i1 is i1_ - assert i2 is i2_ - 
np.testing.assert_allclose(q1, i1.charges) - np.testing.assert_allclose(q2, i2.charges) - np.testing.assert_allclose(q1, i1_.charges) - np.testing.assert_allclose(q2, i2_.charges) - assert i1_.name == 'index1' - assert i2_.name == 'index2' - assert i1_.flow == i1.flow - assert i2_.flow == i2.flow - - -def test_elementary_indices(): - D = 10 - B = 4 - dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - q3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - q4 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - i1 = Index(charges=q1, flow=1, name='index1') - i2 = Index(charges=q2, flow=1, name='index2') - i3 = Index(charges=q3, flow=1, name='index3') - i4 = Index(charges=q4, flow=1, name='index4') - - i12 = i1 * i2 - i34 = i3 * i4 - elmt12 = i12.get_elementary_indices() - assert elmt12[0] is i1 - assert elmt12[1] is i2 - - i1234 = i12 * i34 - elmt1234 = i1234.get_elementary_indices() - assert elmt1234[0] is i1 - assert elmt1234[1] is i2 - assert elmt1234[2] is i3 - assert elmt1234[3] is i4 - assert elmt1234[0].name == 'index1' - assert elmt1234[1].name == 'index2' - assert elmt1234[2].name == 'index3' - assert elmt1234[3].name == 'index4' - assert elmt1234[0].flow == i1.flow - assert elmt1234[1].flow == i2.flow - assert elmt1234[2].flow == i3.flow - assert elmt1234[3].flow == i4.flow - - np.testing.assert_allclose(q1, i1.charges) - np.testing.assert_allclose(q2, i2.charges) - np.testing.assert_allclose(q3, i3.charges) - np.testing.assert_allclose(q4, i4.charges) - - -def test_leave(): - D = 10 - B = 4 - dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - i1 = Index(charges=q1, flow=1, name='index1') - i2 = Index(charges=q2, flow=1, name='index2') - assert i1.is_leave - assert i2.is_leave - - i12 = i1 * i2 - assert not i12.is_leave - - -def test_copy(): - D = 10 - B = 4 
- dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - i1 = Index(charges=q1, flow=1, name='index1') - i2 = Index(charges=q2, flow=1, name='index2') - i3 = Index(charges=q1, flow=-1, name='index3') - i4 = Index(charges=q2, flow=-1, name='index4') - - i12 = i1 * i2 - i34 = i3 * i4 - i1234 = i12 * i34 - i1234_copy = i1234.copy() - - elmt1234 = i1234_copy.get_elementary_indices() - assert elmt1234[0] is not i1 - assert elmt1234[1] is not i2 - assert elmt1234[2] is not i3 - assert elmt1234[3] is not i4 - - -def test_unfuse(): - q1 = np.random.randint(-4, 5, 10).astype(np.int16) - q2 = np.random.randint(-4, 5, 4).astype(np.int16) - q3 = np.random.randint(-4, 5, 4).astype(np.int16) - q12 = fuse_charges([q1, q2], [1, 1]) - q123 = fuse_charges([q12, q3], [1, 1]) - nz = np.nonzero(q123 == 0)[0] - q12_inds, q3_inds = unfuse(nz, len(q12), len(q3)) - - q1_inds, q2_inds = unfuse(q12_inds, len(q1), len(q2)) - np.testing.assert_allclose(q1[q1_inds] + q2[q2_inds] + q3[q3_inds], - np.zeros(len(q1_inds), dtype=np.int16)) +# def test_index_fusion_mul(): +# D = 10 +# B = 4 +# dtype = np.int16 +# q1 = np.random.randint(-B // 2, B // 2 + 1, +# D).astype(dtype) #quantum numbers on leg 1 +# q2 = np.random.randint(-B // 2, B // 2 + 1, +# D).astype(dtype) #quantum numbers on leg 2 +# i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 +# i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + +# i12 = i1 * i2 +# assert i12.left_child is i1 +# assert i12.right_child is i2 +# assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) + +# def test_fuse_index_pair(): +# D = 10 +# B = 4 +# dtype = np.int16 +# q1 = np.random.randint(-B // 2, B // 2 + 1, +# D).astype(dtype) #quantum numbers on leg 1 +# q2 = np.random.randint(-B // 2, B // 2 + 1, +# D).astype(dtype) #quantum numbers on leg 2 +# i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 +# i2 = Index(charges=q2, 
flow=1, name='index2') #index on leg 2 + +# i12 = fuse_index_pair(i1, i2) +# assert i12.left_child is i1 +# assert i12.right_child is i2 +# assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) + +# def test_fuse_indices(): +# D = 10 +# B = 4 +# dtype = np.int16 +# q1 = np.random.randint(-B // 2, B // 2 + 1, +# D).astype(dtype) #quantum numbers on leg 1 +# q2 = np.random.randint(-B // 2, B // 2 + 1, +# D).astype(dtype) #quantum numbers on leg 2 +# i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 +# i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + +# i12 = fuse_indices([i1, i2]) +# assert i12.left_child is i1 +# assert i12.right_child is i2 +# assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) + +# def test_split_index(): +# D = 10 +# B = 4 +# dtype = np.int16 +# q1 = np.random.randint(-B // 2, B // 2 + 1, +# D).astype(dtype) #quantum numbers on leg 1 +# q2 = np.random.randint(-B // 2, B // 2 + 1, +# D).astype(dtype) #quantum numbers on leg 2 +# i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 +# i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + +# i12 = i1 * i2 +# i1_, i2_ = split_index(i12) +# assert i1 is i1_ +# assert i2 is i2_ +# np.testing.assert_allclose(q1, i1.charges) +# np.testing.assert_allclose(q2, i2.charges) +# np.testing.assert_allclose(q1, i1_.charges) +# np.testing.assert_allclose(q2, i2_.charges) +# assert i1_.name == 'index1' +# assert i2_.name == 'index2' +# assert i1_.flow == i1.flow +# assert i2_.flow == i2.flow + +# def test_elementary_indices(): +# D = 10 +# B = 4 +# dtype = np.int16 +# q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) +# q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) +# q3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) +# q4 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) +# i1 = Index(charges=q1, flow=1, name='index1') +# i2 = Index(charges=q2, flow=1, name='index2') +# i3 = Index(charges=q3, flow=1, 
name='index3') +# i4 = Index(charges=q4, flow=1, name='index4') + +# i12 = i1 * i2 +# i34 = i3 * i4 +# elmt12 = i12.get_elementary_indices() +# assert elmt12[0] is i1 +# assert elmt12[1] is i2 + +# i1234 = i12 * i34 +# elmt1234 = i1234.get_elementary_indices() +# assert elmt1234[0] is i1 +# assert elmt1234[1] is i2 +# assert elmt1234[2] is i3 +# assert elmt1234[3] is i4 +# assert elmt1234[0].name == 'index1' +# assert elmt1234[1].name == 'index2' +# assert elmt1234[2].name == 'index3' +# assert elmt1234[3].name == 'index4' +# assert elmt1234[0].flow == i1.flow +# assert elmt1234[1].flow == i2.flow +# assert elmt1234[2].flow == i3.flow +# assert elmt1234[3].flow == i4.flow + +# np.testing.assert_allclose(q1, i1.charges) +# np.testing.assert_allclose(q2, i2.charges) +# np.testing.assert_allclose(q3, i3.charges) +# np.testing.assert_allclose(q4, i4.charges) + +# def test_leave(): +# D = 10 +# B = 4 +# dtype = np.int16 +# q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) +# q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) +# i1 = Index(charges=q1, flow=1, name='index1') +# i2 = Index(charges=q2, flow=1, name='index2') +# assert i1.is_leave +# assert i2.is_leave + +# i12 = i1 * i2 +# assert not i12.is_leave + +# def test_copy(): +# D = 10 +# B = 4 +# dtype = np.int16 +# q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) +# q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) +# i1 = Index(charges=q1, flow=1, name='index1') +# i2 = Index(charges=q2, flow=1, name='index2') +# i3 = Index(charges=q1, flow=-1, name='index3') +# i4 = Index(charges=q2, flow=-1, name='index4') + +# i12 = i1 * i2 +# i34 = i3 * i4 +# i1234 = i12 * i34 +# i1234_copy = i1234.copy() + +# elmt1234 = i1234_copy.get_elementary_indices() +# assert elmt1234[0] is not i1 +# assert elmt1234[1] is not i2 +# assert elmt1234[2] is not i3 +# assert elmt1234[3] is not i4 + +# def test_unfuse(): +# q1 = np.random.randint(-4, 5, 10).astype(np.int16) +# q2 = 
np.random.randint(-4, 5, 4).astype(np.int16) +# q3 = np.random.randint(-4, 5, 4).astype(np.int16) +# q12 = fuse_charges([q1, q2], [1, 1]) +# q123 = fuse_charges([q12, q3], [1, 1]) +# nz = np.nonzero(q123 == 0)[0] +# q12_inds, q3_inds = unfuse(nz, len(q12), len(q3)) + +# q1_inds, q2_inds = unfuse(q12_inds, len(q1), len(q2)) +# np.testing.assert_allclose(q1[q1_inds] + q2[q2_inds] + q3[q3_inds], +# np.zeros(len(q1_inds), dtype=np.int16)) From c1e75a1ed29f2bebf730e54a2c1d8baf307352e1 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 1 Jan 2020 15:35:03 -0500 Subject: [PATCH 088/212] fixing bugs --- tensornetwork/block_tensor/index.py | 363 +++++++++++++++------------- 1 file changed, 199 insertions(+), 164 deletions(-) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index 63ef58f32..7d8ababdb 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -23,114 +23,6 @@ from typing import List, Union, Any, Optional, Tuple, Text -class Index: - """ - An index class to store indices of a symmetric tensor. - An index keeps track of all its childs by storing references - to them (i.e. it is a binary tree). 
- """ - - def __init__(self, - charges: Union[List, np.ndarray], - flow: int, - name: Optional[Text] = None, - left_child: Optional["Index"] = None, - right_child: Optional["Index"] = None): - self._charges = np.asarray(charges) - self.flow = flow - self.left_child = left_child - self.right_child = right_child - self._name = name - - def __repr__(self): - return str(self.dimension) - - @property - def is_leave(self): - return (self.left_child is None) and (self.right_child is None) - - @property - def dimension(self): - return np.prod([len(i.charges) for i in self.get_elementary_indices()]) - - def _copy_helper(self, index: "Index", copied_index: "Index") -> None: - """ - Helper function for copy - """ - if index.left_child != None: - left_copy = Index( - charges=copy.copy(index.left_child.charges), - flow=copy.copy(index.left_child.flow), - name=copy.copy(index.left_child.name)) - - copied_index.left_child = left_copy - self._copy_helper(index.left_child, left_copy) - if index.right_child != None: - right_copy = Index( - charges=copy.copy(index.right_child.charges), - flow=copy.copy(index.right_child.flow), - name=copy.copy(index.right_child.name)) - copied_index.right_child = right_copy - self._copy_helper(index.right_child, right_copy) - - def copy(self): - """ - Returns: - Index: A deep copy of `Index`. Note that all children of - `Index` are copied as well. - """ - index_copy = Index( - charges=self._charges.copy(), flow=copy.copy(self.flow), name=self.name) - - self._copy_helper(self, index_copy) - return index_copy - - def _leave_helper(self, index: "Index", leave_list: List) -> None: - if index.left_child: - self._leave_helper(index.left_child, leave_list) - if index.right_child: - self._leave_helper(index.right_child, leave_list) - if (index.left_child is None) and (index.right_child is None): - leave_list.append(index) - - def get_elementary_indices(self) -> List: - """ - Returns: - List: A list containing the elementary indices (the leaves) - of `Index`. 
- """ - leave_list = [] - self._leave_helper(self, leave_list) - return leave_list - - def __mul__(self, index: "Index") -> "Index": - """ - Merge `index` and self into a single larger index. - The flow of the resulting index is set to 1. - Flows of `self` and `index` are multiplied into - the charges upon fusing.n - """ - return fuse_index_pair(self, index) - - @property - def charges(self): - if self.is_leave: - return self._charges - fused_charges = fuse_charge_pair( - self.left_child.charges, self.left_child.flow, self.right_child.charges, - self.right_child.flow) - - return fused_charges - - @property - def name(self): - if self._name: - return self._name - if self.is_leave: - return self.name - return self.left_child.name + ' & ' + self.right_child.name - - def fuse_charge_pair(q1: Union[List, np.ndarray], flow1: int, q2: Union[List, np.ndarray], flow2: int) -> np.ndarray: """ @@ -198,57 +90,6 @@ def fuse_degeneracies(degen1: Union[List, np.ndarray], len(degen1) * len(degen2)) -def fuse_index_pair(left_index: Index, - right_index: Index, - flow: Optional[int] = 1) -> Index: - """ - Fuse two consecutive indices (legs) of a symmetric tensor. - Args: - left_index: A tensor Index. - right_index: A tensor Index. - flow: An optional flow of the resulting `Index` object. - Returns: - Index: The result of fusing `index1` and `index2`. - """ - #Fuse the charges of the two indices - if left_index is right_index: - raise ValueError( - "index1 and index2 are the same object. Can only fuse distinct objects") - - return Index( - charges=None, flow=flow, left_child=left_index, right_child=right_index) - - -def fuse_indices(indices: List[Index], flow: Optional[int] = 1) -> Index: - """ - Fuse a list of indices (legs) of a symmetric tensor. - Args: - indices: A list of tensor Index objects - flow: An optional flow of the resulting `Index` object. - Returns: - Index: The result of fusing `indices`. 
- """ - - index = indices[0] - for n in range(1, len(indices)): - index = fuse_index_pair(index, indices[n], flow=flow) - return index - - -def split_index(index: Index) -> Tuple[Index, Index]: - """ - Split an index (leg) of a symmetric tensor into two legs. - Args: - index: A tensor Index. - Returns: - Tuple[Index, Index]: The result of splitting `index`. - """ - if index.is_leave: - raise ValueError("cannot split an elementary index") - - return index.left_child, index.right_child - - def unfuse(fused_indices: np.ndarray, len_left: int, len_right: int) -> Tuple[np.ndarray, np.ndarray]: """ @@ -294,6 +135,16 @@ def unfuse(fused_indices: np.ndarray, len_left: int, return left, right +def _copy_charges(charges): + cs = [] + for n in range(len(charges)): + c = type(charges[n]).__new__(type( + charges[n])) #create a new charge object of type type(other) + c.__init__(charges[n].charges.copy()) + cs.append(c) + return cs + + class BaseCharge: """ Base class for fundamental charges (i.e. for symmetries that @@ -320,6 +171,9 @@ def __matmul__(self, other: "U1Charge") -> "Charge": raise NotImplementedError( "`__matmul__` is not implemented for `BaseCharge`") + def __len__(self): + return len(self.charges) + def __repr__(self): return self.charges.__repr__() @@ -353,19 +207,34 @@ def __mul__(self, number: int) -> "U1Charge": def __rmul__(self, number: int) -> "U1Charge": return U1Charge(charges=self.charges * number) - def __matmul__(self, other: "U1Charge") -> "Charge": - return Charge([self.charges, other.charges]) + def __matmul__(self, other: Union["U1Charge", "Charge"]) -> "Charge": + c1 = U1Charge(self.charges.copy()) #make a copy of the charges (np.ndarray) + if isinstance(other, U1Charge): + c2 = type(other).__new__( + type(other)) #create a new charge object of type type(other) + c2.__init__(other.charges.copy()) + return Charge([c1, c2]) + else: + #`other` should be of type `Charge`. 
+ + return Charge([c1] + _copy_charges(other.charges)) class Charge: - def __init__(self, charges: List[BaseCharge]) -> None: + def __init__(self, charges: List[Union[np.ndarray, BaseCharge]]) -> None: if not isinstance(charges, list): raise TypeError("only list allowed for argument `charges` " "in BaseCharge.__init__(charges)") if not np.all([len(c) == len(charges[0]) for c in charges]): raise ValueError("not all charges have the same length. " "Got lengths = {}".format([len(c) for c in charges])) + for n in range(len(charges)): + if not isinstance(charges[n], BaseCharge): + raise TypeError( + "`Charge` can only be initialized with a list of `BaseCharge`. Found {} instead" + .format(type(charges[n]))) + self.charges = charges def __add__(self, other: "Charge") -> "Charge": @@ -388,9 +257,12 @@ def __matmul__(self, other: Union["Charge", BaseCharge]) -> "Charge": """ if isinstance(other, BaseCharge): - return Charge(self.charges + [other.charges]) + c = type(other).__new__( + type(other)) #create a new charge object of type type(other) + c.__init__(other.charges.copy()) + return Charge(self.charges + [c]) elif isinstance(other, Charge): - return Charge(self.charges + other.charges) + return Charge(_copy_charges(self.charges) + _copy_charges(other.charges)) raise TypeError("datatype not understood") @@ -400,5 +272,168 @@ def __mul__(self, number: int) -> "Charge": def __rmul__(self, number: int) -> "Charge": return Charge(charges=[c * number for c in self.charges]) + def __len__(self): + return len(self.charges[0]) + def __repr__(self): return self.charges.__repr__() + + +class Index: + """ + An index class to store indices of a symmetric tensor. + An index keeps track of all its childs by storing references + to them (i.e. it is a binary tree). 
+ """ + + def __init__(self, + charges: Union[Charge, BaseCharge], + flow: int, + name: Optional[Text] = None, + left_child: Optional["Index"] = None, + right_child: Optional["Index"] = None): + if isinstance(charges, BaseCharge): + self._charges = Charge([charges]) + elif isinstance(charges, Charge): + self._charges = charges + self.flow = flow + self.left_child = left_child + self.right_child = right_child + self._name = name + + def __repr__(self): + return str(self.dimension) + + @property + def is_leave(self): + return (self.left_child is None) and (self.right_child is None) + + @property + def dimension(self): + return np.prod([len(i.charges) for i in self.get_elementary_indices()]) + + def _copy_helper(self, index: "Index", copied_index: "Index") -> None: + """ + Helper function for copy + """ + if index.left_child != None: + left_copy = Index( + charges=copy.copy(index.left_child.charges), + flow=copy.copy(index.left_child.flow), + name=copy.copy(index.left_child.name)) + + copied_index.left_child = left_copy + self._copy_helper(index.left_child, left_copy) + if index.right_child != None: + right_copy = Index( + charges=copy.copy(index.right_child.charges), + flow=copy.copy(index.right_child.flow), + name=copy.copy(index.right_child.name)) + copied_index.right_child = right_copy + self._copy_helper(index.right_child, right_copy) + + def copy(self): + """ + Returns: + Index: A deep copy of `Index`. Note that all children of + `Index` are copied as well. 
+ """ + index_copy = Index( + charges=copy.copy(self._charges), + flow=copy.copy(self.flow), + name=self.name) + + self._copy_helper(self, index_copy) + return index_copy + + def _leave_helper(self, index: "Index", leave_list: List) -> None: + if index.left_child: + self._leave_helper(index.left_child, leave_list) + if index.right_child: + self._leave_helper(index.right_child, leave_list) + if (index.left_child is None) and (index.right_child is None): + leave_list.append(index) + + def get_elementary_indices(self) -> List: + """ + Returns: + List: A list containing the elementary indices (the leaves) + of `Index`. + """ + leave_list = [] + self._leave_helper(self, leave_list) + return leave_list + + def __mul__(self, index: "Index") -> "Index": + """ + Merge `index` and self into a single larger index. + The flow of the resulting index is set to 1. + Flows of `self` and `index` are multiplied into + the charges upon fusing.n + """ + return fuse_index_pair(self, index) + + @property + def charges(self): + if self.is_leave: + return self._charges + return self.left_child.charges * self.left_child.flow + self.right_child.charges * self.right_child.flow + + @property + def name(self): + if self._name: + return self._name + if self.is_leave: + return self.name + return self.left_child.name + ' & ' + self.right_child.name + + +def fuse_index_pair(left_index: Index, + right_index: Index, + flow: Optional[int] = 1) -> Index: + """ + Fuse two consecutive indices (legs) of a symmetric tensor. + Args: + left_index: A tensor Index. + right_index: A tensor Index. + flow: An optional flow of the resulting `Index` object. + Returns: + Index: The result of fusing `index1` and `index2`. + """ + #Fuse the charges of the two indices + if left_index is right_index: + raise ValueError( + "index1 and index2 are the same object. 
Can only fuse distinct objects") + + return Index( + charges=None, flow=flow, left_child=left_index, right_child=right_index) + + +def fuse_indices(indices: List[Index], flow: Optional[int] = 1) -> Index: + """ + Fuse a list of indices (legs) of a symmetric tensor. + Args: + indices: A list of tensor Index objects + flow: An optional flow of the resulting `Index` object. + Returns: + Index: The result of fusing `indices`. + """ + + index = indices[0] + for n in range(1, len(indices)): + index = fuse_index_pair(index, indices[n], flow=flow) + return index + + +def split_index(index: Index) -> Tuple[Index, Index]: + """ + Split an index (leg) of a symmetric tensor into two legs. + Args: + index: A tensor Index. + Returns: + Tuple[Index, Index]: The result of splitting `index`. + """ + if index.is_leave: + raise ValueError("cannot split an elementary index") + + return index.left_child, index.right_child From a1cf874d4374f0ca744447348783445cb23e4abe Mon Sep 17 00:00:00 2001 From: mganahl Date: Thu, 2 Jan 2020 09:52:13 -0500 Subject: [PATCH 089/212] implementing more symmetries --- tensornetwork/block_tensor/index.py | 201 +++++++++++++++++++++-- tensornetwork/block_tensor/index_test.py | 26 +++ 2 files changed, 216 insertions(+), 11 deletions(-) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index 7d8ababdb..da1e0da9a 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -156,27 +156,154 @@ def __init__(self, charges: np.ndarray) -> None: raise TypeError("only np.ndarray allowed for argument `charges` " "in BaseCharge.__init__(charges)") - self.charges = charges + self.charges = np.asarray(charges) - def __add__(self, other: "BaseCharge"): + def __add__(self, other: "BaseCharge") -> "BaseCharge": raise NotImplementedError("`__add__` is not implemented for `BaseCharge`") - def __mul__(self, number: int) -> "U1Charge": + def __mul__(self, number: int) -> "BaseCharge": raise 
NotImplementedError("`__mul__` is not implemented for `BaseCharge`") - def __rmul__(self, number: int) -> "U1Charge": + def __rmul__(self, number: int) -> "BaseCharge": raise NotImplementedError("`__rmul__` is not implemented for `BaseCharge`") - def __matmul__(self, other: "U1Charge") -> "Charge": + def __matmul__(self, other: "BaseCharge") -> "Charge": raise NotImplementedError( "`__matmul__` is not implemented for `BaseCharge`") - def __len__(self): + def __len__(self) -> int: return len(self.charges) - def __repr__(self): + def __repr__(self) -> str: return self.charges.__repr__() + @property + def dual_charges(self) -> np.ndarray: + raise NotImplementedError( + "`dual_charges` is not implemented for `BaseCharge`") + + def get_charges(self, dual: bool) -> np.ndarray: + if dual: + return self.dual_charges + return self.charges + + +class U1ChargeCoerced: + """ + A simple charge class for a single U1 symmetry. + """ + + def __init__(self, + charges: List[np.ndarray], + offsets: Optional[np.ndarray] = None, + shifts: Optional[np.ndarray] = None) -> None: + itemsizes = [8 * c.dtype.itemsize for c in charges] + if np.sum(itemsizes) > 64: + raise TypeError("number of bits required to store all charges " + "in a single int is larger than 64") + if np.sum(itemsizes) == 16: + dtype = np.int16 + if np.sum(itemsizes) > 16: + dtype = np.int32 + if np.sum(itemsizes) > 32: + dtype = np.int64 + + if shifts is None: + self.shifts = np.flip(np.append(0, np.cumsum(np.flip( + itemsizes[1::])))).astype(dtype) + else: + self.shifts = shifts + + dtype_charges = [c.astype(dtype) for c in charges] + if offsets is None: + offsets = [np.min(dtype_charges[n]) for n in range(len(dtype_charges))] + pos_charges = [ + dtype_charges[n] - offsets[n] for n in range(len(dtype_charges)) + ] + self.offsets = np.sum([ + np.left_shift(offsets[n], self.shifts[n]) + for n in range(len(dtype_charges)) + ], + axis=0).astype(dtype) + self._charges = np.sum([ + np.left_shift(pos_charges[n], 
self.shifts[n]) + for n in range(len(dtype_charges)) + ], + axis=0).astype(dtype) + else: + if len(charges) > 1: + raise ValueError( + 'if offsets is given, only a single charge array can be passed') + self.offsets = offsets + self._charges = dtype_charges[0] + + @property + def num_symmetries(self): + return len(self.shifts) + + def __add__(self, other: "U1ChargeCoerced") -> "U1ChargeCoerced": + """ + Fuse the charges of `self` with charges of `other`, and + return a new `U1Charge` object holding the result. + Args: + other: A `U1ChargeCoerced` object. + Returns: + U1ChargeCoerced: The result of fusing `self` with `other`. + """ + if self.num_symmetries != other.num_symmetries: + raise ValueError( + "cannot fuse charges with different number of symmetries") + + if not np.all(self.shifts == other.shifts): + raise ValueError( + "Cannot fuse U1-charges with different shifts {} and {}".format( + self.shifts, other.shifts)) + offsets = np.sum([self.offsets, other.offsets]) + fused = np.reshape(self._charges[:, None] + other.charges[None, :], + len(self._charges) * len(other.charges)) + return U1ChargeCoerced(charges=[fused], offsets=offsets, shifts=self.shifts) + + def __repr__(self): + return 'U1-charge: \n' + 'shifts: ' + self.shifts.__repr__( + ) + '\n' + 'offsets: ' + self.offsets.__repr__( + ) + '\n' + 'charges: ' + self._charges.__repr__() + + # def __matmul__(self, other: Union["U1Charge", "Charge"]) -> "Charge": + # c1 = U1Charge(self._charges.copy()) #make a copy of the charges (np.ndarray) + # if isinstance(other, U1Charge): + # c2 = type(other).__new__( + # type(other)) #create a new charge object of type type(other) + # c2.__init__(other.charges.copy()) + # return Charge([c1, c2]) + # #`other` should be of type `Charge`. 
+ # return Charge([c1] + _copy_charges(other.charges)) + + @property + def dual_charges(self) -> np.ndarray: + #the dual of a U1 charge is its negative value + return (self._charges + self.offsets) * self._charges.dtype.type(-1) + + @property + def charges(self) -> np.ndarray: + return self._charges + self.offsets + + def get_charges(self, dual: bool) -> np.ndarray: + if dual: + return self.dual_charges + return self._charges + self.offsets + + def nonzero(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: + if len(target_charges) != len(self.shifts): + raise ValueError("len(target_charges) = {} is different " + "from len(U1ChargeCoerced.shifts) = {}".format( + len(target_charges), len(self.shifts))) + charge = np.asarray(target_charges).astype(self._charges.dtype) + target = np.sum([ + np.left_shift(charge[n], self.shifts[n]) + for n in range(len(self.shifts)) + ]) + return np.nonzero(self._charges + self.offsets == target)[0] + class U1Charge(BaseCharge): """ @@ -186,6 +313,52 @@ class U1Charge(BaseCharge): def __init__(self, charges: np.ndarray) -> None: super().__init__(charges) + def __add__(self, other: "U1Charge") -> "U1Charge": + """ + Fuse the charges of `self` with charges of `other`, and + return a new `U1Charge` object holding the result. + Args: + other: A `U1Charge` object. + Returns: + U1Charge: The result of fusing `self` with `other`. 
+ """ + fused = np.reshape(self.charges[:, None] + other.charges[None, :], + len(self.charges) * len(other.charges)) + + return U1Charge(charges=fused) + + def __mul__(self, number: int) -> "U1Charge": + return U1Charge(charges=self.charges * number) + + def __rmul__(self, number: int) -> "U1Charge": + return U1Charge(charges=self.charges * number) + + def __matmul__(self, other: Union["U1Charge", "Charge"]) -> "Charge": + c1 = U1Charge(self.charges.copy()) #make a copy of the charges (np.ndarray) + if isinstance(other, U1Charge): + c2 = type(other).__new__( + type(other)) #create a new charge object of type type(other) + c2.__init__(other.charges.copy()) + return Charge([c1, c2]) + #`other` should be of type `Charge`. + return Charge([c1] + _copy_charges(other.charges)) + + @property + def dual_charges(self): + #the dual of a U1 charge is its negative value + return self.charges * self.charges.dtype.type(-1) + + +class Z2Charge(BaseCharge): + """ + A simple charge class for a single Z2 symmetry. + """ + + def __init__(self, charges: np.ndarray) -> None: + if charges.dtype is not np.dtype(np.bool): + raise TypeError("Z2 charges have to be boolian") + super().__init__(charges) + def __add__(self, other: "U1Charge") -> "U1Charge": """ Fuse the charges of `self` with charges of `other`, and @@ -196,7 +369,7 @@ def __add__(self, other: "U1Charge") -> "U1Charge": U1Charge: The result of fusing `self` with `other`. """ fused = np.reshape( - np.asarray(self.charges)[:, None] + np.asarray(other.charges)[None, :], + np.logical_xor(self.charges[:, None], other.charges[None, :]), len(self.charges) * len(other.charges)) return U1Charge(charges=fused) @@ -214,10 +387,13 @@ def __matmul__(self, other: Union["U1Charge", "Charge"]) -> "Charge": type(other)) #create a new charge object of type type(other) c2.__init__(other.charges.copy()) return Charge([c1, c2]) - else: - #`other` should be of type `Charge`. + #`other` should be of type `Charge`. 
+ return Charge([c1] + _copy_charges(other.charges)) - return Charge([c1] + _copy_charges(other.charges)) + @property + def dual_charges(self): + #Z2 charges are self-dual + return self.charges class Charge: @@ -278,6 +454,9 @@ def __len__(self): def __repr__(self): return self.charges.__repr__() + def get_charges(self, dual: bool) -> List[np.ndarray]: + return [c.get_charges(dual) for c in self.charges] + class Index: """ diff --git a/tensornetwork/block_tensor/index_test.py b/tensornetwork/block_tensor/index_test.py index 05b80a8a2..08b7f9a86 100644 --- a/tensornetwork/block_tensor/index_test.py +++ b/tensornetwork/block_tensor/index_test.py @@ -3,6 +3,17 @@ from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies, fuse_charge_pair, fuse_indices, unfuse, U1Charge, Charge, BaseCharge +def test_U1Charge_dual(): + q = U1Charge(np.asarray([-1, 0, 1])) + assert np.all(q.dual_charges == np.asarray([1, 0, -1])) + + +def test_U1Charge_get_charges(): + q = U1Charge(np.asarray([-1, 0, 1])) + assert np.all(q.get_charges(dual=False) == np.asarray([-1, 0, 1])) + assert np.all(q.get_charges(dual=True) == np.asarray([1, 0, -1])) + + def test_U1Charge_mul(): q = U1Charge(np.asarray([0, 1])) q2 = 2 * q @@ -79,6 +90,21 @@ def test_Charge_product(): assert isinstance(prod.charges[n], BaseCharge) +def test_Charge_get_charges(): + q = Charge( + [U1Charge(np.asarray([-1, 0, 1])), + U1Charge(np.asarray([-2, 0, 3]))]) + expected = [np.asarray([-1, 0, 1]), np.asarray([-2, 0, 3])] + actual = q.get_charges(dual=False) + for n in range(len(actual)): + np.testing.assert_allclose(expected[n], actual[n]) + + expected = [np.asarray([1, 0, -1]), np.asarray([2, 0, -3])] + actual = q.get_charges(dual=True) + for n in range(len(actual)): + np.testing.assert_allclose(expected[n], actual[n]) + + def test_fuse_charges(): q1 = np.asarray([0, 1]) q2 = np.asarray([2, 3, 4]) From f91389844e735505a2858f80d452516498e98d78 Mon Sep 17 00:00:00 2001 
From: "martin.ganahl@gmail.com" Date: Thu, 2 Jan 2020 10:15:04 -0500 Subject: [PATCH 090/212] typo + remove cython lookup --- tensornetwork/block_tensor/block_tensor.py | 144 +++++++++++---------- 1 file changed, 73 insertions(+), 71 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index f87c29947..e54f6727c 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -16,7 +16,7 @@ from __future__ import division from __future__ import print_function import numpy as np -from tensornetwork.block_tensor.lookup import lookup +#from tensornetwork.block_tensor.lookup import lookup # pylint: disable=line-too-long from tensornetwork.network_components import Node, contract, contract_between from tensornetwork.backends import backend_factory @@ -464,11 +464,11 @@ def find_diagonal_sparse_blocks_depreacated_1( return blocks -def find_diagonal_sparse_blocks_deprecated_0( - data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: +def find_diagonal_sparse_blocks_deprecated_0(data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True + ) -> Dict: """ Deprecated: this version is about 2 times slower (worst case) than the current used implementation @@ -570,11 +570,11 @@ def find_diagonal_sparse_blocks_deprecated_0( return blocks -def find_diagonal_sparse_blocks_column_major( - data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: +def find_diagonal_sparse_blocks_column_major(data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True + ) -> Dict: """ Deprecated @@ -789,8 +789,9 @@ def find_dense_positions(left_charges: np.ndarray, left_flow: int, indices = [] for n in range(len(left_charges)): c = left_charges[n] 
- indices.append(n * len_right_charges + right_locations[ - (target_charge - left_flow * c) * right_flow]) + indices.append(n * len_right_charges + + right_locations[(target_charge - left_flow * c) * + right_flow]) return np.concatenate(indices) @@ -876,8 +877,9 @@ def find_sparse_positions(left_charges: np.ndarray, left_flow: int, for target_charge in target_charges: right_indices[(left_charge, target_charge)] = np.nonzero( - tmp_relevant_right_charges == - (target_charge - left_flow * left_charge) * right_flow)[0] + tmp_relevant_right_charges == (target_charge - + left_flow * left_charge) * + right_flow)[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy @@ -1256,7 +1258,7 @@ def transpose(self, flat_strides = np.flip(np.append(1, np.cumprod(np.flip(flat_dims[1::])))) if not hasattr(self, 'dense_to_sparse_table'): #find the best partition into left and right charges - leftx_charges, right_charges, _ = _find_best_partition( + left_charges, right_charges, _ = _find_best_partition( flat_charges, flat_flows) #find the index-positions of the elements in the fusion #of `left_charges` and `right_charges` that have `0` @@ -1264,9 +1266,9 @@ def transpose(self, linear_positions = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=0) - self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( - len(self.data)), (linear_positions, - np.zeros(len(self.data), dtype=np.int64)))) + self.dense_to_sparse_table = sp.sparse.csr_matrix( + (np.arange(len(self.data)), + (linear_positions, np.zeros(len(self.data), dtype=np.int64)))) flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] @@ -1291,8 +1293,8 @@ def transpose(self, self.data = self.data[inds] return inds - def transpose_intersect1d( - self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": + def transpose_intersect1d(self, order: Union[List[int], np.ndarray] + ) -> "BlockSparseTensor": """ Transpose the tensor 
into the new order `order` Args: pp @@ -1339,52 +1341,52 @@ def transpose_intersect1d( assume_unique=True) self.data = self.data[inds] - def transpose_lookup( - self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": - """ - Deprecated - - Transpose the tensor into the new order `order`. Uses a simple cython std::map - for the lookup - Args: - order: The new order of indices. - Returns: - BlockSparseTensor: The transposed tensor. - """ - if len(order) != self.rank: - raise ValueError( - "`len(order)={}` is different form `self.rank={}`".format( - len(order), self.rank)) - charges = self.charges #call only once in case some of the indices are merged indices - dims = [len(c) for c in charges] - - strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) - #find the best partition into left and right charges - left_charges, right_charges, _ = _find_best_partition(charges, self.flows) - #find the index-positions of the elements in the fusion - #of `left_charges` and `right_charges` that have `0` - #total charge (those are the only non-zero elements). 
- linear_positions = find_dense_positions( - left_charges, 1, right_charges, 1, target_charge=0) - - tr_charges = [charges[n] for n in order] - tr_flows = [self.flows[n] for n in order] - tr_strides = [strides[n] for n in order] - tr_dims = [dims[n] for n in order] - tr_left_charges, tr_right_charges, _ = _find_best_partition( - tr_charges, tr_flows) - #FIXME: this should be done without fully fusing the strides - tr_dense_linear_positions = fuse_charges( - [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], - flows=[1] * len(tr_dims)) - tr_linear_positions = find_dense_positions(tr_left_charges, 1, - tr_right_charges, 1, 0) - inds = lookup(linear_positions, - tr_dense_linear_positions[tr_linear_positions]) - self.data = self.data[inds] - - def transpose_searchsorted( - self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": + # def transpose_lookup(self, order: Union[List[int], np.ndarray] + # ) -> "BlockSparseTensor": + # """ + # Deprecated + + # Transpose the tensor into the new order `order`. Uses a simple cython std::map + # for the lookup + # Args: + # order: The new order of indices. + # Returns: + # BlockSparseTensor: The transposed tensor. + # """ + # if len(order) != self.rank: + # raise ValueError( + # "`len(order)={}` is different form `self.rank={}`".format( + # len(order), self.rank)) + # charges = self.charges #call only once in case some of the indices are merged indices + # dims = [len(c) for c in charges] + + # strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) + # #find the best partition into left and right charges + # left_charges, right_charges, _ = _find_best_partition(charges, self.flows) + # #find the index-positions of the elements in the fusion + # #of `left_charges` and `right_charges` that have `0` + # #total charge (those are the only non-zero elements). 
+ # linear_positions = find_dense_positions( + # left_charges, 1, right_charges, 1, target_charge=0) + + # tr_charges = [charges[n] for n in order] + # tr_flows = [self.flows[n] for n in order] + # tr_strides = [strides[n] for n in order] + # tr_dims = [dims[n] for n in order] + # tr_left_charges, tr_right_charges, _ = _find_best_partition( + # tr_charges, tr_flows) + # #FIXME: this should be done without fully fusing the strides + # tr_dense_linear_positions = fuse_charges( + # [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], + # flows=[1] * len(tr_dims)) + # tr_linear_positions = find_dense_positions(tr_left_charges, 1, + # tr_right_charges, 1, 0) + # inds = lookup(linear_positions, + # tr_dense_linear_positions[tr_linear_positions]) + # self.data = self.data[inds] + + def transpose_searchsorted(self, order: Union[List[int], np.ndarray] + ) -> "BlockSparseTensor": """ Deprecated: @@ -1564,8 +1566,8 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_1( - self, return_data: Optional[bool] = True) -> Dict: + def get_diagonal_blocks_deprecated_1(self, return_data: Optional[bool] = True + ) -> Dict: """ Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. 
@@ -1600,8 +1602,8 @@ def get_diagonal_blocks_deprecated_1( column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_0( - self, return_data: Optional[bool] = True) -> Dict: + def get_diagonal_blocks_deprecated_0(self, return_data: Optional[bool] = True + ) -> Dict: """ Deprecated From 2707837571e3301308558fa508a9665d6073463e Mon Sep 17 00:00:00 2001 From: "martin.ganahl@gmail.com" Date: Thu, 2 Jan 2020 10:15:32 -0500 Subject: [PATCH 091/212] split charge.py from index.py --- tensornetwork/block_tensor/charge.py | 346 +++++++++++++++++++++++++++ 1 file changed, 346 insertions(+) create mode 100644 tensornetwork/block_tensor/charge.py diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py new file mode 100644 index 000000000..b15e2b984 --- /dev/null +++ b/tensornetwork/block_tensor/charge.py @@ -0,0 +1,346 @@ +# Copyright 2019 The TensorNetwork Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +from tensornetwork.network_components import Node, contract, contract_between +# pylint: disable=line-too-long +from tensornetwork.backends import backend_factory +import copy +from typing import List, Union, Any, Optional, Tuple, Text + + +def _copy_charges(charges): + cs = [] + for n in range(len(charges)): + c = type(charges[n]).__new__(type( + charges[n])) #create a new charge object of type type(other) + c.__init__(charges[n].charges.copy()) + cs.append(c) + return cs + + +class BaseCharge: + """ + Base class for fundamental charges (i.e. for symmetries that + are not products of smaller groups) + """ + + def __init__(self, charges: np.ndarray) -> None: + if not isinstance(charges, np.ndarray): + raise TypeError("only np.ndarray allowed for argument `charges` " + "in BaseCharge.__init__(charges)") + + self.charges = np.asarray(charges) + + def __add__(self, other: "BaseCharge") -> "BaseCharge": + raise NotImplementedError("`__add__` is not implemented for `BaseCharge`") + + def __mul__(self, number: int) -> "BaseCharge": + raise NotImplementedError("`__mul__` is not implemented for `BaseCharge`") + + def __rmul__(self, number: int) -> "BaseCharge": + raise NotImplementedError("`__rmul__` is not implemented for `BaseCharge`") + + def __matmul__(self, other: "BaseCharge") -> "Charge": + raise NotImplementedError( + "`__matmul__` is not implemented for `BaseCharge`") + + def __len__(self) -> int: + return len(self.charges) + + def __repr__(self) -> str: + return self.charges.__repr__() + + @property + def dual_charges(self) -> np.ndarray: + raise NotImplementedError( + "`dual_charges` is not implemented for `BaseCharge`") + + def get_charges(self, dual: bool) -> np.ndarray: + if dual: + return self.dual_charges + return self.charges + + +class U1ChargeCoerced: + """ + A simple charge class for a single U1 symmetry. 
+ """ + + def __init__(self, + charges: List[np.ndarray], + offsets: Optional[np.ndarray] = None, + shifts: Optional[np.ndarray] = None) -> None: + itemsizes = [8 * c.dtype.itemsize for c in charges] + if np.sum(itemsizes) > 64: + raise TypeError("number of bits required to store all charges " + "in a single int is larger than 64") + if np.sum(itemsizes) == 16: + dtype = np.int16 + if np.sum(itemsizes) > 16: + dtype = np.int32 + if np.sum(itemsizes) > 32: + dtype = np.int64 + + if shifts is None: + self.shifts = np.flip(np.append(0, np.cumsum(np.flip( + itemsizes[1::])))).astype(dtype) + else: + self.shifts = shifts + + dtype_charges = [c.astype(dtype) for c in charges] + if offsets is None: + offsets = [np.min(dtype_charges[n]) for n in range(len(dtype_charges))] + pos_charges = [ + dtype_charges[n] - offsets[n] for n in range(len(dtype_charges)) + ] + self.offsets = np.sum([ + np.left_shift(offsets[n], self.shifts[n]) + for n in range(len(dtype_charges)) + ], + axis=0).astype(dtype) + self._charges = np.sum([ + np.left_shift(pos_charges[n], self.shifts[n]) + for n in range(len(dtype_charges)) + ], + axis=0).astype(dtype) + else: + if len(charges) > 1: + raise ValueError( + 'if offsets is given, only a single charge array can be passed') + self.offsets = offsets + self._charges = dtype_charges[0] + + @property + def num_symmetries(self): + return len(self.shifts) + + def __add__(self, other: "U1ChargeCoerced") -> "U1ChargeCoerced": + """ + Fuse the charges of `self` with charges of `other`, and + return a new `U1Charge` object holding the result. + Args: + other: A `U1ChargeCoerced` object. + Returns: + U1ChargeCoerced: The result of fusing `self` with `other`. 
+ """ + if self.num_symmetries != other.num_symmetries: + raise ValueError( + "cannot fuse charges with different number of symmetries") + + if not np.all(self.shifts == other.shifts): + raise ValueError( + "Cannot fuse U1-charges with different shifts {} and {}".format( + self.shifts, other.shifts)) + offsets = np.sum([self.offsets, other.offsets]) + fused = np.reshape(self._charges[:, None] + other.charges[None, :], + len(self._charges) * len(other.charges)) + return U1ChargeCoerced(charges=[fused], offsets=offsets, shifts=self.shifts) + + def __repr__(self): + return 'U1-charge: \n' + 'shifts: ' + self.shifts.__repr__( + ) + '\n' + 'offsets: ' + self.offsets.__repr__( + ) + '\n' + 'charges: ' + self._charges.__repr__() + + # def __matmul__(self, other: Union["U1Charge", "Charge"]) -> "Charge": + # c1 = U1Charge(self._charges.copy()) #make a copy of the charges (np.ndarray) + # if isinstance(other, U1Charge): + # c2 = type(other).__new__( + # type(other)) #create a new charge object of type type(other) + # c2.__init__(other.charges.copy()) + # return Charge([c1, c2]) + # #`other` should be of type `Charge`. 
+ # return Charge([c1] + _copy_charges(other.charges)) + + @property + def dual_charges(self) -> np.ndarray: + #the dual of a U1 charge is its negative value + return (self._charges + self.offsets) * self._charges.dtype.type(-1) + + @property + def charges(self) -> np.ndarray: + return self._charges + self.offsets + + def get_charges(self, dual: bool) -> np.ndarray: + if dual: + return self.dual_charges + return self._charges + self.offsets + + def nonzero(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: + if len(target_charges) != len(self.shifts): + raise ValueError("len(target_charges) = {} is different " + "from len(U1ChargeCoerced.shifts) = {}".format( + len(target_charges), len(self.shifts))) + charge = np.asarray(target_charges).astype(self._charges.dtype) + target = np.sum([ + np.left_shift(charge[n], self.shifts[n]) + for n in range(len(self.shifts)) + ]) + return np.nonzero(self._charges + self.offsets == target)[0] + + +class U1Charge(BaseCharge): + """ + A simple charge class for a single U1 symmetry. + """ + + def __init__(self, charges: np.ndarray) -> None: + super().__init__(charges) + + def __add__(self, other: "U1Charge") -> "U1Charge": + """ + Fuse the charges of `self` with charges of `other`, and + return a new `U1Charge` object holding the result. + Args: + other: A `U1Charge` object. + Returns: + U1Charge: The result of fusing `self` with `other`. 
+ """ + fused = np.reshape(self.charges[:, None] + other.charges[None, :], + len(self.charges) * len(other.charges)) + + return U1Charge(charges=fused) + + def __mul__(self, number: int) -> "U1Charge": + return U1Charge(charges=self.charges * number) + + def __rmul__(self, number: int) -> "U1Charge": + return U1Charge(charges=self.charges * number) + + def __matmul__(self, other: Union["U1Charge", "Charge"]) -> "Charge": + c1 = U1Charge(self.charges.copy()) #make a copy of the charges (np.ndarray) + if isinstance(other, U1Charge): + c2 = type(other).__new__( + type(other)) #create a new charge object of type type(other) + c2.__init__(other.charges.copy()) + return Charge([c1, c2]) + #`other` should be of type `Charge`. + return Charge([c1] + _copy_charges(other.charges)) + + @property + def dual_charges(self): + #the dual of a U1 charge is its negative value + return self.charges * self.charges.dtype.type(-1) + + +class Z2Charge(BaseCharge): + """ + A simple charge class for a single Z2 symmetry. + """ + + def __init__(self, charges: np.ndarray) -> None: + if charges.dtype is not np.dtype(np.bool): + raise TypeError("Z2 charges have to be boolian") + super().__init__(charges) + + def __add__(self, other: "U1Charge") -> "U1Charge": + """ + Fuse the charges of `self` with charges of `other`, and + return a new `U1Charge` object holding the result. + Args: + other: A `U1Charge` object. + Returns: + U1Charge: The result of fusing `self` with `other`. 
+ """ + fused = np.reshape( + np.logical_xor(self.charges[:, None], other.charges[None, :]), + len(self.charges) * len(other.charges)) + + return U1Charge(charges=fused) + + def __mul__(self, number: int) -> "U1Charge": + return U1Charge(charges=self.charges * number) + + def __rmul__(self, number: int) -> "U1Charge": + return U1Charge(charges=self.charges * number) + + def __matmul__(self, other: Union["U1Charge", "Charge"]) -> "Charge": + c1 = U1Charge(self.charges.copy()) #make a copy of the charges (np.ndarray) + if isinstance(other, U1Charge): + c2 = type(other).__new__( + type(other)) #create a new charge object of type type(other) + c2.__init__(other.charges.copy()) + return Charge([c1, c2]) + #`other` should be of type `Charge`. + return Charge([c1] + _copy_charges(other.charges)) + + @property + def dual_charges(self): + #Z2 charges are self-dual + return self.charges + + +class Charge: + + def __init__(self, charges: List[Union[np.ndarray, BaseCharge]]) -> None: + if not isinstance(charges, list): + raise TypeError("only list allowed for argument `charges` " + "in BaseCharge.__init__(charges)") + if not np.all([len(c) == len(charges[0]) for c in charges]): + raise ValueError("not all charges have the same length. " + "Got lengths = {}".format([len(c) for c in charges])) + for n in range(len(charges)): + if not isinstance(charges[n], BaseCharge): + raise TypeError( + "`Charge` can only be initialized with a list of `BaseCharge`. Found {} instead" + .format(type(charges[n]))) + + self.charges = charges + + def __add__(self, other: "Charge") -> "Charge": + """ + Fuse `self` with `other`. + Args: + other: A `Charge` object. + Returns: + Charge: The result of fusing `self` with `other`. + """ + return Charge([c1 + c2 for c1, c2 in zip(self.charges, other.charges)]) + + def __matmul__(self, other: Union["Charge", BaseCharge]) -> "Charge": + """ + Product of `self` with `other` (group product). + Args: + other: A `BaseCharge` or `Charge` object. 
+ Returns: + Charge: The resulting charge of the product of `self` with `other`. + + """ + if isinstance(other, BaseCharge): + c = type(other).__new__( + type(other)) #create a new charge object of type type(other) + c.__init__(other.charges.copy()) + return Charge(self.charges + [c]) + elif isinstance(other, Charge): + return Charge(_copy_charges(self.charges) + _copy_charges(other.charges)) + + raise TypeError("datatype not understood") + + def __mul__(self, number: int) -> "Charge": + return Charge(charges=[c * number for c in self.charges]) + + def __rmul__(self, number: int) -> "Charge": + return Charge(charges=[c * number for c in self.charges]) + + def __len__(self): + return len(self.charges[0]) + + def __repr__(self): + return self.charges.__repr__() + + def get_charges(self, dual: bool) -> List[np.ndarray]: + return [c.get_charges(dual) for c in self.charges] From 8865798e3ce6641f6383182b0768dd5f499434d0 Mon Sep 17 00:00:00 2001 From: "martin.ganahl@gmail.com" Date: Thu, 2 Jan 2020 10:15:45 -0500 Subject: [PATCH 092/212] tests for charge.py --- tensornetwork/block_tensor/charge_test.py | 121 ++++++++++++++++++++++ 1 file changed, 121 insertions(+) create mode 100644 tensornetwork/block_tensor/charge_test.py diff --git a/tensornetwork/block_tensor/charge_test.py b/tensornetwork/block_tensor/charge_test.py new file mode 100644 index 000000000..65a9b72be --- /dev/null +++ b/tensornetwork/block_tensor/charge_test.py @@ -0,0 +1,121 @@ +import numpy as np +# pylint: disable=line-too-long +from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies, fuse_charge_pair, fuse_indices, unfuse, U1Charge, Charge, BaseCharge + + +def test_U1Charge_dual(): + q = U1Charge(np.asarray([-1, 0, 1])) + assert np.all(q.dual_charges == np.asarray([1, 0, -1])) + + +def test_U1Charge_get_charges(): + q = U1Charge(np.asarray([-1, 0, 1])) + assert np.all(q.get_charges(dual=False) == np.asarray([-1, 0, 1])) + assert 
np.all(q.get_charges(dual=True) == np.asarray([1, 0, -1])) + + +def test_U1Charge_mul(): + q = U1Charge(np.asarray([0, 1])) + q2 = 2 * q + q3 = q * 2 + assert np.all(q2.charges == np.asarray([0, 2])) + assert np.all(q3.charges == np.asarray([0, 2])) + + +def test_U1Charge_add(): + q1 = U1Charge(np.asarray([0, 1])) + q2 = U1Charge(np.asarray([2, 3, 4])) + fused_charges = q1 + q2 + assert np.all(fused_charges.charges == np.asarray([2, 3, 4, 3, 4, 5])) + + +def test_fuse_charge_pair(): + q1 = np.asarray([0, 1]) + q2 = np.asarray([2, 3, 4]) + fused_charges = fuse_charge_pair(q1, 1, q2, 1) + assert np.all(fused_charges == np.asarray([2, 3, 4, 3, 4, 5])) + fused_charges = fuse_charge_pair(q1, 1, q2, -1) + assert np.all(fused_charges == np.asarray([-2, -3, -4, -1, -2, -3])) + + +def test_Charge_mul(): + q = Charge([U1Charge(np.asarray([0, 1])), U1Charge(np.asarray([-2, 3]))]) + expected = [np.asarray([0, 2]), np.asarray([-4, 6])] + q2 = 2 * q + q3 = q * 2 + for n in range(len(q.charges)): + np.testing.assert_allclose(expected[n], q2.charges[n].charges) + np.testing.assert_allclose(expected[n], q3.charges[n].charges) + + +def test_Charge_add(): + q1 = Charge([U1Charge(np.asarray([0, 1])), U1Charge(np.asarray([-2, 3]))]) + q2 = Charge([U1Charge(np.asarray([2, 3])), U1Charge(np.asarray([-1, 4]))]) + expected = [np.asarray([2, 3, 3, 4]), np.asarray([-3, 2, 2, 7])] + q12 = q1 + q2 + for n in range(len(q12.charges)): + np.testing.assert_allclose(expected[n], q12.charges[n].charges) + + +def test_Charge_product(): + expected = [np.asarray([0, 1]), np.asarray([2, 3])] + q1 = U1Charge(expected[0]) + q2 = U1Charge(expected[1]) + prod = q1 @ q2 + for n in range(len(prod.charges)): + np.testing.assert_allclose(prod.charges[n].charges, expected[n]) + + B = 4 + dtype = np.int16 + D = 10 + Q1 = Charge(charges=[ + U1Charge(charges=np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) + for _ in range(2) + ]) + Q2 = Charge(charges=[ + U1Charge(charges=np.random.randint(-B // 2, B // 2 
+ 1, D).astype(dtype)) + for _ in range(2) + ]) + prod = Q1 @ Q2 + expected = Q1.charges + Q2.charges + for n in range(len(prod.charges)): + np.testing.assert_allclose(prod.charges[n].charges, expected[n].charges) + assert isinstance(prod.charges[n], BaseCharge) + + q1 = U1Charge(charges=np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) + prod = q1 @ Q2 + expected = [q1] + Q2.charges + for n in range(len(prod.charges)): + np.testing.assert_allclose(prod.charges[n].charges, expected[n].charges) + assert isinstance(prod.charges[n], BaseCharge) + + +def test_Charge_get_charges(): + q = Charge( + [U1Charge(np.asarray([-1, 0, 1])), + U1Charge(np.asarray([-2, 0, 3]))]) + expected = [np.asarray([-1, 0, 1]), np.asarray([-2, 0, 3])] + actual = q.get_charges(dual=False) + for n in range(len(actual)): + np.testing.assert_allclose(expected[n], actual[n]) + + expected = [np.asarray([1, 0, -1]), np.asarray([2, 0, -3])] + actual = q.get_charges(dual=True) + for n in range(len(actual)): + np.testing.assert_allclose(expected[n], actual[n]) + + +def test_fuse_charges(): + q1 = np.asarray([0, 1]) + q2 = np.asarray([2, 3, 4]) + fused_charges = fuse_charges([q1, q2], flows=[1, 1]) + assert np.all(fused_charges == np.asarray([2, 3, 4, 3, 4, 5])) + fused_charges = fuse_charges([q1, q2], flows=[1, -1]) + assert np.all(fused_charges == np.asarray([-2, -3, -4, -1, -2, -3])) + + +def test_fuse_degeneracies(): + d1 = np.asarray([0, 1]) + d2 = np.asarray([2, 3, 4]) + fused_degeneracies = fuse_degeneracies(d1, d2) + np.testing.assert_allclose(fused_degeneracies, np.kron(d1, d2)) From 37d31c4710ad487845c498628895b7c09f1e2cf2 Mon Sep 17 00:00:00 2001 From: "martin.ganahl@gmail.com" Date: Thu, 2 Jan 2020 10:52:36 -0500 Subject: [PATCH 093/212] test added --- tensornetwork/block_tensor/charge_test.py | 35 ++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/charge_test.py b/tensornetwork/block_tensor/charge_test.py index 
65a9b72be..12ee9382f 100644 --- a/tensornetwork/block_tensor/charge_test.py +++ b/tensornetwork/block_tensor/charge_test.py @@ -1,6 +1,7 @@ import numpy as np # pylint: disable=line-too-long -from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies, fuse_charge_pair, fuse_indices, unfuse, U1Charge, Charge, BaseCharge +from tensornetwork.block_tensor.charge import U1Charge, Charge, BaseCharge, U1ChargeCoerced +from tensornetwork.block_tensor.index import fuse_charges, fuse_degeneracies, fuse_charge_pair def test_U1Charge_dual(): @@ -119,3 +120,35 @@ def test_fuse_degeneracies(): d2 = np.asarray([2, 3, 4]) fused_degeneracies = fuse_degeneracies(d1, d2) np.testing.assert_allclose(fused_degeneracies, np.kron(d1, d2)) + + +def test_U1ChargeCoerced_fusion(): + D = 1000 + B = 6 + O1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + O2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + P1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + P2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + Q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + Q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + + charges_1 = [O1, O2] + charges_2 = [P1, P2] + charges_3 = [Q1, Q2] + + fused_1 = fuse_charges(charges_1, [1, 1]) + fused_2 = fuse_charges(charges_2, [1, 1]) + fused_3 = fuse_charges(charges_3, [1, 1]) + q1 = U1ChargeCoerced([O1, P1, Q1]) + q2 = U1ChargeCoerced([O2, P2, Q2]) + + target = np.random.randint(-B // 2, B // 2 + 1, 3) + q12 = q1 + q2 + + nz_1 = q12.nonzero(target) + i1 = fused_1 == target[0] + i2 = fused_2 == target[1] + i3 = fused_3 == target[2] + tmp1 = np.logical_and(np.logical_and(i1, i2), i3) + nz_2 = np.nonzero(tmp1)[0] + assert np.all(nz_1 == nz_2) From 03ce492d1489e2d88e2f916620907f7e3209a0a5 Mon Sep 17 00:00:00 2001 From: "martin.ganahl@gmail.com" Date: Thu, 2 Jan 2020 12:27:21 -0500 Subject: [PATCH 094/212] added matmul --- 
tensornetwork/block_tensor/charge.py | 118 +++++++++++++++++---------- 1 file changed, 74 insertions(+), 44 deletions(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index b15e2b984..b0237d9f5 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -76,7 +76,7 @@ def get_charges(self, dual: bool) -> np.ndarray: return self.charges -class U1ChargeCoerced: +class U1ChargeMerged: """ A simple charge class for a single U1 symmetry. """ @@ -85,26 +85,35 @@ def __init__(self, charges: List[np.ndarray], offsets: Optional[np.ndarray] = None, shifts: Optional[np.ndarray] = None) -> None: - itemsizes = [8 * c.dtype.itemsize for c in charges] - if np.sum(itemsizes) > 64: + if len(charges) > 1: + if offsets is not None: + raise ValueError("If `offsets` is passed, only a single charge array " + "can be passed. Got len(charges) = {}".format( + len(charges))) + if shifts is not None: + raise ValueError("If `shifts` is passed, only a single charge array " + "can be passed. 
Got len(charges) = {}".format( + len(charges))) + + self._itemsizes = [c.dtype.itemsize for c in charges] + if np.sum(self._itemsizes) > 8: raise TypeError("number of bits required to store all charges " "in a single int is larger than 64") - if np.sum(itemsizes) == 16: - dtype = np.int16 - if np.sum(itemsizes) > 16: - dtype = np.int32 - if np.sum(itemsizes) > 32: - dtype = np.int64 - - if shifts is None: - self.shifts = np.flip(np.append(0, np.cumsum(np.flip( - itemsizes[1::])))).astype(dtype) - else: - self.shifts = shifts - dtype_charges = [c.astype(dtype) for c in charges] - if offsets is None: - offsets = [np.min(dtype_charges[n]) for n in range(len(dtype_charges))] + if len(charges) > 1: + dtype = np.int16 + if np.sum(self._itemsizes) > 2: + dtype = np.int32 + if np.sum(self._itemsizes) > 4: + dtype = np.int64 + #multiply by eight to get number of bits + self.shifts = 8 * np.flip( + np.append(0, np.cumsum(np.flip(self._itemsizes[1::])))).astype(dtype) + dtype_charges = [c.astype(dtype) for c in charges] + offsets = [ + np.min([0, np.min(dtype_charges[n])]) + for n in range(len(dtype_charges)) + ] pos_charges = [ dtype_charges[n] - offsets[n] for n in range(len(dtype_charges)) ] @@ -119,24 +128,33 @@ def __init__(self, ], axis=0).astype(dtype) else: - if len(charges) > 1: - raise ValueError( - 'if offsets is given, only a single charge array can be passed') - self.offsets = offsets - self._charges = dtype_charges[0] + if shifts is None: + shifts = np.asarray([0]).astype(charges[0].dtype) + self.shifts = shifts + if offsets is None: + self.offsets = np.min([0, np.min(charges[0])]) + self._charges = charges[0] - self.offsets + else: + #we assume that `charges` are in this case already + #positive + self.offsets = offsets + if np.min(charges[0]) < 0: + raise ValueError("Expected all charges to be >= 0, " + "but found negative charges") + self._charges = charges[0] @property def num_symmetries(self): return len(self.shifts) - def __add__(self, other: 
"U1ChargeCoerced") -> "U1ChargeCoerced": + def __add__(self, other: "U1ChargeMerged") -> "U1ChargeMerged": """ Fuse the charges of `self` with charges of `other`, and return a new `U1Charge` object holding the result. Args: - other: A `U1ChargeCoerced` object. + other: A `U1ChargeMerged` object. Returns: - U1ChargeCoerced: The result of fusing `self` with `other`. + U1ChargeMerged: The result of fusing `self` with `other`. """ if self.num_symmetries != other.num_symmetries: raise ValueError( @@ -147,24 +165,34 @@ def __add__(self, other: "U1ChargeCoerced") -> "U1ChargeCoerced": "Cannot fuse U1-charges with different shifts {} and {}".format( self.shifts, other.shifts)) offsets = np.sum([self.offsets, other.offsets]) - fused = np.reshape(self._charges[:, None] + other.charges[None, :], - len(self._charges) * len(other.charges)) - return U1ChargeCoerced(charges=[fused], offsets=offsets, shifts=self.shifts) + fused = np.reshape(self._charges[:, None] + other._charges[None, :], + len(self._charges) * len(other._charges)) + return U1ChargeMerged(charges=[fused], offsets=offsets, shifts=self.shifts) def __repr__(self): return 'U1-charge: \n' + 'shifts: ' + self.shifts.__repr__( ) + '\n' + 'offsets: ' + self.offsets.__repr__( ) + '\n' + 'charges: ' + self._charges.__repr__() - # def __matmul__(self, other: Union["U1Charge", "Charge"]) -> "Charge": - # c1 = U1Charge(self._charges.copy()) #make a copy of the charges (np.ndarray) - # if isinstance(other, U1Charge): - # c2 = type(other).__new__( - # type(other)) #create a new charge object of type type(other) - # c2.__init__(other.charges.copy()) - # return Charge([c1, c2]) - # #`other` should be of type `Charge`. 
- # return Charge([c1] + _copy_charges(other.charges)) + def __matmul__(self, other: Union["U1ChargeMerged", "U1ChargeMerged"] + ) -> "U1ChargeMerged": + itemsize = np.sum(self._itemsizes + other._itemsizes) + if itemsize > 8: + raise TypeError("number of bits required to store all charges " + "in a single int is larger than 64") + dtype = np.int32 #need at least np.int32 to store two charges + if itemsize > 4: + dtype = np.int64 + + charges = np.left_shift( + self._charges.astype(dtype), + 8 * np.sum(other._itemsizes)) + other._charges.astype(dtype) + + offsets = np.left_shift( + self.offsets.astype(dtype), + 8 * np.sum(other._itemsizes)) + other.offsets.astype(dtype) + shifts = np.append(self.shifts + 8 * np.sum(other._itemsizes), other.shifts) + return U1ChargeMerged(charges=[charges], offsets=offsets, shifts=shifts) @property def dual_charges(self) -> np.ndarray: @@ -173,24 +201,26 @@ def dual_charges(self) -> np.ndarray: @property def charges(self) -> np.ndarray: - return self._charges + self.offsets + if self.offsets != 0: + return self._charges + self.offsets + return self._charges def get_charges(self, dual: bool) -> np.ndarray: if dual: return self.dual_charges - return self._charges + self.offsets + return self.charges def nonzero(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: if len(target_charges) != len(self.shifts): raise ValueError("len(target_charges) = {} is different " - "from len(U1ChargeCoerced.shifts) = {}".format( + "from len(U1ChargeMerged.shifts) = {}".format( len(target_charges), len(self.shifts))) - charge = np.asarray(target_charges).astype(self._charges.dtype) + _target_charges = np.asarray(target_charges).astype(self._charges.dtype) target = np.sum([ - np.left_shift(charge[n], self.shifts[n]) + np.left_shift(_target_charges[n], self.shifts[n]) for n in range(len(self.shifts)) ]) - return np.nonzero(self._charges + self.offsets == target)[0] + return np.nonzero(self.charges == target)[0] class U1Charge(BaseCharge): From 
71e67b2ebe578c859e6600e41e2f96f73e1749c4 Mon Sep 17 00:00:00 2001 From: "martin.ganahl@gmail.com" Date: Thu, 2 Jan 2020 12:34:33 -0500 Subject: [PATCH 095/212] added test for matmul --- tensornetwork/block_tensor/charge_test.py | 118 ++++++++++++++++++++-- 1 file changed, 109 insertions(+), 9 deletions(-) diff --git a/tensornetwork/block_tensor/charge_test.py b/tensornetwork/block_tensor/charge_test.py index 12ee9382f..acdf2a726 100644 --- a/tensornetwork/block_tensor/charge_test.py +++ b/tensornetwork/block_tensor/charge_test.py @@ -1,6 +1,7 @@ import numpy as np +import pytest # pylint: disable=line-too-long -from tensornetwork.block_tensor.charge import U1Charge, Charge, BaseCharge, U1ChargeCoerced +from tensornetwork.block_tensor.charge import U1Charge, Charge, BaseCharge, U1ChargeMerged from tensornetwork.block_tensor.index import fuse_charges, fuse_degeneracies, fuse_charge_pair @@ -122,15 +123,96 @@ def test_fuse_degeneracies(): np.testing.assert_allclose(fused_degeneracies, np.kron(d1, d2)) -def test_U1ChargeCoerced_fusion(): +def test_U1ChargeMerged_charges(): + D = 100 + B = 6 + charges = [ + np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + for _ in range(2) + ] + + offsets = [np.min([0, np.min(c)]) for c in charges] + pos_charges = [ + charges[n].astype(np.int32) - offsets[n].astype(np.int32) + for n in range(2) + ] + merged_charges = np.left_shift(pos_charges[0], 16) + pos_charges[1] + merged_offsets = np.left_shift(offsets[0], 16) + offsets[1] + + q1 = U1ChargeMerged(charges) + assert np.all(q1.charges == merged_charges + merged_offsets) + + +def test_U1ChargeMerged_dual(): + D = 100 + B = 6 + charges = [ + np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + for _ in range(2) + ] + + offsets = [np.min([0, np.min(c)]) for c in charges] + pos_charges = [ + charges[n].astype(np.int32) - offsets[n].astype(np.int32) + for n in range(2) + ] + merged_charges = np.left_shift(pos_charges[0], 16) + pos_charges[1] + merged_offsets = 
np.left_shift(offsets[0], 16) + offsets[1] + + q1 = U1ChargeMerged(charges) + assert np.all(q1.dual_charges == -(merged_charges + merged_offsets)) + + +def test_U1ChargeMerged_get_charges(): + D = 100 + B = 6 + charges = [ + np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + for _ in range(2) + ] + q1 = U1ChargeMerged(charges) + assert np.all(q1.get_charges(False) == q1.charges) + assert np.all(q1.get_charges(True) == q1.dual_charges) + + +def test_U1ChargeMerged_raises(): + D = 100 + B = 6 + with pytest.raises(TypeError): + q1 = U1ChargeMerged([ + np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int64) + for _ in range(2) + ]) + with pytest.raises(ValueError): + q1 = U1ChargeMerged([ + np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int64) + for _ in range(2) + ], + offsets=[-5, -6]) + with pytest.raises(ValueError): + q1 = U1ChargeMerged([ + np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int64) + for _ in range(2) + ], + shifts=[16, 0]) + with pytest.raises(ValueError): + q1 = U1ChargeMerged([ + np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int64) + for _ in range(2) + ], + offsets=[-5, -6], + shifts=[16, 0]) + + +def test_U1ChargeMerged_fusion(): D = 1000 B = 6 O1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) O2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - P1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + P1 = np.random.randint(0, B + 1, D).astype(np.int16) P2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - Q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - Q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + Q1 = np.random.randint(1, B + 1, D).astype(np.int16) + Q2 = np.random.randint(1, B + 1, D).astype(np.int16) charges_1 = [O1, O2] charges_2 = [P1, P2] @@ -139,8 +221,8 @@ def test_U1ChargeCoerced_fusion(): fused_1 = fuse_charges(charges_1, [1, 1]) fused_2 = fuse_charges(charges_2, [1, 1]) fused_3 = fuse_charges(charges_3, [1, 1]) - q1 = 
U1ChargeCoerced([O1, P1, Q1]) - q2 = U1ChargeCoerced([O2, P2, Q2]) + q1 = U1ChargeMerged([O1, P1, Q1]) + q2 = U1ChargeMerged([O2, P2, Q2]) target = np.random.randint(-B // 2, B // 2 + 1, 3) q12 = q1 + q2 @@ -149,6 +231,24 @@ def test_U1ChargeCoerced_fusion(): i1 = fused_1 == target[0] i2 = fused_2 == target[1] i3 = fused_3 == target[2] - tmp1 = np.logical_and(np.logical_and(i1, i2), i3) - nz_2 = np.nonzero(tmp1)[0] + nz_2 = np.nonzero(np.logical_and(np.logical_and(i1, i2), i3))[0] assert np.all(nz_1 == nz_2) + + +def test_U1ChargeMerged_matmul(): + D = 1000 + B = 5 + C1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + C2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + C3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + + q1 = U1ChargeMerged([C1]) + q2 = U1ChargeMerged([C2]) + q3 = U1ChargeMerged([C3]) + + Q = q1 @ q2 @ q3 + Q_ = U1ChargeMerged([C1, C2, C3]) + assert np.all(Q.charges == Q_.charges) + assert np.all(Q._charges == Q_._charges) + assert Q.offsets == Q_.offsets + assert np.all(Q.shifts == Q_.shifts) From 30dcbf855e08d59107cc7c7c47277a74c58d58f9 Mon Sep 17 00:00:00 2001 From: "martin.ganahl@gmail.com" Date: Thu, 2 Jan 2020 14:23:21 -0500 Subject: [PATCH 096/212] tests + allow np.int8 --- tensornetwork/block_tensor/charge.py | 8 +- tensornetwork/block_tensor/charge_test.py | 8 +- tensornetwork/block_tensor/index.py | 371 ++++--------------- tensornetwork/block_tensor/index_test.py | 415 ++++++++-------------- 4 files changed, 241 insertions(+), 561 deletions(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index b0237d9f5..a51fb44b4 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -101,7 +101,9 @@ def __init__(self, "in a single int is larger than 64") if len(charges) > 1: - dtype = np.int16 + dtype = np.int8 + if np.sum(self._itemsizes) > 1: + dtype = np.int16 if np.sum(self._itemsizes) > 2: dtype = np.int32 if 
np.sum(self._itemsizes) > 4: @@ -180,7 +182,9 @@ def __matmul__(self, other: Union["U1ChargeMerged", "U1ChargeMerged"] if itemsize > 8: raise TypeError("number of bits required to store all charges " "in a single int is larger than 64") - dtype = np.int32 #need at least np.int32 to store two charges + dtype = np.int16 #need at least np.int16 to store two charges + if itemsize > 2: + dtype = np.int32 if itemsize > 4: dtype = np.int64 diff --git a/tensornetwork/block_tensor/charge_test.py b/tensornetwork/block_tensor/charge_test.py index acdf2a726..680ba5205 100644 --- a/tensornetwork/block_tensor/charge_test.py +++ b/tensornetwork/block_tensor/charge_test.py @@ -207,12 +207,12 @@ def test_U1ChargeMerged_raises(): def test_U1ChargeMerged_fusion(): D = 1000 B = 6 - O1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - O2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + O1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) + O2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) P1 = np.random.randint(0, B + 1, D).astype(np.int16) P2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - Q1 = np.random.randint(1, B + 1, D).astype(np.int16) - Q2 = np.random.randint(1, B + 1, D).astype(np.int16) + Q1 = np.random.randint(1, B + 1, D).astype(np.int8) + Q2 = np.random.randint(1, B + 1, D).astype(np.int8) charges_1 = [O1, O2] charges_2 = [P1, P2] diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index da1e0da9a..10648ffc8 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -17,6 +17,7 @@ from __future__ import print_function import numpy as np from tensornetwork.network_components import Node, contract, contract_between +from tensornetwork.block_tensor.charge import BaseCharge, Charge # pylint: disable=line-too-long from tensornetwork.backends import backend_factory import copy @@ -135,330 +136,116 @@ def unfuse(fused_indices: np.ndarray, 
len_left: int, return left, right -def _copy_charges(charges): - cs = [] - for n in range(len(charges)): - c = type(charges[n]).__new__(type( - charges[n])) #create a new charge object of type type(other) - c.__init__(charges[n].charges.copy()) - cs.append(c) - return cs - - -class BaseCharge: - """ - Base class for fundamental charges (i.e. for symmetries that - are not products of smaller groups) - """ - - def __init__(self, charges: np.ndarray) -> None: - if not isinstance(charges, np.ndarray): - raise TypeError("only np.ndarray allowed for argument `charges` " - "in BaseCharge.__init__(charges)") - - self.charges = np.asarray(charges) - - def __add__(self, other: "BaseCharge") -> "BaseCharge": - raise NotImplementedError("`__add__` is not implemented for `BaseCharge`") - - def __mul__(self, number: int) -> "BaseCharge": - raise NotImplementedError("`__mul__` is not implemented for `BaseCharge`") - - def __rmul__(self, number: int) -> "BaseCharge": - raise NotImplementedError("`__rmul__` is not implemented for `BaseCharge`") - - def __matmul__(self, other: "BaseCharge") -> "Charge": - raise NotImplementedError( - "`__matmul__` is not implemented for `BaseCharge`") - - def __len__(self) -> int: - return len(self.charges) - - def __repr__(self) -> str: - return self.charges.__repr__() - - @property - def dual_charges(self) -> np.ndarray: - raise NotImplementedError( - "`dual_charges` is not implemented for `BaseCharge`") - - def get_charges(self, dual: bool) -> np.ndarray: - if dual: - return self.dual_charges - return self.charges - - -class U1ChargeCoerced: +class Index: """ - A simple charge class for a single U1 symmetry. + An index class to store indices of a symmetric tensor. + An index keeps track of all its childs by storing references + to them (i.e. it is a binary tree). 
""" def __init__(self, - charges: List[np.ndarray], - offsets: Optional[np.ndarray] = None, - shifts: Optional[np.ndarray] = None) -> None: - itemsizes = [8 * c.dtype.itemsize for c in charges] - if np.sum(itemsizes) > 64: - raise TypeError("number of bits required to store all charges " - "in a single int is larger than 64") - if np.sum(itemsizes) == 16: - dtype = np.int16 - if np.sum(itemsizes) > 16: - dtype = np.int32 - if np.sum(itemsizes) > 32: - dtype = np.int64 - - if shifts is None: - self.shifts = np.flip(np.append(0, np.cumsum(np.flip( - itemsizes[1::])))).astype(dtype) - else: - self.shifts = shifts - - dtype_charges = [c.astype(dtype) for c in charges] - if offsets is None: - offsets = [np.min(dtype_charges[n]) for n in range(len(dtype_charges))] - pos_charges = [ - dtype_charges[n] - offsets[n] for n in range(len(dtype_charges)) - ] - self.offsets = np.sum([ - np.left_shift(offsets[n], self.shifts[n]) - for n in range(len(dtype_charges)) - ], - axis=0).astype(dtype) - self._charges = np.sum([ - np.left_shift(pos_charges[n], self.shifts[n]) - for n in range(len(dtype_charges)) - ], - axis=0).astype(dtype) - else: - if len(charges) > 1: - raise ValueError( - 'if offsets is given, only a single charge array can be passed') - self.offsets = offsets - self._charges = dtype_charges[0] - - @property - def num_symmetries(self): - return len(self.shifts) - - def __add__(self, other: "U1ChargeCoerced") -> "U1ChargeCoerced": - """ - Fuse the charges of `self` with charges of `other`, and - return a new `U1Charge` object holding the result. - Args: - other: A `U1ChargeCoerced` object. - Returns: - U1ChargeCoerced: The result of fusing `self` with `other`. 
- """ - if self.num_symmetries != other.num_symmetries: - raise ValueError( - "cannot fuse charges with different number of symmetries") - - if not np.all(self.shifts == other.shifts): - raise ValueError( - "Cannot fuse U1-charges with different shifts {} and {}".format( - self.shifts, other.shifts)) - offsets = np.sum([self.offsets, other.offsets]) - fused = np.reshape(self._charges[:, None] + other.charges[None, :], - len(self._charges) * len(other.charges)) - return U1ChargeCoerced(charges=[fused], offsets=offsets, shifts=self.shifts) + charges: Union[List, np.ndarray], + flow: int, + name: Optional[Text] = None, + left_child: Optional["Index"] = None, + right_child: Optional["Index"] = None): + self._charges = np.asarray(charges) + self.flow = flow + self.left_child = left_child + self.right_child = right_child + self._name = name def __repr__(self): - return 'U1-charge: \n' + 'shifts: ' + self.shifts.__repr__( - ) + '\n' + 'offsets: ' + self.offsets.__repr__( - ) + '\n' + 'charges: ' + self._charges.__repr__() - - # def __matmul__(self, other: Union["U1Charge", "Charge"]) -> "Charge": - # c1 = U1Charge(self._charges.copy()) #make a copy of the charges (np.ndarray) - # if isinstance(other, U1Charge): - # c2 = type(other).__new__( - # type(other)) #create a new charge object of type type(other) - # c2.__init__(other.charges.copy()) - # return Charge([c1, c2]) - # #`other` should be of type `Charge`. 
- # return Charge([c1] + _copy_charges(other.charges)) + return str(self.dimension) @property - def dual_charges(self) -> np.ndarray: - #the dual of a U1 charge is its negative value - return (self._charges + self.offsets) * self._charges.dtype.type(-1) + def is_leave(self): + return (self.left_child is None) and (self.right_child is None) @property - def charges(self) -> np.ndarray: - return self._charges + self.offsets - - def get_charges(self, dual: bool) -> np.ndarray: - if dual: - return self.dual_charges - return self._charges + self.offsets - - def nonzero(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: - if len(target_charges) != len(self.shifts): - raise ValueError("len(target_charges) = {} is different " - "from len(U1ChargeCoerced.shifts) = {}".format( - len(target_charges), len(self.shifts))) - charge = np.asarray(target_charges).astype(self._charges.dtype) - target = np.sum([ - np.left_shift(charge[n], self.shifts[n]) - for n in range(len(self.shifts)) - ]) - return np.nonzero(self._charges + self.offsets == target)[0] - - -class U1Charge(BaseCharge): - """ - A simple charge class for a single U1 symmetry. - """ - - def __init__(self, charges: np.ndarray) -> None: - super().__init__(charges) + def dimension(self): + return np.prod([len(i.charges) for i in self.get_elementary_indices()]) - def __add__(self, other: "U1Charge") -> "U1Charge": + def _copy_helper(self, index: "Index", copied_index: "Index") -> None: """ - Fuse the charges of `self` with charges of `other`, and - return a new `U1Charge` object holding the result. - Args: - other: A `U1Charge` object. - Returns: - U1Charge: The result of fusing `self` with `other`. 
+ Helper function for copy """ - fused = np.reshape(self.charges[:, None] + other.charges[None, :], - len(self.charges) * len(other.charges)) - - return U1Charge(charges=fused) - - def __mul__(self, number: int) -> "U1Charge": - return U1Charge(charges=self.charges * number) - - def __rmul__(self, number: int) -> "U1Charge": - return U1Charge(charges=self.charges * number) - - def __matmul__(self, other: Union["U1Charge", "Charge"]) -> "Charge": - c1 = U1Charge(self.charges.copy()) #make a copy of the charges (np.ndarray) - if isinstance(other, U1Charge): - c2 = type(other).__new__( - type(other)) #create a new charge object of type type(other) - c2.__init__(other.charges.copy()) - return Charge([c1, c2]) - #`other` should be of type `Charge`. - return Charge([c1] + _copy_charges(other.charges)) - - @property - def dual_charges(self): - #the dual of a U1 charge is its negative value - return self.charges * self.charges.dtype.type(-1) - - -class Z2Charge(BaseCharge): - """ - A simple charge class for a single Z2 symmetry. - """ + if index.left_child != None: + left_copy = Index( + charges=copy.copy(index.left_child.charges), + flow=copy.copy(index.left_child.flow), + name=copy.copy(index.left_child.name)) - def __init__(self, charges: np.ndarray) -> None: - if charges.dtype is not np.dtype(np.bool): - raise TypeError("Z2 charges have to be boolian") - super().__init__(charges) + copied_index.left_child = left_copy + self._copy_helper(index.left_child, left_copy) + if index.right_child != None: + right_copy = Index( + charges=copy.copy(index.right_child.charges), + flow=copy.copy(index.right_child.flow), + name=copy.copy(index.right_child.name)) + copied_index.right_child = right_copy + self._copy_helper(index.right_child, right_copy) - def __add__(self, other: "U1Charge") -> "U1Charge": + def copy(self): """ - Fuse the charges of `self` with charges of `other`, and - return a new `U1Charge` object holding the result. - Args: - other: A `U1Charge` object. 
Returns: - U1Charge: The result of fusing `self` with `other`. + Index: A deep copy of `Index`. Note that all children of + `Index` are copied as well. """ - fused = np.reshape( - np.logical_xor(self.charges[:, None], other.charges[None, :]), - len(self.charges) * len(other.charges)) - - return U1Charge(charges=fused) - - def __mul__(self, number: int) -> "U1Charge": - return U1Charge(charges=self.charges * number) - - def __rmul__(self, number: int) -> "U1Charge": - return U1Charge(charges=self.charges * number) - - def __matmul__(self, other: Union["U1Charge", "Charge"]) -> "Charge": - c1 = U1Charge(self.charges.copy()) #make a copy of the charges (np.ndarray) - if isinstance(other, U1Charge): - c2 = type(other).__new__( - type(other)) #create a new charge object of type type(other) - c2.__init__(other.charges.copy()) - return Charge([c1, c2]) - #`other` should be of type `Charge`. - return Charge([c1] + _copy_charges(other.charges)) - - @property - def dual_charges(self): - #Z2 charges are self-dual - return self.charges - - -class Charge: + index_copy = Index( + charges=self._charges.copy(), flow=copy.copy(self.flow), name=self.name) - def __init__(self, charges: List[Union[np.ndarray, BaseCharge]]) -> None: - if not isinstance(charges, list): - raise TypeError("only list allowed for argument `charges` " - "in BaseCharge.__init__(charges)") - if not np.all([len(c) == len(charges[0]) for c in charges]): - raise ValueError("not all charges have the same length. " - "Got lengths = {}".format([len(c) for c in charges])) - for n in range(len(charges)): - if not isinstance(charges[n], BaseCharge): - raise TypeError( - "`Charge` can only be initialized with a list of `BaseCharge`. 
Found {} instead" - .format(type(charges[n]))) + self._copy_helper(self, index_copy) + return index_copy - self.charges = charges + def _leave_helper(self, index: "Index", leave_list: List) -> None: + if index.left_child: + self._leave_helper(index.left_child, leave_list) + if index.right_child: + self._leave_helper(index.right_child, leave_list) + if (index.left_child is None) and (index.right_child is None): + leave_list.append(index) - def __add__(self, other: "Charge") -> "Charge": + def get_elementary_indices(self) -> List: """ - Fuse `self` with `other`. - Args: - other: A `Charge` object. Returns: - Charge: The result of fusing `self` with `other`. + List: A list containing the elementary indices (the leaves) + of `Index`. """ - return Charge([c1 + c2 for c1, c2 in zip(self.charges, other.charges)]) + leave_list = [] + self._leave_helper(self, leave_list) + return leave_list - def __matmul__(self, other: Union["Charge", BaseCharge]) -> "Charge": + def __mul__(self, index: "Index") -> "Index": """ - Product of `self` with `other` (group product). - Args: - other: A `BaseCharge` or `Charge` object. - Returns: - Charge: The resulting charge of the product of `self` with `other`. - + Merge `index` and self into a single larger index. + The flow of the resulting index is set to 1. 
+ Flows of `self` and `index` are multiplied into + the charges upon fusing.n """ - if isinstance(other, BaseCharge): - c = type(other).__new__( - type(other)) #create a new charge object of type type(other) - c.__init__(other.charges.copy()) - return Charge(self.charges + [c]) - elif isinstance(other, Charge): - return Charge(_copy_charges(self.charges) + _copy_charges(other.charges)) - - raise TypeError("datatype not understood") - - def __mul__(self, number: int) -> "Charge": - return Charge(charges=[c * number for c in self.charges]) - - def __rmul__(self, number: int) -> "Charge": - return Charge(charges=[c * number for c in self.charges]) + return fuse_index_pair(self, index) - def __len__(self): - return len(self.charges[0]) + @property + def charges(self): + if self.is_leave: + return self._charges + fused_charges = fuse_charge_pair(self.left_child.charges, + self.left_child.flow, + self.right_child.charges, + self.right_child.flow) - def __repr__(self): - return self.charges.__repr__() + return fused_charges - def get_charges(self, dual: bool) -> List[np.ndarray]: - return [c.get_charges(dual) for c in self.charges] + @property + def name(self): + if self._name: + return self._name + if self.is_leave: + return self.name + return self.left_child.name + ' & ' + self.right_child.name -class Index: +class IndexNew: """ An index class to store indices of a symmetric tensor. 
An index keeps track of all its childs by storing references diff --git a/tensornetwork/block_tensor/index_test.py b/tensornetwork/block_tensor/index_test.py index 08b7f9a86..293b37bd8 100644 --- a/tensornetwork/block_tensor/index_test.py +++ b/tensornetwork/block_tensor/index_test.py @@ -1,282 +1,171 @@ import numpy as np # pylint: disable=line-too-long -from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies, fuse_charge_pair, fuse_indices, unfuse, U1Charge, Charge, BaseCharge +from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies, fuse_charge_pair, fuse_indices, unfuse -def test_U1Charge_dual(): - q = U1Charge(np.asarray([-1, 0, 1])) - assert np.all(q.dual_charges == np.asarray([1, 0, -1])) - - -def test_U1Charge_get_charges(): - q = U1Charge(np.asarray([-1, 0, 1])) - assert np.all(q.get_charges(dual=False) == np.asarray([-1, 0, 1])) - assert np.all(q.get_charges(dual=True) == np.asarray([1, 0, -1])) - - -def test_U1Charge_mul(): - q = U1Charge(np.asarray([0, 1])) - q2 = 2 * q - q3 = q * 2 - assert np.all(q2.charges == np.asarray([0, 2])) - assert np.all(q3.charges == np.asarray([0, 2])) - - -def test_U1Charge_add(): - q1 = U1Charge(np.asarray([0, 1])) - q2 = U1Charge(np.asarray([2, 3, 4])) - fused_charges = q1 + q2 - assert np.all(fused_charges.charges == np.asarray([2, 3, 4, 3, 4, 5])) +def test_index_fusion_mul(): + D = 10 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 1 + q2 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 2 + i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + i12 = i1 * i2 + assert i12.left_child is i1 + assert i12.right_child is i2 + assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) -def test_fuse_charge_pair(): - q1 = np.asarray([0, 
1]) - q2 = np.asarray([2, 3, 4]) - fused_charges = fuse_charge_pair(q1, 1, q2, 1) - assert np.all(fused_charges == np.asarray([2, 3, 4, 3, 4, 5])) - fused_charges = fuse_charge_pair(q1, 1, q2, -1) - assert np.all(fused_charges == np.asarray([-2, -3, -4, -1, -2, -3])) +def test_fuse_index_pair(): + D = 10 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 1 + q2 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 2 + i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 -def test_Charge_mul(): - q = Charge([U1Charge(np.asarray([0, 1])), U1Charge(np.asarray([-2, 3]))]) - expected = [np.asarray([0, 2]), np.asarray([-4, 6])] - q2 = 2 * q - q3 = q * 2 - for n in range(len(q.charges)): - np.testing.assert_allclose(expected[n], q2.charges[n].charges) - np.testing.assert_allclose(expected[n], q3.charges[n].charges) + i12 = fuse_index_pair(i1, i2) + assert i12.left_child is i1 + assert i12.right_child is i2 + assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) -def test_Charge_add(): - q1 = Charge([U1Charge(np.asarray([0, 1])), U1Charge(np.asarray([-2, 3]))]) - q2 = Charge([U1Charge(np.asarray([2, 3])), U1Charge(np.asarray([-1, 4]))]) - expected = [np.asarray([2, 3, 3, 4]), np.asarray([-3, 2, 2, 7])] - q12 = q1 + q2 - for n in range(len(q12.charges)): - np.testing.assert_allclose(expected[n], q12.charges[n].charges) +def test_fuse_indices(): + D = 10 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 1 + q2 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 2 + i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + i12 = fuse_indices([i1, i2]) + assert i12.left_child is i1 + assert i12.right_child is i2 + assert np.all(i12.charges 
== fuse_charge_pair(q1, 1, q2, 1)) -def test_Charge_product(): - expected = [np.asarray([0, 1]), np.asarray([2, 3])] - q1 = U1Charge(expected[0]) - q2 = U1Charge(expected[1]) - prod = q1 @ q2 - for n in range(len(prod.charges)): - np.testing.assert_allclose(prod.charges[n].charges, expected[n]) +def test_split_index(): + D = 10 B = 4 dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 1 + q2 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 2 + i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + + i12 = i1 * i2 + i1_, i2_ = split_index(i12) + assert i1 is i1_ + assert i2 is i2_ + np.testing.assert_allclose(q1, i1.charges) + np.testing.assert_allclose(q2, i2.charges) + np.testing.assert_allclose(q1, i1_.charges) + np.testing.assert_allclose(q2, i2_.charges) + assert i1_.name == 'index1' + assert i2_.name == 'index2' + assert i1_.flow == i1.flow + assert i2_.flow == i2.flow + + +def test_elementary_indices(): D = 10 - Q1 = Charge(charges=[ - U1Charge(charges=np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) - for _ in range(2) - ]) - Q2 = Charge(charges=[ - U1Charge(charges=np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) - for _ in range(2) - ]) - prod = Q1 @ Q2 - expected = Q1.charges + Q2.charges - for n in range(len(prod.charges)): - np.testing.assert_allclose(prod.charges[n].charges, expected[n].charges) - assert isinstance(prod.charges[n], BaseCharge) - - q1 = U1Charge(charges=np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) - prod = q1 @ Q2 - expected = [q1] + Q2.charges - for n in range(len(prod.charges)): - np.testing.assert_allclose(prod.charges[n].charges, expected[n].charges) - assert isinstance(prod.charges[n], BaseCharge) - - -def test_Charge_get_charges(): - q = Charge( - [U1Charge(np.asarray([-1, 0, 1])), - U1Charge(np.asarray([-2, 0, 3]))]) - expected = 
[np.asarray([-1, 0, 1]), np.asarray([-2, 0, 3])] - actual = q.get_charges(dual=False) - for n in range(len(actual)): - np.testing.assert_allclose(expected[n], actual[n]) - - expected = [np.asarray([1, 0, -1]), np.asarray([2, 0, -3])] - actual = q.get_charges(dual=True) - for n in range(len(actual)): - np.testing.assert_allclose(expected[n], actual[n]) - - -def test_fuse_charges(): - q1 = np.asarray([0, 1]) - q2 = np.asarray([2, 3, 4]) - fused_charges = fuse_charges([q1, q2], flows=[1, 1]) - assert np.all(fused_charges == np.asarray([2, 3, 4, 3, 4, 5])) - fused_charges = fuse_charges([q1, q2], flows=[1, -1]) - assert np.all(fused_charges == np.asarray([-2, -3, -4, -1, -2, -3])) - - -def test_fuse_degeneracies(): - d1 = np.asarray([0, 1]) - d2 = np.asarray([2, 3, 4]) - fused_degeneracies = fuse_degeneracies(d1, d2) - np.testing.assert_allclose(fused_degeneracies, np.kron(d1, d2)) - - -# def test_index_fusion_mul(): -# D = 10 -# B = 4 -# dtype = np.int16 -# q1 = np.random.randint(-B // 2, B // 2 + 1, -# D).astype(dtype) #quantum numbers on leg 1 -# q2 = np.random.randint(-B // 2, B // 2 + 1, -# D).astype(dtype) #quantum numbers on leg 2 -# i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 -# i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 - -# i12 = i1 * i2 -# assert i12.left_child is i1 -# assert i12.right_child is i2 -# assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) - -# def test_fuse_index_pair(): -# D = 10 -# B = 4 -# dtype = np.int16 -# q1 = np.random.randint(-B // 2, B // 2 + 1, -# D).astype(dtype) #quantum numbers on leg 1 -# q2 = np.random.randint(-B // 2, B // 2 + 1, -# D).astype(dtype) #quantum numbers on leg 2 -# i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 -# i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 - -# i12 = fuse_index_pair(i1, i2) -# assert i12.left_child is i1 -# assert i12.right_child is i2 -# assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) - -# def 
test_fuse_indices(): -# D = 10 -# B = 4 -# dtype = np.int16 -# q1 = np.random.randint(-B // 2, B // 2 + 1, -# D).astype(dtype) #quantum numbers on leg 1 -# q2 = np.random.randint(-B // 2, B // 2 + 1, -# D).astype(dtype) #quantum numbers on leg 2 -# i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 -# i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 - -# i12 = fuse_indices([i1, i2]) -# assert i12.left_child is i1 -# assert i12.right_child is i2 -# assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) - -# def test_split_index(): -# D = 10 -# B = 4 -# dtype = np.int16 -# q1 = np.random.randint(-B // 2, B // 2 + 1, -# D).astype(dtype) #quantum numbers on leg 1 -# q2 = np.random.randint(-B // 2, B // 2 + 1, -# D).astype(dtype) #quantum numbers on leg 2 -# i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 -# i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 - -# i12 = i1 * i2 -# i1_, i2_ = split_index(i12) -# assert i1 is i1_ -# assert i2 is i2_ -# np.testing.assert_allclose(q1, i1.charges) -# np.testing.assert_allclose(q2, i2.charges) -# np.testing.assert_allclose(q1, i1_.charges) -# np.testing.assert_allclose(q2, i2_.charges) -# assert i1_.name == 'index1' -# assert i2_.name == 'index2' -# assert i1_.flow == i1.flow -# assert i2_.flow == i2.flow - -# def test_elementary_indices(): -# D = 10 -# B = 4 -# dtype = np.int16 -# q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) -# q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) -# q3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) -# q4 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) -# i1 = Index(charges=q1, flow=1, name='index1') -# i2 = Index(charges=q2, flow=1, name='index2') -# i3 = Index(charges=q3, flow=1, name='index3') -# i4 = Index(charges=q4, flow=1, name='index4') - -# i12 = i1 * i2 -# i34 = i3 * i4 -# elmt12 = i12.get_elementary_indices() -# assert elmt12[0] is i1 -# assert elmt12[1] is i2 - -# i1234 = i12 * i34 
-# elmt1234 = i1234.get_elementary_indices() -# assert elmt1234[0] is i1 -# assert elmt1234[1] is i2 -# assert elmt1234[2] is i3 -# assert elmt1234[3] is i4 -# assert elmt1234[0].name == 'index1' -# assert elmt1234[1].name == 'index2' -# assert elmt1234[2].name == 'index3' -# assert elmt1234[3].name == 'index4' -# assert elmt1234[0].flow == i1.flow -# assert elmt1234[1].flow == i2.flow -# assert elmt1234[2].flow == i3.flow -# assert elmt1234[3].flow == i4.flow - -# np.testing.assert_allclose(q1, i1.charges) -# np.testing.assert_allclose(q2, i2.charges) -# np.testing.assert_allclose(q3, i3.charges) -# np.testing.assert_allclose(q4, i4.charges) - -# def test_leave(): -# D = 10 -# B = 4 -# dtype = np.int16 -# q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) -# q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) -# i1 = Index(charges=q1, flow=1, name='index1') -# i2 = Index(charges=q2, flow=1, name='index2') -# assert i1.is_leave -# assert i2.is_leave - -# i12 = i1 * i2 -# assert not i12.is_leave - -# def test_copy(): -# D = 10 -# B = 4 -# dtype = np.int16 -# q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) -# q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) -# i1 = Index(charges=q1, flow=1, name='index1') -# i2 = Index(charges=q2, flow=1, name='index2') -# i3 = Index(charges=q1, flow=-1, name='index3') -# i4 = Index(charges=q2, flow=-1, name='index4') - -# i12 = i1 * i2 -# i34 = i3 * i4 -# i1234 = i12 * i34 -# i1234_copy = i1234.copy() + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + q3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + q4 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + i1 = Index(charges=q1, flow=1, name='index1') + i2 = Index(charges=q2, flow=1, name='index2') + i3 = Index(charges=q3, flow=1, name='index3') + i4 = Index(charges=q4, flow=1, name='index4') + + i12 = i1 * i2 + i34 = 
i3 * i4 + elmt12 = i12.get_elementary_indices() + assert elmt12[0] is i1 + assert elmt12[1] is i2 + + i1234 = i12 * i34 + elmt1234 = i1234.get_elementary_indices() + assert elmt1234[0] is i1 + assert elmt1234[1] is i2 + assert elmt1234[2] is i3 + assert elmt1234[3] is i4 + assert elmt1234[0].name == 'index1' + assert elmt1234[1].name == 'index2' + assert elmt1234[2].name == 'index3' + assert elmt1234[3].name == 'index4' + assert elmt1234[0].flow == i1.flow + assert elmt1234[1].flow == i2.flow + assert elmt1234[2].flow == i3.flow + assert elmt1234[3].flow == i4.flow + + np.testing.assert_allclose(q1, i1.charges) + np.testing.assert_allclose(q2, i2.charges) + np.testing.assert_allclose(q3, i3.charges) + np.testing.assert_allclose(q4, i4.charges) + + +def test_leave(): + D = 10 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + i1 = Index(charges=q1, flow=1, name='index1') + i2 = Index(charges=q2, flow=1, name='index2') + assert i1.is_leave + assert i2.is_leave -# elmt1234 = i1234_copy.get_elementary_indices() -# assert elmt1234[0] is not i1 -# assert elmt1234[1] is not i2 -# assert elmt1234[2] is not i3 -# assert elmt1234[3] is not i4 + i12 = i1 * i2 + assert not i12.is_leave -# def test_unfuse(): -# q1 = np.random.randint(-4, 5, 10).astype(np.int16) -# q2 = np.random.randint(-4, 5, 4).astype(np.int16) -# q3 = np.random.randint(-4, 5, 4).astype(np.int16) -# q12 = fuse_charges([q1, q2], [1, 1]) -# q123 = fuse_charges([q12, q3], [1, 1]) -# nz = np.nonzero(q123 == 0)[0] -# q12_inds, q3_inds = unfuse(nz, len(q12), len(q3)) -# q1_inds, q2_inds = unfuse(q12_inds, len(q1), len(q2)) -# np.testing.assert_allclose(q1[q1_inds] + q2[q2_inds] + q3[q3_inds], -# np.zeros(len(q1_inds), dtype=np.int16)) +def test_copy(): + D = 10 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + 
i1 = Index(charges=q1, flow=1, name='index1') + i2 = Index(charges=q2, flow=1, name='index2') + i3 = Index(charges=q1, flow=-1, name='index3') + i4 = Index(charges=q2, flow=-1, name='index4') + + i12 = i1 * i2 + i34 = i3 * i4 + i1234 = i12 * i34 + i1234_copy = i1234.copy() + + elmt1234 = i1234_copy.get_elementary_indices() + assert elmt1234[0] is not i1 + assert elmt1234[1] is not i2 + assert elmt1234[2] is not i3 + assert elmt1234[3] is not i4 + + +def test_unfuse(): + q1 = np.random.randint(-4, 5, 10).astype(np.int16) + q2 = np.random.randint(-4, 5, 4).astype(np.int16) + q3 = np.random.randint(-4, 5, 4).astype(np.int16) + q12 = fuse_charges([q1, q2], [1, 1]) + q123 = fuse_charges([q12, q3], [1, 1]) + nz = np.nonzero(q123 == 0)[0] + q12_inds, q3_inds = unfuse(nz, len(q12), len(q3)) + + q1_inds, q2_inds = unfuse(q12_inds, len(q1), len(q2)) + np.testing.assert_allclose(q1[q1_inds] + q2[q2_inds] + q3[q3_inds], + np.zeros(len(q1_inds), dtype=np.int16)) From 8416b42225f1f1a6f619d63fabb8a627fa935b56 Mon Sep 17 00:00:00 2001 From: "martin.ganahl@gmail.com" Date: Thu, 2 Jan 2020 14:32:51 -0500 Subject: [PATCH 097/212] typo --- tensornetwork/block_tensor/charge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index a51fb44b4..f935d2f37 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -180,7 +180,7 @@ def __matmul__(self, other: Union["U1ChargeMerged", "U1ChargeMerged"] ) -> "U1ChargeMerged": itemsize = np.sum(self._itemsizes + other._itemsizes) if itemsize > 8: - raise TypeError("number of bits required to store all charges " + raise TypeError("Number of bits required to store all charges " "in a single int is larger than 64") dtype = np.int16 #need at least np.int16 to store two charges if itemsize > 2: From 33df9227867686b0ee4081c25af604f1c4bcee6e Mon Sep 17 00:00:00 2001 From: mganahl Date: Thu, 2 Jan 2020 14:39:39 -0500 Subject: 
[PATCH 098/212] undo typo --- tensornetwork/block_tensor/charge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index f935d2f37..a51fb44b4 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -180,7 +180,7 @@ def __matmul__(self, other: Union["U1ChargeMerged", "U1ChargeMerged"] ) -> "U1ChargeMerged": itemsize = np.sum(self._itemsizes + other._itemsizes) if itemsize > 8: - raise TypeError("Number of bits required to store all charges " + raise TypeError("number of bits required to store all charges " "in a single int is larger than 64") dtype = np.int16 #need at least np.int16 to store two charges if itemsize > 2: From 515870e3d1519107976996d12f6ae42b438e8bec Mon Sep 17 00:00:00 2001 From: mganahl Date: Thu, 2 Jan 2020 14:41:07 -0500 Subject: [PATCH 099/212] test\ --- tensornetwork/block_tensor/charge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index a51fb44b4..f935d2f37 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -180,7 +180,7 @@ def __matmul__(self, other: Union["U1ChargeMerged", "U1ChargeMerged"] ) -> "U1ChargeMerged": itemsize = np.sum(self._itemsizes + other._itemsizes) if itemsize > 8: - raise TypeError("number of bits required to store all charges " + raise TypeError("Number of bits required to store all charges " "in a single int is larger than 64") dtype = np.int16 #need at least np.int16 to store two charges if itemsize > 2: From 841e59cefbd73a00f6212bcaeb0f8b289ebdf5f3 Mon Sep 17 00:00:00 2001 From: mganahl Date: Thu, 2 Jan 2020 21:15:57 -0500 Subject: [PATCH 100/212] savety commit, starting to add multiple charges --- tensornetwork/block_tensor/charge.py | 240 +++++++++++----------- tensornetwork/block_tensor/charge_test.py | 180 ++++++++-------- 2 files changed, 204 
insertions(+), 216 deletions(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index f935d2f37..1399e94ac 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -39,31 +39,22 @@ class BaseCharge: are not products of smaller groups) """ - def __init__(self, charges: np.ndarray) -> None: - if not isinstance(charges, np.ndarray): - raise TypeError("only np.ndarray allowed for argument `charges` " - "in BaseCharge.__init__(charges)") - - self.charges = np.asarray(charges) - def __add__(self, other: "BaseCharge") -> "BaseCharge": raise NotImplementedError("`__add__` is not implemented for `BaseCharge`") - def __mul__(self, number: int) -> "BaseCharge": - raise NotImplementedError("`__mul__` is not implemented for `BaseCharge`") - - def __rmul__(self, number: int) -> "BaseCharge": - raise NotImplementedError("`__rmul__` is not implemented for `BaseCharge`") - def __matmul__(self, other: "BaseCharge") -> "Charge": raise NotImplementedError( "`__matmul__` is not implemented for `BaseCharge`") + def num_symmetries(self): + raise NotImplementedError( + "`num_symmetries` is not implemented for `BaseCharge`") + def __len__(self) -> int: - return len(self.charges) + raise NotImplementedError("`__len__` is not implemented for `BaseCharge`") def __repr__(self) -> str: - return self.charges.__repr__() + raise NotImplementedError("`__repr__` is not implemented for `BaseCharge`") @property def dual_charges(self) -> np.ndarray: @@ -76,9 +67,18 @@ def get_charges(self, dual: bool) -> np.ndarray: return self.charges -class U1ChargeMerged: +class U1Charge(BaseCharge): """ A simple charge class for a single U1 symmetry. + This class can store multiple U1 charges in a single + np.ndarray of integer dtype. 
Depending on the dtype of + the individual symmetries, this class can store: + * 8 np.int8 + * 4 np.int16 + * 2 np.int32 + * 1 np.int64 + or any suitable combination of dtypes, such that their + bite-sum remains below 64. """ def __init__(self, @@ -149,14 +149,17 @@ def __init__(self, def num_symmetries(self): return len(self.shifts) - def __add__(self, other: "U1ChargeMerged") -> "U1ChargeMerged": + def __len__(self): + return len(self._charges) + + def __add__(self, other: "U1Charge") -> "U1Charge": """ Fuse the charges of `self` with charges of `other`, and return a new `U1Charge` object holding the result. Args: - other: A `U1ChargeMerged` object. + other: A `U1Charge` object. Returns: - U1ChargeMerged: The result of fusing `self` with `other`. + U1Charge: The result of fusing `self` with `other`. """ if self.num_symmetries != other.num_symmetries: raise ValueError( @@ -169,15 +172,14 @@ def __add__(self, other: "U1ChargeMerged") -> "U1ChargeMerged": offsets = np.sum([self.offsets, other.offsets]) fused = np.reshape(self._charges[:, None] + other._charges[None, :], len(self._charges) * len(other._charges)) - return U1ChargeMerged(charges=[fused], offsets=offsets, shifts=self.shifts) + return U1Charge(charges=[fused], offsets=offsets, shifts=self.shifts) def __repr__(self): return 'U1-charge: \n' + 'shifts: ' + self.shifts.__repr__( ) + '\n' + 'offsets: ' + self.offsets.__repr__( ) + '\n' + 'charges: ' + self._charges.__repr__() - def __matmul__(self, other: Union["U1ChargeMerged", "U1ChargeMerged"] - ) -> "U1ChargeMerged": + def __matmul__(self, other: Union["U1Charge", "U1Charge"]) -> "U1Charge": itemsize = np.sum(self._itemsizes + other._itemsizes) if itemsize > 8: raise TypeError("Number of bits required to store all charges " @@ -196,12 +198,12 @@ def __matmul__(self, other: Union["U1ChargeMerged", "U1ChargeMerged"] self.offsets.astype(dtype), 8 * np.sum(other._itemsizes)) + other.offsets.astype(dtype) shifts = np.append(self.shifts + 8 * 
np.sum(other._itemsizes), other.shifts) - return U1ChargeMerged(charges=[charges], offsets=offsets, shifts=shifts) + return U1Charge(charges=[charges], offsets=offsets, shifts=shifts) @property def dual_charges(self) -> np.ndarray: #the dual of a U1 charge is its negative value - return (self._charges + self.offsets) * self._charges.dtype.type(-1) + return self.charges * self._charges.dtype.type(-1) @property def charges(self) -> np.ndarray: @@ -209,15 +211,10 @@ def charges(self) -> np.ndarray: return self._charges + self.offsets return self._charges - def get_charges(self, dual: bool) -> np.ndarray: - if dual: - return self.dual_charges - return self.charges - def nonzero(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: if len(target_charges) != len(self.shifts): raise ValueError("len(target_charges) = {} is different " - "from len(U1ChargeMerged.shifts) = {}".format( + "from len(U1Charge.shifts) = {}".format( len(target_charges), len(self.shifts))) _target_charges = np.asarray(target_charges).astype(self._charges.dtype) target = np.sum([ @@ -227,96 +224,126 @@ def nonzero(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: return np.nonzero(self.charges == target)[0] -class U1Charge(BaseCharge): +class Z2Charge(BaseCharge): """ - A simple charge class for a single U1 symmetry. + A simple charge class for Z2 symmetries. """ - def __init__(self, charges: np.ndarray) -> None: - super().__init__(charges) - - def __add__(self, other: "U1Charge") -> "U1Charge": - """ - Fuse the charges of `self` with charges of `other`, and - return a new `U1Charge` object holding the result. - Args: - other: A `U1Charge` object. - Returns: - U1Charge: The result of fusing `self` with `other`. 
- """ - fused = np.reshape(self.charges[:, None] + other.charges[None, :], - len(self.charges) * len(other.charges)) - - return U1Charge(charges=fused) - - def __mul__(self, number: int) -> "U1Charge": - return U1Charge(charges=self.charges * number) + def __init__(self, + charges: List[np.ndarray], + shifts: Optional[np.ndarray] = None) -> None: + self._itemsizes = [c.dtype.itemsize for c in charges] + if np.sum(self._itemsizes) > 8: + raise TypeError("number of bits required to store all charges " + "in a single int is larger than 64") - def __rmul__(self, number: int) -> "U1Charge": - return U1Charge(charges=self.charges * number) + if len(charges) > 1: + if shifts is not None: + raise ValueError("If `shifts` is passed, only a single charge array " + "can be passed. Got len(charges) = {}".format( + len(charges))) + if not np.all([i == 1 for i in self._itemsizes]): + # martin: This error could come back at us, but I'll leave it for now + raise ValueError("Z2 charges can be entirely stored in " + "np.int8, but found dtypes = {}".format( + [c.dtype for c in charges])) - def __matmul__(self, other: Union["U1Charge", "Charge"]) -> "Charge": - c1 = U1Charge(self.charges.copy()) #make a copy of the charges (np.ndarray) - if isinstance(other, U1Charge): - c2 = type(other).__new__( - type(other)) #create a new charge object of type type(other) - c2.__init__(other.charges.copy()) - return Charge([c1, c2]) - #`other` should be of type `Charge`. 
- return Charge([c1] + _copy_charges(other.charges)) + dtype = np.int8 + if np.sum(self._itemsizes) > 1: + dtype = np.int16 + if np.sum(self._itemsizes) > 2: + dtype = np.int32 + if np.sum(self._itemsizes) > 4: + dtype = np.int64 + #multiply by eight to get number of bits + self.shifts = 8 * np.flip( + np.append(0, np.cumsum(np.flip(self._itemsizes[1::])))).astype(dtype) + dtype_charges = [c.astype(dtype) for c in charges] + self._charges = np.sum([ + np.left_shift(dtype_charges[n], self.shifts[n]) + for n in range(len(dtype_charges)) + ], + axis=0).astype(dtype) + else: + if shifts is None: + shifts = np.asarray([0]).astype(charges[0].dtype) + self.shifts = shifts + self._charges = charges[0] @property - def dual_charges(self): - #the dual of a U1 charge is its negative value - return self.charges * self.charges.dtype.type(-1) - - -class Z2Charge(BaseCharge): - """ - A simple charge class for a single Z2 symmetry. - """ + def num_symmetries(self): + return len(self.shifts) - def __init__(self, charges: np.ndarray) -> None: - if charges.dtype is not np.dtype(np.bool): - raise TypeError("Z2 charges have to be boolian") - super().__init__(charges) + def __len__(self): + return len(self._charges) - def __add__(self, other: "U1Charge") -> "U1Charge": + def __add__(self, other: "Z2Charge") -> "Z2Charge": """ Fuse the charges of `self` with charges of `other`, and - return a new `U1Charge` object holding the result. + return a new `Z2Charge` object holding the result. Args: - other: A `U1Charge` object. + other: A `Z2Charge` object. Returns: - U1Charge: The result of fusing `self` with `other`. + Z2Charge: The result of fusing `self` with `other`. 
""" + if not np.all(self.shifts == other.shifts): + raise ValueError( + "Cannot fuse Z2-charges with different shifts {} and {}".format( + self.shifts, other.shifts)) + fused = np.reshape( - np.logical_xor(self.charges[:, None], other.charges[None, :]), + np.bitwise_xor(self.charges[:, None], other.charges[None, :]), len(self.charges) * len(other.charges)) - return U1Charge(charges=fused) + return Z2Charge(charges=[fused], shifts=self.shifts) - def __mul__(self, number: int) -> "U1Charge": - return U1Charge(charges=self.charges * number) + def __matmul__(self, other: Union["Z2Charge", "Z2Charge"]) -> "Z2Charge": + itemsize = np.sum(self._itemsizes + other._itemsizes) + if itemsize > 8: + raise TypeError("Number of bits required to store all charges " + "in a single int is larger than 64") + dtype = np.int16 #need at least np.int16 to store two charges + if itemsize > 2: + dtype = np.int32 + if itemsize > 4: + dtype = np.int64 - def __rmul__(self, number: int) -> "U1Charge": - return U1Charge(charges=self.charges * number) + charges = np.left_shift( + self.charges.astype(dtype), + 8 * np.sum(other._itemsizes)) + other.charges.astype(dtype) - def __matmul__(self, other: Union["U1Charge", "Charge"]) -> "Charge": - c1 = U1Charge(self.charges.copy()) #make a copy of the charges (np.ndarray) - if isinstance(other, U1Charge): - c2 = type(other).__new__( - type(other)) #create a new charge object of type type(other) - c2.__init__(other.charges.copy()) - return Charge([c1, c2]) - #`other` should be of type `Charge`. 
- return Charge([c1] + _copy_charges(other.charges)) + shifts = np.append(self.shifts + 8 * np.sum(other._itemsizes), other.shifts) + return Z2Charge(charges=[charges], shifts=shifts) @property def dual_charges(self): #Z2 charges are self-dual return self.charges + @property + def charges(self) -> np.ndarray: + return self._charges + + def __repr__(self): + return 'Z2-charge: \n' + 'shifts: ' + self.shifts.__repr__( + ) + '\n' + 'charges: ' + self._charges.__repr__() + + def nonzero(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: + if len(target_charges) != len(self.shifts): + raise ValueError("len(target_charges) = {} is different " + "from len(U1Charge.shifts) = {}".format( + len(target_charges), len(self.shifts))) + + if not np.all(np.isin(target_charges, np.asarray([0, 1]))): + raise ValueError("Z2-charges can only be 0 or 1, found {}".format( + np.unique(target_charges))) + _target_charges = np.asarray(target_charges).astype(self._charges.dtype) + target = np.sum([ + np.left_shift(_target_charges[n], self.shifts[n]) + for n in range(len(self.shifts)) + ]) + return np.nonzero(self.charges == target)[0] + class Charge: @@ -345,31 +372,6 @@ def __add__(self, other: "Charge") -> "Charge": """ return Charge([c1 + c2 for c1, c2 in zip(self.charges, other.charges)]) - def __matmul__(self, other: Union["Charge", BaseCharge]) -> "Charge": - """ - Product of `self` with `other` (group product). - Args: - other: A `BaseCharge` or `Charge` object. - Returns: - Charge: The resulting charge of the product of `self` with `other`. 
- - """ - if isinstance(other, BaseCharge): - c = type(other).__new__( - type(other)) #create a new charge object of type type(other) - c.__init__(other.charges.copy()) - return Charge(self.charges + [c]) - elif isinstance(other, Charge): - return Charge(_copy_charges(self.charges) + _copy_charges(other.charges)) - - raise TypeError("datatype not understood") - - def __mul__(self, number: int) -> "Charge": - return Charge(charges=[c * number for c in self.charges]) - - def __rmul__(self, number: int) -> "Charge": - return Charge(charges=[c * number for c in self.charges]) - def __len__(self): return len(self.charges[0]) diff --git a/tensornetwork/block_tensor/charge_test.py b/tensornetwork/block_tensor/charge_test.py index 680ba5205..f3aaf915b 100644 --- a/tensornetwork/block_tensor/charge_test.py +++ b/tensornetwork/block_tensor/charge_test.py @@ -1,36 +1,10 @@ import numpy as np import pytest # pylint: disable=line-too-long -from tensornetwork.block_tensor.charge import U1Charge, Charge, BaseCharge, U1ChargeMerged +from tensornetwork.block_tensor.charge import Charge, BaseCharge, U1Charge, Z2Charge from tensornetwork.block_tensor.index import fuse_charges, fuse_degeneracies, fuse_charge_pair -def test_U1Charge_dual(): - q = U1Charge(np.asarray([-1, 0, 1])) - assert np.all(q.dual_charges == np.asarray([1, 0, -1])) - - -def test_U1Charge_get_charges(): - q = U1Charge(np.asarray([-1, 0, 1])) - assert np.all(q.get_charges(dual=False) == np.asarray([-1, 0, 1])) - assert np.all(q.get_charges(dual=True) == np.asarray([1, 0, -1])) - - -def test_U1Charge_mul(): - q = U1Charge(np.asarray([0, 1])) - q2 = 2 * q - q3 = q * 2 - assert np.all(q2.charges == np.asarray([0, 2])) - assert np.all(q3.charges == np.asarray([0, 2])) - - -def test_U1Charge_add(): - q1 = U1Charge(np.asarray([0, 1])) - q2 = U1Charge(np.asarray([2, 3, 4])) - fused_charges = q1 + q2 - assert np.all(fused_charges.charges == np.asarray([2, 3, 4, 3, 4, 5])) - - def test_fuse_charge_pair(): q1 = np.asarray([0, 
1]) q2 = np.asarray([2, 3, 4]) @@ -40,62 +14,19 @@ def test_fuse_charge_pair(): assert np.all(fused_charges == np.asarray([-2, -3, -4, -1, -2, -3])) -def test_Charge_mul(): - q = Charge([U1Charge(np.asarray([0, 1])), U1Charge(np.asarray([-2, 3]))]) - expected = [np.asarray([0, 2]), np.asarray([-4, 6])] - q2 = 2 * q - q3 = q * 2 - for n in range(len(q.charges)): - np.testing.assert_allclose(expected[n], q2.charges[n].charges) - np.testing.assert_allclose(expected[n], q3.charges[n].charges) - - def test_Charge_add(): - q1 = Charge([U1Charge(np.asarray([0, 1])), U1Charge(np.asarray([-2, 3]))]) - q2 = Charge([U1Charge(np.asarray([2, 3])), U1Charge(np.asarray([-1, 4]))]) + q1 = Charge([U1Charge([np.asarray([0, 1])]), U1Charge([np.asarray([-2, 3])])]) + q2 = Charge([U1Charge([np.asarray([2, 3])]), U1Charge([np.asarray([-1, 4])])]) expected = [np.asarray([2, 3, 3, 4]), np.asarray([-3, 2, 2, 7])] q12 = q1 + q2 for n in range(len(q12.charges)): np.testing.assert_allclose(expected[n], q12.charges[n].charges) -def test_Charge_product(): - expected = [np.asarray([0, 1]), np.asarray([2, 3])] - q1 = U1Charge(expected[0]) - q2 = U1Charge(expected[1]) - prod = q1 @ q2 - for n in range(len(prod.charges)): - np.testing.assert_allclose(prod.charges[n].charges, expected[n]) - - B = 4 - dtype = np.int16 - D = 10 - Q1 = Charge(charges=[ - U1Charge(charges=np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) - for _ in range(2) - ]) - Q2 = Charge(charges=[ - U1Charge(charges=np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) - for _ in range(2) - ]) - prod = Q1 @ Q2 - expected = Q1.charges + Q2.charges - for n in range(len(prod.charges)): - np.testing.assert_allclose(prod.charges[n].charges, expected[n].charges) - assert isinstance(prod.charges[n], BaseCharge) - - q1 = U1Charge(charges=np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) - prod = q1 @ Q2 - expected = [q1] + Q2.charges - for n in range(len(prod.charges)): - np.testing.assert_allclose(prod.charges[n].charges, 
expected[n].charges) - assert isinstance(prod.charges[n], BaseCharge) - - def test_Charge_get_charges(): q = Charge( - [U1Charge(np.asarray([-1, 0, 1])), - U1Charge(np.asarray([-2, 0, 3]))]) + [U1Charge([np.asarray([-1, 0, 1])]), + U1Charge([np.asarray([-2, 0, 3])])]) expected = [np.asarray([-1, 0, 1]), np.asarray([-2, 0, 3])] actual = q.get_charges(dual=False) for n in range(len(actual)): @@ -123,7 +54,7 @@ def test_fuse_degeneracies(): np.testing.assert_allclose(fused_degeneracies, np.kron(d1, d2)) -def test_U1ChargeMerged_charges(): +def test_U1Charge_charges(): D = 100 B = 6 charges = [ @@ -139,11 +70,11 @@ def test_U1ChargeMerged_charges(): merged_charges = np.left_shift(pos_charges[0], 16) + pos_charges[1] merged_offsets = np.left_shift(offsets[0], 16) + offsets[1] - q1 = U1ChargeMerged(charges) + q1 = U1Charge(charges) assert np.all(q1.charges == merged_charges + merged_offsets) -def test_U1ChargeMerged_dual(): +def test_U1Charge_dual(): D = 100 B = 6 charges = [ @@ -159,52 +90,52 @@ def test_U1ChargeMerged_dual(): merged_charges = np.left_shift(pos_charges[0], 16) + pos_charges[1] merged_offsets = np.left_shift(offsets[0], 16) + offsets[1] - q1 = U1ChargeMerged(charges) + q1 = U1Charge(charges) assert np.all(q1.dual_charges == -(merged_charges + merged_offsets)) -def test_U1ChargeMerged_get_charges(): +def test_U1Charge_get_charges(): D = 100 B = 6 charges = [ np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) for _ in range(2) ] - q1 = U1ChargeMerged(charges) + q1 = U1Charge(charges) assert np.all(q1.get_charges(False) == q1.charges) assert np.all(q1.get_charges(True) == q1.dual_charges) -def test_U1ChargeMerged_raises(): +def test_U1Charge_raises(): D = 100 B = 6 with pytest.raises(TypeError): - q1 = U1ChargeMerged([ + q1 = U1Charge([ np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int64) for _ in range(2) ]) with pytest.raises(ValueError): - q1 = U1ChargeMerged([ + q1 = U1Charge([ np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int64) for 
_ in range(2) ], - offsets=[-5, -6]) + offsets=[-5, -6]) with pytest.raises(ValueError): - q1 = U1ChargeMerged([ + q1 = U1Charge([ np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int64) for _ in range(2) ], - shifts=[16, 0]) + shifts=[16, 0]) with pytest.raises(ValueError): - q1 = U1ChargeMerged([ + q1 = U1Charge([ np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int64) for _ in range(2) ], - offsets=[-5, -6], - shifts=[16, 0]) + offsets=[-5, -6], + shifts=[16, 0]) -def test_U1ChargeMerged_fusion(): +def test_U1Charge_fusion(): D = 1000 B = 6 O1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) @@ -221,8 +152,8 @@ def test_U1ChargeMerged_fusion(): fused_1 = fuse_charges(charges_1, [1, 1]) fused_2 = fuse_charges(charges_2, [1, 1]) fused_3 = fuse_charges(charges_3, [1, 1]) - q1 = U1ChargeMerged([O1, P1, Q1]) - q2 = U1ChargeMerged([O2, P2, Q2]) + q1 = U1Charge([O1, P1, Q1]) + q2 = U1Charge([O2, P2, Q2]) target = np.random.randint(-B // 2, B // 2 + 1, 3) q12 = q1 + q2 @@ -235,20 +166,75 @@ def test_U1ChargeMerged_fusion(): assert np.all(nz_1 == nz_2) -def test_U1ChargeMerged_matmul(): +def test_U1Charge_matmul(): D = 1000 B = 5 C1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) C2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) C3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - q1 = U1ChargeMerged([C1]) - q2 = U1ChargeMerged([C2]) - q3 = U1ChargeMerged([C3]) + q1 = U1Charge([C1]) + q2 = U1Charge([C2]) + q3 = U1Charge([C3]) Q = q1 @ q2 @ q3 - Q_ = U1ChargeMerged([C1, C2, C3]) + Q_ = U1Charge([C1, C2, C3]) assert np.all(Q.charges == Q_.charges) assert np.all(Q._charges == Q_._charges) assert Q.offsets == Q_.offsets assert np.all(Q.shifts == Q_.shifts) + + +def test_Z2Charge_fusion(): + D = 1000 + B = 6 + O1 = np.random.randint(0, 2, D).astype(np.int8) + O2 = np.random.randint(0, 2, D).astype(np.int8) + P1 = np.random.randint(0, 2, D).astype(np.int8) + P2 = np.random.randint(0, 2, D).astype(np.int8) + Q1 = 
np.random.randint(0, 2, D).astype(np.int8) + Q2 = np.random.randint(0, 2, D).astype(np.int8) + + charges_1 = [O1, O2] + charges_2 = [P1, P2] + charges_3 = [Q1, Q2] + + def fuse_z2_charges(c1, c2): + return np.reshape( + np.bitwise_xor(c1[:, None], c2[None, :]), + len(c1) * len(c2)) + + fused_1 = fuse_z2_charges(*charges_1) + fused_2 = fuse_z2_charges(*charges_2) + fused_3 = fuse_z2_charges(*charges_3) + + q1 = Z2Charge([O1, P1, Q1]) + q2 = Z2Charge([O2, P2, Q2]) + + target = np.random.randint(0, 2, 3) + q12 = q1 + q2 + + nz_1 = q12.nonzero(target) + i1 = fused_1 == target[0] + i2 = fused_2 == target[1] + i3 = fused_3 == target[2] + nz_2 = np.nonzero(np.logical_and(np.logical_and(i1, i2), i3))[0] + assert np.all(nz_1 == nz_2) + + +def test_U1Charge_matmul(): + D = 1000 + B = 5 + C1 = np.random.randint(0, 2, D).astype(np.int8) + C2 = np.random.randint(0, 2, D).astype(np.int8) + C3 = np.random.randint(0, 2, D).astype(np.int8) + + q1 = Z2Charge([C1]) + q2 = Z2Charge([C2]) + q3 = Z2Charge([C3]) + + Q = q1 @ q2 @ q3 + Q_ = Z2Charge([C1, C2, C3]) + assert np.all(Q.charges == Q_.charges) + assert np.all(Q._charges == Q_._charges) + assert np.all(Q.shifts == Q_.shifts) From 0e74d4947f57a0a186fa4062b4cb72b7b1fb6372 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 3 Jan 2020 15:10:24 -0500 Subject: [PATCH 101/212] Charge -> ChargeCollection --- tensornetwork/block_tensor/index.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index 10648ffc8..5dd641e5d 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -17,7 +17,7 @@ from __future__ import print_function import numpy as np from tensornetwork.network_components import Node, contract, contract_between -from tensornetwork.block_tensor.charge import BaseCharge, Charge +from tensornetwork.block_tensor.charge import BaseCharge, ChargeCollection # pylint: disable=line-too-long 
from tensornetwork.backends import backend_factory import copy @@ -63,7 +63,7 @@ def fuse_charges(charges: List[Union[List, np.ndarray]], """ if len(charges) == 1: #nothing to do - return charges[0] + return flows[0] * charges[0] fused_charges = charges[0] * flows[0] for n in range(1, len(charges)): fused_charges = fuse_charge_pair( @@ -229,10 +229,9 @@ def __mul__(self, index: "Index") -> "Index": def charges(self): if self.is_leave: return self._charges - fused_charges = fuse_charge_pair(self.left_child.charges, - self.left_child.flow, - self.right_child.charges, - self.right_child.flow) + fused_charges = fuse_charge_pair( + self.left_child.charges, self.left_child.flow, self.right_child.charges, + self.right_child.flow) return fused_charges @@ -253,14 +252,14 @@ class IndexNew: """ def __init__(self, - charges: Union[Charge, BaseCharge], + charges: Union[ChargeCollection, BaseCharge], flow: int, name: Optional[Text] = None, left_child: Optional["Index"] = None, right_child: Optional["Index"] = None): if isinstance(charges, BaseCharge): - self._charges = Charge([charges]) - elif isinstance(charges, Charge): + self._charges = ChargeCollection([charges]) + elif isinstance(charges, ChargeCollection): self._charges = charges self.flow = flow self.left_child = left_child From 0c928eeec5ecd09aa946e59c9c04e5ef027d86b8 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 3 Jan 2020 15:11:20 -0500 Subject: [PATCH 102/212] removed offsets from U1Charge (unnecessary), Charge -> ChargeCollection --- tensornetwork/block_tensor/charge.py | 369 ++++++++++++++++----------- 1 file changed, 225 insertions(+), 144 deletions(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 1399e94ac..15872b41b 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -39,19 +39,59 @@ class BaseCharge: are not products of smaller groups) """ + def __init__(self, + charges: List[np.ndarray], + shifts: 
Optional[np.ndarray] = None) -> None: + self._itemsizes = [c.dtype.itemsize for c in charges] + if np.sum(self._itemsizes) > 8: + raise TypeError("number of bits required to store all charges " + "in a single int is larger than 64") + + if len(charges) > 1: + if shifts is not None: + raise ValueError("If `shifts` is passed, only a single charge array " + "can be passed. Got len(charges) = {}".format( + len(charges))) + + if len(charges) > 1: + dtype = np.int8 + if np.sum(self._itemsizes) > 1: + dtype = np.int16 + if np.sum(self._itemsizes) > 2: + dtype = np.int32 + if np.sum(self._itemsizes) > 4: + dtype = np.int64 + #multiply by eight to get number of bits + self.shifts = 8 * np.flip( + np.append(0, np.cumsum(np.flip(self._itemsizes[1::])))).astype(dtype) + dtype_charges = [c.astype(dtype) for c in charges] + self.charges = np.sum([ + np.left_shift(dtype_charges[n], self.shifts[n]) + for n in range(len(dtype_charges)) + ], + axis=0).astype(dtype) + else: + if shifts is None: + shifts = np.asarray([0]).astype(charges[0].dtype) + self.shifts = shifts + self.charges = charges[0] + def __add__(self, other: "BaseCharge") -> "BaseCharge": raise NotImplementedError("`__add__` is not implemented for `BaseCharge`") + def __sub__(self, other: "BaseCharge") -> "BaseCharge": + raise NotImplementedError("`__sub__` is not implemented for `BaseCharge`") + def __matmul__(self, other: "BaseCharge") -> "Charge": raise NotImplementedError( "`__matmul__` is not implemented for `BaseCharge`") + @property def num_symmetries(self): - raise NotImplementedError( - "`num_symmetries` is not implemented for `BaseCharge`") + return len(self.shifts) def __len__(self) -> int: - raise NotImplementedError("`__len__` is not implemented for `BaseCharge`") + return len(self.charges) def __repr__(self) -> str: raise NotImplementedError("`__repr__` is not implemented for `BaseCharge`") @@ -61,10 +101,15 @@ def dual_charges(self) -> np.ndarray: raise NotImplementedError( "`dual_charges` is not 
implemented for `BaseCharge`") - def get_charges(self, dual: bool) -> np.ndarray: - if dual: - return self.dual_charges - return self.charges + def __mul__(self, number: Union[bool, int]) -> "BaseCharge": + raise NotImplementedError("`__mul__` is not implemented for `BaseCharge`") + + def __rmul__(self, number: Union[bool, int]) -> "BaseCharge": + raise NotImplementedError("`__rmul__` is not implemented for `BaseCharge`") + + @property + def dtype(self): + return self.charges.dtype class U1Charge(BaseCharge): @@ -83,74 +128,8 @@ class U1Charge(BaseCharge): def __init__(self, charges: List[np.ndarray], - offsets: Optional[np.ndarray] = None, shifts: Optional[np.ndarray] = None) -> None: - if len(charges) > 1: - if offsets is not None: - raise ValueError("If `offsets` is passed, only a single charge array " - "can be passed. Got len(charges) = {}".format( - len(charges))) - if shifts is not None: - raise ValueError("If `shifts` is passed, only a single charge array " - "can be passed. Got len(charges) = {}".format( - len(charges))) - - self._itemsizes = [c.dtype.itemsize for c in charges] - if np.sum(self._itemsizes) > 8: - raise TypeError("number of bits required to store all charges " - "in a single int is larger than 64") - - if len(charges) > 1: - dtype = np.int8 - if np.sum(self._itemsizes) > 1: - dtype = np.int16 - if np.sum(self._itemsizes) > 2: - dtype = np.int32 - if np.sum(self._itemsizes) > 4: - dtype = np.int64 - #multiply by eight to get number of bits - self.shifts = 8 * np.flip( - np.append(0, np.cumsum(np.flip(self._itemsizes[1::])))).astype(dtype) - dtype_charges = [c.astype(dtype) for c in charges] - offsets = [ - np.min([0, np.min(dtype_charges[n])]) - for n in range(len(dtype_charges)) - ] - pos_charges = [ - dtype_charges[n] - offsets[n] for n in range(len(dtype_charges)) - ] - self.offsets = np.sum([ - np.left_shift(offsets[n], self.shifts[n]) - for n in range(len(dtype_charges)) - ], - axis=0).astype(dtype) - self._charges = np.sum([ - 
np.left_shift(pos_charges[n], self.shifts[n]) - for n in range(len(dtype_charges)) - ], - axis=0).astype(dtype) - else: - if shifts is None: - shifts = np.asarray([0]).astype(charges[0].dtype) - self.shifts = shifts - if offsets is None: - self.offsets = np.min([0, np.min(charges[0])]) - self._charges = charges[0] - self.offsets - else: - #we assume that `charges` are in this case already - #positive - self.offsets = offsets - if np.min(charges[0]) < 0: - raise ValueError("Expected all charges to be >= 0, " - "but found negative charges") - self._charges = charges[0] - - @property - def num_symmetries(self): - return len(self.shifts) - - def __len__(self): - return len(self._charges) + super().__init__(charges=charges, shifts=shifts) def __add__(self, other: "U1Charge") -> "U1Charge": """ @@ -169,15 +148,43 @@ def __add__(self, other: "U1Charge") -> "U1Charge": raise ValueError( "Cannot fuse U1-charges with different shifts {} and {}".format( self.shifts, other.shifts)) - offsets = np.sum([self.offsets, other.offsets]) - fused = np.reshape(self._charges[:, None] + other._charges[None, :], - len(self._charges) * len(other._charges)) - return U1Charge(charges=[fused], offsets=offsets, shifts=self.shifts) + if not isinstance(other, U1Charge): + raise TypeError( + "can only add objects of identical types, found {} and {} instead" + .format(type(self), type(other))) + fused = np.reshape(self.charges[:, None] + other.charges[None, :], + len(self.charges) * len(other.charges)) + return U1Charge(charges=[fused], shifts=self.shifts) + + def __sub__(self, other: "U1Charge") -> "U1Charge": + """ + Subtract the charges of `other` from charges of `self` and + return a new `U1Charge` object holding the result. + Args: + other: A `U1Charge` object. + Returns: + U1Charge: The result of fusing `self` with `other`. 
+ """ + if self.num_symmetries != other.num_symmetries: + raise ValueError( + "cannot fuse charges with different number of symmetries") + + if not np.all(self.shifts == other.shifts): + raise ValueError( + "Cannot fuse U1-charges with different shifts {} and {}".format( + self.shifts, other.shifts)) + if not isinstance(other, U1Charge): + raise TypeError( + "can only subtract objects of identical types, found {} and {} instead" + .format(type(self), type(other))) + + fused = np.reshape(self.charges[:, None] - other.charges[None, :], + len(self.charges) * len(other.charges)) + return U1Charge(charges=[fused], shifts=self.shifts) def __repr__(self): return 'U1-charge: \n' + 'shifts: ' + self.shifts.__repr__( - ) + '\n' + 'offsets: ' + self.offsets.__repr__( - ) + '\n' + 'charges: ' + self._charges.__repr__() + ) + '\n' + 'charges: ' + self.charges.__repr__() + '\n' def __matmul__(self, other: Union["U1Charge", "U1Charge"]) -> "U1Charge": itemsize = np.sum(self._itemsizes + other._itemsizes) @@ -191,32 +198,45 @@ def __matmul__(self, other: Union["U1Charge", "U1Charge"]) -> "U1Charge": dtype = np.int64 charges = np.left_shift( - self._charges.astype(dtype), - 8 * np.sum(other._itemsizes)) + other._charges.astype(dtype) + self.charges.astype(dtype), + 8 * np.sum(other._itemsizes)) + other.charges.astype(dtype) - offsets = np.left_shift( - self.offsets.astype(dtype), - 8 * np.sum(other._itemsizes)) + other.offsets.astype(dtype) shifts = np.append(self.shifts + 8 * np.sum(other._itemsizes), other.shifts) - return U1Charge(charges=[charges], offsets=offsets, shifts=shifts) + return U1Charge(charges=[charges], shifts=shifts) + + def __mul__(self, number: Union[bool, int]) -> "U1Charge": + if number not in (True, False, 0, 1, -1): + raise ValueError( + "can only multiply by `True`, `False`, `1` or `0`, found {}".format( + number)) + #outflowing charges + if number in (0, False, -1): + charges = self.dtype.type(-1) * self.charges + shifts = self.shifts + return 
U1Charge(charges=[charges], shifts=shifts) + #inflowing charges + if number in (1, True): + #Note: the returned U1Charge shares its data with self + return U1Charge(charges=[self.charges], shifts=self.shifts) + + def __rmul__(self, number: Union[bool, int]) -> "U1Charge": + if number not in (True, False, 0, 1, -1): + raise ValueError( + "can only multiply by `True`, `False`, `1` or `0`, found {}".format( + number)) + return self.__mul__(number) @property def dual_charges(self) -> np.ndarray: #the dual of a U1 charge is its negative value - return self.charges * self._charges.dtype.type(-1) - - @property - def charges(self) -> np.ndarray: - if self.offsets != 0: - return self._charges + self.offsets - return self._charges + return self.charges * self.dtype.type(-1) def nonzero(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: if len(target_charges) != len(self.shifts): raise ValueError("len(target_charges) = {} is different " "from len(U1Charge.shifts) = {}".format( len(target_charges), len(self.shifts))) - _target_charges = np.asarray(target_charges).astype(self._charges.dtype) + _target_charges = np.asarray(target_charges).astype(self.charges.dtype) target = np.sum([ np.left_shift(_target_charges[n], self.shifts[n]) for n in range(len(self.shifts)) @@ -232,50 +252,15 @@ class Z2Charge(BaseCharge): def __init__(self, charges: List[np.ndarray], shifts: Optional[np.ndarray] = None) -> None: - self._itemsizes = [c.dtype.itemsize for c in charges] - if np.sum(self._itemsizes) > 8: - raise TypeError("number of bits required to store all charges " - "in a single int is larger than 64") - if len(charges) > 1: - if shifts is not None: - raise ValueError("If `shifts` is passed, only a single charge array " - "can be passed. 
Got len(charges) = {}".format( - len(charges))) - if not np.all([i == 1 for i in self._itemsizes]): + itemsizes = [c.dtype.itemsize for c in charges] + if not np.all([i == 1 for i in itemsizes]): # martin: This error could come back at us, but I'll leave it for now raise ValueError("Z2 charges can be entirely stored in " "np.int8, but found dtypes = {}".format( [c.dtype for c in charges])) - dtype = np.int8 - if np.sum(self._itemsizes) > 1: - dtype = np.int16 - if np.sum(self._itemsizes) > 2: - dtype = np.int32 - if np.sum(self._itemsizes) > 4: - dtype = np.int64 - #multiply by eight to get number of bits - self.shifts = 8 * np.flip( - np.append(0, np.cumsum(np.flip(self._itemsizes[1::])))).astype(dtype) - dtype_charges = [c.astype(dtype) for c in charges] - self._charges = np.sum([ - np.left_shift(dtype_charges[n], self.shifts[n]) - for n in range(len(dtype_charges)) - ], - axis=0).astype(dtype) - else: - if shifts is None: - shifts = np.asarray([0]).astype(charges[0].dtype) - self.shifts = shifts - self._charges = charges[0] - - @property - def num_symmetries(self): - return len(self.shifts) - - def __len__(self): - return len(self._charges) + super().__init__(charges, shifts) def __add__(self, other: "Z2Charge") -> "Z2Charge": """ @@ -290,6 +275,10 @@ def __add__(self, other: "Z2Charge") -> "Z2Charge": raise ValueError( "Cannot fuse Z2-charges with different shifts {} and {}".format( self.shifts, other.shifts)) + if not isinstance(other, Z2Charge): + raise TypeError( + "can only add objects of identical types, found {} and {} instead" + .format(type(self), type(other))) fused = np.reshape( np.bitwise_xor(self.charges[:, None], other.charges[None, :]), @@ -297,6 +286,27 @@ def __add__(self, other: "Z2Charge") -> "Z2Charge": return Z2Charge(charges=[fused], shifts=self.shifts) + def __sub__(self, other: "Z2Charge") -> "Z2Charge": + """ + Subtract charges of `other` from charges of `self` and + return a new `Z2Charge` object holding the result. 
+ Note that ofr Z2 charges, subtraction and addition are identical + Args: + other: A `Z2Charge` object. + Returns: + Z2Charge: The result of fusing `self` with `other`. + """ + if not np.all(self.shifts == other.shifts): + raise ValueError( + "Cannot fuse Z2-charges with different shifts {} and {}".format( + self.shifts, other.shifts)) + if not isinstance(other, Z2Charge): + raise TypeError( + "can only subtract objects of identical types, found {} and {} instead" + .format(type(self), type(other))) + + return self.__add__(other) + def __matmul__(self, other: Union["Z2Charge", "Z2Charge"]) -> "Z2Charge": itemsize = np.sum(self._itemsizes + other._itemsizes) if itemsize > 8: @@ -315,18 +325,30 @@ def __matmul__(self, other: Union["Z2Charge", "Z2Charge"]) -> "Z2Charge": shifts = np.append(self.shifts + 8 * np.sum(other._itemsizes), other.shifts) return Z2Charge(charges=[charges], shifts=shifts) + def __mul__(self, number: Union[bool, int]) -> "Z2Charge": + if number not in (True, False, 0, 1, -1): + raise ValueError( + "can only multiply by `True`, `False`, `1` or `0`, found {}".format( + number)) + #Z2 is self-dual + return U1Charge(charges=[self.charges], shifts=self.shifts) + + def __rmul__(self, number: Union[bool, int]) -> "Z2Charge": + if number not in (True, False, 0, 1, -1): + raise ValueError( + "can only multiply by `True`, `False`, `1` or `0`, found {}".format( + number)) + + return self.__mul__(number) + @property def dual_charges(self): #Z2 charges are self-dual return self.charges - @property - def charges(self) -> np.ndarray: - return self._charges - def __repr__(self): return 'Z2-charge: \n' + 'shifts: ' + self.shifts.__repr__( - ) + '\n' + 'charges: ' + self._charges.__repr__() + ) + '\n' + 'charges: ' + self.charges.__repr__() + '\n' def nonzero(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: if len(target_charges) != len(self.shifts): @@ -337,7 +359,7 @@ def nonzero(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: if not 
np.all(np.isin(target_charges, np.asarray([0, 1]))): raise ValueError("Z2-charges can only be 0 or 1, found {}".format( np.unique(target_charges))) - _target_charges = np.asarray(target_charges).astype(self._charges.dtype) + _target_charges = np.asarray(target_charges).astype(self.charges.dtype) target = np.sum([ np.left_shift(_target_charges[n], self.shifts[n]) for n in range(len(self.shifts)) @@ -345,9 +367,12 @@ def nonzero(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: return np.nonzero(self.charges == target)[0] -class Charge: +class ChargeCollection: + """ - def __init__(self, charges: List[Union[np.ndarray, BaseCharge]]) -> None: + """ + + def __init__(self, charges: List[BaseCharge]) -> None: if not isinstance(charges, list): raise TypeError("only list allowed for argument `charges` " "in BaseCharge.__init__(charges)") @@ -358,10 +383,16 @@ def __init__(self, charges: List[Union[np.ndarray, BaseCharge]]) -> None: if not isinstance(charges[n], BaseCharge): raise TypeError( "`Charge` can only be initialized with a list of `BaseCharge`. Found {} instead" - .format(type(charges[n]))) + .format([type(charges[n]) for n in range(len(charges))])) self.charges = charges + def __getitem__(self, n: int) -> BaseCharge: + return self.charges[n] + + def __setitem__(self, n: int, val: BaseCharge) -> None: + self.charges[n] = val + def __add__(self, other: "Charge") -> "Charge": """ Fuse `self` with `other`. @@ -370,7 +401,19 @@ def __add__(self, other: "Charge") -> "Charge": Returns: Charge: The result of fusing `self` with `other`. """ - return Charge([c1 + c2 for c1, c2 in zip(self.charges, other.charges)]) + return ChargeCollection( + [c1 + c2 for c1, c2 in zip(self.charges, other.charges)]) + + def __sub__(self, other: "Charge") -> "Charge": + """ + Subtract `other` from `self`. + Args: + other: A `Charge` object. + Returns: + Charge: The result of fusing `self` with `other`. 
+ """ + return ChargeCollection( + [c1 - c2 for c1, c2 in zip(self.charges, other.charges)]) def __len__(self): return len(self.charges[0]) @@ -378,5 +421,43 @@ def __len__(self): def __repr__(self): return self.charges.__repr__() - def get_charges(self, dual: bool) -> List[np.ndarray]: - return [c.get_charges(dual) for c in self.charges] + def __mul__(self, number: Union[bool, int]) -> "Charge": + if number not in (True, False, 0, 1, -1): + raise ValueError( + "can only multiply by `True`, `False`, `1` or `0`, found {}".format( + number)) + return ChargeCollection(charges=[c * number for c in self.charges]) + + def __rmul__(self, number: Union[bool, int]) -> "Charge": + if number not in (True, False, 0, 1, -1): + raise ValueError( + "can only multiply by `True`, `False`, `1` or `0`, found {}".format( + number)) + + return self.__mul__(number) + + @property + def num_symmetries(self): + return np.sum([c.num_symmetries for c in self.charges]) + + +def fuse_charges(charges: List[ChargeCollection], + flows: List[Union[bool, int]]) -> ChargeCollection: + """ + Fuse all `charges` by simple addition (valid + for U(1) charges). Charges are fused from "right to left", + in accordance with row-major order (see `fuse_charges_pair`). + + Args: + chargs: A list of charges to be fused. + flows: A list of flows, one for each element in `charges`. + Returns: + np.ndarray: The result of fusing `charges`. 
+ """ + if len(charges) == 1: + #nothing to do + return flows[0] * charges[0] + fused_charges = charges[0] * flows[0] + for n in range(1, len(charges)): + fused_charges = fused_charges + flows[n] * charges[n] + return fused_charges From fe1bd47dd4dc733211d1b1fc89bf0024d17f6dd1 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 3 Jan 2020 15:11:27 -0500 Subject: [PATCH 103/212] new tests --- tensornetwork/block_tensor/charge_test.py | 494 ++++++++++++++++------ 1 file changed, 369 insertions(+), 125 deletions(-) diff --git a/tensornetwork/block_tensor/charge_test.py b/tensornetwork/block_tensor/charge_test.py index f3aaf915b..3c0d6d0fc 100644 --- a/tensornetwork/block_tensor/charge_test.py +++ b/tensornetwork/block_tensor/charge_test.py @@ -1,7 +1,7 @@ import numpy as np import pytest # pylint: disable=line-too-long -from tensornetwork.block_tensor.charge import Charge, BaseCharge, U1Charge, Z2Charge +from tensornetwork.block_tensor.charge import ChargeCollection, BaseCharge, U1Charge, Z2Charge from tensornetwork.block_tensor.index import fuse_charges, fuse_degeneracies, fuse_charge_pair @@ -14,30 +14,6 @@ def test_fuse_charge_pair(): assert np.all(fused_charges == np.asarray([-2, -3, -4, -1, -2, -3])) -def test_Charge_add(): - q1 = Charge([U1Charge([np.asarray([0, 1])]), U1Charge([np.asarray([-2, 3])])]) - q2 = Charge([U1Charge([np.asarray([2, 3])]), U1Charge([np.asarray([-1, 4])])]) - expected = [np.asarray([2, 3, 3, 4]), np.asarray([-3, 2, 2, 7])] - q12 = q1 + q2 - for n in range(len(q12.charges)): - np.testing.assert_allclose(expected[n], q12.charges[n].charges) - - -def test_Charge_get_charges(): - q = Charge( - [U1Charge([np.asarray([-1, 0, 1])]), - U1Charge([np.asarray([-2, 0, 3])])]) - expected = [np.asarray([-1, 0, 1]), np.asarray([-2, 0, 3])] - actual = q.get_charges(dual=False) - for n in range(len(actual)): - np.testing.assert_allclose(expected[n], actual[n]) - - expected = [np.asarray([1, 0, -1]), np.asarray([2, 0, -3])] - actual = 
q.get_charges(dual=True) - for n in range(len(actual)): - np.testing.assert_allclose(expected[n], actual[n]) - - def test_fuse_charges(): q1 = np.asarray([0, 1]) q2 = np.asarray([2, 3, 4]) @@ -62,16 +38,11 @@ def test_U1Charge_charges(): for _ in range(2) ] - offsets = [np.min([0, np.min(c)]) for c in charges] - pos_charges = [ - charges[n].astype(np.int32) - offsets[n].astype(np.int32) - for n in range(2) - ] - merged_charges = np.left_shift(pos_charges[0], 16) + pos_charges[1] - merged_offsets = np.left_shift(offsets[0], 16) + offsets[1] + merged_charges = np.left_shift(charges[0].astype(np.int64), + 16) + charges[1].astype(np.int64) q1 = U1Charge(charges) - assert np.all(q1.charges == merged_charges + merged_offsets) + assert np.all(q1.charges == merged_charges) def test_U1Charge_dual(): @@ -81,88 +52,225 @@ def test_U1Charge_dual(): np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) for _ in range(2) ] - - offsets = [np.min([0, np.min(c)]) for c in charges] - pos_charges = [ - charges[n].astype(np.int32) - offsets[n].astype(np.int32) - for n in range(2) - ] - merged_charges = np.left_shift(pos_charges[0], 16) + pos_charges[1] - merged_offsets = np.left_shift(offsets[0], 16) + offsets[1] + merged_charges = np.left_shift(charges[0].astype(np.int64), + 16) + charges[1].astype(np.int64) q1 = U1Charge(charges) - assert np.all(q1.dual_charges == -(merged_charges + merged_offsets)) + assert np.all(q1.dual_charges == -merged_charges) -def test_U1Charge_get_charges(): - D = 100 - B = 6 - charges = [ - np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - for _ in range(2) - ] - q1 = U1Charge(charges) - assert np.all(q1.get_charges(False) == q1.charges) - assert np.all(q1.get_charges(True) == q1.dual_charges) - - -def test_U1Charge_raises(): +def test_BaseCharge_raises(): D = 100 B = 6 with pytest.raises(TypeError): - q1 = U1Charge([ + q1 = BaseCharge([ np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int64) for _ in range(2) ]) with 
pytest.raises(ValueError): q1 = U1Charge([ - np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int64) + np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) for _ in range(2) ], - offsets=[-5, -6]) - with pytest.raises(ValueError): - q1 = U1Charge([ - np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int64) - for _ in range(2) - ], - shifts=[16, 0]) - with pytest.raises(ValueError): - q1 = U1Charge([ - np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int64) - for _ in range(2) - ], - offsets=[-5, -6], shifts=[16, 0]) def test_U1Charge_fusion(): - D = 1000 - B = 6 - O1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) - O2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) - P1 = np.random.randint(0, B + 1, D).astype(np.int16) - P2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - Q1 = np.random.randint(1, B + 1, D).astype(np.int8) - Q2 = np.random.randint(1, B + 1, D).astype(np.int8) - - charges_1 = [O1, O2] - charges_2 = [P1, P2] - charges_3 = [Q1, Q2] - - fused_1 = fuse_charges(charges_1, [1, 1]) - fused_2 = fuse_charges(charges_2, [1, 1]) - fused_3 = fuse_charges(charges_3, [1, 1]) - q1 = U1Charge([O1, P1, Q1]) - q2 = U1Charge([O2, P2, Q2]) - - target = np.random.randint(-B // 2, B // 2 + 1, 3) - q12 = q1 + q2 - nz_1 = q12.nonzero(target) - i1 = fused_1 == target[0] - i2 = fused_2 == target[1] - i3 = fused_3 == target[2] - nz_2 = np.nonzero(np.logical_and(np.logical_and(i1, i2), i3))[0] + def run_test(): + D = 2000 + B = 6 + O1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) + O2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) + P1 = np.random.randint(0, B + 1, D).astype(np.int16) + P2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + Q1 = np.random.randint(1, B + 1, D).astype(np.int8) + Q2 = np.random.randint(1, B + 1, D).astype(np.int8) + + charges_1 = [O1, O2] + charges_2 = [P1, P2] + charges_3 = [Q1, Q2] + + fused_1 = fuse_charges(charges_1, [1, 1]) + fused_2 = 
fuse_charges(charges_2, [1, 1]) + fused_3 = fuse_charges(charges_3, [1, 1]) + q1 = U1Charge([O1, P1, Q1]) + q2 = U1Charge([O2, P2, Q2]) + + target = np.random.randint(-B // 2, B // 2 + 1, 3) + q12 = q1 + q2 + + nz_1 = q12.nonzero(target) + i1 = fused_1 == target[0] + i2 = fused_2 == target[1] + i3 = fused_3 == target[2] + nz_2 = np.nonzero(np.logical_and(np.logical_and(i1, i2), i3))[0] + return nz_1, nz_2 + + nz_1, nz_2 = run_test() + while len(nz_1) == 0: + nz_1, nz_2 = run_test() + + assert np.all(nz_1 == nz_2) + + +def test_U1Charge_multiple_fusion(): + + def run_test(): + D = 300 + B = 4 + O1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) + O2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) + O3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) + P1 = np.random.randint(0, B + 1, D).astype(np.int16) + P2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + P3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + Q1 = np.random.randint(1, B + 1, D).astype(np.int8) + Q2 = np.random.randint(0, B + 1, D).astype(np.int8) + Q3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) + + charges_1 = [O1, O2, O3] + charges_2 = [P1, P2, P3] + charges_3 = [Q1, Q2, Q3] + + fused_1 = fuse_charges(charges_1, [1, 1, 1]) + fused_2 = fuse_charges(charges_2, [1, 1, 1]) + fused_3 = fuse_charges(charges_3, [1, 1, 1]) + q1 = U1Charge([O1, P1, Q1]) + q2 = U1Charge([O2, P2, Q2]) + q3 = U1Charge([O3, P3, Q3]) + + target = np.random.randint(-B // 2, B // 2 + 1, 3) + q123 = q1 + q2 + q3 + + nz_1 = q123.nonzero(target) + i1 = fused_1 == target[0] + i2 = fused_2 == target[1] + i3 = fused_3 == target[2] + nz_2 = np.nonzero(np.logical_and(np.logical_and(i1, i2), i3))[0] + return nz_1, nz_2 + + nz_1, nz_2 = run_test() + while len(nz_1) == 0: + nz_1, nz_2 = run_test() + assert np.all(nz_1 == nz_2) + + +def test_U1Charge_multiple_fusion_with_flow(): + + def run_test(): + D = 300 + B = 4 + O1 = np.random.randint(-B // 2, B // 2 + 1, 
D).astype(np.int8) + O2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) + O3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) + P1 = np.random.randint(0, B + 1, D).astype(np.int16) + P2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + P3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + Q1 = np.random.randint(1, B + 1, D).astype(np.int8) + Q2 = np.random.randint(0, B + 1, D).astype(np.int8) + Q3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) + + charges_1 = [O1, O2, O3] + charges_2 = [P1, P2, P3] + charges_3 = [Q1, Q2, Q3] + + fused_1 = fuse_charges(charges_1, [1, -1, 1]) + fused_2 = fuse_charges(charges_2, [1, -1, 1]) + fused_3 = fuse_charges(charges_3, [1, -1, 1]) + q1 = U1Charge([O1, P1, Q1]) + q2 = U1Charge([O2, P2, Q2]) + q3 = U1Charge([O3, P3, Q3]) + + target = np.random.randint(-B // 2, B // 2 + 1, 3) + q123 = q1 + (-1) * q2 + q3 + + nz_1 = q123.nonzero(target) + i1 = fused_1 == target[0] + i2 = fused_2 == target[1] + i3 = fused_3 == target[2] + nz_2 = np.nonzero(np.logical_and(np.logical_and(i1, i2), i3))[0] + return nz_1, nz_2 + + nz_1, nz_2 = run_test() + while len(nz_1) == 0: + nz_1, nz_2 = run_test() + assert np.all(nz_1 == nz_2) + + +def test_U1Charge_fusion_with_flow(): + + def run_test(): + D = 2000 + B = 6 + O1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) + O2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) + P1 = np.random.randint(0, B + 1, D).astype(np.int16) + P2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + Q1 = np.random.randint(1, B + 1, D).astype(np.int8) + Q2 = np.random.randint(1, B + 1, D).astype(np.int8) + + charges_1 = [O1, O2] + charges_2 = [P1, P2] + charges_3 = [Q1, Q2] + + fused_1 = fuse_charges(charges_1, [1, -1]) + fused_2 = fuse_charges(charges_2, [1, -1]) + fused_3 = fuse_charges(charges_3, [1, -1]) + q1 = U1Charge([O1, P1, Q1]) + q2 = U1Charge([O2, P2, Q2]) + + target = np.random.randint(-B // 2, B // 2 + 1, 3) + q12 = q1 + 
(-1) * q2 + + nz_1 = q12.nonzero(target) + i1 = fused_1 == target[0] + i2 = fused_2 == target[1] + i3 = fused_3 == target[2] + nz_2 = np.nonzero(np.logical_and(np.logical_and(i1, i2), i3))[0] + return nz_1, nz_2 + + nz_1, nz_2 = run_test() + while len(nz_1) == 0: + nz_1, nz_2 = run_test() + assert np.all(nz_1 == nz_2) + + +def test_U1Charge_sub(): + + def run_test(): + D = 2000 + B = 6 + O1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) + O2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) + P1 = np.random.randint(0, B + 1, D).astype(np.int16) + P2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + Q1 = np.random.randint(1, B + 1, D).astype(np.int8) + Q2 = np.random.randint(1, B + 1, D).astype(np.int8) + + charges_1 = [O1, O2] + charges_2 = [P1, P2] + charges_3 = [Q1, Q2] + + fused_1 = fuse_charges(charges_1, [1, -1]) + fused_2 = fuse_charges(charges_2, [1, -1]) + fused_3 = fuse_charges(charges_3, [1, -1]) + q1 = U1Charge([O1, P1, Q1]) + q2 = U1Charge([O2, P2, Q2]) + + target = np.random.randint(-B // 2, B // 2 + 1, 3) + q12 = q1 - q2 + + nz_1 = q12.nonzero(target) + i1 = fused_1 == target[0] + i2 = fused_2 == target[1] + i3 = fused_3 == target[2] + nz_2 = np.nonzero(np.logical_and(np.logical_and(i1, i2), i3))[0] + return nz_1, nz_2 + + nz_1, nz_2 = run_test() + while len(nz_1) == 0: + nz_1, nz_2 = run_test() assert np.all(nz_1 == nz_2) @@ -180,51 +288,98 @@ def test_U1Charge_matmul(): Q = q1 @ q2 @ q3 Q_ = U1Charge([C1, C2, C3]) assert np.all(Q.charges == Q_.charges) - assert np.all(Q._charges == Q_._charges) - assert Q.offsets == Q_.offsets + #assert Q.offsets == Q_.offsets assert np.all(Q.shifts == Q_.shifts) def test_Z2Charge_fusion(): - D = 1000 - B = 6 - O1 = np.random.randint(0, 2, D).astype(np.int8) - O2 = np.random.randint(0, 2, D).astype(np.int8) - P1 = np.random.randint(0, 2, D).astype(np.int8) - P2 = np.random.randint(0, 2, D).astype(np.int8) - Q1 = np.random.randint(0, 2, D).astype(np.int8) - Q2 = 
np.random.randint(0, 2, D).astype(np.int8) - - charges_1 = [O1, O2] - charges_2 = [P1, P2] - charges_3 = [Q1, Q2] def fuse_z2_charges(c1, c2): return np.reshape( np.bitwise_xor(c1[:, None], c2[None, :]), len(c1) * len(c2)) - fused_1 = fuse_z2_charges(*charges_1) - fused_2 = fuse_z2_charges(*charges_2) - fused_3 = fuse_z2_charges(*charges_3) + def run_test(): + D = 1000 + O1 = np.random.randint(0, 2, D).astype(np.int8) + O2 = np.random.randint(0, 2, D).astype(np.int8) + P1 = np.random.randint(0, 2, D).astype(np.int8) + P2 = np.random.randint(0, 2, D).astype(np.int8) + Q1 = np.random.randint(0, 2, D).astype(np.int8) + Q2 = np.random.randint(0, 2, D).astype(np.int8) + + charges_1 = [O1, O2] + charges_2 = [P1, P2] + charges_3 = [Q1, Q2] + + fused_1 = fuse_z2_charges(*charges_1) + fused_2 = fuse_z2_charges(*charges_2) + fused_3 = fuse_z2_charges(*charges_3) + + q1 = Z2Charge([O1, P1, Q1]) + q2 = Z2Charge([O2, P2, Q2]) + + target = np.random.randint(0, 2, 3) + q12 = q1 + q2 + + nz_1 = q12.nonzero(target) + i1 = fused_1 == target[0] + i2 = fused_2 == target[1] + i3 = fused_3 == target[2] + nz_2 = np.nonzero(np.logical_and(np.logical_and(i1, i2), i3))[0] + return nz_1, nz_2 + + nz_1, nz_2 = run_test() + while len(nz_1) == 0: + nz_1, nz_2 = run_test() + assert np.all(nz_1 == nz_2) - q1 = Z2Charge([O1, P1, Q1]) - q2 = Z2Charge([O2, P2, Q2]) - target = np.random.randint(0, 2, 3) - q12 = q1 + q2 +def test_Z2Charge_sub(): - nz_1 = q12.nonzero(target) - i1 = fused_1 == target[0] - i2 = fused_2 == target[1] - i3 = fused_3 == target[2] - nz_2 = np.nonzero(np.logical_and(np.logical_and(i1, i2), i3))[0] + def fuse_z2_charges(c1, c2): + return np.reshape( + np.bitwise_xor(c1[:, None], c2[None, :]), + len(c1) * len(c2)) + + def run_test(): + D = 1000 + O1 = np.random.randint(0, 2, D).astype(np.int8) + O2 = np.random.randint(0, 2, D).astype(np.int8) + P1 = np.random.randint(0, 2, D).astype(np.int8) + P2 = np.random.randint(0, 2, D).astype(np.int8) + Q1 = np.random.randint(0, 2, 
D).astype(np.int8) + Q2 = np.random.randint(0, 2, D).astype(np.int8) + + charges_1 = [O1, O2] + charges_2 = [P1, P2] + charges_3 = [Q1, Q2] + + fused_1 = fuse_z2_charges(*charges_1) + fused_2 = fuse_z2_charges(*charges_2) + fused_3 = fuse_z2_charges(*charges_3) + + q1 = Z2Charge([O1, P1, Q1]) + q2 = Z2Charge([O2, P2, Q2]) + + target = np.random.randint(0, 2, 3) + q12 = q1 - q2 + + nz_1 = q12.nonzero(target) + i1 = fused_1 == target[0] + i2 = fused_2 == target[1] + i3 = fused_3 == target[2] + nz_2 = np.nonzero(np.logical_and(np.logical_and(i1, i2), i3))[0] + return nz_1, nz_2 + + nz_1, nz_2 = run_test() + while len(nz_1) == 0: + nz_1, nz_2 = run_test() assert np.all(nz_1 == nz_2) -def test_U1Charge_matmul(): +def test_Z2Charge_matmul(): D = 1000 - B = 5 C1 = np.random.randint(0, 2, D).astype(np.int8) C2 = np.random.randint(0, 2, D).astype(np.int8) C3 = np.random.randint(0, 2, D).astype(np.int8) @@ -236,5 +391,94 @@ def test_U1Charge_matmul(): Q = q1 @ q2 @ q3 Q_ = Z2Charge([C1, C2, C3]) assert np.all(Q.charges == Q_.charges) - assert np.all(Q._charges == Q_._charges) assert np.all(Q.shifts == Q_.shifts) + + +def test_Charge_U1_add(): + q1 = ChargeCollection( + [U1Charge([np.asarray([0, 1])]), + U1Charge([np.asarray([-2, 3])])]) + q2 = ChargeCollection( + [U1Charge([np.asarray([2, 3])]), + U1Charge([np.asarray([-1, 4])])]) + expected = [np.asarray([2, 3, 3, 4]), np.asarray([-3, 2, 2, 7])] + q12 = q1 + q2 + for n in range(len(q12.charges)): + np.testing.assert_allclose(expected[n], q12.charges[n].charges) + + +def test_Charge_U1_sub(): + q1 = ChargeCollection( + [U1Charge([np.asarray([0, 1])]), + U1Charge([np.asarray([-2, 3])])]) + q2 = ChargeCollection( + [U1Charge([np.asarray([2, 3])]), + U1Charge([np.asarray([-1, 4])])]) + expected = [np.asarray([-2, -3, -1, -2]), np.asarray([-1, -6, 4, -1])] + q12 = q1 - q2 + for n in range(len(q12.charges)): + np.testing.assert_allclose(expected[n], q12.charges[n].charges) + + +def test_Charge_Z2_add(): + q1 = ChargeCollection( + 
[Z2Charge([np.asarray([0, 1])]), + Z2Charge([np.asarray([1, 0])])]) + q2 = ChargeCollection( + [Z2Charge([np.asarray([0, 0])]), + Z2Charge([np.asarray([1, 1])])]) + expected = [np.asarray([0, 0, 1, 1]), np.asarray([0, 0, 1, 1])] + q12 = q1 + q2 + for n in range(len(q12.charges)): + np.testing.assert_allclose(expected[n], q12.charges[n].charges) + + +def test_Charge_Z2_sub(): + q1 = ChargeCollection( + [Z2Charge([np.asarray([0, 1])]), + Z2Charge([np.asarray([1, 0])])]) + q2 = ChargeCollection( + [Z2Charge([np.asarray([0, 0])]), + Z2Charge([np.asarray([1, 1])])]) + expected = [np.asarray([0, 0, 1, 1]), np.asarray([0, 0, 1, 1])] + q12 = q1 - q2 + for n in range(len(q12.charges)): + np.testing.assert_allclose(expected[n], q12.charges[n].charges) + + +def test_Charge_Z2_U1_add(): + q1 = ChargeCollection( + [Z2Charge([np.asarray([0, 1])]), + U1Charge([np.asarray([-2, 3])])]) + q2 = ChargeCollection( + [Z2Charge([np.asarray([0, 0])]), + U1Charge([np.asarray([-1, 4])])]) + expected = [np.asarray([0, 0, 1, 1]), np.asarray([-3, 2, 2, 7])] + + q12 = q1 + q2 + for n in range(len(q12.charges)): + np.testing.assert_allclose(expected[n], q12.charges[n].charges) + + +def test_Charge_add_Z2_U1_raises(): + q1 = ChargeCollection( + [Z2Charge([np.asarray([0, 1])]), + Z2Charge([np.asarray([-2, 3])])]) + q2 = ChargeCollection( + [U1Charge([np.asarray([0, 0])]), + U1Charge([np.asarray([-1, 4])])]) + expected = [np.asarray([0, 0, 1, 1]), np.asarray([-3, 2, 2, 7])] + with pytest.raises(TypeError): + q12 = q1 + q2 + + +def test_Charge_sub_Z2_U1_raises(): + q1 = ChargeCollection( + [Z2Charge([np.asarray([0, 1])]), + Z2Charge([np.asarray([-2, 3])])]) + q2 = ChargeCollection( + [U1Charge([np.asarray([0, 0])]), + U1Charge([np.asarray([-1, 4])])]) + expected = [np.asarray([0, 0, 1, 1]), np.asarray([-3, 2, 2, 7])] + with pytest.raises(TypeError): + q12 = q1 - q2 From 53c5063e8e6fa0e29a9374b0c9f14434b131aaf2 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 3 Jan 2020 16:31:33 -0500 Subject: 
[PATCH 104/212] tests for new index --- tensornetwork/block_tensor/index_new_test.py | 151 +++++++++++++++++++ 1 file changed, 151 insertions(+) create mode 100644 tensornetwork/block_tensor/index_new_test.py diff --git a/tensornetwork/block_tensor/index_new_test.py b/tensornetwork/block_tensor/index_new_test.py new file mode 100644 index 000000000..f924be6e2 --- /dev/null +++ b/tensornetwork/block_tensor/index_new_test.py @@ -0,0 +1,151 @@ +import numpy as np +# pylint: disable=line-too-long +from tensornetwork.block_tensor.index_new import Index, fuse_index_pair, split_index, fuse_indices +from tensornetwork.block_tensor.charge import U1Charge, Z2Charge, ChargeCollection + + +def test_index_fusion_mul(): + D = 10 + B = 4 + dtype = np.int16 + q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 + q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 + + i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + + i12 = i1 * i2 + assert i12.left_child is i1 + assert i12.right_child is i2 + for n in range(i12.charges.num_symmetries): + assert np.all(i12.charges[n].charges == (q1 + q2).charges) + + +def test_fuse_indices(): + D = 10 + B = 4 + dtype = np.int16 + q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 + q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 + i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + + i12 = fuse_indices([i1, i2]) + assert i12.left_child is i1 + assert i12.right_child is i2 + for n in range(i12.charges.num_symmetries): + assert np.all(i12.charges[n].charges == (q1 + q2).charges) + + +def test_split_index(): + D = 10 + B = 4 + dtype = np.int16 + q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + 
D).astype(dtype)]) #quantum numbers on leg 1 + q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 + + i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + + i12 = i1 * i2 + i1_, i2_ = split_index(i12) + assert i1 is i1_ + assert i2 is i2_ + np.testing.assert_allclose(q1.charges, i1.charges[0].charges) + np.testing.assert_allclose(q2.charges, i2.charges[0].charges) + np.testing.assert_allclose(q1.charges, i1_.charges[0].charges) + np.testing.assert_allclose(q2.charges, i2_.charges[0].charges) + assert i1_.name == 'index1' + assert i2_.name == 'index2' + assert i1_.flow == i1.flow + assert i2_.flow == i2.flow + + +def test_elementary_indices(): + D = 10 + B = 4 + dtype = np.int16 + q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)]) + q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)]) + q3 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)]) + q4 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)]) + i1 = Index(charges=q1, flow=1, name='index1') + i2 = Index(charges=q2, flow=1, name='index2') + i3 = Index(charges=q3, flow=1, name='index3') + i4 = Index(charges=q4, flow=1, name='index4') + + i12 = i1 * i2 + i34 = i3 * i4 + elmt12 = i12.get_elementary_indices() + assert elmt12[0] is i1 + assert elmt12[1] is i2 + + i1234 = i12 * i34 + elmt1234 = i1234.get_elementary_indices() + assert elmt1234[0] is i1 + assert elmt1234[1] is i2 + assert elmt1234[2] is i3 + assert elmt1234[3] is i4 + assert elmt1234[0].name == 'index1' + assert elmt1234[1].name == 'index2' + assert elmt1234[2].name == 'index3' + assert elmt1234[3].name == 'index4' + assert elmt1234[0].flow == i1.flow + assert elmt1234[1].flow == i2.flow + assert elmt1234[2].flow == i3.flow + assert elmt1234[3].flow == i4.flow + + np.testing.assert_allclose(q1.charges, i1.charges[0].charges) + 
np.testing.assert_allclose(q2.charges, i2.charges[0].charges) + np.testing.assert_allclose(q3.charges, i3.charges[0].charges) + np.testing.assert_allclose(q4.charges, i4.charges[0].charges) + + +def test_leave(): + D = 10 + B = 4 + dtype = np.int16 + q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 + q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 + + i1 = Index(charges=q1, flow=1, name='index1') + i2 = Index(charges=q2, flow=1, name='index2') + assert i1.is_leave + assert i2.is_leave + + i12 = i1 * i2 + assert not i12.is_leave + + +def test_copy(): + D = 10 + B = 4 + dtype = np.int16 + q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 + q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 + + i1 = Index(charges=q1, flow=1, name='index1') + i2 = Index(charges=q2, flow=1, name='index2') + i3 = Index(charges=q1, flow=-1, name='index3') + i4 = Index(charges=q2, flow=-1, name='index4') + + i12 = i1 * i2 + i34 = i3 * i4 + i1234 = i12 * i34 + i1234_copy = i1234.copy() + + elmt1234 = i1234_copy.get_elementary_indices() + assert elmt1234[0] is not i1 + assert elmt1234[1] is not i2 + assert elmt1234[2] is not i3 + assert elmt1234[3] is not i4 From 707fb5524594bccf0702365c0eb0ef2711ae8107 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 3 Jan 2020 16:31:42 -0500 Subject: [PATCH 105/212] new Index class --- tensornetwork/block_tensor/index_new.py | 183 ++++++++++++++++++++++++ 1 file changed, 183 insertions(+) create mode 100644 tensornetwork/block_tensor/index_new.py diff --git a/tensornetwork/block_tensor/index_new.py b/tensornetwork/block_tensor/index_new.py new file mode 100644 index 000000000..66e97b283 --- /dev/null +++ b/tensornetwork/block_tensor/index_new.py @@ -0,0 +1,183 @@ +# Copyright 2019 The TensorNetwork Authors +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +from tensornetwork.block_tensor.charge import BaseCharge, ChargeCollection +import copy +from typing import List, Union, Any, Optional, Tuple, Text + + +class Index: + """ + An index class to store indices of a symmetric tensor. + An index keeps track of all its childs by storing references + to them (i.e. it is a binary tree). + """ + + def __init__(self, + charges: Union[ChargeCollection, BaseCharge], + flow: int, + name: Optional[Text] = None, + left_child: Optional["Index"] = None, + right_child: Optional["Index"] = None): + if isinstance(charges, BaseCharge): + self._charges = ChargeCollection([charges]) + elif isinstance(charges, ChargeCollection) or (charges is None): + self._charges = charges + else: + raise TypeError("Unknown type {}".format(type(chargesp))) + self.flow = flow + self.left_child = left_child + self.right_child = right_child + self._name = name + + def __repr__(self): + return str(self.dimension) + + @property + def is_leave(self): + return (self.left_child is None) and (self.right_child is None) + + @property + def dimension(self): + return np.prod([len(i.charges) for i in self.get_elementary_indices()]) + + def _copy_helper(self, index: "Index", copied_index: "Index") -> None: + """ + Helper function for copy + """ + if index.left_child != None: + left_copy = Index( + 
charges=copy.copy(index.left_child.charges), + flow=copy.copy(index.left_child.flow), + name=copy.copy(index.left_child.name)) + + copied_index.left_child = left_copy + self._copy_helper(index.left_child, left_copy) + if index.right_child != None: + right_copy = Index( + charges=copy.copy(index.right_child.charges), + flow=copy.copy(index.right_child.flow), + name=copy.copy(index.right_child.name)) + copied_index.right_child = right_copy + self._copy_helper(index.right_child, right_copy) + + def copy(self): + """ + Returns: + Index: A deep copy of `Index`. Note that all children of + `Index` are copied as well. + """ + index_copy = Index( + charges=copy.copy(self._charges), + flow=copy.copy(self.flow), + name=self.name) + + self._copy_helper(self, index_copy) + return index_copy + + def _leave_helper(self, index: "Index", leave_list: List) -> None: + if index.left_child: + self._leave_helper(index.left_child, leave_list) + if index.right_child: + self._leave_helper(index.right_child, leave_list) + if (index.left_child is None) and (index.right_child is None): + leave_list.append(index) + + def get_elementary_indices(self) -> List: + """ + Returns: + List: A list containing the elementary indices (the leaves) + of `Index`. + """ + leave_list = [] + self._leave_helper(self, leave_list) + return leave_list + + def __mul__(self, index: "Index") -> "Index": + """ + Merge `index` and self into a single larger index. + The flow of the resulting index is set to 1. 
+ Flows of `self` and `index` are multiplied into + the charges upon fusing.n + """ + return fuse_index_pair(self, index) + + @property + def charges(self): + if self.is_leave: + return self._charges + return self.left_child.charges * self.left_child.flow + self.right_child.charges * self.right_child.flow + + @property + def name(self): + if self._name: + return self._name + if self.is_leave: + return self.name + return self.left_child.name + ' & ' + self.right_child.name + + +def fuse_index_pair(left_index: Index, + right_index: Index, + flow: Optional[int] = 1) -> Index: + """ + Fuse two consecutive indices (legs) of a symmetric tensor. + Args: + left_index: A tensor Index. + right_index: A tensor Index. + flow: An optional flow of the resulting `Index` object. + Returns: + Index: The result of fusing `index1` and `index2`. + """ + #Fuse the charges of the two indices + if left_index is right_index: + raise ValueError( + "index1 and index2 are the same object. Can only fuse distinct objects") + + return Index( + charges=None, flow=flow, left_child=left_index, right_child=right_index) + + +def fuse_indices(indices: List[Index], flow: Optional[int] = 1) -> Index: + """ + Fuse a list of indices (legs) of a symmetric tensor. + Args: + indices: A list of tensor Index objects + flow: An optional flow of the resulting `Index` object. + Returns: + Index: The result of fusing `indices`. + """ + + index = indices[0] + for n in range(1, len(indices)): + index = fuse_index_pair(index, indices[n], flow=flow) + return index + + +def split_index(index: Index) -> Tuple[Index, Index]: + """ + Split an index (leg) of a symmetric tensor into two legs. + Args: + index: A tensor Index. + Returns: + Tuple[Index, Index]: The result of splitting `index`. 
+ """ + if index.is_leave: + raise ValueError("cannot split an elementary index") + + return index.left_child, index.right_child From 5230997d9a88091e66380e55a06c77ab1d5134dc Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 3 Jan 2020 16:32:18 -0500 Subject: [PATCH 106/212] new block tensor --- .../block_tensor/block_tensor_new.py | 1673 +++++++++++++++++ 1 file changed, 1673 insertions(+) create mode 100644 tensornetwork/block_tensor/block_tensor_new.py diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py new file mode 100644 index 000000000..42eaf96c9 --- /dev/null +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -0,0 +1,1673 @@ +# Copyright 2019 The TensorNetwork Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +#from tensornetwork.block_tensor.lookup import lookup +# pylint: disable=line-too-long +from tensornetwork.network_components import Node, contract, contract_between +from tensornetwork.backends import backend_factory +# pylint: disable=line-too-long +from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charge_pair, fuse_degeneracies, fuse_charges, unfuse +import numpy as np +import scipy as sp +import itertools +import time +from typing import List, Union, Any, Tuple, Type, Optional, Dict, Iterable +Tensor = Any + + +def _check_flows(flows) -> None: + if (set(flows) != {1}) and (set(flows) != {-1}) and (set(flows) != {-1, 1}): + raise ValueError( + "flows = {} contains values different from 1 and -1".format(flows)) + + +def _find_best_partition(charges, flows): + if len(charges) == 1: + raise ValueError( + '_expecting `charges` with a length of at least 2, got `len(charges)={}`' + .format(len(charges))) + dims = np.asarray([len(c) for c in charges]) + min_ind = np.argmin([ + np.abs(np.prod(dims[0:n]) - np.prod(dims[n::])) + for n in range(1, len(charges)) + ]) + fused_left_charges = fuse_charges(charges[0:min_ind + 1], + flows[0:min_ind + 1]) + fused_right_charges = fuse_charges(charges[min_ind + 1::], + flows[min_ind + 1::]) + + return fused_left_charges, fused_right_charges, min_ind + 1 + + +def map_to_integer(dims: Union[List, np.ndarray], + table: np.ndarray, + dtype: Optional[Type[np.number]] = np.int64): + """ + Map a `table` of integers of shape (N, r) bijectively into + an np.ndarray `integers` of length N of unique numbers. + The mapping is done using + ``` + `integers[n] = table[n,0] * np.prod(dims[1::]) + table[n,1] * np.prod(dims[2::]) + ... + table[n,r-1] * 1` + + Args: + dims: An iterable of integers. + table: An array of shape (N,r) of integers. 
+ dtype: An optional dtype used for the conversion. + Care should be taken when choosing this to avoid overflow issues. + Returns: + np.ndarray: An array of integers. + """ + converter_table = np.expand_dims( + np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))), 0) + tmp = table * converter_table + integers = np.sum(tmp, axis=1) + return integers + + +def compute_fused_charge_degeneracies(charges: List[np.ndarray], + flows: List[Union[bool, int]]) -> Dict: + """ + For a list of charges, compute all possible fused charges resulting + from fusing `charges`, together with their respective degeneracyn + Args: + charges: List of np.ndarray of int, one for each leg of the + underlying tensor. Each np.ndarray `charges[leg]` + is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + Returns: + dict: Mapping fused charges (int) to degeneracies (int) + """ + if len(charges) == 1: + return np.unique(charges[0], return_counts=True) + + # get unique charges and their degeneracies on the first leg. + # We are fusing from "left" to "right". + accumulated_charges, accumulated_degeneracies = np.unique( + charges[0], return_counts=True) + #multiply the flow into the charges of first leg + accumulated_charges *= flows[0] + for n in range(1, len(charges)): + #list of unique charges and list of their degeneracies + #on the next unfused leg of the tensor + leg_charges, leg_degeneracies = np.unique(charges[n], return_counts=True) + + #fuse the unique charges + #Note: entries in `fused_charges` are not unique anymore. 
+ #flow1 = 1 because the flow of leg 0 has already been + #mulitplied above + fused_charges = fuse_charge_pair( + q1=accumulated_charges, flow1=1, q2=leg_charges, flow2=flows[n]) + #compute the degeneracies of `fused_charges` charges + #`fused_degeneracies` is a list of degeneracies such that + # `fused_degeneracies[n]` is the degeneracy of of + # charge `c = fused_charges[n]`. + fused_degeneracies = fuse_degeneracies(accumulated_degeneracies, + leg_degeneracies) + #compute the new degeneracies resulting from fusing + #`accumulated_charges` and `leg_charges_2` + accumulated_charges = np.unique(fused_charges) + accumulated_degeneracies = np.empty( + len(accumulated_charges), dtype=np.int64) + for n in range(len(accumulated_charges)): + accumulated_degeneracies[n] = np.sum( + fused_degeneracies[fused_charges == accumulated_charges[n]]) + return accumulated_charges, accumulated_degeneracies + + +def compute_num_nonzero(charges: List[np.ndarray], + flows: List[Union[bool, int]]) -> int: + """ + Compute the number of non-zero elements, given the meta-data of + a symmetric tensor. + Args: + charges: List of np.ndarray of int, one for each leg of the + underlying tensor. Each np.ndarray `charges[leg]` + is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + Returns: + int: The number of non-zero elements. 
+ """ + accumulated_charges, accumulated_degeneracies = compute_fused_charge_degeneracies( + charges, flows) + if len(np.nonzero(accumulated_charges == 0)[0]) == 0: + raise ValueError( + "given leg-charges `charges` and flows `flows` are incompatible " + "with a symmetric tensor") + return accumulated_degeneracies[accumulated_charges == 0][0] + + +def compute_nonzero_block_shapes(charges: List[np.ndarray], + flows: List[Union[bool, int]]) -> Dict: + """ + Compute the blocks and their respective shapes of a symmetric tensor, + given its meta-data. + Args: + charges: List of np.ndarray, one for each leg. + Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + Returns: + dict: Dictionary mapping a tuple of charges to a shape tuple. + Each element corresponds to a non-zero valued block of the tensor. + """ + #FIXME: this routine is slow + _check_flows(flows) + degeneracies = [] + unique_charges = [] + rank = len(charges) + #find the unique quantum numbers and their degeneracy on each leg + for leg in range(rank): + c, d = np.unique(charges[leg], return_counts=True) + unique_charges.append(c) + degeneracies.append(dict(zip(c, d))) + + #find all possible combination of leg charges c0, c1, ... + #(with one charge per leg 0, 1, ...) 
+ #such that sum([flows[0] * c0, flows[1] * c1, ...]) = 0 + charge_combinations = list( + itertools.product(*[ + unique_charges[leg] * flows[leg] + for leg in range(len(unique_charges)) + ])) + net_charges = np.array([np.sum(c) for c in charge_combinations]) + zero_idxs = np.nonzero(net_charges == 0)[0] + charge_shape_dict = {} + for idx in zero_idxs: + c = charge_combinations[idx] + shapes = [degeneracies[leg][flows[leg] * c[leg]] for leg in range(rank)] + charge_shape_dict[c] = shapes + return charge_shape_dict + + +def find_diagonal_sparse_blocks(data: np.ndarray, + row_charges: List[Union[List, np.ndarray]], + column_charges: List[Union[List, np.ndarray]], + row_flows: List[Union[bool, int]], + column_flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: + """ + Given the meta data and underlying data of a symmetric matrix, compute + all diagonal blocks and return them in a dict. + `row_charges` and `column_charges` are lists of np.ndarray. The tensor + is viewed as a matrix with rows given by fusing `row_charges` and + columns given by fusing `column_charges`. Note that `column_charges` + are never explicitly fused (`row_charges` are). + Args: + data: An np.ndarray of the data. The number of elements in `data` + has to match the number of non-zero elements defined by `charges` + and `flows` + row_charges: List of np.ndarray, one for each leg of the row-indices. + Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + column_charges: List of np.ndarray, one for each leg of the column-indices. + Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + row_flows: A list of integers, one for each entry in `row_charges`. + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + column_flows: A list of integers, one for each entry in `column_charges`. 
+ with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the sparse locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. + + Returns: + dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray + or a python list of locations and shapes, depending on the value of `return_data`. + """ + flows = row_flows.copy() + flows.extend(column_flows) + _check_flows(flows) + if len(flows) != (len(row_charges) + len(column_charges)): + raise ValueError( + "`len(flows)` is different from `len(row_charges) + len(column_charges)`" + ) + + #get the unique column-charges + #we only care about their degeneracies, not their order; that's much faster + #to compute since we don't have to fuse all charges explicitly + unique_column_charges, column_dims = compute_fused_charge_degeneracies( + column_charges, column_flows) + #convenience container for storing the degeneracies of each + #column charge + column_degeneracies = dict(zip(unique_column_charges, column_dims)) + + if len(row_charges) > 1: + left_row_charges, right_row_charges, _ = _find_best_partition( + row_charges, row_flows) + unique_left = np.unique(left_row_charges) + unique_right = np.unique(right_row_charges) + unique_row_charges = np.unique( + fuse_charges(charges=[unique_left, unique_right], flows=[1, 1])) + + #get the charges common to rows and columns (only those matter) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + + row_locations = 
find_sparse_positions( + left_charges=left_row_charges, + left_flow=1, + right_charges=right_row_charges, + right_flow=1, + target_charges=common_charges) + elif len(row_charges) == 1: + fused_row_charges = fuse_charges(row_charges, row_flows) + + #get the unique row-charges + unique_row_charges, row_dims = np.unique( + fused_row_charges, return_counts=True) + #get the charges common to rows and columns (only those matter) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + relevant_fused_row_charges = fused_row_charges[np.isin( + fused_row_charges, common_charges)] + row_locations = {} + for c in common_charges: + row_locations[c] = np.nonzero(relevant_fused_row_charges == c)[0] + else: + raise ValueError('Found an empty sequence for `row_charges`') + #some numpy magic to get the index locations of the blocks + degeneracy_vector = np.empty( + np.sum([len(v) for v in row_locations.values()]), dtype=np.int64) + #for each charge `c` in `common_charges` we generate a boolean mask + #for indexing the positions where `relevant_column_charges` has a value of `c`. + masks = {} + for c in common_charges: + degeneracy_vector[row_locations[c]] = column_degeneracies[-c] + + # the result of the cumulative sum is a vector containing + # the stop positions of the non-zero values of each row + # within the data vector. + # E.g. 
for `relevant_row_charges` = [0,1,0,0,3], and + # column_degeneracies[0] = 10 + # column_degeneracies[1] = 20 + # column_degeneracies[3] = 30 + # we have + # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] + # The starting positions of consecutive elements (in row-major order) in + # each row with charge `c=0` within the data vector are then simply obtained using + # masks[0] = [True, False, True, True, False] + # and `stop_positions[masks[0]] - column_degeneracies[0]` + stop_positions = np.cumsum(degeneracy_vector) + start_positions = stop_positions - degeneracy_vector + blocks = {} + + for c in common_charges: + #numpy broadcasting is substantially faster than kron! + a = np.expand_dims(start_positions[np.sort(row_locations[c])], 1) + b = np.expand_dims(np.arange(column_degeneracies[-c]), 0) + inds = np.reshape(a + b, len(row_locations[c]) * column_degeneracies[-c]) + if not return_data: + blocks[c] = [inds, (len(row_locations[c]), column_degeneracies[-c])] + else: + blocks[c] = np.reshape(data[inds], + (len(row_locations[c]), column_degeneracies[-c])) + return blocks + + +def find_diagonal_sparse_blocks_depreacated_1( + data: np.ndarray, + row_charges: List[Union[List, np.ndarray]], + column_charges: List[Union[List, np.ndarray]], + row_flows: List[Union[bool, int]], + column_flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: + """ + Deprecated + + This version is slow for matrices with shape[0] >> shape[1], but fast otherwise. + + Given the meta data and underlying data of a symmetric matrix, compute + all diagonal blocks and return them in a dict. + `row_charges` and `column_charges` are lists of np.ndarray. The tensor + is viewed as a matrix with rows given by fusing `row_charges` and + columns given by fusing `column_charges`. Note that `column_charges` + are never explicitly fused (`row_charges` are). + Args: + data: An np.ndarray of the data. 
The number of elements in `data` + has to match the number of non-zero elements defined by `charges` + and `flows` + row_charges: List of np.ndarray, one for each leg of the row-indices. + Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + column_charges: List of np.ndarray, one for each leg of the column-indices. + Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + row_flows: A list of integers, one for each entry in `row_charges`. + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + column_flows: A list of integers, one for each entry in `column_charges`. + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the sparse locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. + + Returns: + dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray + or a python list of locations and shapes, depending on the value of `return_data`. + """ + flows = row_flows.copy() + flows.extend(column_flows) + _check_flows(flows) + if len(flows) != (len(row_charges) + len(column_charges)): + raise ValueError( + "`len(flows)` is different from `len(row_charges) + len(column_charges)`" + ) + + #since we are using row-major we have to fuse the row charges anyway. 
+ fused_row_charges = fuse_charges(row_charges, row_flows) + #get the unique row-charges + unique_row_charges, row_dims = np.unique( + fused_row_charges, return_counts=True) + + #get the unique column-charges + #we only care about their degeneracies, not their order; that's much faster + #to compute since we don't have to fuse all charges explicitly + unique_column_charges, column_dims = compute_fused_charge_degeneracies( + column_charges, column_flows) + #get the charges common to rows and columns (only those matter) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + + #convenience container for storing the degeneracies of each + #row and column charge + row_degeneracies = dict(zip(unique_row_charges, row_dims)) + column_degeneracies = dict(zip(unique_column_charges, column_dims)) + + # we only care about charges common to row and columns + mask = np.isin(fused_row_charges, common_charges) + relevant_row_charges = fused_row_charges[mask] + #some numpy magic to get the index locations of the blocks + #we generate a vector of `len(relevant_row_charges) which, + #for each charge `c` in `relevant_row_charges` holds the + #column-degeneracy of charge `c` + degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64) + #for each charge `c` in `common_charges` we generate a boolean mask + #for indexing the positions where `relevant_column_charges` has a value of `c`. + masks = {} + for c in common_charges: + mask = relevant_row_charges == c + masks[c] = mask + degeneracy_vector[mask] = column_degeneracies[-c] + + # the result of the cumulative sum is a vector containing + # the stop positions of the non-zero values of each row + # within the data vector. + # E.g. 
for `relevant_row_charges` = [0,1,0,0,3], and + # column_degeneracies[0] = 10 + # column_degeneracies[1] = 20 + # column_degeneracies[3] = 30 + # we have + # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] + # The starting positions of consecutive elements (in row-major order) in + # each row with charge `c=0` within the data vector are then simply obtained using + # masks[0] = [True, False, True, True, False] + # and `stop_positions[masks[0]] - column_degeneracies[0]` + stop_positions = np.cumsum(degeneracy_vector) + start_positions = stop_positions - degeneracy_vector + blocks = {} + + for c in common_charges: + #numpy broadcasting is substantially faster than kron! + a = np.expand_dims(start_positions[masks[c]], 1) + b = np.expand_dims(np.arange(column_degeneracies[-c]), 0) + if not return_data: + blocks[c] = [ + np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), + (row_degeneracies[c], column_degeneracies[-c]) + ] + else: + blocks[c] = np.reshape( + data[np.reshape(a + b, + row_degeneracies[c] * column_degeneracies[-c])], + (row_degeneracies[c], column_degeneracies[-c])) + return blocks + + +def find_diagonal_sparse_blocks_deprecated_0( + data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: + """ + Deprecated: this version is about 2 times slower (worst case) than the current used + implementation + Given the meta data and underlying data of a symmetric matrix, compute + all diagonal blocks and return them in a dict. + Args: + data: An np.ndarray of the data. The number of elements in `data` + has to match the number of non-zero elements defined by `charges` + and `flows` + charges: List of np.ndarray, one for each leg. + Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. 
+ flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. + + Returns: + dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray + or a python list of locations and shapes, depending on the value of `return_data`. + """ + if len(charges) != 2: + raise ValueError("input has to be a two-dimensional symmetric matrix") + _check_flows(flows) + if len(flows) != len(charges): + raise ValueError("`len(flows)` is different from `len(charges)`") + + #we multiply the flows into the charges + row_charges = flows[0] * charges[0] # a list of charges on each row + column_charges = flows[1] * charges[1] # a list of charges on each column + + #get the unique charges + unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) + unique_column_charges, column_dims = np.unique( + column_charges, return_counts=True) + #get the charges common to rows and columns (only those matter) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + + #convenience container for storing the degeneracies of each + #row and column charge + row_degeneracies = dict(zip(unique_row_charges, row_dims)) + column_degeneracies = dict(zip(unique_column_charges, column_dims)) + + # we only care about charges common to row and columns + mask = np.isin(row_charges, common_charges) + relevant_row_charges = row_charges[mask] + + 
#some numpy magic to get the index locations of the blocks + #we generate a vector of `len(relevant_row_charges) which, + #for each charge `c` in `relevant_row_charges` holds the + #column-degeneracy of charge `c` + degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64) + #for each charge `c` in `common_charges` we generate a boolean mask + #for indexing the positions where `relevant_column_charges` has a value of `c`. + masks = {} + for c in common_charges: + mask = relevant_row_charges == c + masks[c] = mask + degeneracy_vector[mask] = column_degeneracies[-c] + + # the result of the cumulative sum is a vector containing + # the stop positions of the non-zero values of each row + # within the data vector. + # E.g. for `relevant_row_charges` = [0,1,0,0,3], and + # column_degeneracies[0] = 10 + # column_degeneracies[1] = 20 + # column_degeneracies[3] = 30 + # we have + # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] + # The starting positions of consecutive elements (in row-major order) in + # each row with charge `c=0` within the data vector are then simply obtained using + # masks[0] = [True, False, True, True, False] + # and `stop_positions[masks[0]] - column_degeneracies[0]` + stop_positions = np.cumsum(degeneracy_vector) + blocks = {} + + for c in common_charges: + #numpy broadcasting is substantially faster than kron! 
+ a = np.expand_dims(stop_positions[masks[c]] - column_degeneracies[-c], 0) + b = np.expand_dims(np.arange(column_degeneracies[-c]), 1) + if not return_data: + blocks[c] = [ + np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), + (row_degeneracies[c], column_degeneracies[-c]) + ] + else: + blocks[c] = np.reshape( + data[np.reshape(a + b, + row_degeneracies[c] * column_degeneracies[-c])], + (row_degeneracies[c], column_degeneracies[-c])) + return blocks + + +def find_diagonal_sparse_blocks_column_major( + data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: + """ + Deprecated + + Given the meta data and underlying data of a symmetric matrix, compute + all diagonal blocks and return them in a dict, assuming column-major + ordering. + Args: + data: An np.ndarray of the data. The number of elements in `data` + has to match the number of non-zero elements defined by `charges` + and `flows` + charges: List of np.ndarray, one for each leg. + Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. + + Returns: + dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray + or a python list of locations and shapes, depending on the value of `return_data`. 
+ """ + if len(charges) != 2: + raise ValueError("input has to be a two-dimensional symmetric matrix") + _check_flows(flows) + if len(flows) != len(charges): + raise ValueError("`len(flows)` is different from `len(charges)`") + + #we multiply the flows into the charges + row_charges = flows[0] * charges[0] # a list of charges on each row + column_charges = flows[1] * charges[1] # a list of charges on each column + + #get the unique charges + unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) + unique_column_charges, column_dims = np.unique( + column_charges, return_counts=True) + #get the charges common to rows and columns (only those matter) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + + #convenience container for storing the degeneracies of each + #row and column charge + row_degeneracies = dict(zip(unique_row_charges, row_dims)) + column_degeneracies = dict(zip(unique_column_charges, column_dims)) + + # we only care about charges common to row and columns + mask = np.isin(column_charges, -common_charges) + relevant_column_charges = column_charges[mask] + + #some numpy magic to get the index locations of the blocks + #we generate a vector of `len(relevant_column_charges) which, + #for each charge `c` in `relevant_column_charges` holds the + #row-degeneracy of charge `c` + degeneracy_vector = np.empty(len(relevant_column_charges), dtype=np.int64) + #for each charge `c` in `common_charges` we generate a boolean mask + #for indexing the positions where `relevant_column_charges` has a value of `c`. + masks = {} + for c in common_charges: + mask = relevant_column_charges == -c + masks[c] = mask + degeneracy_vector[mask] = row_degeneracies[c] + + # the result of the cumulative sum is a vector containing + # the stop positions of the non-zero values of each column + # within the data vector. + # E.g. 
for `relevant_column_charges` = [0,1,0,0,3], and + # row_degeneracies[0] = 10 + # row_degeneracies[1] = 20 + # row_degeneracies[3] = 30 + # we have + # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] + # The starting positions of consecutive elements (in column-major order) in + # each column with charge `c=0` within the data vector are then simply obtained using + # masks[0] = [True, False, True, True, False] + # and `stop_positions[masks[0]] - row_degeneracies[0]` + stop_positions = np.cumsum(degeneracy_vector) + blocks = {} + + for c in common_charges: + #numpy broadcasting is substantially faster than kron! + a = np.expand_dims(stop_positions[masks[c]] - row_degeneracies[c], 0) + b = np.expand_dims(np.arange(row_degeneracies[c]), 1) + if not return_data: + blocks[c] = [ + np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), + (row_degeneracies[c], column_degeneracies[-c]) + ] + else: + blocks[c] = np.reshape( + data[np.reshape(a + b, + row_degeneracies[c] * column_degeneracies[-c])], + (row_degeneracies[c], column_degeneracies[-c])) + return blocks + + +def find_dense_positions_deprecated(left_charges: np.ndarray, left_flow: int, + right_charges: np.ndarray, right_flow: int, + target_charge: int) -> Dict: + """ + Find the dense locations of elements (i.e. the index-values within the DENSE tensor) + in the vector `fused_charges` (resulting from fusing np.ndarrays + `left_charges` and `right_charges`) that have a value of `target_charge`. + For example, given + ``` + left_charges = [-2,0,1,0,0] + right_charges = [-1,0,2,1] + target_charge = 0 + fused_charges = fuse_charges([left_charges, right_charges],[1,1]) + print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] + ``` + we want to find the all different blocks + that fuse to `target_charge=0`, i.e. where `fused_charges==0`, + together with their corresponding index-values of the data in the dense array. 
+ `find_dense_blocks` returns a dict mapping tuples `(left_charge, right_charge)` + to an array of integers. + For the above example, we get: + * for `left_charge` = -2 and `right_charge` = 2 we get an array [2]. Thus, `fused_charges[2]` + was obtained from fusing -2 and 2. + * for `left_charge` = 0 and `right_charge` = 0 we get an array [5, 13, 17]. Thus, + `fused_charges[5,13,17]` were obtained from fusing 0 and 0. + * for `left_charge` = 1 and `right_charge` = -1 we get an array [8]. Thus, `fused_charges[8]` + was obtained from fusing 1 and -1. + Args: + left_charges: An np.ndarray of integer charges. + left_flow: The flow direction of the left charges. + right_charges: An np.ndarray of integer charges. + right_flow: The flow direction of the right charges. + target_charge: The target charge. + Returns: + dict: Mapping tuples of integers to np.ndarray of integers. + """ + _check_flows([left_flow, right_flow]) + unique_left = np.unique(left_charges) + unique_right = np.unique(right_charges) + fused = fuse_charges([unique_left, unique_right], [left_flow, right_flow]) + left_inds, right_inds = unfuse( + np.nonzero(fused == target_charge)[0], len(unique_left), + len(unique_right)) + left_c = unique_left[left_inds] + right_c = unique_right[right_inds] + len_right_charges = len(right_charges) + linear_positions = {} + for left_charge, right_charge in zip(left_c, right_c): + left_positions = np.nonzero(left_charges == left_charge)[0] + left_offsets = np.expand_dims(left_positions * len_right_charges, 1) + right_offsets = np.expand_dims( + np.nonzero(right_charges == right_charge)[0], 0) + linear_positions[(left_charge, right_charge)] = np.reshape( + left_offsets + right_offsets, + left_offsets.shape[0] * right_offsets.shape[1]) + return np.sort(np.concatenate(list(linear_positions.values()))) + + +def find_dense_positions(left_charges: np.ndarray, left_flow: int, + right_charges: np.ndarray, right_flow: int, + target_charge: int) -> Dict: + """ + Find the dense 
locations of elements (i.e. the index-values within the DENSE tensor) + in the vector `fused_charges` (resulting from fusing np.ndarrays + `left_charges` and `right_charges`) that have a value of `target_charge`. + For example, given + ``` + left_charges = [-2,0,1,0,0] + right_charges = [-1,0,2,1] + target_charge = 0 + fused_charges = fuse_charges([left_charges, right_charges],[1,1]) + print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] + ``` + we want to find the all different blocks + that fuse to `target_charge=0`, i.e. where `fused_charges==0`, + together with their corresponding index-values of the data in the dense array. + `find_dense_blocks` returns a dict mapping tuples `(left_charge, right_charge)` + to an array of integers. + For the above example, we get: + * for `left_charge` = -2 and `right_charge` = 2 we get an array [2]. Thus, `fused_charges[2]` + was obtained from fusing -2 and 2. + * for `left_charge` = 0 and `right_charge` = 0 we get an array [5, 13, 17]. Thus, + `fused_charges[5,13,17]` were obtained from fusing 0 and 0. + * for `left_charge` = 1 and `right_charge` = -1 we get an array [8]. Thus, `fused_charges[8]` + was obtained from fusing 1 and -1. + Args: + left_charges: An np.ndarray of integer charges. + left_flow: The flow direction of the left charges. + right_charges: An np.ndarray of integer charges. + right_flow: The flow direction of the right charges. + target_charge: The target charge. + Returns: + dict: Mapping tuples of integers to np.ndarray of integers. 
+ """ + _check_flows([left_flow, right_flow]) + unique_left, left_degeneracies = np.unique(left_charges, return_counts=True) + unique_right, right_degeneracies = np.unique( + right_charges, return_counts=True) + + common_charges = np.intersect1d( + unique_left, (target_charge - right_flow * unique_right) * left_flow, + assume_unique=True) + + right_locations = {} + for c in common_charges: + right_locations[(target_charge - left_flow * c) * right_flow] = np.nonzero( + right_charges == (target_charge - left_flow * c) * right_flow)[0] + + len_right_charges = len(right_charges) + indices = [] + for n in range(len(left_charges)): + c = left_charges[n] + indices.append(n * len_right_charges + right_locations[ + (target_charge - left_flow * c) * right_flow]) + return np.concatenate(indices) + + +def find_sparse_positions(left_charges: np.ndarray, left_flow: int, + right_charges: np.ndarray, right_flow: int, + target_charges: Union[List[int], np.ndarray]) -> Dict: + """ + Find the sparse locations of elements (i.e. the index-values within the SPARSE tensor) + in the vector `fused_charges` (resulting from fusing np.ndarrays + `left_charges` and `right_charges`) that have a value of `target_charge`, + assuming that all elements different from `target_charges` are `0`. + For example, given + ``` + left_charges = [-2,0,1,0,0] + right_charges = [-1,0,2,1] + target_charges = [0,1] + fused_charges = fuse_charges([left_charges, right_charges],[1,1]) + print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] + ``` 0 1 2 3 4 5 6 7 8 + we want to find the all different blocks + that fuse to `target_charges=[0,1]`, i.e. where `fused_charges==0` or `1`, + together with their corresponding sparse index-values of the data in the sparse array, + assuming that all elements in `fused_charges` different from `target_charges` are 0. 
+ + `find_sparse_blocks` returns a dict mapping integers `target_charge` + to an array of integers denoting the sparse locations of elements within + `fused_charges`. + For the above example, we get: + * `target_charge=0`: [0,1,3,5,7] + * `target_charge=1`: [2,4,6,8] + Args: + left_charges: An np.ndarray of integer charges. + left_flow: The flow direction of the left charges. + right_charges: An np.ndarray of integer charges. + right_flow: The flow direction of the right charges. + target_charge: The target charge. + Returns: + dict: Mapping integers to np.ndarray of integers. + """ + #FIXME: this is probably still not optimal + + _check_flows([left_flow, right_flow]) + target_charges = np.unique(target_charges) + unique_left = np.unique(left_charges) + unique_right = np.unique(right_charges) + fused = fuse_charges([unique_left, unique_right], [left_flow, right_flow]) + + #compute all unique charges that can add up to + #target_charges + left_inds, right_inds = [], [] + for target_charge in target_charges: + li, ri = unfuse( + np.nonzero(fused == target_charge)[0], len(unique_left), + len(unique_right)) + left_inds.append(li) + right_inds.append(ri) + + #now compute the relevant unique left and right charges + unique_left_charges = unique_left[np.unique(np.concatenate(left_inds))] + unique_right_charges = unique_right[np.unique(np.concatenate(right_inds))] + + #only keep those charges that are relevant + relevant_left_charges = left_charges[np.isin(left_charges, + unique_left_charges)] + relevant_right_charges = right_charges[np.isin(right_charges, + unique_right_charges)] + + unique_right_charges, right_dims = np.unique( + relevant_right_charges, return_counts=True) + right_degeneracies = dict(zip(unique_right_charges, right_dims)) + #generate a degeneracy vector which for each value r in relevant_right_charges + #holds the corresponding number of non-zero elements `relevant_right_charges` + #that can add up to `target_charges`. 
+ degeneracy_vector = np.empty(len(relevant_left_charges), dtype=np.int64) + right_indices = {} + for left_charge in unique_left_charges: + total_degeneracy = np.sum(right_dims[np.isin( + left_flow * left_charge + right_flow * unique_right_charges, + target_charges)]) + tmp_relevant_right_charges = relevant_right_charges[np.isin( + relevant_right_charges, + (target_charges - left_flow * left_charge) * right_flow)] + + for target_charge in target_charges: + right_indices[(left_charge, target_charge)] = np.nonzero( + tmp_relevant_right_charges == + (target_charge - left_flow * left_charge) * right_flow)[0] + + degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy + + stop_positions = np.cumsum(degeneracy_vector) + start_positions = stop_positions - degeneracy_vector + blocks = {t: [] for t in target_charges} + for left_charge in unique_left_charges: + a = np.expand_dims(start_positions[relevant_left_charges == left_charge], 0) + for target_charge in target_charges: + ri = right_indices[(left_charge, target_charge)] + if len(ri) != 0: + b = np.expand_dims(ri, 1) + tmp = a + b + blocks[target_charge].append(np.reshape(tmp, np.prod(tmp.shape))) + out = {} + for target_charge in target_charges: + out[target_charge] = np.concatenate(blocks[target_charge]) + return out + + +def compute_dense_to_sparse_mapping_deprecated(charges: List[np.ndarray], + flows: List[Union[bool, int]], + target_charge: int) -> int: + """ + Compute the mapping from multi-index positions to the linear positions + within the sparse data container, given the meta-data of a symmetric tensor. + This function returns a list of np.ndarray `index_positions`, with + `len(index_positions)=len(charges)` (equal to the rank of the tensor). + When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. 
+ ` + multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) + ` + with `r` the rank of the tensor and `N` the number of non-zero elements of + the symmetric tensor, then the element at position `n` within the linear + data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], + i.e. `data[n]` has the multi-index `multi_indices[n,:]`, and the total charges + can for example be obtained using + ``` + index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) + total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) + for n in range(len(charges)): + total_charges += flows[n]*charges[n][index_positions[n]] + np.testing.assert_allclose(total_charges, 0) + ``` + Args: + charges: List of np.ndarray of int, one for each leg of the + underlying tensor. Each np.ndarray `charges[leg]` + is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + target_charge: The total target charge of the blocks to be calculated. + Returns: + np.ndarray: An (N, r) np.ndarray of dtype np.int16, + with `N` the number of non-zero elements, and `r` + the rank of the tensor. 
+ """ + #find the best partition (the one where left and right dimensions are + #closest + dims = np.asarray([len(c) for c in charges]) + t1 = time.time() + fused_charges = fuse_charges(charges, flows) + nz_indices = np.nonzero(fused_charges == target_charge)[0] + if len(nz_indices) == 0: + raise ValueError( + "`charges` do not add up to a total charge {}".format(target_charge)) + + index_locations = [] + for n in reversed(range(len(charges))): + t1 = time.time() + nz_indices, right_indices = unfuse(nz_indices, np.prod(dims[0:n]), dims[n]) + index_locations.insert(0, right_indices) + print(time.time() - t1) + return index_locations + + +def compute_dense_to_sparse_mapping_2(charges: List[np.ndarray], + flows: List[Union[bool, int]], + target_charge: int) -> int: + """ + Compute the mapping from multi-index positions to the linear positions + within the sparse data container, given the meta-data of a symmetric tensor. + This function returns a list of np.ndarray `index_positions`, with + `len(index_positions)=len(charges)` (equal to the rank of the tensor). + When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. + ` + multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) + ` + with `r` the rank of the tensor and `N` the number of non-zero elements of + the symmetric tensor, then the element at position `n` within the linear + data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], + i.e. `data[n]` has the multi-index `multi_indices[n,:]`, and the total charges + can for example be obtained using + ``` + index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) + total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) + for n in range(len(charges)): + total_charges += flows[n]*charges[n][index_positions[n]] + np.testing.assert_allclose(total_charges, 0) + ``` + Args: + charges: List of np.ndarray of int, one for each leg of the + underlying tensor. 
Each np.ndarray `charges[leg]` + is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + target_charge: The total target charge of the blocks to be calculated. + Returns: + np.ndarray: An (N, r) np.ndarray of dtype np.int16, + with `N` the number of non-zero elements, and `r` + the rank of the tensor. + """ + #find the best partition (the one where left and right dimensions are + #closest + dims = np.asarray([len(c) for c in charges]) + + #note: left_charges and right_charges have been fused from RIGHT to LEFT + left_charges, right_charges, partition = _find_best_partition(charges, flows) + t1 = time.time() + nz_indices = find_dense_positions( + left_charges, 1, right_charges, 1, target_charge=target_charge) + print(time.time() - t1) + if len(nz_indices) == 0: + raise ValueError( + "`charges` do not add up to a total charge {}".format(target_charge)) + t1 = time.time() + nz_left_indices, nz_right_indices = unfuse(nz_indices, len(left_charges), + len(right_charges)) + print(time.time() - t1) + index_locations = [] + #first unfuse left charges + for n in range(partition): + t1 = time.time() + indices, nz_left_indices = unfuse(nz_left_indices, dims[n], + np.prod(dims[n + 1:partition])) + index_locations.append(indices) + print(time.time() - t1) + for n in range(partition, len(dims)): + t1 = time.time() + indices, nz_right_indices = unfuse(nz_right_indices, dims[n], + np.prod(dims[n + 1::])) + index_locations.append(indices) + print(time.time() - t1) + + return index_locations + + +def compute_dense_to_sparse_mapping(charges: List[np.ndarray], + flows: List[Union[bool, int]], + target_charge: int) -> int: + """ + Compute the mapping from multi-index positions to the linear positions + within the sparse data container, given the meta-data of a symmetric tensor. 
+ This function returns a list of np.ndarray `index_positions`, with + `len(index_positions)=len(charges)` (equal to the rank of the tensor). + When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. + ` + multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) + ` + with `r` the rank of the tensor and `N` the number of non-zero elements of + the symmetric tensor, then the element at position `n` within the linear + data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], + i.e. `data[n]` has the multi-index `multi_indices[n,:]`, and the total charges + can for example be obtained using + ``` + index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) + total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) + for n in range(len(charges)): + total_charges += flows[n]*charges[n][index_positions[n]] + np.testing.assert_allclose(total_charges, 0) + ``` + Args: + charges: List of np.ndarray of int, one for each leg of the + underlying tensor. Each np.ndarray `charges[leg]` + is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + target_charge: The total target charge of the blocks to be calculated. + Returns: + list of np.ndarray: A list of length `r`, with `r` the rank of the tensor. + Each element in the list is an N-dimensional np.ndarray of int, + with `N` the number of non-zero elements. 
+ """ + #find the best partition (the one where left and right dimensions are + #closest + dims = np.asarray([len(c) for c in charges]) + #note: left_charges and right_charges have been fused from RIGHT to LEFT + left_charges, right_charges, partition = _find_best_partition(charges, flows) + nz_indices = find_dense_positions( + left_charges, 1, right_charges, 1, target_charge=target_charge) + + if len(nz_indices) == 0: + raise ValueError( + "`charges` do not add up to a total charge {}".format(target_charge)) + return np.unravel_index(nz_indices, dims) + + +class BlockSparseTensor: + """ + Minimal class implementation of block sparsity. + The class design follows Glen's proposal (Design 0). + The class currently only supports a single U(1) symmetry + and only numpy.ndarray. + + Attributes: + * self.data: A 1d np.ndarray storing the underlying + data of the tensor + * self.charges: A list of `np.ndarray` of shape + (D,), where D is the bond dimension. Once we go beyond + a single U(1) symmetry, this has to be updated. + + * self.flows: A list of integers of length `k`. + `self.flows` determines the flows direction of charges + on each leg of the tensor. A value of `-1` denotes + outflowing charge, a value of `1` denotes inflowing + charge. + + The tensor data is stored in self.data, a 1d np.ndarray. + """ + + def __init__(self, data: np.ndarray, indices: List[Index]) -> None: + """ + Args: + data: An np.ndarray of the data. The number of elements in `data` + has to match the number of non-zero elements defined by `charges` + and `flows` + indices: List of `Index` objecst, one for each leg. 
+ """ + self.indices = indices + _check_flows(self.flows) + num_non_zero_elements = compute_num_nonzero(self.charges, self.flows) + + if num_non_zero_elements != len(data.flat): + raise ValueError("number of tensor elements defined " + "by `charges` is different from" + " len(data)={}".format(len(data.flat))) + + self.data = np.asarray(data.flat) #do not copy data + + @classmethod + def randn(cls, indices: List[Index], + dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": + """ + Initialize a random symmetric tensor from random normal distribution. + Args: + indices: List of `Index` objecst, one for each leg. + dtype: An optional numpy dtype. The dtype of the tensor + Returns: + BlockSparseTensor + """ + charges = [i.charges for i in indices] + flows = [i.flow for i in indices] + num_non_zero_elements = compute_num_nonzero(charges, flows) + backend = backend_factory.get_backend('numpy') + data = backend.randn((num_non_zero_elements,), dtype=dtype) + return cls(data=data, indices=indices) + + @classmethod + def random(cls, indices: List[Index], + dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": + """ + Initialize a random symmetric tensor from random normal distribution. + Args: + indices: List of `Index` objecst, one for each leg. + dtype: An optional numpy dtype. 
The dtype of the tensor + Returns: + BlockSparseTensor + """ + charges = [i.charges for i in indices] + flows = [i.flow for i in indices] + num_non_zero_elements = compute_num_nonzero(charges, flows) + dtype = dtype if dtype is not None else self.np.float64 + + def init_random(): + if ((np.dtype(dtype) is np.dtype(np.complex128)) or + (np.dtype(dtype) is np.dtype(np.complex64))): + return np.random.rand(num_non_zero_elements).astype( + dtype) - 0.5 + 1j * ( + np.random.rand(num_non_zero_elements).astype(dtype) - 0.5) + return np.random.randn(num_non_zero_elements).astype(dtype) - 0.5 + + return cls(data=init_random(), indices=indices) + + @property + def rank(self): + return len(self.indices) + + @property + def dense_shape(self) -> Tuple: + """ + The dense shape of the tensor. + Returns: + Tuple: A tuple of `int`. + """ + return tuple([i.dimension for i in self.indices]) + + @property + def shape(self) -> Tuple: + """ + The sparse shape of the tensor. + Returns: + Tuple: A tuple of `Index` objects. + """ + return tuple(self.indices) + + @property + def dtype(self) -> Type[np.number]: + return self.data.dtype + + @property + def flows(self): + return [i.flow for i in self.indices] + + @property + def charges(self): + return [i.charges for i in self.indices] + + def transpose(self, + order: Union[List[int], np.ndarray], + transposed_linear_positions: Optional[np.ndarray] = None + ) -> "BlockSparseTensor": + """ + Transpose the tensor into the new order `order`. This routine currently shuffles + data. + Args: + order: The new order of indices. + transposed_linear_positions: An np.ndarray of int for reshuffling the data, + typically the output of a prior call to `transpose`. Passing `transposed_linear_positions` + can greatly speed up the transposition. + Returns: + BlockSparseTensor: The transposed tensor. + """ + #FIXME: this implementation uses scipy.sparse.csr_matrix to generate the + #lookup-table from dense to sparse indices. 
According to some quick + #testing, the final lookup is currently the bottleneck. + #FIXME: transpose currently shuffles data. This can in principle be postponed + #until `tensordot` or `find_diagonal_sparsenn_blocks`, at the cost of + #maintaining two lookup tables for sparse-to-dense positions and dense-to-sparse + #positions + if len(order) != self.rank: + raise ValueError( + "`len(order)={}` is different form `self.rank={}`".format( + len(order), self.rank)) + #transpose is the only function using self.dense_to_sparse_table + #so we can initialize it here. This will change if we are implementing + #lazy shuffling of data. In this case, `find_diagonal_sparse_blocks` + #also needs + + #we use elementary indices here because it is + #more efficient to get the fused charges using + #the best partition + if transposed_linear_positions is None: + elementary_indices = {} + flat_elementary_indices = [] + + for n in range(self.rank): + elementary_indices[n] = self.indices[n].get_elementary_indices() + flat_elementary_indices.extend(elementary_indices[n]) + flat_index_list = np.arange(len(flat_elementary_indices)) + cum_num_legs = np.append( + 0, np.cumsum([len(elementary_indices[n]) for n in range(self.rank)])) + flat_order = np.concatenate( + [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) + + flat_charges = [i.charges for i in flat_elementary_indices] + flat_flows = [i.flow for i in flat_elementary_indices] + flat_dims = [len(c) for c in flat_charges] + flat_strides = np.flip(np.append(1, np.cumprod(np.flip(flat_dims[1::])))) + if not hasattr(self, 'dense_to_sparse_table'): + #find the best partition into left and right charges + left_charges, right_charges, _ = _find_best_partition( + flat_charges, flat_flows) + #find the index-positions of the elements in the fusion + #of `left_charges` and `right_charges` that have `0` + #total charge (those are the only non-zero elements). 
+ linear_positions = find_dense_positions( + left_charges, 1, right_charges, 1, target_charge=0) + + self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( + len(self.data)), (linear_positions, + np.zeros(len(self.data), dtype=np.int64)))) + + flat_tr_charges = [flat_charges[n] for n in flat_order] + flat_tr_flows = [flat_flows[n] for n in flat_order] + flat_tr_strides = [flat_strides[n] for n in flat_order] + flat_tr_dims = [flat_dims[n] for n in flat_order] + + tr_left_charges, tr_right_charges, _ = _find_best_partition( + flat_tr_charges, flat_tr_flows) + #FIXME: this should be done without fully fusing the strides + tr_dense_linear_positions = fuse_charges([ + np.arange(flat_tr_dims[n]) * flat_tr_strides[n] + for n in range(len(flat_tr_dims)) + ], + flows=[1] * len(flat_tr_dims)) + tr_linear_positions = find_dense_positions(tr_left_charges, 1, + tr_right_charges, 1, 0) + + inds = np.squeeze(self.dense_to_sparse_table[ + tr_dense_linear_positions[tr_linear_positions], 0].toarray()) + else: + inds = transposed_linear_positions + self.data = self.data[inds] + return inds + + def transpose_intersect1d( + self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": + """ + Transpose the tensor into the new order `order` + Args: pp + order: The new order of indices. + Returns: + BlockSparseTensor: The transposed tensor. + """ + #FIXME: this implementation uses scipy.sparse.csr_matrix to generate the + #lookup-table from dense to sparse indices. According to some quick + #testing, the final lookup is currently the bottleneck. + #FIXME: transpose currently shuffles data. 
This can in principle be postponed + #until `tensordot` or `find_diagonal_sparse_blocks` + if len(order) != self.rank: + raise ValueError(len(order), self.rank) + charges = self.charges #call only once in case some of the indices are merged indices + dims = [len(c) for c in charges] + + strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) + #find the best partition into left and right charges + left_charges, right_charges, _ = _find_best_partition(charges, self.flows) + #find the index-positions of the elements in the fusion + #of `left_charges` and `right_charges` that have `0` + #total charge (those are the only non-zero elements). + linear_positions = find_dense_positions( + left_charges, 1, right_charges, 1, target_charge=0) + + tr_charges = [charges[n] for n in order] + tr_flows = [self.flows[n] for n in order] + tr_strides = [strides[n] for n in order] + tr_dims = [dims[n] for n in order] + tr_left_charges, tr_right_charges, _ = _find_best_partition( + tr_charges, tr_flows) + + tr_dense_linear_positions = fuse_charges( + [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], + flows=[1] * len(tr_dims)) + tr_linear_positions = find_dense_positions(tr_left_charges, 1, + tr_right_charges, 1, 0) + new_linear_positions = tr_dense_linear_positions[tr_linear_positions] + _, _, inds = np.intersect1d( + linear_positions, + new_linear_positions, + return_indices=True, + assume_unique=True) + self.data = self.data[inds] + + # def transpose_lookup(self, order: Union[List[int], np.ndarray] + # ) -> "BlockSparseTensor": + # """ + # Deprecated + + # Transpose the tensor into the new order `order`. Uses a simple cython std::map + # for the lookup + # Args: + # order: The new order of indices. + # Returns: + # BlockSparseTensor: The transposed tensor. 
+ # """ + # if len(order) != self.rank: + # raise ValueError( + # "`len(order)={}` is different form `self.rank={}`".format( + # len(order), self.rank)) + # charges = self.charges #call only once in case some of the indices are merged indices + # dims = [len(c) for c in charges] + + # strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) + # #find the best partition into left and right charges + # left_charges, right_charges, _ = _find_best_partition(charges, self.flows) + # #find the index-positions of the elements in the fusion + # #of `left_charges` and `right_charges` that have `0` + # #total charge (those are the only non-zero elements). + # linear_positions = find_dense_positions( + # left_charges, 1, right_charges, 1, target_charge=0) + + # tr_charges = [charges[n] for n in order] + # tr_flows = [self.flows[n] for n in order] + # tr_strides = [strides[n] for n in order] + # tr_dims = [dims[n] for n in order] + # tr_left_charges, tr_right_charges, _ = _find_best_partition( + # tr_charges, tr_flows) + # #FIXME: this should be done without fully fusing the strides + # tr_dense_linear_positions = fuse_charges( + # [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], + # flows=[1] * len(tr_dims)) + # tr_linear_positions = find_dense_positions(tr_left_charges, 1, + # tr_right_charges, 1, 0) + # inds = lookup(linear_positions, + # tr_dense_linear_positions[tr_linear_positions]) + # self.data = self.data[inds] + + def transpose_searchsorted( + self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": + """ + Deprecated: + + Transpose the tensor into the new order `order`. Uses `np.searchsorted` + for the lookup. + Args: + order: The new order of indices. + Returns: + BlockSparseTensor: The transposed tensor. 
+ """ + if len(order) != self.rank: + raise ValueError( + "`len(order)={}` is different form `self.rank={}`".format( + len(order), self.rank)) + charges = self.charges #call only once in case some of the indices are merged indices + dims = [len(c) for c in charges] + + strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) + #find the best partition into left and right charges + left_charges, right_charges, _ = _find_best_partition(charges, self.flows) + #find the index-positions of the elements in the fusion + #of `left_charges` and `right_charges` that have `0` + #total charge (those are the only non-zero elements). + linear_positions = find_dense_positions( + left_charges, 1, right_charges, 1, target_charge=0) + + tr_charges = [charges[n] for n in order] + tr_flows = [self.flows[n] for n in order] + tr_strides = [strides[n] for n in order] + tr_dims = [dims[n] for n in order] + tr_left_charges, tr_right_charges, _ = _find_best_partition( + tr_charges, tr_flows) + #FIXME: this should be done without fully fusing the strides + tr_dense_linear_positions = fuse_charges( + [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], + flows=[1] * len(tr_dims)) + tr_linear_positions = find_dense_positions(tr_left_charges, 1, + tr_right_charges, 1, 0) + + inds = np.searchsorted(linear_positions, + tr_dense_linear_positions[tr_linear_positions]) + self.data = self.data[inds] + + def reset_shape(self) -> None: + """ + Bring the tensor back into its elementary shape. + """ + self.indices = self.get_elementary_indices() + + def get_elementary_indices(self) -> List: + """ + Compute the elementary indices of the array. + """ + elementary_indices = [] + for i in self.indices: + elementary_indices.extend(i.get_elementary_indices()) + + return elementary_indices + + def reshape(self, shape: Union[Iterable[Index], Iterable[int]]) -> None: + """ + Reshape `tensor` into `shape` in place. 
+ `BlockSparseTensor.reshape` works essentially the same as the dense + version, with the notable exception that the tensor can only be + reshaped into a form compatible with its elementary indices. + The elementary indices are the indices at the leaves of the `Index` + objects `tensors.indices`. + For example, while the following reshaping is possible for regular + dense numpy tensor, + ``` + A = np.random.rand(6,6,6) + np.reshape(A, (2,3,6,6)) + ``` + the same code for BlockSparseTensor + ``` + q1 = np.random.randint(0,10,6) + q2 = np.random.randint(0,10,6) + q3 = np.random.randint(0,10,6) + i1 = Index(charges=q1,flow=1) + i2 = Index(charges=q2,flow=-1) + i3 = Index(charges=q3,flow=1) + A=BlockSparseTensor.randn(indices=[i1,i2,i3]) + print(A.shape) #prints (6,6,6) + A.reshape((2,3,6,6)) #raises ValueError + ``` + raises a `ValueError` since (2,3,6,6) + is incompatible with the elementary shape (6,6,6) of the tensor. + + Args: + tensor: A symmetric tensor. + shape: The new shape. Can either be a list of `Index` + or a list of `int`. + Returns: + BlockSparseTensor: A new tensor reshaped into `shape` + """ + dense_shape = [] + for s in shape: + if isinstance(s, Index): + dense_shape.append(s.dimension) + else: + dense_shape.append(s) + # a few simple checks + if np.prod(dense_shape) != np.prod(self.dense_shape): + raise ValueError("A tensor with {} elements cannot be " + "reshaped into a tensor with {} elements".format( + np.prod(self.shape), np.prod(dense_shape))) + + #keep a copy of the old indices for the case where reshaping fails + #FIXME: this is pretty hacky! + index_copy = [i.copy() for i in self.indices] + + def raise_error(): + #if this error is raised then `shape` is incompatible + #with the elementary indices. We then reset the shape + #to what is was before the call to `reshape`. 
+ self.indices = index_copy + elementary_indices = [] + for i in self.indices: + elementary_indices.extend(i.get_elementary_indices()) + raise ValueError("The shape {} is incompatible with the " + "elementary shape {} of the tensor.".format( + dense_shape, + tuple([e.dimension for e in elementary_indices]))) + + self.reset_shape() #bring tensor back into its elementary shape + for n in range(len(dense_shape)): + if dense_shape[n] > self.dense_shape[n]: + while dense_shape[n] > self.dense_shape[n]: + #fuse indices + i1, i2 = self.indices.pop(n), self.indices.pop(n) + #note: the resulting flow is set to one since the flow + #is multiplied into the charges. As a result the tensor + #will then be invariant in any case. + self.indices.insert(n, fuse_index_pair(i1, i2)) + if self.dense_shape[n] > dense_shape[n]: + raise_error() + elif dense_shape[n] < self.dense_shape[n]: + raise_error() + #at this point the first len(dense_shape) indices of the tensor + #match the `dense_shape`. + while len(dense_shape) < len(self.indices): + i2, i1 = self.indices.pop(), self.indices.pop() + self.indices.append(fuse_index_pair(i1, i2)) + + def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: + """ + Obtain the diagonal blocks of symmetric matrix. + BlockSparseTensor has to be a matrix. + For matrices with shape[0] << shape[1], this routine avoids explicit fusion + of column charges. + + Args: + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. 
+ Returns: + dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) + + """ + if self.rank != 2: + raise ValueError( + "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" + .format(self.rank)) + + row_indices = self.indices[0].get_elementary_indices() + column_indices = self.indices[1].get_elementary_indices() + + return find_diagonal_sparse_blocks( + data=self.data, + row_charges=[i.charges for i in row_indices], + column_charges=[i.charges for i in column_indices], + row_flows=[i.flow for i in row_indices], + column_flows=[i.flow for i in column_indices], + return_data=return_data) + + def get_diagonal_blocks_deprecated_1( + self, return_data: Optional[bool] = True) -> Dict: + """ + Obtain the diagonal blocks of symmetric matrix. + BlockSparseTensor has to be a matrix. + For matrices with shape[0] << shape[1], this routine avoids explicit fusion + of column charges. + + Args: + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. 
+ Returns: + dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) + + """ + if self.rank != 2: + raise ValueError( + "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" + .format(self.rank)) + + row_indices = self.indices[0].get_elementary_indices() + column_indices = self.indices[1].get_elementary_indices() + + return find_diagonal_sparse_blocks_deprecated_1( + data=self.data, + row_charges=[i.charges for i in row_indices], + column_charges=[i.charges for i in column_indices], + row_flows=[i.flow for i in row_indices], + column_flows=[i.flow for i in column_indices], + return_data=return_data) + + def get_diagonal_blocks_deprecated_0( + self, return_data: Optional[bool] = True) -> Dict: + """ + Deprecated + + Obtain the diagonal blocks of symmetric matrix. + BlockSparseTensor has to be a matrix. + Args: + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. + Returns: + dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) + + """ + if self.rank != 2: + raise ValueError( + "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" + .format(self.rank)) + + return find_diagonal_sparse_blocks_deprecated_0( + data=self.data, + charges=self.charges, + flows=self.flows, + return_data=return_data) + + +def reshape(tensor: BlockSparseTensor, + shape: Union[Iterable[Index], Iterable[int]]) -> BlockSparseTensor: + """ + Reshape `tensor` into `shape`. 
+ `reshape` works essentially the same as the dense version, with the + notable exception that the tensor can only be reshaped into a form + compatible with its elementary indices. The elementary indices are + the indices at the leaves of the `Index` objects `tensors.indices`. + For example, while the following reshaping is possible for regular + dense numpy tensor, + ``` + A = np.random.rand(6,6,6) + np.reshape(A, (2,3,6,6)) + ``` + the same code for BlockSparseTensor + ``` + q1 = np.random.randint(0,10,6) + q2 = np.random.randint(0,10,6) + q3 = np.random.randint(0,10,6) + i1 = Index(charges=q1,flow=1) + i2 = Index(charges=q2,flow=-1) + i3 = Index(charges=q3,flow=1) + A=BlockSparseTensor.randn(indices=[i1,i2,i3]) + print(A.shape) #prints (6,6,6) + reshape(A, (2,3,6,6)) #raises ValueError + ``` + raises a `ValueError` since (2,3,6,6) + is incompatible with the elementary shape (6,6,6) of the tensor. + + Args: + tensor: A symmetric tensor. + shape: The new shape. Can either be a list of `Index` + or a list of `int`. 
+ Returns: + BlockSparseTensor: A new tensor reshaped into `shape` + """ + result = BlockSparseTensor( + data=tensor.data.copy(), indices=[i.copy() for i in tensor.indices]) + result.reshape(shape) + return result From 71f3e2ca609f50bc99d65bd4322f4a1391bff6b9 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 3 Jan 2020 16:51:48 -0500 Subject: [PATCH 107/212] shorter code --- tensornetwork/block_tensor/charge.py | 42 +++++++++++++++++++++------- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 15872b41b..0c8a7834d 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -441,23 +441,45 @@ def num_symmetries(self): return np.sum([c.num_symmetries for c in self.charges]) -def fuse_charges(charges: List[ChargeCollection], - flows: List[Union[bool, int]]) -> ChargeCollection: +def fuse_charges( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[bool, int]]) -> Union[BaseCharge, ChargeCollection]: """ - Fuse all `charges` by simple addition (valid - for U(1) charges). Charges are fused from "right to left", - in accordance with row-major order (see `fuse_charges_pair`). + Fuse all `charges` into a new charge. + Charges are fused from "right to left", + in accordance with row-major order. Args: - chargs: A list of charges to be fused. + charges: A list of charges to be fused. flows: A list of flows, one for each element in `charges`. Returns: - np.ndarray: The result of fusing `charges`. + ChargeCollection: The result of fusing `charges`. 
""" - if len(charges) == 1: - #nothing to do - return flows[0] * charges[0] + if len(charges) != len(flows): + raise ValueError( + "`charges` and `flows` are of unequal lengths {} != {}".format( + len(charges), len(flows))) fused_charges = charges[0] * flows[0] for n in range(1, len(charges)): fused_charges = fused_charges + flows[n] * charges[n] return fused_charges + + +def fuse_degeneracies(degen1: Union[List, np.ndarray], + degen2: Union[List, np.ndarray]) -> np.ndarray: + """ + Fuse degeneracies `degen1` and `degen2` of two leg-charges + by simple kronecker product. `degen1` and `degen2` typically belong to two + consecutive legs of `BlockSparseTensor`. + Given `degen1 = [1, 2, 3]` and `degen2 = [10, 100]`, this returns + `[10, 100, 20, 200, 30, 300]`. + When using row-major ordering of indices in `BlockSparseTensor`, + the position of `degen1` should be "to the left" of the position of `degen2`. + Args: + degen1: Iterable of integers + degen2: Iterable of integers + Returns: + np.ndarray: The result of fusing `dege1` with `degen2`. 
+ """ + return np.reshape(degen1[:, None] * degen2[None, :], + len(degen1) * len(degen2)) From dd6d99235c16d3c8ab87c07a4ea2acad2e1724af Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 3 Jan 2020 21:29:10 -0500 Subject: [PATCH 108/212] add __eq__, remove nonzero, add unique --- tensornetwork/block_tensor/charge.py | 74 +++++++++++++++++----------- 1 file changed, 46 insertions(+), 28 deletions(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 0c8a7834d..bdf444490 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -111,6 +111,28 @@ def __rmul__(self, number: Union[bool, int]) -> "BaseCharge": def dtype(self): return self.charges.dtype + def unique(self, + return_index=False, + return_inverse=False, + return_counts=False): + return np.unique( + self.charges, + return_index=return_index, + return_inverse=return_inverse, + return_counts=return_counts) + + def __eq__(self, target_charges): + if len(target_charges) != len(self.shifts): + raise ValueError("len(target_charges) = {} is different " + "from len(shifts) = {}".format( + len(target_charges), len(self.shifts))) + _target_charges = np.asarray(target_charges).astype(self.charges.dtype) + target = np.sum([ + np.left_shift(_target_charges[n], self.shifts[n]) + for n in range(len(self.shifts)) + ]) + return self.charges == target + class U1Charge(BaseCharge): """ @@ -231,18 +253,6 @@ def dual_charges(self) -> np.ndarray: #the dual of a U1 charge is its negative value return self.charges * self.dtype.type(-1) - def nonzero(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: - if len(target_charges) != len(self.shifts): - raise ValueError("len(target_charges) = {} is different " - "from len(U1Charge.shifts) = {}".format( - len(target_charges), len(self.shifts))) - _target_charges = np.asarray(target_charges).astype(self.charges.dtype) - target = np.sum([ - np.left_shift(_target_charges[n], self.shifts[n]) - for n in 
range(len(self.shifts)) - ]) - return np.nonzero(self.charges == target)[0] - class Z2Charge(BaseCharge): """ @@ -350,21 +360,11 @@ def __repr__(self): return 'Z2-charge: \n' + 'shifts: ' + self.shifts.__repr__( ) + '\n' + 'charges: ' + self.charges.__repr__() + '\n' - def nonzero(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: - if len(target_charges) != len(self.shifts): - raise ValueError("len(target_charges) = {} is different " - "from len(U1Charge.shifts) = {}".format( - len(target_charges), len(self.shifts))) - + def __eq__(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: if not np.all(np.isin(target_charges, np.asarray([0, 1]))): - raise ValueError("Z2-charges can only be 0 or 1, found {}".format( + raise ValueError("Z2-charges can only be 0 or 1, found charges {}".format( np.unique(target_charges))) - _target_charges = np.asarray(target_charges).astype(self.charges.dtype) - target = np.sum([ - np.left_shift(_target_charges[n], self.shifts[n]) - for n in range(len(self.shifts)) - ]) - return np.nonzero(self.charges == target)[0] + return super().__eq__(target_charges) class ChargeCollection: @@ -436,9 +436,27 @@ def __rmul__(self, number: Union[bool, int]) -> "Charge": return self.__mul__(number) - @property - def num_symmetries(self): - return np.sum([c.num_symmetries for c in self.charges]) + def unique(self, + return_index=False, + return_inverse=False, + return_counts=False): + return np.unique( + np.stack([self.charges[n].charges for n in range(len(self.charges))], + axis=1), + return_index=return_index, + return_inverse=return_inverse, + return_counts=return_counts, + axis=0) + + def __eq__(self, target_charges): + if len(target_charges) != len(self.charges): + raise ValueError( + "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" + .format(len(target_charges), len(self.charges))) + return np.logical_and.reduce([ + self.charges[n] == target_charges[n] + for n in range(len(target_charges)) + ]) def 
fuse_charges( From 093d696543aabcf80c3fcdf8520b5321a0dcfb72 Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 4 Jan 2020 08:56:17 -0500 Subject: [PATCH 109/212] working on charges.py --- tensornetwork/block_tensor/block_tensor.py | 52 ++++----- .../block_tensor/block_tensor_new.py | 47 ++------ tensornetwork/block_tensor/charge.py | 72 ++++++++++-- tensornetwork/block_tensor/charge_test.py | 26 +++-- tensornetwork/block_tensor/index.py | 110 ------------------ tensornetwork/block_tensor/index_new_test.py | 4 +- 6 files changed, 122 insertions(+), 189 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index e54f6727c..42eaf96c9 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -464,11 +464,11 @@ def find_diagonal_sparse_blocks_depreacated_1( return blocks -def find_diagonal_sparse_blocks_deprecated_0(data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], - return_data: Optional[bool] = True - ) -> Dict: +def find_diagonal_sparse_blocks_deprecated_0( + data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: """ Deprecated: this version is about 2 times slower (worst case) than the current used implementation @@ -570,11 +570,11 @@ def find_diagonal_sparse_blocks_deprecated_0(data: np.ndarray, return blocks -def find_diagonal_sparse_blocks_column_major(data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], - return_data: Optional[bool] = True - ) -> Dict: +def find_diagonal_sparse_blocks_column_major( + data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: """ Deprecated @@ -789,9 +789,8 @@ def find_dense_positions(left_charges: np.ndarray, left_flow: int, indices = [] for n in range(len(left_charges)): c = left_charges[n] - indices.append(n * 
len_right_charges + - right_locations[(target_charge - left_flow * c) * - right_flow]) + indices.append(n * len_right_charges + right_locations[ + (target_charge - left_flow * c) * right_flow]) return np.concatenate(indices) @@ -877,9 +876,8 @@ def find_sparse_positions(left_charges: np.ndarray, left_flow: int, for target_charge in target_charges: right_indices[(left_charge, target_charge)] = np.nonzero( - tmp_relevant_right_charges == (target_charge - - left_flow * left_charge) * - right_flow)[0] + tmp_relevant_right_charges == + (target_charge - left_flow * left_charge) * right_flow)[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy @@ -1266,9 +1264,9 @@ def transpose(self, linear_positions = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=0) - self.dense_to_sparse_table = sp.sparse.csr_matrix( - (np.arange(len(self.data)), - (linear_positions, np.zeros(len(self.data), dtype=np.int64)))) + self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( + len(self.data)), (linear_positions, + np.zeros(len(self.data), dtype=np.int64)))) flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] @@ -1293,8 +1291,8 @@ def transpose(self, self.data = self.data[inds] return inds - def transpose_intersect1d(self, order: Union[List[int], np.ndarray] - ) -> "BlockSparseTensor": + def transpose_intersect1d( + self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order` Args: pp @@ -1385,8 +1383,8 @@ def transpose_intersect1d(self, order: Union[List[int], np.ndarray] # tr_dense_linear_positions[tr_linear_positions]) # self.data = self.data[inds] - def transpose_searchsorted(self, order: Union[List[int], np.ndarray] - ) -> "BlockSparseTensor": + def transpose_searchsorted( + self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": """ Deprecated: @@ -1566,8 +1564,8 @@ def get_diagonal_blocks(self, return_data: 
Optional[bool] = True) -> Dict: column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_1(self, return_data: Optional[bool] = True - ) -> Dict: + def get_diagonal_blocks_deprecated_1( + self, return_data: Optional[bool] = True) -> Dict: """ Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. @@ -1602,8 +1600,8 @@ def get_diagonal_blocks_deprecated_1(self, return_data: Optional[bool] = True column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_0(self, return_data: Optional[bool] = True - ) -> Dict: + def get_diagonal_blocks_deprecated_0( + self, return_data: Optional[bool] = True) -> Dict: """ Deprecated diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py index 42eaf96c9..39f66e725 100644 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -18,10 +18,8 @@ import numpy as np #from tensornetwork.block_tensor.lookup import lookup # pylint: disable=line-too-long -from tensornetwork.network_components import Node, contract, contract_between -from tensornetwork.backends import backend_factory -# pylint: disable=line-too-long -from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charge_pair, fuse_degeneracies, fuse_charges, unfuse +from tensornetwork.block_tensor.index_new import Index, fuse_index_pair, split_index, +from tensornetwork.block_tensor.charges import fuse_degeneracies, fuse_charges, fuse_degeneracies, BaseCharge, ChargeCollection import numpy as np import scipy as sp import itertools @@ -30,13 +28,14 @@ Tensor = Any -def _check_flows(flows) -> None: +def _check_flows(flows: List[int]) -> None: if (set(flows) != {1}) and (set(flows) != {-1}) and (set(flows) != {-1, 1}): raise ValueError( "flows = {} contains values different from 1 and -1".format(flows)) -def 
_find_best_partition(charges, flows): +def _find_best_partition(charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[int]): if len(charges) == 1: raise ValueError( '_expecting `charges` with a length of at least 2, got `len(charges)={}`' @@ -54,33 +53,9 @@ def _find_best_partition(charges, flows): return fused_left_charges, fused_right_charges, min_ind + 1 -def map_to_integer(dims: Union[List, np.ndarray], - table: np.ndarray, - dtype: Optional[Type[np.number]] = np.int64): - """ - Map a `table` of integers of shape (N, r) bijectively into - an np.ndarray `integers` of length N of unique numbers. - The mapping is done using - ``` - `integers[n] = table[n,0] * np.prod(dims[1::]) + table[n,1] * np.prod(dims[2::]) + ... + table[n,r-1] * 1` - - Args: - dims: An iterable of integers. - table: An array of shape (N,r) of integers. - dtype: An optional dtype used for the conversion. - Care should be taken when choosing this to avoid overflow issues. - Returns: - np.ndarray: An array of integers. - """ - converter_table = np.expand_dims( - np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))), 0) - tmp = table * converter_table - integers = np.sum(tmp, axis=1) - return integers - - -def compute_fused_charge_degeneracies(charges: List[np.ndarray], - flows: List[Union[bool, int]]) -> Dict: +def compute_fused_charge_degeneracies( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[bool, int]]) -> Dict: """ For a list of charges, compute all possible fused charges resulting from fusing `charges`, together with their respective degeneracyn @@ -97,12 +72,12 @@ def compute_fused_charge_degeneracies(charges: List[np.ndarray], dict: Mapping fused charges (int) to degeneracies (int) """ if len(charges) == 1: - return np.unique(charges[0], return_counts=True) + return charges[0].unique(return_counts=True) # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". 
- accumulated_charges, accumulated_degeneracies = np.unique( - charges[0], return_counts=True) + accumulated_charges, accumulated_degeneracies = charges[0].unique( + return_counts=True) #multiply the flow into the charges of first leg accumulated_charges *= flows[0] for n in range(1, len(charges)): diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index bdf444490..1f0c54b07 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -114,12 +114,38 @@ def dtype(self): def unique(self, return_index=False, return_inverse=False, - return_counts=False): - return np.unique( + return_counts=False + ) -> Tuple["BaseCharge", np.ndarray, np.ndarray, np.ndarray]: + """ + Compute the unique charges in `BaseCharge`. + See np.unique for a more detailed explanation. This function + does the same but instead of a np.ndarray, it returns the unique + elements in a `BaseCharge` object. + Args: + return_index: If `True`, also return the indices of `self.charges` (along the specified axis, + if provided, or in the flattened array) that result in the unique array. + return_inverse: If `True`, also return the indices of the unique array (for the specified + axis, if provided) that can be used to reconstruct `self.charges`. + return_counts: If `True`, also return the number of times each unique item appears + in `self.charges`. + Returns: + BaseCharge: The sorted unique values. + np.ndarray: The indices of the first occurrences of the unique values in the + original array. Only provided if `return_index` is True. + np.ndarray: The indices to reconstruct the original array from the + unique array. Only provided if `return_inverse` is True. + np.ndarray: The number of times each of the unique values comes up in the + original array. Only provided if `return_counts` is True. 
+ """ + result = np.unique( self.charges, return_index=return_index, return_inverse=return_inverse, return_counts=return_counts) + out = self.__new__(type(self)) + out.__init__([result[0]], self.shifts) + + return tuple([out] + [result[n] for n in range(1, len(result))]) def __eq__(self, target_charges): if len(target_charges) != len(self.shifts): @@ -436,11 +462,35 @@ def __rmul__(self, number: Union[bool, int]) -> "Charge": return self.__mul__(number) - def unique(self, - return_index=False, - return_inverse=False, - return_counts=False): - return np.unique( + def unique( + self, + return_index=False, + return_inverse=False, + return_counts=False, + ) -> Tuple["ChargeCollection", np.ndarray, np.ndarray, np.ndarray]: + """ + Compute the unique charges in `BaseCharge`. + See np.unique for a more detailed explanation. This function + does the same but instead of a np.ndarray, it returns the unique + elements in a `BaseCharge` object. + Args: + return_index: If `True`, also return the indices of `self.charges` (along the specified axis, + if provided, or in the flattened array) that result in the unique array. + return_inverse: If `True`, also return the indices of the unique array (for the specified + axis, if provided) that can be used to reconstruct `self.charges`. + return_counts: If `True`, also return the number of times each unique item appears + in `self.charges`. + Returns: + BaseCharge: The sorted unique values. + np.ndarray: The indices of the first occurrences of the unique values in the + original array. Only provided if `return_index` is True. + np.ndarray: The indices to reconstruct the original array from the + unique array. Only provided if `return_inverse` is True. + np.ndarray: The number of times each of the unique values comes up in the + original array. Only provided if `return_counts` is True. 
+ """ + + result = np.unique( np.stack([self.charges[n].charges for n in range(len(self.charges))], axis=1), return_index=return_index, @@ -448,6 +498,14 @@ def unique(self, return_counts=return_counts, axis=0) + charges = [] + for n in range(len(self.charges)): + obj = self.charges[n].__new__(type(self.charges[n])) + obj.__init__(charges=[result[0][:, n]], shifts=self.charges[n].shifts) + charges.append(obj) + out = ChargeCollection(charges) + return tuple([out] + [result[n] for n in range(1, len(result))]) + def __eq__(self, target_charges): if len(target_charges) != len(self.charges): raise ValueError( diff --git a/tensornetwork/block_tensor/charge_test.py b/tensornetwork/block_tensor/charge_test.py index 3c0d6d0fc..d61d81dfc 100644 --- a/tensornetwork/block_tensor/charge_test.py +++ b/tensornetwork/block_tensor/charge_test.py @@ -100,7 +100,7 @@ def run_test(): target = np.random.randint(-B // 2, B // 2 + 1, 3) q12 = q1 + q2 - nz_1 = q12.nonzero(target) + nz_1 = np.nonzero(q12 == target)[0] i1 = fused_1 == target[0] i2 = fused_2 == target[1] i3 = fused_3 == target[2] @@ -143,7 +143,7 @@ def run_test(): target = np.random.randint(-B // 2, B // 2 + 1, 3) q123 = q1 + q2 + q3 - nz_1 = q123.nonzero(target) + nz_1 = np.nonzero(q123 == target)[0] i1 = fused_1 == target[0] i2 = fused_2 == target[1] i3 = fused_3 == target[2] @@ -185,7 +185,7 @@ def run_test(): target = np.random.randint(-B // 2, B // 2 + 1, 3) q123 = q1 + (-1) * q2 + q3 - nz_1 = q123.nonzero(target) + nz_1 = np.nonzero(q123 == target)[0] i1 = fused_1 == target[0] i2 = fused_2 == target[1] i3 = fused_3 == target[2] @@ -223,7 +223,7 @@ def run_test(): target = np.random.randint(-B // 2, B // 2 + 1, 3) q12 = q1 + (-1) * q2 - nz_1 = q12.nonzero(target) + nz_1 = np.nonzero(q12 == target)[0] i1 = fused_1 == target[0] i2 = fused_2 == target[1] i3 = fused_3 == target[2] @@ -261,7 +261,7 @@ def run_test(): target = np.random.randint(-B // 2, B // 2 + 1, 3) q12 = q1 - q2 - nz_1 = q12.nonzero(target) + nz_1 = 
np.nonzero(q12 == target)[0] i1 = fused_1 == target[0] i2 = fused_2 == target[1] i3 = fused_3 == target[2] @@ -322,7 +322,7 @@ def run_test(): target = np.random.randint(0, 2, 3) q12 = q1 + q2 - nz_1 = q12.nonzero(target) + nz_1 = np.nonzero(q12 == target)[0] i1 = fused_1 == target[0] i2 = fused_2 == target[1] i3 = fused_3 == target[2] @@ -365,7 +365,7 @@ def run_test(): target = np.random.randint(0, 2, 3) q12 = q1 - q2 - nz_1 = q12.nonzero(target) + nz_1 = np.nonzero(q12 == target)[0] i1 = fused_1 == target[0] i2 = fused_2 == target[1] i3 = fused_3 == target[2] @@ -482,3 +482,15 @@ def test_Charge_sub_Z2_U1_raises(): expected = [np.asarray([0, 0, 1, 1]), np.asarray([-3, 2, 2, 7])] with pytest.raises(TypeError): q12 = q1 - q2 + + +def test_BaseCharge_eq(): + D = 1000 + B = 5 + C1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + C2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + C3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + + q1 = U1Charge([C1]) + q2 = U1Charge([C2]) + q3 = U1Charge([C3]) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index 5dd641e5d..fa6af358e 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -17,7 +17,6 @@ from __future__ import print_function import numpy as np from tensornetwork.network_components import Node, contract, contract_between -from tensornetwork.block_tensor.charge import BaseCharge, ChargeCollection # pylint: disable=line-too-long from tensornetwork.backends import backend_factory import copy @@ -244,115 +243,6 @@ def name(self): return self.left_child.name + ' & ' + self.right_child.name -class IndexNew: - """ - An index class to store indices of a symmetric tensor. - An index keeps track of all its childs by storing references - to them (i.e. it is a binary tree). 
- """ - - def __init__(self, - charges: Union[ChargeCollection, BaseCharge], - flow: int, - name: Optional[Text] = None, - left_child: Optional["Index"] = None, - right_child: Optional["Index"] = None): - if isinstance(charges, BaseCharge): - self._charges = ChargeCollection([charges]) - elif isinstance(charges, ChargeCollection): - self._charges = charges - self.flow = flow - self.left_child = left_child - self.right_child = right_child - self._name = name - - def __repr__(self): - return str(self.dimension) - - @property - def is_leave(self): - return (self.left_child is None) and (self.right_child is None) - - @property - def dimension(self): - return np.prod([len(i.charges) for i in self.get_elementary_indices()]) - - def _copy_helper(self, index: "Index", copied_index: "Index") -> None: - """ - Helper function for copy - """ - if index.left_child != None: - left_copy = Index( - charges=copy.copy(index.left_child.charges), - flow=copy.copy(index.left_child.flow), - name=copy.copy(index.left_child.name)) - - copied_index.left_child = left_copy - self._copy_helper(index.left_child, left_copy) - if index.right_child != None: - right_copy = Index( - charges=copy.copy(index.right_child.charges), - flow=copy.copy(index.right_child.flow), - name=copy.copy(index.right_child.name)) - copied_index.right_child = right_copy - self._copy_helper(index.right_child, right_copy) - - def copy(self): - """ - Returns: - Index: A deep copy of `Index`. Note that all children of - `Index` are copied as well. 
- """ - index_copy = Index( - charges=copy.copy(self._charges), - flow=copy.copy(self.flow), - name=self.name) - - self._copy_helper(self, index_copy) - return index_copy - - def _leave_helper(self, index: "Index", leave_list: List) -> None: - if index.left_child: - self._leave_helper(index.left_child, leave_list) - if index.right_child: - self._leave_helper(index.right_child, leave_list) - if (index.left_child is None) and (index.right_child is None): - leave_list.append(index) - - def get_elementary_indices(self) -> List: - """ - Returns: - List: A list containing the elementary indices (the leaves) - of `Index`. - """ - leave_list = [] - self._leave_helper(self, leave_list) - return leave_list - - def __mul__(self, index: "Index") -> "Index": - """ - Merge `index` and self into a single larger index. - The flow of the resulting index is set to 1. - Flows of `self` and `index` are multiplied into - the charges upon fusing.n - """ - return fuse_index_pair(self, index) - - @property - def charges(self): - if self.is_leave: - return self._charges - return self.left_child.charges * self.left_child.flow + self.right_child.charges * self.right_child.flow - - @property - def name(self): - if self._name: - return self._name - if self.is_leave: - return self.name - return self.left_child.name + ' & ' + self.right_child.name - - def fuse_index_pair(left_index: Index, right_index: Index, flow: Optional[int] = 1) -> Index: diff --git a/tensornetwork/block_tensor/index_new_test.py b/tensornetwork/block_tensor/index_new_test.py index f924be6e2..97ef38a4d 100644 --- a/tensornetwork/block_tensor/index_new_test.py +++ b/tensornetwork/block_tensor/index_new_test.py @@ -19,7 +19,7 @@ def test_index_fusion_mul(): i12 = i1 * i2 assert i12.left_child is i1 assert i12.right_child is i2 - for n in range(i12.charges.num_symmetries): + for n in range(len(i12.charges.charges)): assert np.all(i12.charges[n].charges == (q1 + q2).charges) @@ -37,7 +37,7 @@ def test_fuse_indices(): i12 = 
fuse_indices([i1, i2]) assert i12.left_child is i1 assert i12.right_child is i2 - for n in range(i12.charges.num_symmetries): + for n in range(len(i12.charges.charges)): assert np.all(i12.charges[n].charges == (q1 + q2).charges) From e650938b222b76d450dcd59b740f6dd721345bf6 Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 4 Jan 2020 09:47:35 -0500 Subject: [PATCH 110/212] fix small bug in BaseCharge.__init__ --- tensornetwork/block_tensor/charge.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 1f0c54b07..3b86646c2 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -40,8 +40,10 @@ class BaseCharge: """ def __init__(self, - charges: List[np.ndarray], - shifts: Optional[np.ndarray] = None) -> None: + charges: Union[List[np.ndarray], np.ndarray], + shifts: Optional[Union[List[int], np.ndarray]] = None) -> None: + if isinstance(charges, np.ndarray): + charges = [charges] self._itemsizes = [c.dtype.itemsize for c in charges] if np.sum(self._itemsizes) > 8: raise TypeError("number of bits required to store all charges " @@ -53,7 +55,7 @@ def __init__(self, "can be passed. 
Got len(charges) = {}".format( len(charges))) - if len(charges) > 1: + if shifts is None: dtype = np.int8 if np.sum(self._itemsizes) > 1: dtype = np.int16 @@ -71,9 +73,7 @@ def __init__(self, ], axis=0).astype(dtype) else: - if shifts is None: - shifts = np.asarray([0]).astype(charges[0].dtype) - self.shifts = shifts + self.shifts = np.asarray(shifts) self.charges = charges[0] def __add__(self, other: "BaseCharge") -> "BaseCharge": @@ -288,7 +288,10 @@ class Z2Charge(BaseCharge): def __init__(self, charges: List[np.ndarray], shifts: Optional[np.ndarray] = None) -> None: - if len(charges) > 1: + if isinstance(charges, np.ndarray): + charges = [charges] + + if shifts is None: itemsizes = [c.dtype.itemsize for c in charges] if not np.all([i == 1 for i in itemsizes]): # martin: This error could come back at us, but I'll leave it for now @@ -517,9 +520,9 @@ def __eq__(self, target_charges): ]) -def fuse_charges( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]]) -> Union[BaseCharge, ChargeCollection]: +def fuse_charges(charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[bool, int]] + ) -> Union[BaseCharge, ChargeCollection]: """ Fuse all `charges` into a new charge. 
Charges are fused from "right to left", From ea81cdafab7cd207dda4b229fae9b71e83355248 Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 4 Jan 2020 09:47:44 -0500 Subject: [PATCH 111/212] fix tests after bugfix --- tensornetwork/block_tensor/charge_test.py | 158 ++++++++++++++++++---- 1 file changed, 128 insertions(+), 30 deletions(-) diff --git a/tensornetwork/block_tensor/charge_test.py b/tensornetwork/block_tensor/charge_test.py index d61d81dfc..88fc88583 100644 --- a/tensornetwork/block_tensor/charge_test.py +++ b/tensornetwork/block_tensor/charge_test.py @@ -421,12 +421,14 @@ def test_Charge_U1_sub(): def test_Charge_Z2_add(): - q1 = ChargeCollection( - [Z2Charge([np.asarray([0, 1])]), - Z2Charge([np.asarray([1, 0])])]) - q2 = ChargeCollection( - [Z2Charge([np.asarray([0, 0])]), - Z2Charge([np.asarray([1, 1])])]) + q1 = ChargeCollection([ + Z2Charge([np.asarray([0, 1]).astype(np.int8)]), + Z2Charge([np.asarray([1, 0]).astype(np.int8)]) + ]) + q2 = ChargeCollection([ + Z2Charge([np.asarray([0, 0]).astype(np.int8)]), + Z2Charge([np.asarray([1, 1]).astype(np.int8)]) + ]) expected = [np.asarray([0, 0, 1, 1]), np.asarray([0, 0, 1, 1])] q12 = q1 + q2 for n in range(len(q12.charges)): @@ -434,12 +436,14 @@ def test_Charge_Z2_add(): def test_Charge_Z2_sub(): - q1 = ChargeCollection( - [Z2Charge([np.asarray([0, 1])]), - Z2Charge([np.asarray([1, 0])])]) - q2 = ChargeCollection( - [Z2Charge([np.asarray([0, 0])]), - Z2Charge([np.asarray([1, 1])])]) + q1 = ChargeCollection([ + Z2Charge([np.asarray([0, 1]).astype(np.int8)]), + Z2Charge([np.asarray([1, 0]).astype(np.int8)]) + ]) + q2 = ChargeCollection([ + Z2Charge([np.asarray([0, 0]).astype(np.int8)]), + Z2Charge([np.asarray([1, 1]).astype(np.int8)]) + ]) expected = [np.asarray([0, 0, 1, 1]), np.asarray([0, 0, 1, 1])] q12 = q1 - q2 for n in range(len(q12.charges)): @@ -447,12 +451,14 @@ def test_Charge_Z2_sub(): def test_Charge_Z2_U1_add(): - q1 = ChargeCollection( - [Z2Charge([np.asarray([0, 1])]), - 
U1Charge([np.asarray([-2, 3])])]) - q2 = ChargeCollection( - [Z2Charge([np.asarray([0, 0])]), - U1Charge([np.asarray([-1, 4])])]) + q1 = ChargeCollection([ + Z2Charge([np.asarray([0, 1]).astype(np.int8)]), + U1Charge([np.asarray([-2, 3]).astype(np.int8)]) + ]) + q2 = ChargeCollection([ + Z2Charge([np.asarray([0, 0]).astype(np.int8)]), + U1Charge([np.asarray([-1, 4]).astype(np.int8)]) + ]) expected = [np.asarray([0, 0, 1, 1]), np.asarray([-3, 2, 2, 7])] q12 = q1 + q2 @@ -461,9 +467,10 @@ def test_Charge_Z2_U1_add(): def test_Charge_add_Z2_U1_raises(): - q1 = ChargeCollection( - [Z2Charge([np.asarray([0, 1])]), - Z2Charge([np.asarray([-2, 3])])]) + q1 = ChargeCollection([ + Z2Charge([np.asarray([0, 1]).astype(np.int8)]), + Z2Charge([np.asarray([-2, 3]).astype(np.int8)]) + ]) q2 = ChargeCollection( [U1Charge([np.asarray([0, 0])]), U1Charge([np.asarray([-1, 4])])]) @@ -473,9 +480,10 @@ def test_Charge_add_Z2_U1_raises(): def test_Charge_sub_Z2_U1_raises(): - q1 = ChargeCollection( - [Z2Charge([np.asarray([0, 1])]), - Z2Charge([np.asarray([-2, 3])])]) + q1 = ChargeCollection([ + Z2Charge([np.asarray([0, 1]).astype(np.int8)]), + Z2Charge([np.asarray([-2, 3]).astype(np.int8)]) + ]) q2 = ChargeCollection( [U1Charge([np.asarray([0, 0])]), U1Charge([np.asarray([-1, 4])])]) @@ -487,10 +495,100 @@ def test_Charge_sub_Z2_U1_raises(): def test_BaseCharge_eq(): D = 1000 B = 5 - C1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - C2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - C3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) + Q = BaseCharge(charges=[q1, q2]) + target_charge = np.asarray([ + np.random.randint(-B // 2, B // 2 + 1), + np.random.randint(-B // 2 - 1, B // 2 + 2) + ]) + assert np.all( + (Q == target_charge + ) == np.logical_and(q1 == target_charge[0], q2 == target_charge[1])) + + +def 
test_BaseCharge_unique(): + D = 1000 + B = 5 + q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) + Q = BaseCharge(charges=[q1, q2]) + expected = np.unique( + Q.charges, + return_index=True, + return_inverse=True, + return_counts=True, + axis=0) + actual = Q.unique(return_index=True, return_inverse=True, return_counts=True) + assert np.all(actual[0].charges == expected[0]) + assert np.all(actual[1] == expected[1]) + assert np.all(actual[2] == expected[2]) + assert np.all(actual[3] == expected[3]) + + +def test_Charge_U1_U1_eq(): + D = 1000 + B = 5 + q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) + p1 = np.random.randint(-B // 2 - 2, B // 2 + 3, D).astype(np.int16) + Q = ChargeCollection(charges=[U1Charge([q1, q2]), U1Charge(p1)]) + target_q = [ + np.random.randint(-B // 2, B // 2 + 1), + np.random.randint(-B // 2 - 1, B // 2 + 2) + ] + target_p = [np.random.randint(-B // 2 - 2, B // 2 + 3)] + target_charge = [target_q, target_p] + assert np.all((Q == target_charge) == np.logical_and.reduce( + [q1 == target_q[0], q2 == target_q[1], p1 == target_p[0]])) - q1 = U1Charge([C1]) - q2 = U1Charge([C2]) - q3 = U1Charge([C3]) + +def test_Charge_Z2_Z2_eq(): + D = 1000 + q1 = np.random.randint(0, 2, D).astype(np.int8) + q2 = np.random.randint(0, 2, D).astype(np.int8) + p1 = np.random.randint(0, 2, D).astype(np.int8) + Q = ChargeCollection(charges=[Z2Charge([q1, q2]), Z2Charge(p1)]) + target_q = [np.random.randint(0, 2), np.random.randint(0, 2)] + target_p = [np.random.randint(0, 2)] + target_charge = [target_q, target_p] + assert np.all((Q == target_charge) == np.logical_and.reduce( + [q1 == target_q[0], q2 == target_q[1], p1 == target_p[0]])) + + +def test_Charge_U1_Z2_eq(): + D = 1000 + B = 5 + q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, 
D).astype(np.int16) + p1 = np.random.randint(0, 2, D).astype(np.int8) + Q = ChargeCollection(charges=[U1Charge([q1, q2]), Z2Charge(p1)]) + target_q = [ + np.random.randint(-B // 2, B // 2 + 1), + np.random.randint(-B // 2 - 1, B // 2 + 2) + ] + target_p = [np.random.randint(0, 2)] + target_charge = [target_q, target_p] + assert np.all((Q == target_charge) == np.logical_and.reduce( + [q1 == target_q[0], q2 == target_q[1], p1 == target_p[0]])) + + +def test_Charge_U1_U1_unique(): + D = 1000 + B = 5 + q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) + p1 = np.random.randint(-B // 2 - 2, B // 2 + 3, D).astype(np.int16) + Q = ChargeCollection(charges=[U1Charge([q1, q2]), U1Charge(p1)]) + expected = np.unique( + np.stack([Q[0].charges, Q[1].charges], axis=1), + return_index=True, + return_inverse=True, + return_counts=True, + axis=0) + actual = Q.unique(return_index=True, return_inverse=True, return_counts=True) + assert np.all(actual[0][0].charges == expected[0][:, 0]) + assert np.all(actual[0][1].charges == expected[0][:, 1]) + assert np.all(actual[1] == expected[1]) + assert np.all(actual[2] == expected[2]) + assert np.all(actual[3] == expected[3]) From 906e486727039f0d7d0182cc53965921c5a1d900 Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 4 Jan 2020 10:47:05 -0500 Subject: [PATCH 112/212] tests for equals() and __eq__ --- tensornetwork/block_tensor/charge_test.py | 147 +++++++++++++++++++--- 1 file changed, 127 insertions(+), 20 deletions(-) diff --git a/tensornetwork/block_tensor/charge_test.py b/tensornetwork/block_tensor/charge_test.py index 88fc88583..123831547 100644 --- a/tensornetwork/block_tensor/charge_test.py +++ b/tensornetwork/block_tensor/charge_test.py @@ -100,7 +100,7 @@ def run_test(): target = np.random.randint(-B // 2, B // 2 + 1, 3) q12 = q1 + q2 - nz_1 = np.nonzero(q12 == target)[0] + nz_1 = np.nonzero(q12.equals(target))[0] i1 = fused_1 == target[0] i2 = 
fused_2 == target[1] i3 = fused_3 == target[2] @@ -143,7 +143,7 @@ def run_test(): target = np.random.randint(-B // 2, B // 2 + 1, 3) q123 = q1 + q2 + q3 - nz_1 = np.nonzero(q123 == target)[0] + nz_1 = np.nonzero(q123.equals(target))[0] i1 = fused_1 == target[0] i2 = fused_2 == target[1] i3 = fused_3 == target[2] @@ -185,7 +185,7 @@ def run_test(): target = np.random.randint(-B // 2, B // 2 + 1, 3) q123 = q1 + (-1) * q2 + q3 - nz_1 = np.nonzero(q123 == target)[0] + nz_1 = np.nonzero(q123.equals(target))[0] i1 = fused_1 == target[0] i2 = fused_2 == target[1] i3 = fused_3 == target[2] @@ -223,7 +223,7 @@ def run_test(): target = np.random.randint(-B // 2, B // 2 + 1, 3) q12 = q1 + (-1) * q2 - nz_1 = np.nonzero(q12 == target)[0] + nz_1 = np.nonzero(q12.equals(target))[0] i1 = fused_1 == target[0] i2 = fused_2 == target[1] i3 = fused_3 == target[2] @@ -261,7 +261,7 @@ def run_test(): target = np.random.randint(-B // 2, B // 2 + 1, 3) q12 = q1 - q2 - nz_1 = np.nonzero(q12 == target)[0] + nz_1 = np.nonzero(q12.equals(target))[0] i1 = fused_1 == target[0] i2 = fused_2 == target[1] i3 = fused_3 == target[2] @@ -322,7 +322,7 @@ def run_test(): target = np.random.randint(0, 2, 3) q12 = q1 + q2 - nz_1 = np.nonzero(q12 == target)[0] + nz_1 = np.nonzero(q12.equals(target))[0] i1 = fused_1 == target[0] i2 = fused_2 == target[1] i3 = fused_3 == target[2] @@ -365,7 +365,7 @@ def run_test(): target = np.random.randint(0, 2, 3) q12 = q1 - q2 - nz_1 = np.nonzero(q12 == target)[0] + nz_1 = np.nonzero(q12.equals(target))[0] i1 = fused_1 == target[0] i2 = fused_2 == target[1] i3 = fused_3 == target[2] @@ -493,7 +493,7 @@ def test_Charge_sub_Z2_U1_raises(): def test_BaseCharge_eq(): - D = 1000 + D = 3000 B = 5 q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) @@ -503,12 +503,27 @@ def test_BaseCharge_eq(): np.random.randint(-B // 2 - 1, B // 2 + 2) ]) assert np.all( - (Q == target_charge + (Q == 
np.left_shift(target_charge[0], 16) + target_charge[1] + ) == np.logical_and(q1 == target_charge[0], q2 == target_charge[1])) + + +def test_BaseCharge_equals(): + D = 3000 + B = 5 + q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) + Q = BaseCharge(charges=[q1, q2]) + target_charge = np.asarray([ + np.random.randint(-B // 2, B // 2 + 1), + np.random.randint(-B // 2 - 1, B // 2 + 2) + ]) + assert np.all( + (Q.equals(target_charge) ) == np.logical_and(q1 == target_charge[0], q2 == target_charge[1])) def test_BaseCharge_unique(): - D = 1000 + D = 3000 B = 5 q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) @@ -526,8 +541,8 @@ def test_BaseCharge_unique(): assert np.all(actual[3] == expected[3]) -def test_Charge_U1_U1_eq(): - D = 1000 +def test_Charge_U1_U1_equals(): + D = 3000 B = 5 q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) @@ -539,12 +554,30 @@ def test_Charge_U1_U1_eq(): ] target_p = [np.random.randint(-B // 2 - 2, B // 2 + 3)] target_charge = [target_q, target_p] + assert np.all((Q.equals(target_charge)) == np.logical_and.reduce( + [q1 == target_q[0], q2 == target_q[1], p1 == target_p[0]])) + + +def test_Charge_U1_U1_eq(): + D = 3000 + B = 5 + q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) + p1 = np.random.randint(-B // 2 - 2, B // 2 + 3, D).astype(np.int16) + Q = ChargeCollection(charges=[U1Charge([q1, q2]), U1Charge(p1)]) + target_q = [ + np.random.randint(-B // 2, B // 2 + 1), + np.random.randint(-B // 2 - 1, B // 2 + 2) + ] + target_q_shifted = np.left_shift(target_q[0], 16) + target_q[1] + target_p = [np.random.randint(-B // 2 - 2, B // 2 + 3)] + target_charge = [target_q_shifted, target_p] assert np.all((Q == 
target_charge) == np.logical_and.reduce( [q1 == target_q[0], q2 == target_q[1], p1 == target_p[0]])) -def test_Charge_Z2_Z2_eq(): - D = 1000 +def test_Charge_Z2_Z2_equals(): + D = 3000 q1 = np.random.randint(0, 2, D).astype(np.int8) q2 = np.random.randint(0, 2, D).astype(np.int8) p1 = np.random.randint(0, 2, D).astype(np.int8) @@ -552,12 +585,26 @@ def test_Charge_Z2_Z2_eq(): target_q = [np.random.randint(0, 2), np.random.randint(0, 2)] target_p = [np.random.randint(0, 2)] target_charge = [target_q, target_p] + assert np.all((Q.equals(target_charge)) == np.logical_and.reduce( + [q1 == target_q[0], q2 == target_q[1], p1 == target_p[0]])) + + +def test_Charge_Z2_Z2_eq(): + D = 3000 + q1 = np.random.randint(0, 2, D).astype(np.int8) + q2 = np.random.randint(0, 2, D).astype(np.int8) + p1 = np.random.randint(0, 2, D).astype(np.int8) + Q = ChargeCollection(charges=[Z2Charge([q1, q2]), Z2Charge(p1)]) + target_q = [np.random.randint(0, 2), np.random.randint(0, 2)] + target_q_shifted = np.left_shift(target_q[0], 8) + target_q[1] + target_p = [np.random.randint(0, 2)] + target_charge = [target_q_shifted, target_p] assert np.all((Q == target_charge) == np.logical_and.reduce( [q1 == target_q[0], q2 == target_q[1], p1 == target_p[0]])) -def test_Charge_U1_Z2_eq(): - D = 1000 +def test_Charge_U1_Z2_equals(): + D = 3000 B = 5 q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) @@ -569,26 +616,86 @@ def test_Charge_U1_Z2_eq(): ] target_p = [np.random.randint(0, 2)] target_charge = [target_q, target_p] + assert np.all((Q.equals(target_charge)) == np.logical_and.reduce( + [q1 == target_q[0], q2 == target_q[1], p1 == target_p[0]])) + + +def test_Charge_U1_Z2_eq(): + D = 3000 + B = 5 + q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) + p1 = np.random.randint(0, 2, D).astype(np.int8) + Q = 
ChargeCollection(charges=[U1Charge([q1, q2]), Z2Charge(p1)]) + target_q = [ + np.random.randint(-B // 2, B // 2 + 1), + np.random.randint(-B // 2 - 1, B // 2 + 2) + ] + target_q_shifted = np.left_shift(target_q[0], 16) + target_q[1] + target_p = [np.random.randint(0, 2)] + target_charge = [target_q_shifted, target_p] assert np.all((Q == target_charge) == np.logical_and.reduce( [q1 == target_q[0], q2 == target_q[1], p1 == target_p[0]])) def test_Charge_U1_U1_unique(): - D = 1000 + D = 3000 B = 5 q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) p1 = np.random.randint(-B // 2 - 2, B // 2 + 3, D).astype(np.int16) Q = ChargeCollection(charges=[U1Charge([q1, q2]), U1Charge(p1)]) expected = np.unique( - np.stack([Q[0].charges, Q[1].charges], axis=1), + np.stack([Q.charges[0].charges, Q.charges[1].charges], axis=1), + return_index=True, + return_inverse=True, + return_counts=True, + axis=0) + actual = Q.unique(return_index=True, return_inverse=True, return_counts=True) + assert np.all(actual[0].charges[0].charges == expected[0][:, 0]) + assert np.all(actual[0].charges[1].charges == expected[0][:, 1]) + assert np.all(actual[1] == expected[1]) + assert np.all(actual[2] == expected[2]) + assert np.all(actual[3] == expected[3]) + + +def test_Charge_Z2_Z2_unique(): + D = 3000 + B = 5 + q1 = np.random.randint(0, 2, D).astype(np.int8) + q2 = np.random.randint(0, 2, D).astype(np.int8) + p1 = np.random.randint(0, 2, D).astype(np.int8) + Q = ChargeCollection(charges=[Z2Charge([q1, q2]), Z2Charge(p1)]) + expected = np.unique( + np.stack([Q.charges[0].charges, Q.charges[1].charges], axis=1), + return_index=True, + return_inverse=True, + return_counts=True, + axis=0) + actual = Q.unique(return_index=True, return_inverse=True, return_counts=True) + assert np.all(actual[0].charges[0].charges == expected[0][:, 0]) + assert np.all(actual[0].charges[1].charges == expected[0][:, 1]) + assert np.all(actual[1] 
== expected[1]) + assert np.all(actual[2] == expected[2]) + assert np.all(actual[3] == expected[3]) + + +def test_Charge_U1_Z2_unique(): + D = 3000 + B = 5 + q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) + p1 = np.random.randint(0, 2, D).astype(np.int8) + Q = ChargeCollection(charges=[U1Charge([q1, q2]), Z2Charge(p1)]) + expected = np.unique( + np.stack([Q.charges[0].charges, Q.charges[1].charges], axis=1), return_index=True, return_inverse=True, return_counts=True, axis=0) actual = Q.unique(return_index=True, return_inverse=True, return_counts=True) - assert np.all(actual[0][0].charges == expected[0][:, 0]) - assert np.all(actual[0][1].charges == expected[0][:, 1]) + assert np.all(actual[0].charges[0].charges == expected[0][:, 0]) + assert np.all(actual[0].charges[1].charges == expected[0][:, 1]) assert np.all(actual[1] == expected[1]) assert np.all(actual[2] == expected[2]) assert np.all(actual[3] == expected[3]) From 729fe6c7eadd2d07a0c09bd4569cf69326795578 Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 4 Jan 2020 10:47:56 -0500 Subject: [PATCH 113/212] added equals() for comparing with unshifted target charges __eq__ now only compares shifted target charges --- tensornetwork/block_tensor/charge.py | 54 +++++++++++++++++++++++----- 1 file changed, 46 insertions(+), 8 deletions(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 3b86646c2..459488b13 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -20,6 +20,7 @@ # pylint: disable=line-too-long from tensornetwork.backends import backend_factory import copy +import warnings from typing import List, Union, Any, Optional, Tuple, Text @@ -86,6 +87,9 @@ def __matmul__(self, other: "BaseCharge") -> "Charge": raise NotImplementedError( "`__matmul__` is not implemented for `BaseCharge`") + def __getitem__(self, n: int) -> "BaseCharge": + 
return self.charges[n] + @property def num_symmetries(self): return len(self.shifts) @@ -147,7 +151,18 @@ def unique(self, return tuple([out] + [result[n] for n in range(1, len(result))]) - def __eq__(self, target_charges): + def equals(self, target_charges): + """ + Find indices where `BaseCharge` equals `target_charges`. + `target_charges` has to be an array of the same lenghts + as `BaseCharge.shifts`, containing one integer per symmetry of + `BaseCharge` + Args: + target_charges: np.ndarray of integers encoding charges. + Returns: + np.ndarray: Boolean array with `True` where `BaseCharge` equals + `target_charges` and `False` everywhere else. + """ if len(target_charges) != len(self.shifts): raise ValueError("len(target_charges) = {} is different " "from len(shifts) = {}".format( @@ -159,6 +174,19 @@ def __eq__(self, target_charges): ]) return self.charges == target + def __eq__(self, target): + """ + Find indices where `BaseCharge` equals `target_charges`. + `target` is a single integer encoding all symmetries of + `BaseCharge` + Args: + target: integerger encoding charges. + Returns: + np.ndarray: Boolean array with `True` where `BaseCharge.charges` equals + `target` and `False` everywhere else. + """ + return self.charges == target + class U1Charge(BaseCharge): """ @@ -295,9 +323,12 @@ def __init__(self, itemsizes = [c.dtype.itemsize for c in charges] if not np.all([i == 1 for i in itemsizes]): # martin: This error could come back at us, but I'll leave it for now - raise ValueError("Z2 charges can be entirely stored in " - "np.int8, but found dtypes = {}".format( - [c.dtype for c in charges])) + warnings.warn( + "Z2 charges can be entirely stored in " + "np.int8, but found dtypes = {}. 
Converting to np.int8.".format( + [c.dtype for c in charges])) + + charges = [c.astype(np.int8) for c in charges] super().__init__(charges, shifts) @@ -417,10 +448,7 @@ def __init__(self, charges: List[BaseCharge]) -> None: self.charges = charges def __getitem__(self, n: int) -> BaseCharge: - return self.charges[n] - - def __setitem__(self, n: int, val: BaseCharge) -> None: - self.charges[n] = val + return np.asarray([c.charges[n] for c in self.charges]) def __add__(self, other: "Charge") -> "Charge": """ @@ -509,6 +537,16 @@ def unique( out = ChargeCollection(charges) return tuple([out] + [result[n] for n in range(1, len(result))]) + def equals(self, target_charges): + if len(target_charges) != len(self.charges): + raise ValueError( + "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" + .format(len(target_charges), len(self.charges))) + return np.logical_and.reduce([ + self.charges[n].equals(target_charges[n]) + for n in range(len(target_charges)) + ]) + def __eq__(self, target_charges): if len(target_charges) != len(self.charges): raise ValueError( From a965e58ed98813bd550f0e828b86d9f1948c4ed2 Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 4 Jan 2020 10:58:32 -0500 Subject: [PATCH 114/212] added typing --- tensornetwork/block_tensor/charge.py | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 459488b13..ebdbd6797 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -55,7 +55,6 @@ def __init__(self, raise ValueError("If `shifts` is passed, only a single charge array " "can be passed. 
Got len(charges) = {}".format( len(charges))) - if shifts is None: dtype = np.int8 if np.sum(self._itemsizes) > 1: @@ -97,8 +96,9 @@ def num_symmetries(self): def __len__(self) -> int: return len(self.charges) - def __repr__(self) -> str: - raise NotImplementedError("`__repr__` is not implemented for `BaseCharge`") + def __repr__(self): + return str(type(self)) + '\nshifts: ' + self.shifts.__repr__( + ) + '\n' + 'charges: ' + self.charges.__repr__() + '\n' @property def dual_charges(self) -> np.ndarray: @@ -151,7 +151,7 @@ def unique(self, return tuple([out] + [result[n] for n in range(1, len(result))]) - def equals(self, target_charges): + def equals(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: """ Find indices where `BaseCharge` equals `target_charges`. `target_charges` has to be an array of the same lenghts @@ -174,7 +174,7 @@ def equals(self, target_charges): ]) return self.charges == target - def __eq__(self, target): + def __eq__(self, target: int) -> np.ndarray: """ Find indices where `BaseCharge` equals `target_charges`. 
`target` is a single integer encoding all symmetries of @@ -258,10 +258,6 @@ def __sub__(self, other: "U1Charge") -> "U1Charge": len(self.charges) * len(other.charges)) return U1Charge(charges=[fused], shifts=self.shifts) - def __repr__(self): - return 'U1-charge: \n' + 'shifts: ' + self.shifts.__repr__( - ) + '\n' + 'charges: ' + self.charges.__repr__() + '\n' - def __matmul__(self, other: Union["U1Charge", "U1Charge"]) -> "U1Charge": itemsize = np.sum(self._itemsizes + other._itemsizes) if itemsize > 8: @@ -416,15 +412,11 @@ def dual_charges(self): #Z2 charges are self-dual return self.charges - def __repr__(self): - return 'Z2-charge: \n' + 'shifts: ' + self.shifts.__repr__( - ) + '\n' + 'charges: ' + self.charges.__repr__() + '\n' - - def __eq__(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: + def equals(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: if not np.all(np.isin(target_charges, np.asarray([0, 1]))): raise ValueError("Z2-charges can only be 0 or 1, found charges {}".format( np.unique(target_charges))) - return super().__eq__(target_charges) + return super().equals(target_charges) class ChargeCollection: @@ -536,9 +528,9 @@ def unique( charges.append(obj) out = ChargeCollection(charges) return tuple([out] + [result[n] for n in range(1, len(result))]) - - def equals(self, target_charges): - if len(target_charges) != len(self.charges): + + def equals(self, target_charges: List[Union[List, np.ndarray]]) -> np.ndarray: + if len(target_charges) != len(self.charges): raise ValueError( "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" .format(len(target_charges), len(self.charges))) From 17224b10c9900358d832aa5566948c293772b27b Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 4 Jan 2020 11:38:01 -0500 Subject: [PATCH 115/212] ChargeCollection.__repr__ modified --- tensornetwork/block_tensor/charge.py | 36 +++++++++++++++++++--------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git 
a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index ebdbd6797..fbb6e907b 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -146,10 +146,14 @@ def unique(self, return_index=return_index, return_inverse=return_inverse, return_counts=return_counts) - out = self.__new__(type(self)) - out.__init__([result[0]], self.shifts) - - return tuple([out] + [result[n] for n in range(1, len(result))]) + if not (return_index or return_inverse or return_counts): + out = self.__new__(type(self)) + out.__init__([result], self.shifts) + return out + else: + out = self.__new__(type(self)) + out.__init__([result[0]], self.shifts) + return tuple([out] + [result[n] for n in range(1, len(result))]) def equals(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: """ @@ -464,12 +468,17 @@ def __sub__(self, other: "Charge") -> "Charge": return ChargeCollection( [c1 - c2 for c1, c2 in zip(self.charges, other.charges)]) + def __repr__(self): + text = str(type(self)) + '\n ' + for n in range(len(self.charges)): + tmp = self.charges[n].__repr__() + tmp = tmp.replace('\n', '\n\t') + text += (tmp + '\n\t') + return text + def __len__(self): return len(self.charges[0]) - def __repr__(self): - return self.charges.__repr__() - def __mul__(self, number: Union[bool, int]) -> "Charge": if number not in (True, False, 0, 1, -1): raise ValueError( @@ -520,17 +529,22 @@ def unique( return_inverse=return_inverse, return_counts=return_counts, axis=0) - charges = [] + if not (return_index or return_inverse or return_counts): + for n in range(len(self.charges)): + obj = self.charges[n].__new__(type(self.charges[n])) + obj.__init__(charges=[result[:, n]], shifts=self.charges[n].shifts) + charges.append(obj) + return ChargeCollection(charges) for n in range(len(self.charges)): obj = self.charges[n].__new__(type(self.charges[n])) obj.__init__(charges=[result[0][:, n]], shifts=self.charges[n].shifts) charges.append(obj) - out = 
ChargeCollection(charges) + out = ChargeCollection(charges) return tuple([out] + [result[n] for n in range(1, len(result))]) - + def equals(self, target_charges: List[Union[List, np.ndarray]]) -> np.ndarray: - if len(target_charges) != len(self.charges): + if len(target_charges) != len(self.charges): raise ValueError( "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" .format(len(target_charges), len(self.charges))) From b9c3de0e18b54588be28395c1514378af93884ff Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 4 Jan 2020 14:35:39 -0500 Subject: [PATCH 116/212] *** empty log message *** --- .../block_tensor/block_tensor_new.py | 151 +++++++----------- 1 file changed, 58 insertions(+), 93 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py index 39f66e725..91232c906 100644 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -18,8 +18,8 @@ import numpy as np #from tensornetwork.block_tensor.lookup import lookup # pylint: disable=line-too-long -from tensornetwork.block_tensor.index_new import Index, fuse_index_pair, split_index, -from tensornetwork.block_tensor.charges import fuse_degeneracies, fuse_charges, fuse_degeneracies, BaseCharge, ChargeCollection +from tensornetwork.block_tensor.index_new import Index, fuse_index_pair, split_index +from tensornetwork.block_tensor.charge import fuse_degeneracies, fuse_charges, fuse_degeneracies, BaseCharge, ChargeCollection import numpy as np import scipy as sp import itertools @@ -58,7 +58,7 @@ def compute_fused_charge_degeneracies( flows: List[Union[bool, int]]) -> Dict: """ For a list of charges, compute all possible fused charges resulting - from fusing `charges`, together with their respective degeneracyn + from fusing `charges`, together with their respective degeneracies Args: charges: List of np.ndarray of int, one for each leg of the underlying tensor. 
Each np.ndarray `charges[leg]` @@ -69,42 +69,51 @@ def compute_fused_charge_degeneracies( of the charges on each leg. `1` is inflowing, `-1` is outflowing charge. Returns: - dict: Mapping fused charges (int) to degeneracies (int) + dict: Mapping fused charges to degeneracies """ if len(charges) == 1: - return charges[0].unique(return_counts=True) + return (flows[0] * charges[0]).unique(return_counts=True) # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". - accumulated_charges, accumulated_degeneracies = charges[0].unique( - return_counts=True) - #multiply the flow into the charges of first leg - accumulated_charges *= flows[0] + accumulated_charges, accumulated_degeneracies = (flows[0] * + charges[0]).unique( + return_counts=True) for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor - leg_charges, leg_degeneracies = np.unique(charges[n], return_counts=True) + print(n, len(accumulated_charges)) + t1 = time.time() + leg_charges, leg_degeneracies = charges[n].unique(return_counts=True) + print('unique', time.time() - t1) #fuse the unique charges #Note: entries in `fused_charges` are not unique anymore. #flow1 = 1 because the flow of leg 0 has already been #mulitplied above - fused_charges = fuse_charge_pair( - q1=accumulated_charges, flow1=1, q2=leg_charges, flow2=flows[n]) + t1 = time.time() + fused_charges = accumulated_charges + leg_charges * flows[n] + print('fusing charges', time.time() - t1) #compute the degeneracies of `fused_charges` charges #`fused_degeneracies` is a list of degeneracies such that # `fused_degeneracies[n]` is the degeneracy of of # charge `c = fused_charges[n]`. 
+ t1 = time.time() fused_degeneracies = fuse_degeneracies(accumulated_degeneracies, leg_degeneracies) - #compute the new degeneracies resulting from fusing - #`accumulated_charges` and `leg_charges_2` - accumulated_charges = np.unique(fused_charges) + print('fusing degeneracies', time.time() - t1) + + t1 = time.time() + accumulated_charges = fused_charges.unique() + print('second unique', time.time() - t1) accumulated_degeneracies = np.empty( len(accumulated_charges), dtype=np.int64) + print(len(accumulated_charges)) + fused_charges == accumulated_charges[n] for n in range(len(accumulated_charges)): accumulated_degeneracies[n] = np.sum( fused_degeneracies[fused_charges == accumulated_charges[n]]) + return accumulated_charges, accumulated_degeneracies @@ -134,52 +143,6 @@ def compute_num_nonzero(charges: List[np.ndarray], return accumulated_degeneracies[accumulated_charges == 0][0] -def compute_nonzero_block_shapes(charges: List[np.ndarray], - flows: List[Union[bool, int]]) -> Dict: - """ - Compute the blocks and their respective shapes of a symmetric tensor, - given its meta-data. - Args: - charges: List of np.ndarray, one for each leg. - Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - Returns: - dict: Dictionary mapping a tuple of charges to a shape tuple. - Each element corresponds to a non-zero valued block of the tensor. - """ - #FIXME: this routine is slow - _check_flows(flows) - degeneracies = [] - unique_charges = [] - rank = len(charges) - #find the unique quantum numbers and their degeneracy on each leg - for leg in range(rank): - c, d = np.unique(charges[leg], return_counts=True) - unique_charges.append(c) - degeneracies.append(dict(zip(c, d))) - - #find all possible combination of leg charges c0, c1, ... 
- #(with one charge per leg 0, 1, ...) - #such that sum([flows[0] * c0, flows[1] * c1, ...]) = 0 - charge_combinations = list( - itertools.product(*[ - unique_charges[leg] * flows[leg] - for leg in range(len(unique_charges)) - ])) - net_charges = np.array([np.sum(c) for c in charge_combinations]) - zero_idxs = np.nonzero(net_charges == 0)[0] - charge_shape_dict = {} - for idx in zero_idxs: - c = charge_combinations[idx] - shapes = [degeneracies[leg][flows[leg] * c[leg]] for leg in range(rank)] - charge_shape_dict[c] = shapes - return charge_shape_dict - - def find_diagonal_sparse_blocks(data: np.ndarray, row_charges: List[Union[List, np.ndarray]], column_charges: List[Union[List, np.ndarray]], @@ -234,6 +197,7 @@ def find_diagonal_sparse_blocks(data: np.ndarray, #get the unique column-charges #we only care about their degeneracies, not their order; that's much faster #to compute since we don't have to fuse all charges explicitly + #`compute_fused_charge_degeneracies` multiplies flows into the column_charges unique_column_charges, column_dims = compute_fused_charge_degeneracies( column_charges, column_flows) #convenience container for storing the degeneracies of each @@ -243,15 +207,14 @@ def find_diagonal_sparse_blocks(data: np.ndarray, if len(row_charges) > 1: left_row_charges, right_row_charges, _ = _find_best_partition( row_charges, row_flows) - unique_left = np.unique(left_row_charges) - unique_right = np.unique(right_row_charges) - unique_row_charges = np.unique( - fuse_charges(charges=[unique_left, unique_right], flows=[1, 1])) + unique_left = left_row_charges.unique() + unique_right = right_row_charges.unique() + unique_row_charges = (unique_left + unique_right).unique() #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - + concatenated = unique_row_charges.concatenate((-1) * unique_column_charges) + tmp_unique, counts = 
concatenated.unique(return_counts=True) + common_charges = tmp_unique[counts == 2] row_locations = find_sparse_positions( left_charges=left_row_charges, left_flow=1, @@ -439,11 +402,11 @@ def find_diagonal_sparse_blocks_depreacated_1( return blocks -def find_diagonal_sparse_blocks_deprecated_0( - data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: +def find_diagonal_sparse_blocks_deprecated_0(data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True + ) -> Dict: """ Deprecated: this version is about 2 times slower (worst case) than the current used implementation @@ -545,11 +508,11 @@ def find_diagonal_sparse_blocks_deprecated_0( return blocks -def find_diagonal_sparse_blocks_column_major( - data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: +def find_diagonal_sparse_blocks_column_major(data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True + ) -> Dict: """ Deprecated @@ -764,8 +727,9 @@ def find_dense_positions(left_charges: np.ndarray, left_flow: int, indices = [] for n in range(len(left_charges)): c = left_charges[n] - indices.append(n * len_right_charges + right_locations[ - (target_charge - left_flow * c) * right_flow]) + indices.append(n * len_right_charges + + right_locations[(target_charge - left_flow * c) * + right_flow]) return np.concatenate(indices) @@ -851,8 +815,9 @@ def find_sparse_positions(left_charges: np.ndarray, left_flow: int, for target_charge in target_charges: right_indices[(left_charge, target_charge)] = np.nonzero( - tmp_relevant_right_charges == - (target_charge - left_flow * left_charge) * right_flow)[0] + tmp_relevant_right_charges == (target_charge - + left_flow * left_charge) * + right_flow)[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy 
@@ -1239,9 +1204,9 @@ def transpose(self, linear_positions = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=0) - self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( - len(self.data)), (linear_positions, - np.zeros(len(self.data), dtype=np.int64)))) + self.dense_to_sparse_table = sp.sparse.csr_matrix( + (np.arange(len(self.data)), + (linear_positions, np.zeros(len(self.data), dtype=np.int64)))) flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] @@ -1266,8 +1231,8 @@ def transpose(self, self.data = self.data[inds] return inds - def transpose_intersect1d( - self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": + def transpose_intersect1d(self, order: Union[List[int], np.ndarray] + ) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order` Args: pp @@ -1358,8 +1323,8 @@ def transpose_intersect1d( # tr_dense_linear_positions[tr_linear_positions]) # self.data = self.data[inds] - def transpose_searchsorted( - self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": + def transpose_searchsorted(self, order: Union[List[int], np.ndarray] + ) -> "BlockSparseTensor": """ Deprecated: @@ -1539,8 +1504,8 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_1( - self, return_data: Optional[bool] = True) -> Dict: + def get_diagonal_blocks_deprecated_1(self, return_data: Optional[bool] = True + ) -> Dict: """ Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. 
@@ -1575,8 +1540,8 @@ def get_diagonal_blocks_deprecated_1( column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_0( - self, return_data: Optional[bool] = True) -> Dict: + def get_diagonal_blocks_deprecated_0(self, return_data: Optional[bool] = True + ) -> Dict: """ Deprecated From 656e2d5413cf967a7b67906c0cff70ef23fcab33 Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 4 Jan 2020 14:35:45 -0500 Subject: [PATCH 117/212] this commit is not working --- tensornetwork/block_tensor/block_tensor.py | 54 ++++++++--------- tensornetwork/block_tensor/charge.py | 68 +++++++++++++++++++--- tensornetwork/block_tensor/charge_test.py | 12 ++-- 3 files changed, 95 insertions(+), 39 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 42eaf96c9..454d03a6d 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -97,7 +97,7 @@ def compute_fused_charge_degeneracies(charges: List[np.ndarray], dict: Mapping fused charges (int) to degeneracies (int) """ if len(charges) == 1: - return np.unique(charges[0], return_counts=True) + return np.unique(flows[0] * charges[0], return_counts=True) # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". 
@@ -464,11 +464,11 @@ def find_diagonal_sparse_blocks_depreacated_1( return blocks -def find_diagonal_sparse_blocks_deprecated_0( - data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: +def find_diagonal_sparse_blocks_deprecated_0(data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True + ) -> Dict: """ Deprecated: this version is about 2 times slower (worst case) than the current used implementation @@ -570,11 +570,11 @@ def find_diagonal_sparse_blocks_deprecated_0( return blocks -def find_diagonal_sparse_blocks_column_major( - data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: +def find_diagonal_sparse_blocks_column_major(data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True + ) -> Dict: """ Deprecated @@ -789,8 +789,9 @@ def find_dense_positions(left_charges: np.ndarray, left_flow: int, indices = [] for n in range(len(left_charges)): c = left_charges[n] - indices.append(n * len_right_charges + right_locations[ - (target_charge - left_flow * c) * right_flow]) + indices.append(n * len_right_charges + + right_locations[(target_charge - left_flow * c) * + right_flow]) return np.concatenate(indices) @@ -876,8 +877,9 @@ def find_sparse_positions(left_charges: np.ndarray, left_flow: int, for target_charge in target_charges: right_indices[(left_charge, target_charge)] = np.nonzero( - tmp_relevant_right_charges == - (target_charge - left_flow * left_charge) * right_flow)[0] + tmp_relevant_right_charges == (target_charge - + left_flow * left_charge) * + right_flow)[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy @@ -1264,9 +1266,9 @@ def transpose(self, linear_positions = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=0) - self.dense_to_sparse_table = 
sp.sparse.csr_matrix((np.arange( - len(self.data)), (linear_positions, - np.zeros(len(self.data), dtype=np.int64)))) + self.dense_to_sparse_table = sp.sparse.csr_matrix( + (np.arange(len(self.data)), + (linear_positions, np.zeros(len(self.data), dtype=np.int64)))) flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] @@ -1291,8 +1293,8 @@ def transpose(self, self.data = self.data[inds] return inds - def transpose_intersect1d( - self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": + def transpose_intersect1d(self, order: Union[List[int], np.ndarray] + ) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order` Args: pp @@ -1383,8 +1385,8 @@ def transpose_intersect1d( # tr_dense_linear_positions[tr_linear_positions]) # self.data = self.data[inds] - def transpose_searchsorted( - self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": + def transpose_searchsorted(self, order: Union[List[int], np.ndarray] + ) -> "BlockSparseTensor": """ Deprecated: @@ -1564,8 +1566,8 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_1( - self, return_data: Optional[bool] = True) -> Dict: + def get_diagonal_blocks_deprecated_1(self, return_data: Optional[bool] = True + ) -> Dict: """ Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. 
@@ -1600,8 +1602,8 @@ def get_diagonal_blocks_deprecated_1( column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_0( - self, return_data: Optional[bool] = True) -> Dict: + def get_diagonal_blocks_deprecated_0(self, return_data: Optional[bool] = True + ) -> Dict: """ Deprecated diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index fbb6e907b..285d15a44 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -86,8 +86,11 @@ def __matmul__(self, other: "BaseCharge") -> "Charge": raise NotImplementedError( "`__matmul__` is not implemented for `BaseCharge`") - def __getitem__(self, n: int) -> "BaseCharge": - return self.charges[n] + def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": + charges = self.charges[n] + obj = self.__new__(type(self)) + obj.__init__(charges=[charges], shifts=self.shifts) + return obj @property def num_symmetries(self): @@ -191,6 +194,28 @@ def __eq__(self, target: int) -> np.ndarray: """ return self.charges == target + def concatenate(self, others: Union["BaseCharge", List["BaseCharge"]]): + """ + Concatenate `self.charges` with `others.charges`. + Args: + others: List of `BaseCharge` objects. + Returns: + BaseCharge: The concatenated charges. 
+ """ + if isinstance(others, type(self)): + others = [others] + for o in others: + if not np.all(self.shifts == o.shifts): + raise ValueError( + "Cannot fuse charges with different shifts {} and {}".format( + self.shifts, o.shifts)) + + charges = np.concatenate( + [self.charges] + [o.charges for o in others], axis=0) + out = self.__new__(type(self)) + out.__init__([charges], self.shifts) + return out + class U1Charge(BaseCharge): """ @@ -444,7 +469,12 @@ def __init__(self, charges: List[BaseCharge]) -> None: self.charges = charges def __getitem__(self, n: int) -> BaseCharge: - return np.asarray([c.charges[n] for c in self.charges]) + if not hasattr(self, '_stacked_charges'): + self._stacked_charges = np.stack([c.charges for c in self.charges], + axis=1) + return self._stacked_charges[n, :] + + #return np.asarray([c.charges[n] for c in self.charges]) def __add__(self, other: "Charge") -> "Charge": """ @@ -558,10 +588,34 @@ def __eq__(self, target_charges): raise ValueError( "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" .format(len(target_charges), len(self.charges))) - return np.logical_and.reduce([ - self.charges[n] == target_charges[n] - for n in range(len(target_charges)) - ]) + if not hasattr(self, '_stacked_charges'): + self._stacked_charges = np.stack([c.charges for c in self.charges], + axis=1) + target = np.reshape(target_charges, (1, len(target_charges))) + return np.logical_and.reduce(self._stacked_charges == target, axis=1) + # return np.logical_and.reduce([ + # self.charges[n] == target_charges[n] + # for n in range(len(target_charges)) + # ]) + + def concatenate(self, + others: Union["ChargeCollection", List["ChargeCollection"]]): + """ + Concatenate `self.charges` with `others.charges`. + Args: + others: List of `BaseCharge` objects. + Returns: + BaseCharge: The concatenated charges. 
+ """ + if isinstance(others, type(self)): + others = [others] + + charges = [ + self.charges[n].concatenate([o.charges[n] + for o in others]) + for n in range(len(self.charges)) + ] + return ChargeCollection(charges) def fuse_charges(charges: List[Union[BaseCharge, ChargeCollection]], diff --git a/tensornetwork/block_tensor/charge_test.py b/tensornetwork/block_tensor/charge_test.py index 123831547..df0266db8 100644 --- a/tensornetwork/block_tensor/charge_test.py +++ b/tensornetwork/block_tensor/charge_test.py @@ -570,10 +570,10 @@ def test_Charge_U1_U1_eq(): np.random.randint(-B // 2 - 1, B // 2 + 2) ] target_q_shifted = np.left_shift(target_q[0], 16) + target_q[1] - target_p = [np.random.randint(-B // 2 - 2, B // 2 + 3)] + target_p = np.random.randint(-B // 2 - 2, B // 2 + 3) target_charge = [target_q_shifted, target_p] assert np.all((Q == target_charge) == np.logical_and.reduce( - [q1 == target_q[0], q2 == target_q[1], p1 == target_p[0]])) + [q1 == target_q[0], q2 == target_q[1], p1 == target_p])) def test_Charge_Z2_Z2_equals(): @@ -597,10 +597,10 @@ def test_Charge_Z2_Z2_eq(): Q = ChargeCollection(charges=[Z2Charge([q1, q2]), Z2Charge(p1)]) target_q = [np.random.randint(0, 2), np.random.randint(0, 2)] target_q_shifted = np.left_shift(target_q[0], 8) + target_q[1] - target_p = [np.random.randint(0, 2)] + target_p = np.random.randint(0, 2) target_charge = [target_q_shifted, target_p] assert np.all((Q == target_charge) == np.logical_and.reduce( - [q1 == target_q[0], q2 == target_q[1], p1 == target_p[0]])) + [q1 == target_q[0], q2 == target_q[1], p1 == target_p])) def test_Charge_U1_Z2_equals(): @@ -632,10 +632,10 @@ def test_Charge_U1_Z2_eq(): np.random.randint(-B // 2 - 1, B // 2 + 2) ] target_q_shifted = np.left_shift(target_q[0], 16) + target_q[1] - target_p = [np.random.randint(0, 2)] + target_p = np.random.randint(0, 2) target_charge = [target_q_shifted, target_p] assert np.all((Q == target_charge) == np.logical_and.reduce( - [q1 == target_q[0], q2 == 
target_q[1], p1 == target_p[0]])) + [q1 == target_q[0], q2 == target_q[1], p1 == target_p])) def test_Charge_U1_U1_unique(): From bb2966d1a6ae8e265aec4161efc8b0bdbb2ffb81 Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 4 Jan 2020 16:47:16 -0500 Subject: [PATCH 118/212] fix bug in __len__ fix various bugs in __eq__ and __getitem__ --- tensornetwork/block_tensor/charge.py | 87 ++++++++++++++++++++++------ 1 file changed, 69 insertions(+), 18 deletions(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 285d15a44..f50760793 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -88,8 +88,9 @@ def __matmul__(self, other: "BaseCharge") -> "Charge": def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": charges = self.charges[n] + obj = self.__new__(type(self)) - obj.__init__(charges=[charges], shifts=self.shifts) + obj.__init__(charges=[np.asarray([charges])], shifts=self.shifts) return obj @property @@ -97,7 +98,7 @@ def num_symmetries(self): return len(self.shifts) def __len__(self) -> int: - return len(self.charges) + return np.prod(self.charges.shape) def __repr__(self): return str(type(self)) + '\nshifts: ' + self.shifts.__repr__( @@ -181,7 +182,7 @@ def equals(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: ]) return self.charges == target - def __eq__(self, target: int) -> np.ndarray: + def __eq__(self, target: Union[int, "BaseCharge"]) -> np.ndarray: """ Find indices where `BaseCharge` equals `target_charges`. `target` is a single integer encoding all symmetries of @@ -192,7 +193,9 @@ def __eq__(self, target: int) -> np.ndarray: np.ndarray: Boolean array with `True` where `BaseCharge.charges` equals `target` and `False` everywhere else. 
""" - return self.charges == target + if isinstance(target, (np.integer, int)): + return self.charges == target + return self.charges == target.charges def concatenate(self, others: Union["BaseCharge", List["BaseCharge"]]): """ @@ -216,6 +219,16 @@ def concatenate(self, others: Union["BaseCharge", List["BaseCharge"]]): out.__init__([charges], self.shifts) return out + @property + def dtype(self): + return self.charges.dtype + + @property + def zero_charge(self): + obj = self.__new__(type(self)) + obj.__init__(charges=[np.asarray([self.dtype.type(0)])], shifts=self.shifts) + return obj + class U1Charge(BaseCharge): """ @@ -468,11 +481,36 @@ def __init__(self, charges: List[BaseCharge]) -> None: self.charges = charges - def __getitem__(self, n: int) -> BaseCharge: + def __getitem__(self, n: Union[np.ndarray, int]) -> BaseCharge: if not hasattr(self, '_stacked_charges'): self._stacked_charges = np.stack([c.charges for c in self.charges], axis=1) - return self._stacked_charges[n, :] + + array = self._stacked_charges[n, :] + charges = [] + if len(array) == 0: + for n in range(len(self.charges)): + charge = self.charges[n].__new__(type(self.charges[n])) + charge.__init__( + charges=[np.empty(0, dtype=self.charges[n].dtype)], + shifts=self.charges[n].shifts) + charges.append(charge) + + obj = self.__new__(type(self)) + obj.__init__(charges=charges) + return obj + + if len(array.shape) == 1: + array = np.expand_dims(array, 1) + + for n in range(len(self.charges)): + charge = self.charges[n].__new__(type(self.charges[n])) + charge.__init__(charges=[array[n, :]], shifts=self.charges[n].shifts) + charges.append(charge) + + obj = self.__new__(type(self)) + obj.__init__(charges=charges) + return obj #return np.asarray([c.charges[n] for c in self.charges]) @@ -503,7 +541,7 @@ def __repr__(self): for n in range(len(self.charges)): tmp = self.charges[n].__repr__() tmp = tmp.replace('\n', '\n\t') - text += (tmp + '\n\t') + text += (tmp + '\n') return text def __len__(self): @@ 
-583,20 +621,23 @@ def equals(self, target_charges: List[Union[List, np.ndarray]]) -> np.ndarray: for n in range(len(target_charges)) ]) - def __eq__(self, target_charges): - if len(target_charges) != len(self.charges): + def __eq__(self, target_charges: Union[np.ndarray, "ChargeCollection"]): + if isinstance(target_charges, type(self)): + target_charges = np.stack([c.charges for c in target_charges.charges], + axis=1) + + if target_charges.shape[1] != len(self.charges): raise ValueError( "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" .format(len(target_charges), len(self.charges))) if not hasattr(self, '_stacked_charges'): self._stacked_charges = np.stack([c.charges for c in self.charges], axis=1) - target = np.reshape(target_charges, (1, len(target_charges))) - return np.logical_and.reduce(self._stacked_charges == target, axis=1) - # return np.logical_and.reduce([ - # self.charges[n] == target_charges[n] - # for n in range(len(target_charges)) - # ]) + if len(target_charges.shape) == 1: + target_charges = np.expand_dims(target_charges, 0) + + return np.logical_and.reduce( + self._stacked_charges == target_charges, axis=1) def concatenate(self, others: Union["ChargeCollection", List["ChargeCollection"]]): @@ -617,10 +658,20 @@ def concatenate(self, ] return ChargeCollection(charges) + @property + def dtype(self): + return np.result_type(*[c.dtype for c in self.charges]) + + @property + def zero_charge(self): + obj = self.__new__(type(self)) + obj.__init__(charges=[c.zero_charge for c in self.charges]) + return obj + -def fuse_charges(charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]] - ) -> Union[BaseCharge, ChargeCollection]: +def fuse_charges( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[bool, int]]) -> Union[BaseCharge, ChargeCollection]: """ Fuse all `charges` into a new charge. 
Charges are fused from "right to left", From 84ded263622f5bb5d0fb6cc7554746be877a690b Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 4 Jan 2020 16:47:35 -0500 Subject: [PATCH 119/212] working in implemetation of multiple charges --- .../block_tensor/block_tensor_new.py | 80 ++++++++----------- 1 file changed, 34 insertions(+), 46 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py index 91232c906..143c436de 100644 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -69,47 +69,35 @@ def compute_fused_charge_degeneracies( of the charges on each leg. `1` is inflowing, `-1` is outflowing charge. Returns: - dict: Mapping fused charges to degeneracies + Union[BaseCharge, ChargeCollection]: The unique fused charges. + np.ndarray of integers: The degeneracies of each unqiue fused charge. """ if len(charges) == 1: return (flows[0] * charges[0]).unique(return_counts=True) # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". - accumulated_charges, accumulated_degeneracies = (flows[0] * - charges[0]).unique( - return_counts=True) + accumulated_charges, accumulated_degeneracies = ( + flows[0] * charges[0]).unique(return_counts=True) for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor - print(n, len(accumulated_charges)) - t1 = time.time() leg_charges, leg_degeneracies = charges[n].unique(return_counts=True) - print('unique', time.time() - t1) - #fuse the unique charges #Note: entries in `fused_charges` are not unique anymore. 
#flow1 = 1 because the flow of leg 0 has already been #mulitplied above - t1 = time.time() fused_charges = accumulated_charges + leg_charges * flows[n] - print('fusing charges', time.time() - t1) #compute the degeneracies of `fused_charges` charges #`fused_degeneracies` is a list of degeneracies such that # `fused_degeneracies[n]` is the degeneracy of of # charge `c = fused_charges[n]`. - t1 = time.time() fused_degeneracies = fuse_degeneracies(accumulated_degeneracies, leg_degeneracies) - print('fusing degeneracies', time.time() - t1) - - t1 = time.time() accumulated_charges = fused_charges.unique() - print('second unique', time.time() - t1) accumulated_degeneracies = np.empty( len(accumulated_charges), dtype=np.int64) - print(len(accumulated_charges)) - fused_charges == accumulated_charges[n] + for n in range(len(accumulated_charges)): accumulated_degeneracies[n] = np.sum( fused_degeneracies[fused_charges == accumulated_charges[n]]) @@ -136,11 +124,13 @@ def compute_num_nonzero(charges: List[np.ndarray], """ accumulated_charges, accumulated_degeneracies = compute_fused_charge_degeneracies( charges, flows) - if len(np.nonzero(accumulated_charges == 0)[0]) == 0: + res = accumulated_charges == accumulated_charges.zero_charge + + if len(np.nonzero(res)[0]) == 0: raise ValueError( "given leg-charges `charges` and flows `flows` are incompatible " "with a symmetric tensor") - return accumulated_degeneracies[accumulated_charges == 0][0] + return accumulated_degeneracies[res][0] def find_diagonal_sparse_blocks(data: np.ndarray, @@ -402,11 +392,11 @@ def find_diagonal_sparse_blocks_depreacated_1( return blocks -def find_diagonal_sparse_blocks_deprecated_0(data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], - return_data: Optional[bool] = True - ) -> Dict: +def find_diagonal_sparse_blocks_deprecated_0( + data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: """ Deprecated: 
this version is about 2 times slower (worst case) than the current used implementation @@ -508,11 +498,11 @@ def find_diagonal_sparse_blocks_deprecated_0(data: np.ndarray, return blocks -def find_diagonal_sparse_blocks_column_major(data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], - return_data: Optional[bool] = True - ) -> Dict: +def find_diagonal_sparse_blocks_column_major( + data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: """ Deprecated @@ -727,9 +717,8 @@ def find_dense_positions(left_charges: np.ndarray, left_flow: int, indices = [] for n in range(len(left_charges)): c = left_charges[n] - indices.append(n * len_right_charges + - right_locations[(target_charge - left_flow * c) * - right_flow]) + indices.append(n * len_right_charges + right_locations[ + (target_charge - left_flow * c) * right_flow]) return np.concatenate(indices) @@ -815,9 +804,8 @@ def find_sparse_positions(left_charges: np.ndarray, left_flow: int, for target_charge in target_charges: right_indices[(left_charge, target_charge)] = np.nonzero( - tmp_relevant_right_charges == (target_charge - - left_flow * left_charge) * - right_flow)[0] + tmp_relevant_right_charges == + (target_charge - left_flow * left_charge) * right_flow)[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy @@ -1204,9 +1192,9 @@ def transpose(self, linear_positions = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=0) - self.dense_to_sparse_table = sp.sparse.csr_matrix( - (np.arange(len(self.data)), - (linear_positions, np.zeros(len(self.data), dtype=np.int64)))) + self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( + len(self.data)), (linear_positions, + np.zeros(len(self.data), dtype=np.int64)))) flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] @@ -1231,8 +1219,8 @@ def transpose(self, self.data = 
self.data[inds] return inds - def transpose_intersect1d(self, order: Union[List[int], np.ndarray] - ) -> "BlockSparseTensor": + def transpose_intersect1d( + self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order` Args: pp @@ -1323,8 +1311,8 @@ def transpose_intersect1d(self, order: Union[List[int], np.ndarray] # tr_dense_linear_positions[tr_linear_positions]) # self.data = self.data[inds] - def transpose_searchsorted(self, order: Union[List[int], np.ndarray] - ) -> "BlockSparseTensor": + def transpose_searchsorted( + self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": """ Deprecated: @@ -1504,8 +1492,8 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_1(self, return_data: Optional[bool] = True - ) -> Dict: + def get_diagonal_blocks_deprecated_1( + self, return_data: Optional[bool] = True) -> Dict: """ Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. 
@@ -1540,8 +1528,8 @@ def get_diagonal_blocks_deprecated_1(self, return_data: Optional[bool] = True column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_0(self, return_data: Optional[bool] = True - ) -> Dict: + def get_diagonal_blocks_deprecated_0( + self, return_data: Optional[bool] = True) -> Dict: """ Deprecated From be7f790ee31d0b2663d5444bca76ee2c1122f29f Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 4 Jan 2020 21:17:45 -0500 Subject: [PATCH 120/212] bugfix in ChargeCollection.__getitem__ --- tensornetwork/block_tensor/charge.py | 29 +++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index f50760793..c53d80837 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -88,9 +88,10 @@ def __matmul__(self, other: "BaseCharge") -> "Charge": def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": charges = self.charges[n] - + if isinstance(n, (np.integer, int)): + charges = np.asarray([charges]) obj = self.__new__(type(self)) - obj.__init__(charges=[np.asarray([charges])], shifts=self.shifts) + obj.__init__(charges=[charges], shifts=self.shifts) return obj @property @@ -476,7 +477,7 @@ def __init__(self, charges: List[BaseCharge]) -> None: for n in range(len(charges)): if not isinstance(charges[n], BaseCharge): raise TypeError( - "`Charge` can only be initialized with a list of `BaseCharge`. Found {} instead" + "`ChargeCollection` can only be initialized with a list of `BaseCharge`. Found {} instead" .format([type(charges[n]) for n in range(len(charges))])) self.charges = charges @@ -487,8 +488,18 @@ def __getitem__(self, n: Union[np.ndarray, int]) -> BaseCharge: axis=1) array = self._stacked_charges[n, :] + if len(array.shape) > 2: + raise ValueError( + 'array.shape = {} is larger than 2! 
this is a bug!'.format( + len(array.shape))) + if len(array.shape) == 2: + if array.shape[1] == 1: + array = np.squeeze(array, axis=1) + if len(array.shape) == 0: + array = np.asarray([array]) + charges = [] - if len(array) == 0: + if np.prod(array.shape) == 0: for n in range(len(self.charges)): charge = self.charges[n].__new__(type(self.charges[n])) charge.__init__( @@ -503,9 +514,9 @@ def __getitem__(self, n: Union[np.ndarray, int]) -> BaseCharge: if len(array.shape) == 1: array = np.expand_dims(array, 1) - for n in range(len(self.charges)): - charge = self.charges[n].__new__(type(self.charges[n])) - charge.__init__(charges=[array[n, :]], shifts=self.charges[n].shifts) + for m in range(len(self.charges)): + charge = self.charges[m].__new__(type(self.charges[m])) + charge.__init__(charges=[array[:, m]], shifts=self.charges[m].shifts) charges.append(charge) obj = self.__new__(type(self)) @@ -518,7 +529,7 @@ def __add__(self, other: "Charge") -> "Charge": """ Fuse `self` with `other`. Args: - other: A `Charge` object. + other: A `ChargeCollection` object. Returns: Charge: The result of fusing `self` with `other`. """ @@ -529,7 +540,7 @@ def __sub__(self, other: "Charge") -> "Charge": """ Subtract `other` from `self`. Args: - other: A `Charge` object. + other: A `ChargeCollection` object. Returns: Charge: The result of fusing `self` with `other`. 
""" From 884cf36f6f3e4db2e3b4e40e5f701e46540882e2 Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 4 Jan 2020 21:29:08 -0500 Subject: [PATCH 121/212] adding tests --- .../block_tensor/block_tensor_new_test.py | 192 ++++++++++++++++++ 1 file changed, 192 insertions(+) create mode 100644 tensornetwork/block_tensor/block_tensor_new_test.py diff --git a/tensornetwork/block_tensor/block_tensor_new_test.py b/tensornetwork/block_tensor/block_tensor_new_test.py new file mode 100644 index 000000000..1cfbb811d --- /dev/null +++ b/tensornetwork/block_tensor/block_tensor_new_test.py @@ -0,0 +1,192 @@ +import numpy as np +import pytest + +from tensornetwork.block_tensor.charge import U1Charge +from tensornetwork.block_tensor.block_tensor_new import find_diagonal_sparse_blocks, compute_num_nonzero + +np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128] + + +def test_consistency(): + B = 5 + D = 100 + rank = 4 + qs = [[ + np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + for _ in range(2) + ] + for _ in range(rank)] + charges1 = [U1Charge(qs[n]) for n in range(rank)] + charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] + charges3 = [ + ChargeCollection([U1Charge([q for q in qs[n]])]) for n in range(rank) + ] + flows = [1, 1, 1, -1] + n1 = compute_num_nonzero(charges1, flows) + n2 = compute_num_nonzero(charges2, flows) + n3 = compute_num_nonzero(charges3, flows) + assert n1 == n2 + assert n1 == n3 + + +# @pytest.mark.parametrize("dtype", np_dtypes) +# def test_block_sparse_init(dtype): +# D = 10 #bond dimension +# B = 10 #number of blocks +# rank = 4 +# flows = np.asarray([1 for _ in range(rank)]) +# flows[-2::] = -1 +# charges = [ +# np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) +# for _ in range(rank) +# ] +# indices = [ +# Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) +# for n in range(rank) +# ] +# num_elements = compute_num_nonzero([i.charges for i in indices], +# [i.flow for i in indices]) 
+# A = BlockSparseTensor.random(indices=indices, dtype=dtype) +# assert A.dtype == dtype +# for r in range(rank): +# assert A.indices[r].name == 'index{}'.format(r) +# assert A.dense_shape == tuple([D] * rank) +# assert len(A.data) == num_elements + +# def test_find_dense_positions(): +# left_charges = np.asarray([-2, 0, 1, 0, 0]).astype(np.int16) +# right_charges = np.asarray([-1, 0, 2, 1]).astype(np.int16) +# target_charge = 0 +# fused_charges = fuse_charges([left_charges, right_charges], [1, 1]) +# dense_positions = find_dense_positions(left_charges, 1, right_charges, 1, +# target_charge) +# np.testing.assert_allclose(dense_positions, +# np.nonzero(fused_charges == target_charge)[0]) + +# def test_find_dense_positions_2(): +# D = 40 #bond dimension +# B = 4 #number of blocks +# dtype = np.int16 #the dtype of the quantum numbers +# rank = 4 +# flows = np.asarray([1 for _ in range(rank)]) +# flows[-2::] = -1 +# charges = [ +# np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) +# for _ in range(rank) +# ] +# indices = [ +# Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) +# for n in range(rank) +# ] +# n1 = compute_num_nonzero([i.charges for i in indices], +# [i.flow for i in indices]) +# row_charges = fuse_charges([indices[n].charges for n in range(rank // 2)], +# [1 for _ in range(rank // 2)]) +# column_charges = fuse_charges( +# [indices[n].charges for n in range(rank // 2, rank)], +# [1 for _ in range(rank // 2, rank)]) + +# i01 = indices[0] * indices[1] +# i23 = indices[2] * indices[3] +# positions = find_dense_positions(i01.charges, 1, i23.charges, 1, 0) +# assert len(positions) == n1 + +# def test_find_sparse_positions(): +# D = 40 #bond dimension +# B = 4 #number of blocks +# dtype = np.int16 #the dtype of the quantum numbers +# rank = 4 +# flows = np.asarray([1 for _ in range(rank)]) +# flows[-2::] = -1 +# charges = [ +# np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) +# for _ in range(rank) +# ] +# indices = [ +# 
Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) +# for n in range(rank) +# ] +# n1 = compute_num_nonzero([i.charges for i in indices], +# [i.flow for i in indices]) +# row_charges = fuse_charges([indices[n].charges for n in range(rank // 2)], +# [1 for _ in range(rank // 2)]) +# column_charges = fuse_charges( +# [indices[n].charges for n in range(rank // 2, rank)], +# [1 for _ in range(rank // 2, rank)]) + +# i01 = indices[0] * indices[1] +# i23 = indices[2] * indices[3] +# unique_row_charges = np.unique(i01.charges) +# unique_column_charges = np.unique(i23.charges) +# common_charges = np.intersect1d( +# unique_row_charges, -unique_column_charges, assume_unique=True) +# blocks = find_sparse_positions( +# i01.charges, 1, i23.charges, 1, target_charges=[0]) +# assert sum([len(v) for v in blocks.values()]) == n1 +# np.testing.assert_allclose(np.sort(blocks[0]), np.arange(n1)) + +# def test_find_sparse_positions_2(): +# D = 40 #bond dimension +# B = 4 #number of blocks +# dtype = np.int16 #the dtype of the quantum numbers +# flows = [1, -1] + +# rank = len(flows) +# charges = [ +# np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) +# for _ in range(rank) +# ] +# indices = [ +# Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) +# for n in range(rank) +# ] +# i1, i2 = indices +# common_charges = np.intersect1d(i1.charges, i2.charges) +# row_locations = find_sparse_positions( +# left_charges=i1.charges, +# left_flow=flows[0], +# right_charges=i2.charges, +# right_flow=flows[1], +# target_charges=common_charges) +# fused = (i1 * i2).charges +# relevant = fused[np.isin(fused, common_charges)] +# for k, v in row_locations.items(): +# np.testing.assert_allclose(np.nonzero(relevant == k)[0], np.sort(v)) + +# def test_get_diagonal_blocks(): +# D = 40 #bond dimension +# B = 4 #number of blocks +# dtype = np.int16 #the dtype of the quantum numbers +# rank = 4 +# flows = np.asarray([1 for _ in range(rank)]) +# flows[-2::] = -1 +# charges = 
[ +# np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) +# for _ in range(rank) +# ] +# indices = [ +# Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) +# for n in range(rank) +# ] +# common_charges = np.intersect1d(indices[0].charges, indices[1].charges) +# row_locations = find_sparse_positions( +# left_charges=indices[0].charges, +# left_flow=1, +# right_charges=indices[1].charges, +# right_flow=1, +# target_charges=common_charges) + +# def test_dense_transpose(): +# Ds = [10, 11, 12] #bond dimension +# rank = len(Ds) +# flows = np.asarray([1 for _ in range(rank)]) +# flows[-2::] = -1 +# charges = [np.zeros(Ds[n], dtype=np.int16) for n in range(rank)] +# indices = [ +# Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) +# for n in range(rank) +# ] +# A = BlockSparseTensor.random(indices=indices, dtype=np.float64) +# B = np.transpose(np.reshape(A.data.copy(), Ds), (1, 0, 2)) +# A.transpose((1, 0, 2)) +# np.testing.assert_allclose(A.data, B.flat) From 8a581f3792a25cec05a9eec283a4947ef0164fe1 Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 4 Jan 2020 21:51:29 -0500 Subject: [PATCH 122/212] sleep commit --- .../block_tensor/block_tensor_new.py | 19 +++++++++++-------- .../block_tensor/block_tensor_new_test.py | 2 +- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py index 143c436de..ec541151a 100644 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -192,7 +192,7 @@ def find_diagonal_sparse_blocks(data: np.ndarray, column_charges, column_flows) #convenience container for storing the degeneracies of each #column charge - column_degeneracies = dict(zip(unique_column_charges, column_dims)) + #column_degeneracies = dict(zip(unique_column_charges, column_dims)) if len(row_charges) > 1: left_row_charges, right_row_charges, _ = _find_best_partition( @@ -205,6 +205,7 @@ 
def find_diagonal_sparse_blocks(data: np.ndarray, concatenated = unique_row_charges.concatenate((-1) * unique_column_charges) tmp_unique, counts = concatenated.unique(return_counts=True) common_charges = tmp_unique[counts == 2] + return common_charges row_locations = find_sparse_positions( left_charges=left_row_charges, left_flow=1, @@ -722,14 +723,16 @@ def find_dense_positions(left_charges: np.ndarray, left_flow: int, return np.concatenate(indices) -def find_sparse_positions(left_charges: np.ndarray, left_flow: int, - right_charges: np.ndarray, right_flow: int, - target_charges: Union[List[int], np.ndarray]) -> Dict: +def find_sparse_positions( + left_charges: List[Union[BaseCharge, ChargeCollection]], left_flow: int, + right_charges: List[Union[BaseCharge, ChargeCollection]], right_flow: int, + target_charges: Union[List[int], np.ndarray]) -> Dict: """ - Find the sparse locations of elements (i.e. the index-values within the SPARSE tensor) - in the vector `fused_charges` (resulting from fusing np.ndarrays - `left_charges` and `right_charges`) that have a value of `target_charge`, - assuming that all elements different from `target_charges` are `0`. + Find the sparse locations of elements (i.e. the index-values within + the SPARSE tensor) in the vector `fused_charges` (resulting from + fusing `left_charges` and `right_charges`) + that have a value of `target_charges`, assuming that all elements + different from `target_charges` are `0`. 
For example, given ``` left_charges = [-2,0,1,0,0] diff --git a/tensornetwork/block_tensor/block_tensor_new_test.py b/tensornetwork/block_tensor/block_tensor_new_test.py index 1cfbb811d..d6bdb6bac 100644 --- a/tensornetwork/block_tensor/block_tensor_new_test.py +++ b/tensornetwork/block_tensor/block_tensor_new_test.py @@ -1,7 +1,7 @@ import numpy as np import pytest -from tensornetwork.block_tensor.charge import U1Charge +from tensornetwork.block_tensor.charge import U1Charge, ChargeCollection from tensornetwork.block_tensor.block_tensor_new import find_diagonal_sparse_blocks, compute_num_nonzero np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128] From 1f8adf59d8f19764fea5b75f660905e11924462c Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 6 Jan 2020 08:50:03 -0500 Subject: [PATCH 123/212] added iterators --- .../block_tensor/block_tensor_new.py | 10 +- tensornetwork/block_tensor/charge.py | 144 +++++++++++------- 2 files changed, 93 insertions(+), 61 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py index ec541151a..9f53b9831 100644 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -726,7 +726,7 @@ def find_dense_positions(left_charges: np.ndarray, left_flow: int, def find_sparse_positions( left_charges: List[Union[BaseCharge, ChargeCollection]], left_flow: int, right_charges: List[Union[BaseCharge, ChargeCollection]], right_flow: int, - target_charges: Union[List[int], np.ndarray]) -> Dict: + target_charges: List[Union[BaseCharge, ChargeCollection]]) -> Dict: """ Find the sparse locations of elements (i.e. 
the index-values within the SPARSE tensor) in the vector `fused_charges` (resulting from @@ -764,10 +764,10 @@ def find_sparse_positions( #FIXME: this is probably still not optimal _check_flows([left_flow, right_flow]) - target_charges = np.unique(target_charges) - unique_left = np.unique(left_charges) - unique_right = np.unique(right_charges) - fused = fuse_charges([unique_left, unique_right], [left_flow, right_flow]) + target_charges = target_charges.unique() + unique_left = left_charges.unique() + unique_right = right_charges.unique() + fused = left_flow * unique_left + right_flow * unique_right #compute all unique charges that can add up to #target_charges diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index c53d80837..d95b6ae25 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -21,7 +21,7 @@ from tensornetwork.backends import backend_factory import copy import warnings -from typing import List, Union, Any, Optional, Tuple, Text +from typing import List, Union, Any, Optional, Tuple, Text, Iterable def _copy_charges(charges): @@ -87,12 +87,15 @@ def __matmul__(self, other: "BaseCharge") -> "Charge": "`__matmul__` is not implemented for `BaseCharge`") def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": - charges = self.charges[n] - if isinstance(n, (np.integer, int)): - charges = np.asarray([charges]) - obj = self.__new__(type(self)) - obj.__init__(charges=[charges], shifts=self.shifts) - return obj + return self.charges[n] + + # def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": + # charges = self.charges[n] + # if isinstance(n, (np.integer, int)): + # charges = np.asarray([charges]) + # obj = self.__new__(type(self)) + # obj.__init__(charges=[charges], shifts=self.shifts) + # return obj @property def num_symmetries(self): @@ -160,7 +163,7 @@ def unique(self, out.__init__([result[0]], self.shifts) return tuple([out] + [result[n] for n in range(1, 
len(result))]) - def equals(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: + def equals(self, target_charges: Iterable) -> np.ndarray: """ Find indices where `BaseCharge` equals `target_charges`. `target_charges` has to be an array of the same lenghts @@ -183,7 +186,7 @@ def equals(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: ]) return self.charges == target - def __eq__(self, target: Union[int, "BaseCharge"]) -> np.ndarray: + def __eq__(self, target: Union[int, Iterable, "BaseCharge"]) -> np.ndarray: """ Find indices where `BaseCharge` equals `target_charges`. `target` is a single integer encoding all symmetries of @@ -194,9 +197,9 @@ def __eq__(self, target: Union[int, "BaseCharge"]) -> np.ndarray: np.ndarray: Boolean array with `True` where `BaseCharge.charges` equals `target` and `False` everywhere else. """ - if isinstance(target, (np.integer, int)): - return self.charges == target - return self.charges == target.charges + if isinstance(target, type(self)): + return self.charges == target.charges + return self.charges == np.asarray(target) def concatenate(self, others: Union["BaseCharge", List["BaseCharge"]]): """ @@ -230,6 +233,9 @@ def zero_charge(self): obj.__init__(charges=[np.asarray([self.dtype.type(0)])], shifts=self.shifts) return obj + def __iter__(self): + return iter(self.charges) + class U1Charge(BaseCharge): """ @@ -455,7 +461,7 @@ def dual_charges(self): #Z2 charges are self-dual return self.charges - def equals(self, target_charges: Union[List, np.ndarray]) -> np.ndarray: + def equals(self, target_charges: Iterable) -> np.ndarray: if not np.all(np.isin(target_charges, np.asarray([0, 1]))): raise ValueError("Z2-charges can only be 0 or 1, found charges {}".format( np.unique(target_charges))) @@ -467,6 +473,20 @@ class ChargeCollection: """ + class Iterator: + + def __init__(self, data: np.ndarray): + self.n = 0 + self.data = data + + def __next__(self): + if self.n < self.data.shape[0]: + result = self.data[self.n, 
:] + self.n += 1 + return tuple(result) #this makes a copy! + else: + raise StopIteration + def __init__(self, charges: List[BaseCharge]) -> None: if not isinstance(charges, list): raise TypeError("only list allowed for argument `charges` " @@ -483,47 +503,58 @@ def __init__(self, charges: List[BaseCharge]) -> None: self.charges = charges def __getitem__(self, n: Union[np.ndarray, int]) -> BaseCharge: + if not hasattr(self, '_stacked_charges'): + self._stacked_charges = np.stack([c.charges for c in self.charges], + axis=1) + return self._stacked_charges[n, :] + + # def __getitem__(self, n: Union[np.ndarray, int]) -> BaseCharge: + # if not hasattr(self, '_stacked_charges'): + # self._stacked_charges = np.stack([c.charges for c in self.charges], + # axis=1) + + # array = self._stacked_charges[n, :] + # if len(array.shape) > 2: + # raise ValueError( + # 'array.shape = {} is larger than 2! this is a bug!'.format( + # len(array.shape))) + # if len(array.shape) == 2: + # if array.shape[1] == 1: + # array = np.squeeze(array, axis=1) + # if len(array.shape) == 0: + # array = np.asarray([array]) + + # charges = [] + # if np.prod(array.shape) == 0: + # for n in range(len(self.charges)): + # charge = self.charges[n].__new__(type(self.charges[n])) + # charge.__init__( + # charges=[np.empty(0, dtype=self.charges[n].dtype)], + # shifts=self.charges[n].shifts) + # charges.append(charge) + + # obj = self.__new__(type(self)) + # obj.__init__(charges=charges) + # return obj + + # if len(array.shape) == 1: + # array = np.expand_dims(array, 1) + + # for m in range(len(self.charges)): + # charge = self.charges[m].__new__(type(self.charges[m])) + # charge.__init__(charges=[array[:, m]], shifts=self.charges[m].shifts) + # charges.append(charge) + + # obj = self.__new__(type(self)) + # obj.__init__(charges=charges) + # return obj + + def __iter__(self): if not hasattr(self, '_stacked_charges'): self._stacked_charges = np.stack([c.charges for c in self.charges], axis=1) - array = 
self._stacked_charges[n, :] - if len(array.shape) > 2: - raise ValueError( - 'array.shape = {} is larger than 2! this is a bug!'.format( - len(array.shape))) - if len(array.shape) == 2: - if array.shape[1] == 1: - array = np.squeeze(array, axis=1) - if len(array.shape) == 0: - array = np.asarray([array]) - - charges = [] - if np.prod(array.shape) == 0: - for n in range(len(self.charges)): - charge = self.charges[n].__new__(type(self.charges[n])) - charge.__init__( - charges=[np.empty(0, dtype=self.charges[n].dtype)], - shifts=self.charges[n].shifts) - charges.append(charge) - - obj = self.__new__(type(self)) - obj.__init__(charges=charges) - return obj - - if len(array.shape) == 1: - array = np.expand_dims(array, 1) - - for m in range(len(self.charges)): - charge = self.charges[m].__new__(type(self.charges[m])) - charge.__init__(charges=[array[:, m]], shifts=self.charges[m].shifts) - charges.append(charge) - - obj = self.__new__(type(self)) - obj.__init__(charges=charges) - return obj - - #return np.asarray([c.charges[n] for c in self.charges]) + return self.Iterator(self._stacked_charges) def __add__(self, other: "Charge") -> "Charge": """ @@ -632,20 +663,21 @@ def equals(self, target_charges: List[Union[List, np.ndarray]]) -> np.ndarray: for n in range(len(target_charges)) ]) - def __eq__(self, target_charges: Union[np.ndarray, "ChargeCollection"]): + def __eq__(self, target_charges: Union[Iterable, "ChargeCollection"]): if isinstance(target_charges, type(self)): target_charges = np.stack([c.charges for c in target_charges.charges], axis=1) + if not hasattr(self, '_stacked_charges'): + self._stacked_charges = np.stack([c.charges for c in self.charges], + axis=1) + target_charges = np.asarray(target_charges) + if target_charges.ndim == 1: + target_charges = np.expand_dims(target_charges, 0) if target_charges.shape[1] != len(self.charges): raise ValueError( "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" .format(len(target_charges), 
len(self.charges))) - if not hasattr(self, '_stacked_charges'): - self._stacked_charges = np.stack([c.charges for c in self.charges], - axis=1) - if len(target_charges.shape) == 1: - target_charges = np.expand_dims(target_charges, 0) return np.logical_and.reduce( self._stacked_charges == target_charges, axis=1) From d729c5614f58154f30e9535cf5f3a18501410ba5 Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 6 Jan 2020 10:50:45 -0500 Subject: [PATCH 124/212] ChargeCollection.__init__: charges are now always stacked, self.charges contain views to the stacked charges __init__ can be called with optional shifts and stacked_charges to initialize the BaseCharges object with it --- tensornetwork/block_tensor/charge.py | 179 +++++++++++++++++---------- 1 file changed, 116 insertions(+), 63 deletions(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index d95b6ae25..7728eb9cc 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -21,7 +21,7 @@ from tensornetwork.backends import backend_factory import copy import warnings -from typing import List, Union, Any, Optional, Tuple, Text, Iterable +from typing import List, Union, Any, Optional, Tuple, Text, Iterable, Type def _copy_charges(charges): @@ -73,6 +73,9 @@ def __init__(self, ], axis=0).astype(dtype) else: + if np.max(shifts) >= charges[0].dtype.itemsize * 8: + raise TypeError("shifts {} are incompatible with dtype {}".format( + shifts, charges[0].dtype)) self.shifts = np.asarray(shifts) self.charges = charges[0] @@ -163,6 +166,9 @@ def unique(self, out.__init__([result[0]], self.shifts) return tuple([out] + [result[n] for n in range(1, len(result))]) + def isin(self, targets: Union[int, Iterable]): + return np.isin(self.charges, targets) + def equals(self, target_charges: Iterable) -> np.ndarray: """ Find indices where `BaseCharge` equals `target_charges`. 
@@ -186,7 +192,7 @@ def equals(self, target_charges: Iterable) -> np.ndarray: ]) return self.charges == target - def __eq__(self, target: Union[int, Iterable, "BaseCharge"]) -> np.ndarray: + def __eq__(self, target: Union[int, Iterable]) -> np.ndarray: """ Find indices where `BaseCharge` equals `target_charges`. `target` is a single integer encoding all symmetries of @@ -487,7 +493,10 @@ def __next__(self): else: raise StopIteration - def __init__(self, charges: List[BaseCharge]) -> None: + def __init__(self, + charges: List[BaseCharge], + shifts: Optional[List[np.ndarray]] = None, + stacked_charges: Optional[np.ndarray] = None) -> None: if not isinstance(charges, list): raise TypeError("only list allowed for argument `charges` " "in BaseCharge.__init__(charges)") @@ -496,64 +505,112 @@ def __init__(self, charges: List[BaseCharge]) -> None: "Got lengths = {}".format([len(c) for c in charges])) for n in range(len(charges)): if not isinstance(charges[n], BaseCharge): - raise TypeError( - "`ChargeCollection` can only be initialized with a list of `BaseCharge`. Found {} instead" - .format([type(charges[n]) for n in range(len(charges))])) + raise TypeError("`ChargeCollection` can only be initialized " + "with a list of `BaseCharge`. Found {} instead".format( + [type(charges[n]) for n in range(len(charges))])) + if (shifts is not None) and (stacked_charges is None): + raise ValueError( + "Found `shifts == None` and `stacked_charges != None`." + "`shifts` and `stacked_charges` can only be passed together.") + if (shifts is None) and (stacked_charges is not None): + raise ValueError( + "Found `shifts != None` and `stacked_charges == None`." 
+ "`shifts` and `stacked_charges` can only be passed together.") + self.charges = [] + if stacked_charges is None: + + self._stacked_charges = np.stack([c.charges for c in charges], axis=1) + for n in range(len(charges)): + charge = charges[n].__new__(type(charges[n])) + charge.__init__(self._stacked_charges[:, n], shifts=charges[n].shifts) + self.charges.append(charge) + else: + if len(shifts) != stacked_charges.shape[1]: + raise ValueError("`len(shifts)` = {} is different from " + "`stacked_charges.shape[1]` = {}".format( + len(shifts), stacked_charges.shape[1])) + + if stacked_charges.shape[1] != len(charges): + raise ValueError("`len(charges) and shape[1] of `stacked_charges` " + "have to be the same.") + for n in range(len(charges)): + charge = charges[n].__new__(type(charges[n])) + charge.__init__(self._stacked_charges[:, n], shifts=shifts[n]) + self.charges.append(charge) + + @classmethod + def from_stacked_charges(cls, charge_types: Type, shifts: List[np.ndarray], + stacked_charges: np.ndarray): + if len(charge_types) != stacked_charges.shape[1]: + raise ValueError("`len(charge_types) and shape[1] of `stacked_charges` " + "have to be the same.") + if len(charge_types) != len(shifts): + raise ValueError( + "`len(charge_types) and `len(shifts)` have to be the same.") + charges = [] + for n in range(len(charge_types)): + charge = charge_types[n].__new__(charge_types[n]) + charge.__init__(charges=stacked_charges[:, n], shifts=shifts[n]) + charges.append(charge) + return cls(charges=charges, stacked_charges=stacked_charges) - self.charges = charges + @property + def num_charges(self) -> int: + """ + Return the number of different charges in `ChargeCollection` + """ + return self._stacked_charges.shape[1] - def __getitem__(self, n: Union[np.ndarray, int]) -> BaseCharge: - if not hasattr(self, '_stacked_charges'): - self._stacked_charges = np.stack([c.charges for c in self.charges], - axis=1) + def get_charges(self, n: Union[np.ndarray, int]) -> BaseCharge: + """ 
+ Returns an np.ndarray `BaseCharges.charges[n]. + """ return self._stacked_charges[n, :] - # def __getitem__(self, n: Union[np.ndarray, int]) -> BaseCharge: - # if not hasattr(self, '_stacked_charges'): - # self._stacked_charges = np.stack([c.charges for c in self.charges], - # axis=1) - - # array = self._stacked_charges[n, :] - # if len(array.shape) > 2: - # raise ValueError( - # 'array.shape = {} is larger than 2! this is a bug!'.format( - # len(array.shape))) - # if len(array.shape) == 2: - # if array.shape[1] == 1: - # array = np.squeeze(array, axis=1) - # if len(array.shape) == 0: - # array = np.asarray([array]) - - # charges = [] - # if np.prod(array.shape) == 0: - # for n in range(len(self.charges)): - # charge = self.charges[n].__new__(type(self.charges[n])) - # charge.__init__( - # charges=[np.empty(0, dtype=self.charges[n].dtype)], - # shifts=self.charges[n].shifts) - # charges.append(charge) - - # obj = self.__new__(type(self)) - # obj.__init__(charges=charges) - # return obj - - # if len(array.shape) == 1: - # array = np.expand_dims(array, 1) - - # for m in range(len(self.charges)): - # charge = self.charges[m].__new__(type(self.charges[m])) - # charge.__init__(charges=[array[:, m]], shifts=self.charges[m].shifts) - # charges.append(charge) + def __getitem__(self, n: Union[np.ndarray, int]) -> BaseCharge: - # obj = self.__new__(type(self)) - # obj.__init__(charges=charges) - # return obj + if isinstance(n, np.integer, int): + n = np.asarray([n]) - def __iter__(self): - if not hasattr(self, '_stacked_charges'): - self._stacked_charges = np.stack([c.charges for c in self.charges], - axis=1) + array = self._stacked_charges[n, :] + + return self.from_stacked_charges( + charge_types=[type(c) for c in self.charges]) + # if self.num_charges == 1: + # array = np.expand_dims(array, 0) + + # if len(array.shape) == 2: + # if array.shape[1] == 1: + # array = np.squeeze(array, axis=1) + # if len(array.shape) == 0: + # array = np.asarray([array]) + + # charges = [] 
+ # if np.prod(array.shape) == 0: + # for n in range(len(self.charges)): + # charge = self.charges[n].__new__(type(self.charges[n])) + # charge.__init__( + # charges=[np.empty(0, dtype=self.charges[n].dtype)], + # shifts=self.charges[n].shifts) + # charges.append(charge) + + # obj = self.__new__(type(self)) + # obj.__init__(charges=charges) + # return obj + # if len(array.shape) == 1: + # array = np.expand_dims(array, 1) + + # for m in range(len(self.charges)): + # charge = self.charges[m].__new__(type(self.charges[m])) + # charge.__init__(charges=[array[:, m]], shifts=self.charges[m].shifts) + # charges.append(charge) + + # obj = self.__new__(type(self)) + # obj.__init__(charges=charges) + # return obj + + def __iter__(self): return self.Iterator(self._stacked_charges) def __add__(self, other: "Charge") -> "Charge": @@ -663,14 +720,10 @@ def equals(self, target_charges: List[Union[List, np.ndarray]]) -> np.ndarray: for n in range(len(target_charges)) ]) - def __eq__(self, target_charges: Union[Iterable, "ChargeCollection"]): + def __eq__(self, target_charges: Iterable): if isinstance(target_charges, type(self)): target_charges = np.stack([c.charges for c in target_charges.charges], axis=1) - if not hasattr(self, '_stacked_charges'): - self._stacked_charges = np.stack([c.charges for c in self.charges], - axis=1) - target_charges = np.asarray(target_charges) if target_charges.ndim == 1: target_charges = np.expand_dims(target_charges, 0) @@ -678,7 +731,7 @@ def __eq__(self, target_charges: Union[Iterable, "ChargeCollection"]): raise ValueError( "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" .format(len(target_charges), len(self.charges))) - + print(self._stacked_charges.shape) return np.logical_and.reduce( self._stacked_charges == target_charges, axis=1) @@ -712,9 +765,9 @@ def zero_charge(self): return obj -def fuse_charges( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]]) -> Union[BaseCharge, 
ChargeCollection]: +def fuse_charges(charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[bool, int]] + ) -> Union[BaseCharge, ChargeCollection]: """ Fuse all `charges` into a new charge. Charges are fused from "right to left", From 9987165716851b4f51ddf9b9e967352e127d43f0 Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 6 Jan 2020 12:12:49 -0500 Subject: [PATCH 125/212] lunch commit --- .../block_tensor/block_tensor_new.py | 503 +++--------------- tensornetwork/block_tensor/charge.py | 192 ++++--- tensornetwork/block_tensor/charge_test.py | 13 +- 3 files changed, 216 insertions(+), 492 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py index 9f53b9831..70a56cdf6 100644 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -28,6 +28,51 @@ Tensor = Any +def unfuse(fused_indices: np.ndarray, len_left: int, + len_right: int) -> Tuple[np.ndarray, np.ndarray]: + """ + Given an np.ndarray `fused_indices` of integers denoting + index-positions of elements within a 1d array, `unfuse` + obtains the index-positions of the elements in the left and + right np.ndarrays `left`, `right` which, upon fusion, + are placed at the index-positions given by + `fused_indices` in the fused np.ndarray. + An example will help to illuminate this: + Given np.ndarrays `left`, `right` and the result + of their fusion (`fused`): + + ``` + left = [0,1,0,2] + right = [-1,3,-2] + fused = fuse_charges([left, right], flows=[1,1]) + print(fused) #[-1 3 -2 0 4 -1 -1 3 -2 1 5 0] + ``` + + we want to find which elements in `left` and `right` + fuse to a value of 0. 
In the above case, there are two + 0 in `fused`: one is obtained from fusing `left[1]` and + `right[0]`, the second one from fusing `left[3]` and `right[2]` + `unfuse` returns the index-positions of these values within + `left` and `right`, that is + + ``` + left_index_values, right_index_values = unfuse(np.nonzero(fused==0)[0], len(left), len(right)) + print(left_index_values) # [1,3] + print(right_index_values) # [0,2] + ``` + + Args: + fused_indices: A 1d np.ndarray of integers. + len_left: The length of the left np.ndarray. + len_right: The length of the right np.ndarray. + Returns: + (np.ndarry, np.ndarray) + """ + right = np.mod(fused_indices, len_right) + left = np.floor_divide(fused_indices - right, len_right) + return left, right + + def _check_flows(flows: List[int]) -> None: if (set(flows) != {1}) and (set(flows) != {-1}) and (set(flows) != {-1, 1}): raise ValueError( @@ -77,8 +122,9 @@ def compute_fused_charge_degeneracies( # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". 
- accumulated_charges, accumulated_degeneracies = ( - flows[0] * charges[0]).unique(return_counts=True) + accumulated_charges, accumulated_degeneracies = (flows[0] * + charges[0]).unique( + return_counts=True) for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor @@ -192,8 +238,7 @@ def find_diagonal_sparse_blocks(data: np.ndarray, column_charges, column_flows) #convenience container for storing the degeneracies of each #column charge - #column_degeneracies = dict(zip(unique_column_charges, column_dims)) - + column_degeneracies = dict(zip(unique_column_charges, column_dims)) if len(row_charges) > 1: left_row_charges, right_row_charges, _ = _find_best_partition( row_charges, row_flows) @@ -204,25 +249,28 @@ def find_diagonal_sparse_blocks(data: np.ndarray, #get the charges common to rows and columns (only those matter) concatenated = unique_row_charges.concatenate((-1) * unique_column_charges) tmp_unique, counts = concatenated.unique(return_counts=True) - common_charges = tmp_unique[counts == 2] - return common_charges + common_charges = tmp_unique[counts == 2] #common_charges is an np.ndarray! 
+ row_locations = find_sparse_positions( left_charges=left_row_charges, left_flow=1, right_charges=right_row_charges, right_flow=1, target_charges=common_charges) + return elif len(row_charges) == 1: fused_row_charges = fuse_charges(row_charges, row_flows) #get the unique row-charges - unique_row_charges, row_dims = np.unique( - fused_row_charges, return_counts=True) + unique_row_charges, row_dims = fused_row_charges.unique(return_counts=True) #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - relevant_fused_row_charges = fused_row_charges[np.isin( - fused_row_charges, common_charges)] + #get the charges common to rows and columns (only those matter) + concatenated = unique_row_charges.concatenate((-1) * unique_column_charges) + tmp_unique, counts = concatenated.unique(return_counts=True) + common_charges = tmp_unique[counts == 2] #common_charges is an np.ndarray! + + relevant_fused_row_charges = fused_row_charges[fused_row_charges.isin( + common_charges)] row_locations = {} for c in common_charges: row_locations[c] = np.nonzero(relevant_fused_row_charges == c)[0] @@ -267,403 +315,6 @@ def find_diagonal_sparse_blocks(data: np.ndarray, return blocks -def find_diagonal_sparse_blocks_depreacated_1( - data: np.ndarray, - row_charges: List[Union[List, np.ndarray]], - column_charges: List[Union[List, np.ndarray]], - row_flows: List[Union[bool, int]], - column_flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: - """ - Deprecated - - This version is slow for matrices with shape[0] >> shape[1], but fast otherwise. - - Given the meta data and underlying data of a symmetric matrix, compute - all diagonal blocks and return them in a dict. - `row_charges` and `column_charges` are lists of np.ndarray. The tensor - is viewed as a matrix with rows given by fusing `row_charges` and - columns given by fusing `column_charges`. 
Note that `column_charges` - are never explicitly fused (`row_charges` are). - Args: - data: An np.ndarray of the data. The number of elements in `data` - has to match the number of non-zero elements defined by `charges` - and `flows` - row_charges: List of np.ndarray, one for each leg of the row-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - column_charges: List of np.ndarray, one for each leg of the column-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - row_flows: A list of integers, one for each entry in `row_charges`. - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - column_flows: A list of integers, one for each entry in `column_charges`. - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the sparse locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. - - Returns: - dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray - or a python list of locations and shapes, depending on the value of `return_data`. 
- """ - flows = row_flows.copy() - flows.extend(column_flows) - _check_flows(flows) - if len(flows) != (len(row_charges) + len(column_charges)): - raise ValueError( - "`len(flows)` is different from `len(row_charges) + len(column_charges)`" - ) - - #since we are using row-major we have to fuse the row charges anyway. - fused_row_charges = fuse_charges(row_charges, row_flows) - #get the unique row-charges - unique_row_charges, row_dims = np.unique( - fused_row_charges, return_counts=True) - - #get the unique column-charges - #we only care about their degeneracies, not their order; that's much faster - #to compute since we don't have to fuse all charges explicitly - unique_column_charges, column_dims = compute_fused_charge_degeneracies( - column_charges, column_flows) - #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - - #convenience container for storing the degeneracies of each - #row and column charge - row_degeneracies = dict(zip(unique_row_charges, row_dims)) - column_degeneracies = dict(zip(unique_column_charges, column_dims)) - - # we only care about charges common to row and columns - mask = np.isin(fused_row_charges, common_charges) - relevant_row_charges = fused_row_charges[mask] - #some numpy magic to get the index locations of the blocks - #we generate a vector of `len(relevant_row_charges) which, - #for each charge `c` in `relevant_row_charges` holds the - #column-degeneracy of charge `c` - degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64) - #for each charge `c` in `common_charges` we generate a boolean mask - #for indexing the positions where `relevant_column_charges` has a value of `c`. 
- masks = {} - for c in common_charges: - mask = relevant_row_charges == c - masks[c] = mask - degeneracy_vector[mask] = column_degeneracies[-c] - - # the result of the cumulative sum is a vector containing - # the stop positions of the non-zero values of each row - # within the data vector. - # E.g. for `relevant_row_charges` = [0,1,0,0,3], and - # column_degeneracies[0] = 10 - # column_degeneracies[1] = 20 - # column_degeneracies[3] = 30 - # we have - # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] - # The starting positions of consecutive elements (in row-major order) in - # each row with charge `c=0` within the data vector are then simply obtained using - # masks[0] = [True, False, True, True, False] - # and `stop_positions[masks[0]] - column_degeneracies[0]` - stop_positions = np.cumsum(degeneracy_vector) - start_positions = stop_positions - degeneracy_vector - blocks = {} - - for c in common_charges: - #numpy broadcasting is substantially faster than kron! - a = np.expand_dims(start_positions[masks[c]], 1) - b = np.expand_dims(np.arange(column_degeneracies[-c]), 0) - if not return_data: - blocks[c] = [ - np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), - (row_degeneracies[c], column_degeneracies[-c]) - ] - else: - blocks[c] = np.reshape( - data[np.reshape(a + b, - row_degeneracies[c] * column_degeneracies[-c])], - (row_degeneracies[c], column_degeneracies[-c])) - return blocks - - -def find_diagonal_sparse_blocks_deprecated_0( - data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: - """ - Deprecated: this version is about 2 times slower (worst case) than the current used - implementation - Given the meta data and underlying data of a symmetric matrix, compute - all diagonal blocks and return them in a dict. - Args: - data: An np.ndarray of the data. 
The number of elements in `data` - has to match the number of non-zero elements defined by `charges` - and `flows` - charges: List of np.ndarray, one for each leg. - Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. - - Returns: - dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray - or a python list of locations and shapes, depending on the value of `return_data`. 
- """ - if len(charges) != 2: - raise ValueError("input has to be a two-dimensional symmetric matrix") - _check_flows(flows) - if len(flows) != len(charges): - raise ValueError("`len(flows)` is different from `len(charges)`") - - #we multiply the flows into the charges - row_charges = flows[0] * charges[0] # a list of charges on each row - column_charges = flows[1] * charges[1] # a list of charges on each column - - #get the unique charges - unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) - unique_column_charges, column_dims = np.unique( - column_charges, return_counts=True) - #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - - #convenience container for storing the degeneracies of each - #row and column charge - row_degeneracies = dict(zip(unique_row_charges, row_dims)) - column_degeneracies = dict(zip(unique_column_charges, column_dims)) - - # we only care about charges common to row and columns - mask = np.isin(row_charges, common_charges) - relevant_row_charges = row_charges[mask] - - #some numpy magic to get the index locations of the blocks - #we generate a vector of `len(relevant_row_charges) which, - #for each charge `c` in `relevant_row_charges` holds the - #column-degeneracy of charge `c` - degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64) - #for each charge `c` in `common_charges` we generate a boolean mask - #for indexing the positions where `relevant_column_charges` has a value of `c`. - masks = {} - for c in common_charges: - mask = relevant_row_charges == c - masks[c] = mask - degeneracy_vector[mask] = column_degeneracies[-c] - - # the result of the cumulative sum is a vector containing - # the stop positions of the non-zero values of each row - # within the data vector. - # E.g. 
for `relevant_row_charges` = [0,1,0,0,3], and - # column_degeneracies[0] = 10 - # column_degeneracies[1] = 20 - # column_degeneracies[3] = 30 - # we have - # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] - # The starting positions of consecutive elements (in row-major order) in - # each row with charge `c=0` within the data vector are then simply obtained using - # masks[0] = [True, False, True, True, False] - # and `stop_positions[masks[0]] - column_degeneracies[0]` - stop_positions = np.cumsum(degeneracy_vector) - blocks = {} - - for c in common_charges: - #numpy broadcasting is substantially faster than kron! - a = np.expand_dims(stop_positions[masks[c]] - column_degeneracies[-c], 0) - b = np.expand_dims(np.arange(column_degeneracies[-c]), 1) - if not return_data: - blocks[c] = [ - np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), - (row_degeneracies[c], column_degeneracies[-c]) - ] - else: - blocks[c] = np.reshape( - data[np.reshape(a + b, - row_degeneracies[c] * column_degeneracies[-c])], - (row_degeneracies[c], column_degeneracies[-c])) - return blocks - - -def find_diagonal_sparse_blocks_column_major( - data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: - """ - Deprecated - - Given the meta data and underlying data of a symmetric matrix, compute - all diagonal blocks and return them in a dict, assuming column-major - ordering. - Args: - data: An np.ndarray of the data. The number of elements in `data` - has to match the number of non-zero elements defined by `charges` - and `flows` - charges: List of np.ndarray, one for each leg. - Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. 
- return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. - - Returns: - dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray - or a python list of locations and shapes, depending on the value of `return_data`. - """ - if len(charges) != 2: - raise ValueError("input has to be a two-dimensional symmetric matrix") - _check_flows(flows) - if len(flows) != len(charges): - raise ValueError("`len(flows)` is different from `len(charges)`") - - #we multiply the flows into the charges - row_charges = flows[0] * charges[0] # a list of charges on each row - column_charges = flows[1] * charges[1] # a list of charges on each column - - #get the unique charges - unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) - unique_column_charges, column_dims = np.unique( - column_charges, return_counts=True) - #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - - #convenience container for storing the degeneracies of each - #row and column charge - row_degeneracies = dict(zip(unique_row_charges, row_dims)) - column_degeneracies = dict(zip(unique_column_charges, column_dims)) - - # we only care about charges common to row and columns - mask = np.isin(column_charges, -common_charges) - relevant_column_charges = column_charges[mask] - - #some numpy magic to get the index locations of the blocks - #we generate a vector of `len(relevant_column_charges) which, - #for each charge `c` in 
`relevant_column_charges` holds the - #row-degeneracy of charge `c` - degeneracy_vector = np.empty(len(relevant_column_charges), dtype=np.int64) - #for each charge `c` in `common_charges` we generate a boolean mask - #for indexing the positions where `relevant_column_charges` has a value of `c`. - masks = {} - for c in common_charges: - mask = relevant_column_charges == -c - masks[c] = mask - degeneracy_vector[mask] = row_degeneracies[c] - - # the result of the cumulative sum is a vector containing - # the stop positions of the non-zero values of each column - # within the data vector. - # E.g. for `relevant_column_charges` = [0,1,0,0,3], and - # row_degeneracies[0] = 10 - # row_degeneracies[1] = 20 - # row_degeneracies[3] = 30 - # we have - # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] - # The starting positions of consecutive elements (in column-major order) in - # each column with charge `c=0` within the data vector are then simply obtained using - # masks[0] = [True, False, True, True, False] - # and `stop_positions[masks[0]] - row_degeneracies[0]` - stop_positions = np.cumsum(degeneracy_vector) - blocks = {} - - for c in common_charges: - #numpy broadcasting is substantially faster than kron! - a = np.expand_dims(stop_positions[masks[c]] - row_degeneracies[c], 0) - b = np.expand_dims(np.arange(row_degeneracies[c]), 1) - if not return_data: - blocks[c] = [ - np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), - (row_degeneracies[c], column_degeneracies[-c]) - ] - else: - blocks[c] = np.reshape( - data[np.reshape(a + b, - row_degeneracies[c] * column_degeneracies[-c])], - (row_degeneracies[c], column_degeneracies[-c])) - return blocks - - -def find_dense_positions_deprecated(left_charges: np.ndarray, left_flow: int, - right_charges: np.ndarray, right_flow: int, - target_charge: int) -> Dict: - """ - Find the dense locations of elements (i.e. 
the index-values within the DENSE tensor) - in the vector `fused_charges` (resulting from fusing np.ndarrays - `left_charges` and `right_charges`) that have a value of `target_charge`. - For example, given - ``` - left_charges = [-2,0,1,0,0] - right_charges = [-1,0,2,1] - target_charge = 0 - fused_charges = fuse_charges([left_charges, right_charges],[1,1]) - print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] - ``` - we want to find the all different blocks - that fuse to `target_charge=0`, i.e. where `fused_charges==0`, - together with their corresponding index-values of the data in the dense array. - `find_dense_blocks` returns a dict mapping tuples `(left_charge, right_charge)` - to an array of integers. - For the above example, we get: - * for `left_charge` = -2 and `right_charge` = 2 we get an array [2]. Thus, `fused_charges[2]` - was obtained from fusing -2 and 2. - * for `left_charge` = 0 and `right_charge` = 0 we get an array [5, 13, 17]. Thus, - `fused_charges[5,13,17]` were obtained from fusing 0 and 0. - * for `left_charge` = 1 and `right_charge` = -1 we get an array [8]. Thus, `fused_charges[8]` - was obtained from fusing 1 and -1. - Args: - left_charges: An np.ndarray of integer charges. - left_flow: The flow direction of the left charges. - right_charges: An np.ndarray of integer charges. - right_flow: The flow direction of the right charges. - target_charge: The target charge. - Returns: - dict: Mapping tuples of integers to np.ndarray of integers. 
- """ - _check_flows([left_flow, right_flow]) - unique_left = np.unique(left_charges) - unique_right = np.unique(right_charges) - fused = fuse_charges([unique_left, unique_right], [left_flow, right_flow]) - left_inds, right_inds = unfuse( - np.nonzero(fused == target_charge)[0], len(unique_left), - len(unique_right)) - left_c = unique_left[left_inds] - right_c = unique_right[right_inds] - len_right_charges = len(right_charges) - linear_positions = {} - for left_charge, right_charge in zip(left_c, right_c): - left_positions = np.nonzero(left_charges == left_charge)[0] - left_offsets = np.expand_dims(left_positions * len_right_charges, 1) - right_offsets = np.expand_dims( - np.nonzero(right_charges == right_charge)[0], 0) - linear_positions[(left_charge, right_charge)] = np.reshape( - left_offsets + right_offsets, - left_offsets.shape[0] * right_offsets.shape[1]) - return np.sort(np.concatenate(list(linear_positions.values()))) - - def find_dense_positions(left_charges: np.ndarray, left_flow: int, right_charges: np.ndarray, right_flow: int, target_charge: int) -> Dict: @@ -718,8 +369,9 @@ def find_dense_positions(left_charges: np.ndarray, left_flow: int, indices = [] for n in range(len(left_charges)): c = left_charges[n] - indices.append(n * len_right_charges + right_locations[ - (target_charge - left_flow * c) * right_flow]) + indices.append(n * len_right_charges + + right_locations[(target_charge - left_flow * c) * + right_flow]) return np.concatenate(indices) @@ -778,7 +430,7 @@ def find_sparse_positions( len(unique_right)) left_inds.append(li) right_inds.append(ri) - + return #now compute the relevant unique left and right charges unique_left_charges = unique_left[np.unique(np.concatenate(left_inds))] unique_right_charges = unique_right[np.unique(np.concatenate(right_inds))] @@ -807,8 +459,9 @@ def find_sparse_positions( for target_charge in target_charges: right_indices[(left_charge, target_charge)] = np.nonzero( - tmp_relevant_right_charges == - (target_charge 
- left_flow * left_charge) * right_flow)[0] + tmp_relevant_right_charges == (target_charge - + left_flow * left_charge) * + right_flow)[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy @@ -1195,9 +848,9 @@ def transpose(self, linear_positions = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=0) - self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( - len(self.data)), (linear_positions, - np.zeros(len(self.data), dtype=np.int64)))) + self.dense_to_sparse_table = sp.sparse.csr_matrix( + (np.arange(len(self.data)), + (linear_positions, np.zeros(len(self.data), dtype=np.int64)))) flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] @@ -1222,8 +875,8 @@ def transpose(self, self.data = self.data[inds] return inds - def transpose_intersect1d( - self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": + def transpose_intersect1d(self, order: Union[List[int], np.ndarray] + ) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order` Args: pp @@ -1314,8 +967,8 @@ def transpose_intersect1d( # tr_dense_linear_positions[tr_linear_positions]) # self.data = self.data[inds] - def transpose_searchsorted( - self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": + def transpose_searchsorted(self, order: Union[List[int], np.ndarray] + ) -> "BlockSparseTensor": """ Deprecated: @@ -1495,8 +1148,8 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_1( - self, return_data: Optional[bool] = True) -> Dict: + def get_diagonal_blocks_deprecated_1(self, return_data: Optional[bool] = True + ) -> Dict: """ Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. 
@@ -1531,8 +1184,8 @@ def get_diagonal_blocks_deprecated_1( column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_0( - self, return_data: Optional[bool] = True) -> Dict: + def get_diagonal_blocks_deprecated_0(self, return_data: Optional[bool] = True + ) -> Dict: """ Deprecated diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 7728eb9cc..87a82478a 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -41,43 +41,48 @@ class BaseCharge: """ def __init__(self, - charges: Union[List[np.ndarray], np.ndarray], + charges: Optional[Union[List[np.ndarray], np.ndarray]] = None, shifts: Optional[Union[List[int], np.ndarray]] = None) -> None: - if isinstance(charges, np.ndarray): - charges = [charges] - self._itemsizes = [c.dtype.itemsize for c in charges] - if np.sum(self._itemsizes) > 8: - raise TypeError("number of bits required to store all charges " - "in a single int is larger than 64") - - if len(charges) > 1: - if shifts is not None: - raise ValueError("If `shifts` is passed, only a single charge array " - "can be passed. 
Got len(charges) = {}".format( - len(charges))) - if shifts is None: - dtype = np.int8 - if np.sum(self._itemsizes) > 1: - dtype = np.int16 - if np.sum(self._itemsizes) > 2: - dtype = np.int32 - if np.sum(self._itemsizes) > 4: - dtype = np.int64 - #multiply by eight to get number of bits - self.shifts = 8 * np.flip( - np.append(0, np.cumsum(np.flip(self._itemsizes[1::])))).astype(dtype) - dtype_charges = [c.astype(dtype) for c in charges] - self.charges = np.sum([ - np.left_shift(dtype_charges[n], self.shifts[n]) - for n in range(len(dtype_charges)) - ], - axis=0).astype(dtype) + if charges is not None: + if isinstance(charges, np.ndarray): + charges = [charges] + self._itemsizes = [c.dtype.itemsize for c in charges] + if np.sum(self._itemsizes) > 8: + raise TypeError("number of bits required to store all charges " + "in a single int is larger than 64") + + if len(charges) > 1: + if shifts is not None: + raise ValueError("If `shifts` is passed, only a single charge array " + "can be passed. 
Got len(charges) = {}".format( + len(charges))) + if shifts is None: + dtype = np.int8 + if np.sum(self._itemsizes) > 1: + dtype = np.int16 + if np.sum(self._itemsizes) > 2: + dtype = np.int32 + if np.sum(self._itemsizes) > 4: + dtype = np.int64 + #multiply by eight to get number of bits + self.shifts = 8 * np.flip( + np.append(0, np.cumsum(np.flip( + self._itemsizes[1::])))).astype(dtype) + dtype_charges = [c.astype(dtype) for c in charges] + self.charges = np.sum([ + np.left_shift(dtype_charges[n], self.shifts[n]) + for n in range(len(dtype_charges)) + ], + axis=0).astype(dtype) + else: + if np.max(shifts) >= charges[0].dtype.itemsize * 8: + raise TypeError("shifts {} are incompatible with dtype {}".format( + shifts, charges[0].dtype)) + self.shifts = np.asarray(shifts) + self.charges = charges[0] else: - if np.max(shifts) >= charges[0].dtype.itemsize * 8: - raise TypeError("shifts {} are incompatible with dtype {}".format( - shifts, charges[0].dtype)) - self.shifts = np.asarray(shifts) - self.charges = charges[0] + self.charges = np.asarray([]) + self.shifts = np.asarray([]) def __add__(self, other: "BaseCharge") -> "BaseCharge": raise NotImplementedError("`__add__` is not implemented for `BaseCharge`") @@ -89,16 +94,17 @@ def __matmul__(self, other: "BaseCharge") -> "Charge": raise NotImplementedError( "`__matmul__` is not implemented for `BaseCharge`") - def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": + def get_charges(self, n: Union[np.ndarray, int]) -> "BaseCharge": return self.charges[n] - # def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": - # charges = self.charges[n] - # if isinstance(n, (np.integer, int)): - # charges = np.asarray([charges]) - # obj = self.__new__(type(self)) - # obj.__init__(charges=[charges], shifts=self.shifts) - # return obj + def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": + + if isinstance(n, (np.integer, int)): + n = np.asarray([n]) + charges = self.charges[n] + obj = 
self.__new__(type(self)) + obj.__init__(charges=[charges], shifts=self.shifts) + return obj @property def num_symmetries(self): @@ -500,14 +506,6 @@ def __init__(self, if not isinstance(charges, list): raise TypeError("only list allowed for argument `charges` " "in BaseCharge.__init__(charges)") - if not np.all([len(c) == len(charges[0]) for c in charges]): - raise ValueError("not all charges have the same length. " - "Got lengths = {}".format([len(c) for c in charges])) - for n in range(len(charges)): - if not isinstance(charges[n], BaseCharge): - raise TypeError("`ChargeCollection` can only be initialized " - "with a list of `BaseCharge`. Found {} instead".format( - [type(charges[n]) for n in range(len(charges))])) if (shifts is not None) and (stacked_charges is None): raise ValueError( "Found `shifts == None` and `stacked_charges != None`." @@ -518,6 +516,15 @@ def __init__(self, "`shifts` and `stacked_charges` can only be passed together.") self.charges = [] if stacked_charges is None: + if not np.all([len(c) == len(charges[0]) for c in charges]): + raise ValueError("not all charges have the same length. " + "Got lengths = {}".format([len(c) for c in charges])) + for n in range(len(charges)): + if not isinstance(charges[n], BaseCharge): + raise TypeError( + "`ChargeCollection` can only be initialized " + "with a list of `BaseCharge`. 
Found {} instead".format( + [type(charges[n]) for n in range(len(charges))])) self._stacked_charges = np.stack([c.charges for c in charges], axis=1) for n in range(len(charges)): @@ -535,24 +542,24 @@ def __init__(self, "have to be the same.") for n in range(len(charges)): charge = charges[n].__new__(type(charges[n])) - charge.__init__(self._stacked_charges[:, n], shifts=shifts[n]) + charge.__init__(stacked_charges[:, n], shifts=shifts[n]) self.charges.append(charge) + self._stacked_charges = stacked_charges @classmethod - def from_stacked_charges(cls, charge_types: Type, shifts: List[np.ndarray], - stacked_charges: np.ndarray): + def from_charge_types(cls, charge_types: Type, shifts: List[np.ndarray], + stacked_charges: np.ndarray): if len(charge_types) != stacked_charges.shape[1]: raise ValueError("`len(charge_types) and shape[1] of `stacked_charges` " "have to be the same.") if len(charge_types) != len(shifts): raise ValueError( "`len(charge_types) and `len(shifts)` have to be the same.") - charges = [] - for n in range(len(charge_types)): - charge = charge_types[n].__new__(charge_types[n]) - charge.__init__(charges=stacked_charges[:, n], shifts=shifts[n]) - charges.append(charge) - return cls(charges=charges, stacked_charges=stacked_charges) + charges = [ + charge_types[n].__new__(charge_types[n]) + for n in range(len(charge_types)) + ] + return cls(charges=charges, stacked_charges=stacked_charges, shifts=shifts) @property def num_charges(self) -> int: @@ -565,17 +572,22 @@ def get_charges(self, n: Union[np.ndarray, int]) -> BaseCharge: """ Returns an np.ndarray `BaseCharges.charges[n]. 
""" + if isinstance(n, (np.integer, int)): + n = np.asarray([n]) + return self._stacked_charges[n, :] - def __getitem__(self, n: Union[np.ndarray, int]) -> BaseCharge: + def __getitem__(self, n: Union[np.ndarray, int]) -> "ChargeCollection": - if isinstance(n, np.integer, int): + if isinstance(n, (np.integer, int)): n = np.asarray([n]) array = self._stacked_charges[n, :] - return self.from_stacked_charges( - charge_types=[type(c) for c in self.charges]) + return self.from_charge_types( + charge_types=[type(c) for c in self.charges], + shifts=[c.shifts for c in self.charges], + stacked_charges=array) # if self.num_charges == 1: # array = np.expand_dims(array, 0) @@ -661,6 +673,12 @@ def __rmul__(self, number: Union[bool, int]) -> "Charge": return self.__mul__(number) + def isin(self, targets: Iterable): + return np.logical_and.reduce([ + np.isin(self._stacked_charges[:, n], targets[n]) + for n in range(len(targets)) + ]) + def unique( self, return_index=False, @@ -731,7 +749,6 @@ def __eq__(self, target_charges: Iterable): raise ValueError( "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" .format(len(target_charges), len(self.charges))) - print(self._stacked_charges.shape) return np.logical_and.reduce( self._stacked_charges == target_charges, axis=1) @@ -807,3 +824,48 @@ def fuse_degeneracies(degen1: Union[List, np.ndarray], """ return np.reshape(degen1[:, None] * degen2[None, :], len(degen1) * len(degen2)) + + +def unfuse(fused_indices: np.ndarray, len_left: int, + len_right: int) -> Tuple[np.ndarray, np.ndarray]: + """ + Given an np.ndarray `fused_indices` of integers denoting + index-positions of elements within a 1d array, `unfuse` + obtains the index-positions of the elements in the left and + right np.ndarrays `left`, `right` which, upon fusion, + are placed at the index-positions given by + `fused_indices` in the fused np.ndarray. 
+ An example will help to illuminate this: + Given np.ndarrays `left`, `right` and the result + of their fusion (`fused`): + + ``` + left = [0,1,0,2] + right = [-1,3,-2] + fused = fuse_charges([left, right], flows=[1,1]) + print(fused) #[-1 3 -2 0 4 -1 -1 3 -2 1 5 0] + ``` + + we want to find which elements in `left` and `right` + fuse to a value of 0. In the above case, there are two + 0 in `fused`: one is obtained from fusing `left[1]` and + `right[0]`, the second one from fusing `left[3]` and `right[2]` + `unfuse` returns the index-positions of these values within + `left` and `right`, that is + + ``` + left_index_values, right_index_values = unfuse(np.nonzero(fused==0)[0], len(left), len(right)) + print(left_index_values) # [1,3] + print(right_index_values) # [0,2] + ``` + + Args: + fused_indices: A 1d np.ndarray of integers. + len_left: The length of the left np.ndarray. + len_right: The length of the right np.ndarray. + Returns: + (np.ndarry, np.ndarray) + """ + right = np.mod(fused_indices, len_right) + left = np.floor_divide(fused_indices - right, len_right) + return left, right diff --git a/tensornetwork/block_tensor/charge_test.py b/tensornetwork/block_tensor/charge_test.py index df0266db8..d2590a45f 100644 --- a/tensornetwork/block_tensor/charge_test.py +++ b/tensornetwork/block_tensor/charge_test.py @@ -68,11 +68,13 @@ def test_BaseCharge_raises(): for _ in range(2) ]) with pytest.raises(ValueError): - q1 = U1Charge([ + q1 = BaseCharge([ np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) for _ in range(2) ], - shifts=[16, 0]) + shifts=[16, 0]) + with pytest.raises(TypeError): + BaseCharge(np.random.randint(0, 4, 10).astype(np.int16), shifts=[16, 0]) def test_U1Charge_fusion(): @@ -394,6 +396,13 @@ def test_Z2Charge_matmul(): assert np.all(Q.shifts == Q_.shifts) +def test_ChargeCollection_init_from_stacked(): + c = ChargeCollection( + [BaseCharge(None, None), BaseCharge(None, None)], + shifts=[[0], [0]], + stacked_charges=np.random.randint(0, 10, 
(10, 2))) + + def test_Charge_U1_add(): q1 = ChargeCollection( [U1Charge([np.asarray([0, 1])]), From 1c8455f79cc12bff87fadf069ecf49ab6d5e0a62 Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 6 Jan 2020 12:40:29 -0500 Subject: [PATCH 126/212] back from lunch --- .../block_tensor/block_tensor_new.py | 50 +++++++++--------- tensornetwork/block_tensor/charge.py | 51 ++----------------- 2 files changed, 26 insertions(+), 75 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py index 70a56cdf6..3502435f4 100644 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -122,9 +122,8 @@ def compute_fused_charge_degeneracies( # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". - accumulated_charges, accumulated_degeneracies = (flows[0] * - charges[0]).unique( - return_counts=True) + accumulated_charges, accumulated_degeneracies = ( + flows[0] * charges[0]).unique(return_counts=True) for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor @@ -257,7 +256,7 @@ def find_diagonal_sparse_blocks(data: np.ndarray, right_charges=right_row_charges, right_flow=1, target_charges=common_charges) - return + elif len(row_charges) == 1: fused_row_charges = fuse_charges(row_charges, row_flows) @@ -369,16 +368,15 @@ def find_dense_positions(left_charges: np.ndarray, left_flow: int, indices = [] for n in range(len(left_charges)): c = left_charges[n] - indices.append(n * len_right_charges + - right_locations[(target_charge - left_flow * c) * - right_flow]) + indices.append(n * len_right_charges + right_locations[ + (target_charge - left_flow * c) * right_flow]) return np.concatenate(indices) def find_sparse_positions( - left_charges: List[Union[BaseCharge, ChargeCollection]], left_flow: int, - right_charges: List[Union[BaseCharge, ChargeCollection]], 
right_flow: int, - target_charges: List[Union[BaseCharge, ChargeCollection]]) -> Dict: + left_charges: Union[BaseCharge, ChargeCollection], left_flow: int, + right_charges: Union[BaseCharge, ChargeCollection], right_flow: int, + target_charges: Union[BaseCharge, ChargeCollection]) -> Dict: """ Find the sparse locations of elements (i.e. the index-values within the SPARSE tensor) in the vector `fused_charges` (resulting from @@ -430,14 +428,13 @@ def find_sparse_positions( len(unique_right)) left_inds.append(li) right_inds.append(ri) - return + #now compute the relevant unique left and right charges unique_left_charges = unique_left[np.unique(np.concatenate(left_inds))] unique_right_charges = unique_right[np.unique(np.concatenate(right_inds))] #only keep those charges that are relevant - relevant_left_charges = left_charges[np.isin(left_charges, - unique_left_charges)] + relevant_left_charges = left_charges[left_charges.isin(unique_left_charges)] relevant_right_charges = right_charges[np.isin(right_charges, unique_right_charges)] @@ -459,9 +456,8 @@ def find_sparse_positions( for target_charge in target_charges: right_indices[(left_charge, target_charge)] = np.nonzero( - tmp_relevant_right_charges == (target_charge - - left_flow * left_charge) * - right_flow)[0] + tmp_relevant_right_charges == + (target_charge - left_flow * left_charge) * right_flow)[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy @@ -848,9 +844,9 @@ def transpose(self, linear_positions = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=0) - self.dense_to_sparse_table = sp.sparse.csr_matrix( - (np.arange(len(self.data)), - (linear_positions, np.zeros(len(self.data), dtype=np.int64)))) + self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( + len(self.data)), (linear_positions, + np.zeros(len(self.data), dtype=np.int64)))) flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] @@ -875,8 
+871,8 @@ def transpose(self, self.data = self.data[inds] return inds - def transpose_intersect1d(self, order: Union[List[int], np.ndarray] - ) -> "BlockSparseTensor": + def transpose_intersect1d( + self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order` Args: pp @@ -967,8 +963,8 @@ def transpose_intersect1d(self, order: Union[List[int], np.ndarray] # tr_dense_linear_positions[tr_linear_positions]) # self.data = self.data[inds] - def transpose_searchsorted(self, order: Union[List[int], np.ndarray] - ) -> "BlockSparseTensor": + def transpose_searchsorted( + self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": """ Deprecated: @@ -1148,8 +1144,8 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_1(self, return_data: Optional[bool] = True - ) -> Dict: + def get_diagonal_blocks_deprecated_1( + self, return_data: Optional[bool] = True) -> Dict: """ Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. 
@@ -1184,8 +1180,8 @@ def get_diagonal_blocks_deprecated_1(self, return_data: Optional[bool] = True column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_0(self, return_data: Optional[bool] = True - ) -> Dict: + def get_diagonal_blocks_deprecated_0( + self, return_data: Optional[bool] = True) -> Dict: """ Deprecated diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 87a82478a..c93504de2 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -782,9 +782,9 @@ def zero_charge(self): return obj -def fuse_charges(charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]] - ) -> Union[BaseCharge, ChargeCollection]: +def fuse_charges( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[bool, int]]) -> Union[BaseCharge, ChargeCollection]: """ Fuse all `charges` into a new charge. Charges are fused from "right to left", @@ -824,48 +824,3 @@ def fuse_degeneracies(degen1: Union[List, np.ndarray], """ return np.reshape(degen1[:, None] * degen2[None, :], len(degen1) * len(degen2)) - - -def unfuse(fused_indices: np.ndarray, len_left: int, - len_right: int) -> Tuple[np.ndarray, np.ndarray]: - """ - Given an np.ndarray `fused_indices` of integers denoting - index-positions of elements within a 1d array, `unfuse` - obtains the index-positions of the elements in the left and - right np.ndarrays `left`, `right` which, upon fusion, - are placed at the index-positions given by - `fused_indices` in the fused np.ndarray. - An example will help to illuminate this: - Given np.ndarrays `left`, `right` and the result - of their fusion (`fused`): - - ``` - left = [0,1,0,2] - right = [-1,3,-2] - fused = fuse_charges([left, right], flows=[1,1]) - print(fused) #[-1 3 -2 0 4 -1 -1 3 -2 1 5 0] - ``` - - we want to find which elements in `left` and `right` - fuse to a value of 0. 
In the above case, there are two - 0 in `fused`: one is obtained from fusing `left[1]` and - `right[0]`, the second one from fusing `left[3]` and `right[2]` - `unfuse` returns the index-positions of these values within - `left` and `right`, that is - - ``` - left_index_values, right_index_values = unfuse(np.nonzero(fused==0)[0], len(left), len(right)) - print(left_index_values) # [1,3] - print(right_index_values) # [0,2] - ``` - - Args: - fused_indices: A 1d np.ndarray of integers. - len_left: The length of the left np.ndarray. - len_right: The length of the right np.ndarray. - Returns: - (np.ndarry, np.ndarray) - """ - right = np.mod(fused_indices, len_right) - left = np.floor_divide(fused_indices - right, len_right) - return left, right From e43974ce3d3ea7b69f05ce54e8ecf3f7818875cf Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 6 Jan 2020 15:37:56 -0500 Subject: [PATCH 127/212] tests added --- .../block_tensor/block_tensor_new_test.py | 103 +++++++++++++++++- 1 file changed, 97 insertions(+), 6 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_new_test.py b/tensornetwork/block_tensor/block_tensor_new_test.py index d6bdb6bac..13cdff13d 100644 --- a/tensornetwork/block_tensor/block_tensor_new_test.py +++ b/tensornetwork/block_tensor/block_tensor_new_test.py @@ -2,15 +2,16 @@ import pytest from tensornetwork.block_tensor.charge import U1Charge, ChargeCollection -from tensornetwork.block_tensor.block_tensor_new import find_diagonal_sparse_blocks, compute_num_nonzero +from tensornetwork.block_tensor.block_tensor_new import find_diagonal_sparse_blocks, compute_num_nonzero, find_sparse_positions np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128] -def test_consistency(): - B = 5 +def test_test_num_nonzero_consistency(): + B = 4 D = 100 rank = 4 + qs = [[ np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) for _ in range(2) @@ -19,14 +20,104 @@ def test_consistency(): charges1 = [U1Charge(qs[n]) for n in range(rank)] 
charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] charges3 = [ - ChargeCollection([U1Charge([q for q in qs[n]])]) for n in range(rank) + ChargeCollection([U1Charge(qs[n][m]) + for m in range(2)]) + for n in range(rank) ] flows = [1, 1, 1, -1] n1 = compute_num_nonzero(charges1, flows) - n2 = compute_num_nonzero(charges2, flows) + n2 = compute_num_nonzero(charges3, flows) n3 = compute_num_nonzero(charges3, flows) assert n1 == n2 - assert n1 == n3 + + +def test_find_sparse_positions_consistency(): + B = 4 + D = 100 + rank = 4 + + qs = [[ + np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + for _ in range(2) + ] + for _ in range(rank)] + charges1 = [U1Charge(qs[n]) for n in range(rank)] + charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] + charges3 = [ + ChargeCollection([U1Charge(qs[n][m]) + for m in range(2)]) + for n in range(rank) + ] + + data1 = find_sparse_positions( + left_charges=charges1[0] + charges1[1], + left_flow=1, + right_charges=charges1[2] + charges1[3], + right_flow=1, + target_charges=charges1[0].zero_charge) + data2 = find_sparse_positions( + left_charges=charges2[0] + charges2[1], + left_flow=1, + right_charges=charges2[2] + charges2[3], + right_flow=1, + target_charges=charges2[0].zero_charge) + data3 = find_sparse_positions( + left_charges=charges3[0] + charges3[1], + left_flow=1, + right_charges=charges3[2] + charges3[3], + right_flow=1, + target_charges=charges3[0].zero_charge) + + nz1 = np.asarray(list(data1.values())[0]) + nz2 = np.asarray(list(data2.values())[0]) + nz3 = np.asarray(list(data3.values())[0]) + assert np.all(nz1 == nz2) + assert np.all(nz1 == nz3) + + +def test_find_diagonal_sparse_blocks_consistency(): + B = 4 + D = 100 + rank = 4 + + qs = [[ + np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + for _ in range(2) + ] + for _ in range(rank)] + charges1 = [U1Charge(qs[n]) for n in range(rank)] + charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] + charges3 = [ + 
ChargeCollection([U1Charge(qs[n][m]) + for m in range(2)]) + for n in range(rank) + ] + + data1 = find_diagonal_sparse_blocks( + data=[], + row_charges=[charges1[0], charges1[1]], + column_charges=[charges1[2], charges1[3]], + row_flows=[1, 1], + column_flows=[1, -1]) + + data2 = find_diagonal_sparse_blocks( + data=[], + row_charges=[charges2[0], charges2[1]], + column_charges=[charges2[2], charges2[3]], + row_flows=[1, 1], + column_flows=[1, -1]) + data3 = find_diagonal_sparse_blocks( + data=[], + row_charges=[charges3[0], charges3[1]], + column_charges=[charges3[2], charges3[3]], + row_flows=[1, 1], + column_flows=[1, -1]) + keys1 = np.sort(np.asarray(list(data1.keys()))) + keys2 = np.squeeze(np.sort(np.asarray(list(data2.keys())))) + keys3 = np.sort([np.left_shift(c[0], 16) + c[1] for c in data3.keys()]) + + assert np.all(keys1 == keys2) + assert np.all(keys1 == keys3) # @pytest.mark.parametrize("dtype", np_dtypes) From 6e5bd04052b78bb1f4313c4e3ad6b4b5f0b4e513 Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 6 Jan 2020 15:38:34 -0500 Subject: [PATCH 128/212] ported find_sparse_positions and find_diagonal_sparse_blocks to new charge interface --- .../block_tensor/block_tensor_new.py | 92 +++++++++++-------- 1 file changed, 52 insertions(+), 40 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py index 3502435f4..431f3e5be 100644 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -122,8 +122,9 @@ def compute_fused_charge_degeneracies( # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". 
- accumulated_charges, accumulated_degeneracies = ( - flows[0] * charges[0]).unique(return_counts=True) + accumulated_charges, accumulated_degeneracies = (flows[0] * + charges[0]).unique( + return_counts=True) for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor @@ -183,7 +184,7 @@ def find_diagonal_sparse_blocks(data: np.ndarray, column_charges: List[Union[List, np.ndarray]], row_flows: List[Union[bool, int]], column_flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: + return_data: Optional[bool] = False) -> Dict: """ Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. @@ -237,7 +238,8 @@ def find_diagonal_sparse_blocks(data: np.ndarray, column_charges, column_flows) #convenience container for storing the degeneracies of each #column charge - column_degeneracies = dict(zip(unique_column_charges, column_dims)) + #column_degeneracies = dict(zip(unique_column_charges, column_dims)) + column_degeneracies = dict(zip((-1) * unique_column_charges, column_dims)) if len(row_charges) > 1: left_row_charges, right_row_charges, _ = _find_best_partition( row_charges, row_flows) @@ -248,7 +250,8 @@ def find_diagonal_sparse_blocks(data: np.ndarray, #get the charges common to rows and columns (only those matter) concatenated = unique_row_charges.concatenate((-1) * unique_column_charges) tmp_unique, counts = concatenated.unique(return_counts=True) - common_charges = tmp_unique[counts == 2] #common_charges is an np.ndarray! 
+ common_charges = tmp_unique[ + counts == 2] #common_charges is a BaseCharge or ChargeCollection row_locations = find_sparse_positions( left_charges=left_row_charges, @@ -266,23 +269,25 @@ def find_diagonal_sparse_blocks(data: np.ndarray, #get the charges common to rows and columns (only those matter) concatenated = unique_row_charges.concatenate((-1) * unique_column_charges) tmp_unique, counts = concatenated.unique(return_counts=True) - common_charges = tmp_unique[counts == 2] #common_charges is an np.ndarray! + common_charges = tmp_unique[ + counts == 2] #common_charges is a BaseCharge or ChargeCollection relevant_fused_row_charges = fused_row_charges[fused_row_charges.isin( common_charges)] row_locations = {} for c in common_charges: + #c = common_charges.get_item(n) row_locations[c] = np.nonzero(relevant_fused_row_charges == c)[0] else: raise ValueError('Found an empty sequence for `row_charges`') - #some numpy magic to get the index locations of the blocks + degeneracy_vector = np.empty( np.sum([len(v) for v in row_locations.values()]), dtype=np.int64) #for each charge `c` in `common_charges` we generate a boolean mask #for indexing the positions where `relevant_column_charges` has a value of `c`. masks = {} for c in common_charges: - degeneracy_vector[row_locations[c]] = column_degeneracies[-c] + degeneracy_vector[row_locations[c]] = column_degeneracies[c] # the result of the cumulative sum is a vector containing # the stop positions of the non-zero values of each row @@ -304,13 +309,13 @@ def find_diagonal_sparse_blocks(data: np.ndarray, for c in common_charges: #numpy broadcasting is substantially faster than kron! 
a = np.expand_dims(start_positions[np.sort(row_locations[c])], 1) - b = np.expand_dims(np.arange(column_degeneracies[-c]), 0) - inds = np.reshape(a + b, len(row_locations[c]) * column_degeneracies[-c]) + b = np.expand_dims(np.arange(column_degeneracies[c]), 0) + inds = np.reshape(a + b, len(row_locations[c]) * column_degeneracies[c]) if not return_data: - blocks[c] = [inds, (len(row_locations[c]), column_degeneracies[-c])] + blocks[c] = [inds, (len(row_locations[c]), column_degeneracies[c])] else: blocks[c] = np.reshape(data[inds], - (len(row_locations[c]), column_degeneracies[-c])) + (len(row_locations[c]), column_degeneracies[c])) return blocks @@ -368,8 +373,9 @@ def find_dense_positions(left_charges: np.ndarray, left_flow: int, indices = [] for n in range(len(left_charges)): c = left_charges[n] - indices.append(n * len_right_charges + right_locations[ - (target_charge - left_flow * c) * right_flow]) + indices.append(n * len_right_charges + + right_locations[(target_charge - left_flow * c) * + right_flow]) return np.concatenate(indices) @@ -435,35 +441,41 @@ def find_sparse_positions( #only keep those charges that are relevant relevant_left_charges = left_charges[left_charges.isin(unique_left_charges)] - relevant_right_charges = right_charges[np.isin(right_charges, - unique_right_charges)] + relevant_right_charges = right_charges[right_charges.isin( + unique_right_charges)] - unique_right_charges, right_dims = np.unique( - relevant_right_charges, return_counts=True) + unique_right_charges, right_dims = relevant_right_charges.unique( + return_counts=True) right_degeneracies = dict(zip(unique_right_charges, right_dims)) #generate a degeneracy vector which for each value r in relevant_right_charges #holds the corresponding number of non-zero elements `relevant_right_charges` #that can add up to `target_charges`. 
degeneracy_vector = np.empty(len(relevant_left_charges), dtype=np.int64) right_indices = {} - for left_charge in unique_left_charges: - total_degeneracy = np.sum(right_dims[np.isin( - left_flow * left_charge + right_flow * unique_right_charges, - target_charges)]) - tmp_relevant_right_charges = relevant_right_charges[np.isin( - relevant_right_charges, - (target_charges - left_flow * left_charge) * right_flow)] - for target_charge in target_charges: - right_indices[(left_charge, target_charge)] = np.nonzero( - tmp_relevant_right_charges == - (target_charge - left_flow * left_charge) * right_flow)[0] + for n in range(len(unique_left_charges)): + left_charge = unique_left_charges[n] + total_charge = left_flow * left_charge + right_flow * unique_right_charges + total_degeneracy = np.sum(right_dims[total_charge.isin(target_charges)]) + tmp_relevant_right_charges = relevant_right_charges[ + relevant_right_charges.isin( + (target_charges + (-1) * left_flow * left_charge) * right_flow)] + + for n in range(len(target_charges)): + target_charge = target_charges[n] + right_indices[( + left_charge.get_item(0), target_charge.get_item(0))] = np.nonzero( + tmp_relevant_right_charges == (target_charge + + (-1) * left_flow * left_charge) * + right_flow)[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy stop_positions = np.cumsum(degeneracy_vector) start_positions = stop_positions - degeneracy_vector blocks = {t: [] for t in target_charges} + # iterator returns tuple of `int` for ChargeCollection objects + # and `int` for Ba seCharge objects (both hashable) for left_charge in unique_left_charges: a = np.expand_dims(start_positions[relevant_left_charges == left_charge], 0) for target_charge in target_charges: @@ -844,9 +856,9 @@ def transpose(self, linear_positions = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=0) - self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( - len(self.data)), (linear_positions, - 
np.zeros(len(self.data), dtype=np.int64)))) + self.dense_to_sparse_table = sp.sparse.csr_matrix( + (np.arange(len(self.data)), + (linear_positions, np.zeros(len(self.data), dtype=np.int64)))) flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] @@ -871,8 +883,8 @@ def transpose(self, self.data = self.data[inds] return inds - def transpose_intersect1d( - self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": + def transpose_intersect1d(self, order: Union[List[int], np.ndarray] + ) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order` Args: pp @@ -963,8 +975,8 @@ def transpose_intersect1d( # tr_dense_linear_positions[tr_linear_positions]) # self.data = self.data[inds] - def transpose_searchsorted( - self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": + def transpose_searchsorted(self, order: Union[List[int], np.ndarray] + ) -> "BlockSparseTensor": """ Deprecated: @@ -1144,8 +1156,8 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_1( - self, return_data: Optional[bool] = True) -> Dict: + def get_diagonal_blocks_deprecated_1(self, return_data: Optional[bool] = True + ) -> Dict: """ Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. 
@@ -1180,8 +1192,8 @@ def get_diagonal_blocks_deprecated_1( column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_0( - self, return_data: Optional[bool] = True) -> Dict: + def get_diagonal_blocks_deprecated_0(self, return_data: Optional[bool] = True + ) -> Dict: """ Deprecated From ab5c3869d3ae5d277173fbee8c6c7a919991cd1c Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 6 Jan 2020 16:48:03 -0500 Subject: [PATCH 129/212] broken commit --- tensornetwork/block_tensor/block_tensor.py | 4 +- .../block_tensor/block_tensor_new.py | 53 +++++++++++-------- tensornetwork/block_tensor/charge.py | 48 ++++++++++++----- 3 files changed, 69 insertions(+), 36 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 454d03a6d..d363d2662 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -779,7 +779,6 @@ def find_dense_positions(left_charges: np.ndarray, left_flow: int, common_charges = np.intersect1d( unique_left, (target_charge - right_flow * unique_right) * left_flow, assume_unique=True) - right_locations = {} for c in common_charges: right_locations[(target_charge - left_flow * c) * right_flow] = np.nonzero( @@ -787,11 +786,14 @@ def find_dense_positions(left_charges: np.ndarray, left_flow: int, len_right_charges = len(right_charges) indices = [] + data = [] for n in range(len(left_charges)): c = left_charges[n] indices.append(n * len_right_charges + right_locations[(target_charge - left_flow * c) * right_flow]) + data.append([c, (target_charge - left_flow * c) * right_flow]) + return indices return np.concatenate(indices) diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py index 431f3e5be..edf4902e7 100644 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -319,9 +319,10 @@ def 
find_diagonal_sparse_blocks(data: np.ndarray, return blocks -def find_dense_positions(left_charges: np.ndarray, left_flow: int, - right_charges: np.ndarray, right_flow: int, - target_charge: int) -> Dict: +def find_dense_positions( + left_charges: Union[BaseCharge, ChargeCollection], left_flow: int, + right_charges: Union[BaseCharge, ChargeCollection], right_flow: int, + target_charge: Union[BaseCharge, ChargeCollection]) -> Dict: """ Find the dense locations of elements (i.e. the index-values within the DENSE tensor) in the vector `fused_charges` (resulting from fusing np.ndarrays @@ -356,27 +357,34 @@ def find_dense_positions(left_charges: np.ndarray, left_flow: int, dict: Mapping tuples of integers to np.ndarray of integers. """ _check_flows([left_flow, right_flow]) - unique_left, left_degeneracies = np.unique(left_charges, return_counts=True) - unique_right, right_degeneracies = np.unique( - right_charges, return_counts=True) - - common_charges = np.intersect1d( - unique_left, (target_charge - right_flow * unique_right) * left_flow, - assume_unique=True) - + unique_left, left_degeneracies = left_charges.unique(return_counts=True) + unique_right, right_degeneracies = right_charges.unique(return_counts=True) + + tmp_charges = (target_charge - right_flow * unique_right) * left_flow + concatenated = unique_left.concatenate(tmp_charges) + tmp_unique, counts = concatenated.unique(return_counts=True) + common_charges = tmp_unique[ + counts == 2] #common_charges is a BaseCharge or ChargeCollection right_locations = {} - for c in common_charges: - right_locations[(target_charge - left_flow * c) * right_flow] = np.nonzero( - right_charges == (target_charge - left_flow * c) * right_flow)[0] + + for n in range(len(common_charges)): + c = common_charges[n] + right_charge = (target_charge - left_flow * c) * right_flow + right_locations[c.get_item(0)] = np.nonzero( + right_charges == right_charge)[0] len_right_charges = len(right_charges) indices = [] + data = [] for n in 
range(len(left_charges)): c = left_charges[n] + right_charge = (target_charge - left_flow * c) * right_flow + data.append([c.get_item(0), right_charge.get_item(0)]) indices.append(n * len_right_charges + - right_locations[(target_charge - left_flow * c) * - right_flow]) - return np.concatenate(indices) + right_locations[right_charge.get_item(0)]) + + return indices + #return np.concatenate(indices) def find_sparse_positions( @@ -459,15 +467,14 @@ def find_sparse_positions( total_degeneracy = np.sum(right_dims[total_charge.isin(target_charges)]) tmp_relevant_right_charges = relevant_right_charges[ relevant_right_charges.isin( - (target_charges + (-1) * left_flow * left_charge) * right_flow)] + (target_charges + ((-1) * left_flow) * left_charge) * right_flow)] for n in range(len(target_charges)): target_charge = target_charges[n] - right_indices[( - left_charge.get_item(0), target_charge.get_item(0))] = np.nonzero( - tmp_relevant_right_charges == (target_charge + - (-1) * left_flow * left_charge) * - right_flow)[0] + right_indices[(left_charge.get_item(0), + target_charge.get_item(0))] = np.nonzero( + tmp_relevant_right_charges == (target_charge + ( + (-1) * left_flow) * left_charge) * right_flow)[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index c93504de2..44b1bd731 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -94,9 +94,12 @@ def __matmul__(self, other: "BaseCharge") -> "Charge": raise NotImplementedError( "`__matmul__` is not implemented for `BaseCharge`") - def get_charges(self, n: Union[np.ndarray, int]) -> "BaseCharge": + def get_item(self, n: int) -> np.ndarray: return self.charges[n] + def get_item_ndarray(self, n: Union[np.ndarray, int]) -> np.ndarray: + return self.get_item(n) + def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": if isinstance(n, (np.integer, int)): @@ 
-172,7 +175,16 @@ def unique(self, out.__init__([result[0]], self.shifts) return tuple([out] + [result[n] for n in range(1, len(result))]) - def isin(self, targets: Union[int, Iterable]): + def isin(self, targets: Union[int, Iterable, "BaseCharge"]): + + if isinstance(targets, type(self)): + if not np.all(self.shifts == targets.shifts): + raise ValueError( + "Cannot compare charges with different shifts {} and {}".format( + self.shifts, targets.shifts)) + + targets = targets.charges + targets = np.asarray(targets) return np.isin(self.charges, targets) def equals(self, target_charges: Iterable) -> np.ndarray: @@ -564,17 +576,24 @@ def from_charge_types(cls, charge_types: Type, shifts: List[np.ndarray], @property def num_charges(self) -> int: """ - Return the number of different charges in `ChargeCollection` + Return the number of different charges in `ChargeCollection`. """ return self._stacked_charges.shape[1] - def get_charges(self, n: Union[np.ndarray, int]) -> BaseCharge: + def get_item(self, n: int) -> Tuple: """ - Returns an np.ndarray `BaseCharges.charges[n]. + Returns the `n-th` charge-tuple of ChargeCollection in a tuple. """ if isinstance(n, (np.integer, int)): n = np.asarray([n]) + return tuple(self._stacked_charges[n, :].flat) + def get_item_ndarray(self, n: Union[np.ndarray, int]) -> np.ndarray: + """ + Returns the `n-th` charge-tuples of ChargeCollection in an np.ndarray. 
+ """ + if isinstance(n, (np.integer, int)): + n = np.asarray([n]) return self._stacked_charges[n, :] def __getitem__(self, n: Union[np.ndarray, int]) -> "ChargeCollection": @@ -673,10 +692,15 @@ def __rmul__(self, number: Union[bool, int]) -> "Charge": return self.__mul__(number) - def isin(self, targets: Iterable): - return np.logical_and.reduce([ - np.isin(self._stacked_charges[:, n], targets[n]) - for n in range(len(targets)) + def isin(self, targets: Union[Iterable, "ChargeCollection"]): + if isinstance(targets, type(self)): + _targets = [t for t in targets] + return np.logical_or.reduce([ + np.logical_and.reduce([ + np.isin(self._stacked_charges[:, n], _targets[m][n]) + for n in range(len(_targets[m])) + ]) + for m in range(len(_targets)) ]) def unique( @@ -782,9 +806,9 @@ def zero_charge(self): return obj -def fuse_charges( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]]) -> Union[BaseCharge, ChargeCollection]: +def fuse_charges(charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[bool, int]] + ) -> Union[BaseCharge, ChargeCollection]: """ Fuse all `charges` into a new charge. 
Charges are fused from "right to left", From 1c70fb5a858953b63e5a3d73dd8f2f4a192a7105 Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 6 Jan 2020 22:37:21 -0500 Subject: [PATCH 130/212] fixed bug in find_dense_positions --- tensornetwork/block_tensor/block_tensor.py | 58 +++++++++++----------- 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index d363d2662..c552a184a 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -464,11 +464,11 @@ def find_diagonal_sparse_blocks_depreacated_1( return blocks -def find_diagonal_sparse_blocks_deprecated_0(data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], - return_data: Optional[bool] = True - ) -> Dict: +def find_diagonal_sparse_blocks_deprecated_0( + data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: """ Deprecated: this version is about 2 times slower (worst case) than the current used implementation @@ -570,11 +570,11 @@ def find_diagonal_sparse_blocks_deprecated_0(data: np.ndarray, return blocks -def find_diagonal_sparse_blocks_column_major(data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], - return_data: Optional[bool] = True - ) -> Dict: +def find_diagonal_sparse_blocks_column_major( + data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: """ Deprecated @@ -781,19 +781,18 @@ def find_dense_positions(left_charges: np.ndarray, left_flow: int, assume_unique=True) right_locations = {} for c in common_charges: + right_locations[(target_charge - left_flow * c) * right_flow] = np.nonzero( right_charges == (target_charge - left_flow * c) * right_flow)[0] len_right_charges = len(right_charges) indices = [] - data = [] for n in range(len(left_charges)): c = 
left_charges[n] - indices.append(n * len_right_charges + - right_locations[(target_charge - left_flow * c) * - right_flow]) - data.append([c, (target_charge - left_flow * c) * right_flow]) - return indices + if c not in common_charges: + continue + indices.append(n * len_right_charges + right_locations[ + (target_charge - left_flow * c) * right_flow]) return np.concatenate(indices) @@ -879,9 +878,8 @@ def find_sparse_positions(left_charges: np.ndarray, left_flow: int, for target_charge in target_charges: right_indices[(left_charge, target_charge)] = np.nonzero( - tmp_relevant_right_charges == (target_charge - - left_flow * left_charge) * - right_flow)[0] + tmp_relevant_right_charges == + (target_charge - left_flow * left_charge) * right_flow)[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy @@ -1268,9 +1266,9 @@ def transpose(self, linear_positions = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=0) - self.dense_to_sparse_table = sp.sparse.csr_matrix( - (np.arange(len(self.data)), - (linear_positions, np.zeros(len(self.data), dtype=np.int64)))) + self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( + len(self.data)), (linear_positions, + np.zeros(len(self.data), dtype=np.int64)))) flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] @@ -1295,8 +1293,8 @@ def transpose(self, self.data = self.data[inds] return inds - def transpose_intersect1d(self, order: Union[List[int], np.ndarray] - ) -> "BlockSparseTensor": + def transpose_intersect1d( + self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order` Args: pp @@ -1387,8 +1385,8 @@ def transpose_intersect1d(self, order: Union[List[int], np.ndarray] # tr_dense_linear_positions[tr_linear_positions]) # self.data = self.data[inds] - def transpose_searchsorted(self, order: Union[List[int], np.ndarray] - ) -> "BlockSparseTensor": + def 
transpose_searchsorted( + self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": """ Deprecated: @@ -1568,8 +1566,8 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_1(self, return_data: Optional[bool] = True - ) -> Dict: + def get_diagonal_blocks_deprecated_1( + self, return_data: Optional[bool] = True) -> Dict: """ Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. @@ -1604,8 +1602,8 @@ def get_diagonal_blocks_deprecated_1(self, return_data: Optional[bool] = True column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_0(self, return_data: Optional[bool] = True - ) -> Dict: + def get_diagonal_blocks_deprecated_0( + self, return_data: Optional[bool] = True) -> Dict: """ Deprecated From f5ed165cdb9a6335e5db6afd2027f579eaf374b1 Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 6 Jan 2020 22:37:38 -0500 Subject: [PATCH 131/212] fix bug in find_dense_positions --- .../block_tensor/block_tensor_new.py | 39 +++++++++---------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py index edf4902e7..31c51934a 100644 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -122,9 +122,8 @@ def compute_fused_charge_degeneracies( # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". 
- accumulated_charges, accumulated_degeneracies = (flows[0] * - charges[0]).unique( - return_counts=True) + accumulated_charges, accumulated_degeneracies = ( + flows[0] * charges[0]).unique(return_counts=True) for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor @@ -366,25 +365,25 @@ def find_dense_positions( common_charges = tmp_unique[ counts == 2] #common_charges is a BaseCharge or ChargeCollection right_locations = {} - for n in range(len(common_charges)): c = common_charges[n] + right_charge = (target_charge - left_flow * c) * right_flow - right_locations[c.get_item(0)] = np.nonzero( + right_locations[right_charge.get_item(0)] = np.nonzero( right_charges == right_charge)[0] len_right_charges = len(right_charges) indices = [] - data = [] for n in range(len(left_charges)): c = left_charges[n] right_charge = (target_charge - left_flow * c) * right_flow - data.append([c.get_item(0), right_charge.get_item(0)]) + + if c not in common_charges: + continue indices.append(n * len_right_charges + right_locations[right_charge.get_item(0)]) - return indices - #return np.concatenate(indices) + return np.concatenate(indices) def find_sparse_positions( @@ -863,9 +862,9 @@ def transpose(self, linear_positions = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=0) - self.dense_to_sparse_table = sp.sparse.csr_matrix( - (np.arange(len(self.data)), - (linear_positions, np.zeros(len(self.data), dtype=np.int64)))) + self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( + len(self.data)), (linear_positions, + np.zeros(len(self.data), dtype=np.int64)))) flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] @@ -890,8 +889,8 @@ def transpose(self, self.data = self.data[inds] return inds - def transpose_intersect1d(self, order: Union[List[int], np.ndarray] - ) -> "BlockSparseTensor": + def transpose_intersect1d( + self, order: 
Union[List[int], np.ndarray]) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order` Args: pp @@ -982,8 +981,8 @@ def transpose_intersect1d(self, order: Union[List[int], np.ndarray] # tr_dense_linear_positions[tr_linear_positions]) # self.data = self.data[inds] - def transpose_searchsorted(self, order: Union[List[int], np.ndarray] - ) -> "BlockSparseTensor": + def transpose_searchsorted( + self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": """ Deprecated: @@ -1163,8 +1162,8 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_1(self, return_data: Optional[bool] = True - ) -> Dict: + def get_diagonal_blocks_deprecated_1( + self, return_data: Optional[bool] = True) -> Dict: """ Obtain the diagonal blocks of symmetric matrix. BlockSparseTensor has to be a matrix. @@ -1199,8 +1198,8 @@ def get_diagonal_blocks_deprecated_1(self, return_data: Optional[bool] = True column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_0(self, return_data: Optional[bool] = True - ) -> Dict: + def get_diagonal_blocks_deprecated_0( + self, return_data: Optional[bool] = True) -> Dict: """ Deprecated From ea4a1ab2e525c5cb5df45cd0872ac85e3fe24ae5 Mon Sep 17 00:00:00 2001 From: mganahl Date: Tue, 7 Jan 2020 12:39:13 -0500 Subject: [PATCH 132/212] docstring --- tensornetwork/block_tensor/charge.py | 149 +++++++++++++++++++++++++-- 1 file changed, 140 insertions(+), 9 deletions(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 44b1bd731..1faaea695 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -43,6 +43,19 @@ class BaseCharge: def __init__(self, charges: Optional[Union[List[np.ndarray], np.ndarray]] = None, shifts: Optional[Union[List[int], np.ndarray]] = None) -> None: + """ + 
Initialize a BaseCharge object. + Args: + charges: Optional `np.ndarray` or list of `np.ndarray` of type `int` holdingn + the physical charges. If a list of `np,ndarray` is passed, the arrays are merged + into a single `np.ndarray` by `np.left_shift`-ing and adding up charges. The amount + of left-shift per `np,ndarray` is determined by its `dtype`. E.g. an `np,ndarray` of + `dtype=np.int16` is shifted by 16 bits. Charges are shifted and added moving from + small to large indices in `charges`. `BaseCharge` can hold at most 8 individual + charges of `dtype=np.int8` on 64-bit architectures. + shifts: An optional list of shifts, used for initializing a `BaseCharge` object from + an existing `BaseCharge` object. + """ if charges is not None: if isinstance(charges, np.ndarray): charges = [charges] @@ -85,22 +98,72 @@ def __init__(self, self.shifts = np.asarray([]) def __add__(self, other: "BaseCharge") -> "BaseCharge": + """ + Fuse the charges of two `BaseCharge` objects and return a new + `BaseCharge` holding the result. + Args: + other: A `BaseChare` object. + Returns: + BaseCharge: The result of fusing `self` with `other`. + """ raise NotImplementedError("`__add__` is not implemented for `BaseCharge`") def __sub__(self, other: "BaseCharge") -> "BaseCharge": + """ + Subtract the charges of `other` from `self. + Returns a `BaseCharge` holding the result. + Args: + other: A `BaseChare` object. + Returns: + BaseCharge: The result subtracting `other` from `self`. + """ + raise NotImplementedError("`__sub__` is not implemented for `BaseCharge`") - def __matmul__(self, other: "BaseCharge") -> "Charge": + def __matmul__(self, other: "BaseCharge") -> "BaseCharge": + """ + Build the direct product of two charges and return + it in a new `BaseCharge` object. + Args: + other: A `BaseCharge` object. + Returns: + BaseCharge: The direct product of `self` and `other`. 
+ """ raise NotImplementedError( "`__matmul__` is not implemented for `BaseCharge`") - def get_item(self, n: int) -> np.ndarray: + def get_item(self, n: Union[np.ndarray, int]) -> np.ndarray: + """ + Return the charge-element at position `n`. + Args: + n: An integer or `np.ndarray`. + Returns: + np.ndarray: The charges at `n`. + """ return self.charges[n] def get_item_ndarray(self, n: Union[np.ndarray, int]) -> np.ndarray: + """ + Return the charge-element at position `n`. + Needed to provide a common interface with `ChargeCollection`. + Args: + n: An integer or `np.ndarray`. + Returns: + np.ndarray: The charges at `n`. + + """ + return self.get_item(n) def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": + """ + Return the charge-element at position `n`, wrapped into a `BaseCharge` + object. + Args: + n: An integer or `np.ndarray`. + Returns: + BaseCharge: The charges at `n`. + """ if isinstance(n, (np.integer, int)): n = np.asarray([n]) @@ -111,6 +174,9 @@ def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": @property def num_symmetries(self): + """ + The number of individual symmetries stored in this object. + """n return len(self.shifts) def __len__(self) -> int: @@ -126,16 +192,41 @@ def dual_charges(self) -> np.ndarray: "`dual_charges` is not implemented for `BaseCharge`") def __mul__(self, number: Union[bool, int]) -> "BaseCharge": + """ + Multiply `self` with `number` from the left. + `number` can take values in `1,-1, 0, True, False`. + This multiplication is used to transform between charges and dual-charges. + Args: + number: Can can take values in `1,-1, 0, True, False`. + If `1,True`, return the original object + If `-1, 0, False` return a new `BaseCharge` holding the + dual-charges. 
+ Returns: + BaseCharge: The result of `self * number` + """ raise NotImplementedError("`__mul__` is not implemented for `BaseCharge`") def __rmul__(self, number: Union[bool, int]) -> "BaseCharge": + """ + Multiply `self` with `number` from the right. + `number` can take values in `1,-1, 0, True, False`. + This multiplication is used to transform between charges and dual-charges. + Args: + number: Can can take values in `1,-1, 0, True, False`. + If `1,True`, return the original object + If `-1, 0, False` return a new `BaseCharge` holding the + dual-charges. + Returns: + BaseCharge: The result of `number * self`. + """ + raise NotImplementedError("`__rmul__` is not implemented for `BaseCharge`") @property def dtype(self): return self.charges.dtype - def unique(self, + def unique(self,n return_index=False, return_inverse=False, return_counts=False @@ -175,18 +266,44 @@ def unique(self, out.__init__([result[0]], self.shifts) return tuple([out] + [result[n] for n in range(1, len(result))]) - def isin(self, targets: Union[int, Iterable, "BaseCharge"]): - + def isin(self, targets: Union[int, Iterable, "BaseCharge"]) -> np.ndarray: + """ + Test each element of `BaseCharge` if it is in `targets`. Returns + an `np.ndarray` of `dtype=bool`. + Args: + targets: The test elements + Returns: + np.ndarray: An array of `bool` type holding the result of the comparison. + """ if isinstance(targets, type(self)): if not np.all(self.shifts == targets.shifts): raise ValueError( "Cannot compare charges with different shifts {} and {}".format( - self.shifts, targets.shifts)) + self.shifts, tpargets.shifts)) targets = targets.charges targets = np.asarray(targets) return np.isin(self.charges, targets) + def __contains__(self, target: Union[int, Iterable, "BaseCharge"]) -> bool: + """ + Test each element of `BaseCharge` if it is in `targets`. Returns + an `np.ndarray` of `dtype=bool`. 
+ Args: + targets: The test elements + Returns: + np.ndarray: An array of `bool` type holding the result of the comparison. + """ + + if isinstance(target, type(self)): + if not np.all(self.shifts == target.shifts): + raise ValueError( + "Cannot compare charges with different shifts {} and {}".format( + self.shifts, tparget.shifts)) + target = target.charges + target = np.asarray(target) + return target in self.charges + def equals(self, target_charges: Iterable) -> np.ndarray: """ Find indices where `BaseCharge` equals `target_charges`. @@ -703,6 +820,20 @@ def isin(self, targets: Union[Iterable, "ChargeCollection"]): for m in range(len(_targets)) ]) + def __contains__(self, targets: Union[Iterable, "ChargeCollection"]): + if isinstance(targets, type(self)): + if len(targets) > 1: + raise ValueError( + '__contains__ expects a single input, found {} inputs'.format( + len(targets))) + + _targets = targets.get_item(0) + return np.any( + np.logical_and.reduce([ + np.isin(self._stacked_charges[:, n], _targets[n]) + for n in range(len(_targets)) + ])) + def unique( self, return_index=False, @@ -806,9 +937,9 @@ def zero_charge(self): return obj -def fuse_charges(charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]] - ) -> Union[BaseCharge, ChargeCollection]: +def fuse_charges( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[bool, int]]) -> Union[BaseCharge, ChargeCollection]: """ Fuse all `charges` into a new charge. 
Charges are fused from "right to left", From 604be2649c1f40e5fa288d94b952dc17d20be7f1 Mon Sep 17 00:00:00 2001 From: mganahl Date: Tue, 7 Jan 2020 13:07:19 -0500 Subject: [PATCH 133/212] fix bug in Index initialization --- tensornetwork/block_tensor/index.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index fa6af358e..378760e1c 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -145,7 +145,7 @@ class Index: def __init__(self, charges: Union[List, np.ndarray], flow: int, - name: Optional[Text] = None, + name: Optional[Text] = "index", left_child: Optional["Index"] = None, right_child: Optional["Index"] = None): self._charges = np.asarray(charges) From ec66d50272031d1bf9c1ac8778ffbe443e0bfa00 Mon Sep 17 00:00:00 2001 From: mganahl Date: Tue, 7 Jan 2020 13:07:31 -0500 Subject: [PATCH 134/212] fix bug in Index initialization --- tensornetwork/block_tensor/index_new.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/tensornetwork/block_tensor/index_new.py b/tensornetwork/block_tensor/index_new.py index 66e97b283..18271aeff 100644 --- a/tensornetwork/block_tensor/index_new.py +++ b/tensornetwork/block_tensor/index_new.py @@ -31,15 +31,15 @@ class Index: def __init__(self, charges: Union[ChargeCollection, BaseCharge], flow: int, - name: Optional[Text] = None, + name: Optional[Text] = "index", left_child: Optional["Index"] = None, right_child: Optional["Index"] = None): if isinstance(charges, BaseCharge): - self._charges = ChargeCollection([charges]) + self._charges = charges #ChargeCollection([charges]) elif isinstance(charges, ChargeCollection) or (charges is None): self._charges = charges else: - raise TypeError("Unknown type {}".format(type(chargesp))) + raise TypeError("Unknown type {}".format(type(charges))) self.flow = flow self.left_child = left_child self.right_child = right_child @@ 
-60,19 +60,20 @@ def _copy_helper(self, index: "Index", copied_index: "Index") -> None: """ Helper function for copy """ + print(index.left_child, index.right_child) if index.left_child != None: left_copy = Index( - charges=copy.copy(index.left_child.charges), - flow=copy.copy(index.left_child.flow), - name=copy.copy(index.left_child.name)) + charges=copy.deepcopy(index.left_child.charges), + flow=copy.deepcopy(index.left_child.flow), + name=copy.deepcopy(index.left_child.name)) copied_index.left_child = left_copy self._copy_helper(index.left_child, left_copy) if index.right_child != None: right_copy = Index( - charges=copy.copy(index.right_child.charges), - flow=copy.copy(index.right_child.flow), - name=copy.copy(index.right_child.name)) + charges=copy.deepcopy(index.right_child.charges), + flow=copy.deepcopy(index.right_child.flow), + name=copy.deepcopy(index.right_child.name)) copied_index.right_child = right_copy self._copy_helper(index.right_child, right_copy) @@ -83,8 +84,8 @@ def copy(self): `Index` are copied as well. 
""" index_copy = Index( - charges=copy.copy(self._charges), - flow=copy.copy(self.flow), + charges=copy.deepcopy(self._charges), + flow=copy.deepcopy(self.flow), name=self.name) self._copy_helper(self, index_copy) From 4da8335d72d6680733f52dbf084c5201781231a5 Mon Sep 17 00:00:00 2001 From: mganahl Date: Tue, 7 Jan 2020 16:29:38 -0500 Subject: [PATCH 135/212] typo --- .../backends/tensorflow/tensordot2.py | 26 +++++++------------ 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/tensornetwork/backends/tensorflow/tensordot2.py b/tensornetwork/backends/tensorflow/tensordot2.py index 00de66c49..94e980f4e 100644 --- a/tensornetwork/backends/tensorflow/tensordot2.py +++ b/tensornetwork/backends/tensorflow/tensordot2.py @@ -19,11 +19,7 @@ Tensor = Any -def tensordot(tf, - a, - b, - axes, - name: Optional[Text] = None) -> Tensor: +def tensordot(tf, a, b, axes, name: Optional[Text] = None) -> Tensor: r"""Tensor contraction of a and b along specified axes. Tensordot (also known as tensor contraction) sums the product of elements from `a` and `b` over the indices specified by `a_axes` and `b_axes`. @@ -80,7 +76,7 @@ def _tensordot_should_flip(contraction_axes: List[int], return bool(np.mean(contraction_axes) < np.mean(free_axes)) return False - def _tranpose_if_necessary(tensor: Tensor, perm: List[int]) -> Tensor: + def _transpose_if_necessary(tensor: Tensor, perm: List[int]) -> Tensor: """Like transpose(), but avoids creating a new tensor if possible. Although the graph optimizer should kill trivial transposes, it is best not to add them in the first place! @@ -89,8 +85,7 @@ def _tranpose_if_necessary(tensor: Tensor, perm: List[int]) -> Tensor: return tensor return tf.transpose(tensor, perm) - def _reshape_if_necessary(tensor: Tensor, - new_shape: List[int]) -> Tensor: + def _reshape_if_necessary(tensor: Tensor, new_shape: List[int]) -> Tensor: """Like reshape(), but avoids creating a new tensor if possible. 
Assumes shapes are both fully specified.""" cur_shape = tensor.get_shape().as_list() @@ -135,7 +130,7 @@ def _tensordot_reshape( prod_axes = int(np.prod([shape_a[i] for i in axes])) perm = axes + free if flipped else free + axes new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes] - transposed_a = _tranpose_if_necessary(a, perm) + transposed_a = _transpose_if_necessary(a, perm) reshaped_a = _reshape_if_necessary(transposed_a, new_shape) transpose_needed = (not flipped) if is_right_term else flipped return reshaped_a, free_dims, free_dims, transpose_needed @@ -152,7 +147,7 @@ def _tensordot_reshape( axes = tf.convert_to_tensor(axes, dtype=tf.dtypes.int32, name="axes") free = tf.convert_to_tensor(free, dtype=tf.dtypes.int32, name="free") shape_a = tf.shape(a) - transposed_a = _tranpose_if_necessary(a, perm) + transposed_a = _transpose_if_necessary(a, perm) else: free_dims_static = None shape_a = tf.shape(a) @@ -184,8 +179,7 @@ def _tensordot_reshape( transpose_needed = (not flipped) if is_right_term else flipped return reshaped_a, free_dims, free_dims_static, transpose_needed - def _tensordot_axes(a: Tensor, axes - ) -> Tuple[Any, Any]: + def _tensordot_axes(a: Tensor, axes) -> Tuple[Any, Any]: """Generates two sets of contraction axes for the two tensor arguments.""" a_shape = a.get_shape() if isinstance(axes, tf.compat.integral_types): @@ -195,11 +189,11 @@ def _tensordot_axes(a: Tensor, axes if axes > a_shape.ndims: raise ValueError("'axes' must not be larger than the number of " "dimensions of tensor %s." 
% a) - return (list(range(a_shape.ndims - axes, - a_shape.ndims)), list(range(axes))) + return (list(range(a_shape.ndims - axes, a_shape.ndims)), + list(range(axes))) rank = tf.rank(a) - return (tf.range(rank - axes, rank, - dtype=tf.int32), tf.range(axes, dtype=tf.int32)) + return (tf.range(rank - axes, rank, dtype=tf.int32), + tf.range(axes, dtype=tf.int32)) if isinstance(axes, (list, tuple)): if len(axes) != 2: raise ValueError("'axes' must be an integer or have length 2.") From 6e5d9b9c55190eafee51bf2039297fc3bb23627c Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 8 Jan 2020 11:55:58 -0500 Subject: [PATCH 136/212] remove __rmul__ calls of ChargeCollection and BaseCharge --- .../block_tensor/block_tensor_new.py | 524 ++++++------------ 1 file changed, 163 insertions(+), 361 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py index 31c51934a..37cbf5ac8 100644 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -24,10 +24,45 @@ import scipy as sp import itertools import time -from typing import List, Union, Any, Tuple, Type, Optional, Dict, Iterable +from typing import List, Union, Any, Tuple, Type, Optional, Dict, Iterable, Sequence Tensor = Any +def fuse_ndarray_pair(array1: Union[List, np.ndarray], + array2: Union[List, np.ndarray]) -> np.ndarray: + """ + Fuse ndarrays `array1` and `array2` by kronecker-addition. + Given `array1 = [0,1,2]` and `array2 = [10,100]`, this returns + `[10, 100, 11, 101, 12, 102]`. + + Args: + array1: np.ndarray + array2: np.ndarray + Returns: + np.ndarray: The result of adding `array1` and `array2` + """ + return np.reshape( + np.asarray(array1)[:, None] + np.asarray(array2)[None, :], + len(array1) * len(array2)) + + +def fuse_ndarrays(arrays: List[Union[List, np.ndarray]]) -> np.ndarray: + """ + Fuse all `arrays` by simple kronecker addition. 
+ Arrays are fused from "right to left", + Args: + arrays: A list of arrays to be fused. + Returns: + np.ndarray: The result of fusing `charges`. + """ + if len(arrays) == 1: + return arrays[0] + fused_arrays = arrays[0] + for n in range(1, len(arrays)): + fused_arrays = fuse_ndarray_pair(array1=fused_arrays, array2=arrays[n]) + return fused_arrays + + def unfuse(fused_indices: np.ndarray, len_left: int, len_right: int) -> Tuple[np.ndarray, np.ndarray]: """ @@ -238,7 +273,7 @@ def find_diagonal_sparse_blocks(data: np.ndarray, #convenience container for storing the degeneracies of each #column charge #column_degeneracies = dict(zip(unique_column_charges, column_dims)) - column_degeneracies = dict(zip((-1) * unique_column_charges, column_dims)) + column_degeneracies = dict(zip(unique_column_charges * (-1), column_dims)) if len(row_charges) > 1: left_row_charges, right_row_charges, _ = _find_best_partition( row_charges, row_flows) @@ -247,7 +282,7 @@ def find_diagonal_sparse_blocks(data: np.ndarray, unique_row_charges = (unique_left + unique_right).unique() #get the charges common to rows and columns (only those matter) - concatenated = unique_row_charges.concatenate((-1) * unique_column_charges) + concatenated = unique_row_charges.concatenate(unique_column_charges * (-1)) tmp_unique, counts = concatenated.unique(return_counts=True) common_charges = tmp_unique[ counts == 2] #common_charges is a BaseCharge or ChargeCollection @@ -266,7 +301,7 @@ def find_diagonal_sparse_blocks(data: np.ndarray, unique_row_charges, row_dims = fused_row_charges.unique(return_counts=True) #get the charges common to rows and columns (only those matter) #get the charges common to rows and columns (only those matter) - concatenated = unique_row_charges.concatenate((-1) * unique_column_charges) + concatenated = unique_row_charges.concatenate(unique_column_charges * (-1)) tmp_unique, counts = concatenated.unique(return_counts=True) common_charges = tmp_unique[ counts == 2] #common_charges is a 
BaseCharge or ChargeCollection @@ -359,7 +394,7 @@ def find_dense_positions( unique_left, left_degeneracies = left_charges.unique(return_counts=True) unique_right, right_degeneracies = right_charges.unique(return_counts=True) - tmp_charges = (target_charge - right_flow * unique_right) * left_flow + tmp_charges = (target_charge + (unique_right * right_flow * (-1))) * left_flow concatenated = unique_left.concatenate(tmp_charges) tmp_unique, counts = concatenated.unique(return_counts=True) common_charges = tmp_unique[ @@ -368,7 +403,7 @@ def find_dense_positions( for n in range(len(common_charges)): c = common_charges[n] - right_charge = (target_charge - left_flow * c) * right_flow + right_charge = (target_charge + (c * left_flow * (-1))) * right_flow right_locations[right_charge.get_item(0)] = np.nonzero( right_charges == right_charge)[0] @@ -376,7 +411,7 @@ def find_dense_positions( indices = [] for n in range(len(left_charges)): c = left_charges[n] - right_charge = (target_charge - left_flow * c) * right_flow + right_charge = (target_charge + (c * left_flow * (-1))) * right_flow if c not in common_charges: continue @@ -430,7 +465,7 @@ def find_sparse_positions( target_charges = target_charges.unique() unique_left = left_charges.unique() unique_right = right_charges.unique() - fused = left_flow * unique_left + right_flow * unique_right + fused = unique_left * left_flow + unique_right * right_flow #compute all unique charges that can add up to #target_charges @@ -462,18 +497,18 @@ def find_sparse_positions( for n in range(len(unique_left_charges)): left_charge = unique_left_charges[n] - total_charge = left_flow * left_charge + right_flow * unique_right_charges + total_charge = left_charge * left_flow + unique_right_charges * right_flow total_degeneracy = np.sum(right_dims[total_charge.isin(target_charges)]) tmp_relevant_right_charges = relevant_right_charges[ relevant_right_charges.isin( - (target_charges + ((-1) * left_flow) * left_charge) * right_flow)] + 
(target_charges + left_charge * ((-1) * left_flow)) * right_flow)] for n in range(len(target_charges)): target_charge = target_charges[n] - right_indices[(left_charge.get_item(0), - target_charge.get_item(0))] = np.nonzero( - tmp_relevant_right_charges == (target_charge + ( - (-1) * left_flow) * left_charge) * right_flow)[0] + right_indices[( + left_charge.get_item(0), target_charge.get_item(0))] = np.nonzero( + tmp_relevant_right_charges == (target_charge + left_charge * ( + (-1) * left_flow)) * right_flow)[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy @@ -496,141 +531,10 @@ def find_sparse_positions( return out -def compute_dense_to_sparse_mapping_deprecated(charges: List[np.ndarray], - flows: List[Union[bool, int]], - target_charge: int) -> int: - """ - Compute the mapping from multi-index positions to the linear positions - within the sparse data container, given the meta-data of a symmetric tensor. - This function returns a list of np.ndarray `index_positions`, with - `len(index_positions)=len(charges)` (equal to the rank of the tensor). - When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. - ` - multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) - ` - with `r` the rank of the tensor and `N` the number of non-zero elements of - the symmetric tensor, then the element at position `n` within the linear - data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], - i.e. `data[n]` has the multi-index `multi_indices[n,:]`, and the total charges - can for example be obtained using - ``` - index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) - total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) - for n in range(len(charges)): - total_charges += flows[n]*charges[n][index_positions[n]] - np.testing.assert_allclose(total_charges, 0) - ``` - Args: - charges: List of np.ndarray of int, one for each leg of the - underlying tensor. 
Each np.ndarray `charges[leg]` - is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - target_charge: The total target charge of the blocks to be calculated. - Returns: - np.ndarray: An (N, r) np.ndarray of dtype np.int16, - with `N` the number of non-zero elements, and `r` - the rank of the tensor. - """ - #find the best partition (the one where left and right dimensions are - #closest - dims = np.asarray([len(c) for c in charges]) - t1 = time.time() - fused_charges = fuse_charges(charges, flows) - nz_indices = np.nonzero(fused_charges == target_charge)[0] - if len(nz_indices) == 0: - raise ValueError( - "`charges` do not add up to a total charge {}".format(target_charge)) - - index_locations = [] - for n in reversed(range(len(charges))): - t1 = time.time() - nz_indices, right_indices = unfuse(nz_indices, np.prod(dims[0:n]), dims[n]) - index_locations.insert(0, right_indices) - print(time.time() - t1) - return index_locations - - -def compute_dense_to_sparse_mapping_2(charges: List[np.ndarray], - flows: List[Union[bool, int]], - target_charge: int) -> int: - """ - Compute the mapping from multi-index positions to the linear positions - within the sparse data container, given the meta-data of a symmetric tensor. - This function returns a list of np.ndarray `index_positions`, with - `len(index_positions)=len(charges)` (equal to the rank of the tensor). - When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. - ` - multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) - ` - with `r` the rank of the tensor and `N` the number of non-zero elements of - the symmetric tensor, then the element at position `n` within the linear - data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], - i.e. 
`data[n]` has the multi-index `multi_indices[n,:]`, and the total charges - can for example be obtained using - ``` - index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) - total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) - for n in range(len(charges)): - total_charges += flows[n]*charges[n][index_positions[n]] - np.testing.assert_allclose(total_charges, 0) - ``` - Args: - charges: List of np.ndarray of int, one for each leg of the - underlying tensor. Each np.ndarray `charges[leg]` - is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - target_charge: The total target charge of the blocks to be calculated. - Returns: - np.ndarray: An (N, r) np.ndarray of dtype np.int16, - with `N` the number of non-zero elements, and `r` - the rank of the tensor. 
- """ - #find the best partition (the one where left and right dimensions are - #closest - dims = np.asarray([len(c) for c in charges]) - - #note: left_charges and right_charges have been fused from RIGHT to LEFT - left_charges, right_charges, partition = _find_best_partition(charges, flows) - t1 = time.time() - nz_indices = find_dense_positions( - left_charges, 1, right_charges, 1, target_charge=target_charge) - print(time.time() - t1) - if len(nz_indices) == 0: - raise ValueError( - "`charges` do not add up to a total charge {}".format(target_charge)) - t1 = time.time() - nz_left_indices, nz_right_indices = unfuse(nz_indices, len(left_charges), - len(right_charges)) - print(time.time() - t1) - index_locations = [] - #first unfuse left charges - for n in range(partition): - t1 = time.time() - indices, nz_left_indices = unfuse(nz_left_indices, dims[n], - np.prod(dims[n + 1:partition])) - index_locations.append(indices) - print(time.time() - t1) - for n in range(partition, len(dims)): - t1 = time.time() - indices, nz_right_indices = unfuse(nz_right_indices, dims[n], - np.prod(dims[n + 1::])) - index_locations.append(indices) - print(time.time() - t1) - - return index_locations - - -def compute_dense_to_sparse_mapping(charges: List[np.ndarray], - flows: List[Union[bool, int]], - target_charge: int) -> int: +def compute_dense_to_sparse_mapping( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[bool, int]], + target_charge: Union[BaseCharge, ChargeCollection]) -> int: """ Compute the mapping from multi-index positions to the linear positions within the sparse data container, given the meta-data of a symmetric tensor. @@ -741,6 +645,42 @@ def randn(cls, indices: List[Index], data = backend.randn((num_non_zero_elements,), dtype=dtype) return cls(data=data, indices=indices) + @classmethod + def ones(cls, indices: List[Index], + dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": + """ + Initialize a symmetric tensor with ones. 
+ Args: + indices: List of `Index` objecst, one for each leg. + dtype: An optional numpy dtype. The dtype of the tensor + Returns: + BlockSparseTensor + """ + charges = [i.charges for i in indices] + flows = [i.flow for i in indices] + num_non_zero_elements = compute_num_nonzero(charges, flows) + backend = backend_factory.get_backend('numpy') + data = backend.ones((num_non_zero_elements,), dtype=dtype) + return cls(data=data, indices=indices) + + @classmethod + def zeros(cls, indices: List[Index], + dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": + """ + Initialize a symmetric tensor with zeros. + Args: + indices: List of `Index` objecst, one for each leg. + dtype: An optional numpy dtype. The dtype of the tensor + Returns: + BlockSparseTensor + """ + charges = [i.charges for i in indices] + flows = [i.flow for i in indices] + num_non_zero_elements = compute_num_nonzero(charges, flows) + backend = backend_factory.get_backend('numpy') + data = backend.zeros((num_non_zero_elements,), dtype=dtype) + return cls(data=data, indices=indices) + @classmethod def random(cls, indices: List[Index], dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": @@ -754,6 +694,7 @@ def random(cls, indices: List[Index], """ charges = [i.charges for i in indices] flows = [i.flow for i in indices] + num_non_zero_elements = compute_num_nonzero(charges, flows) dtype = dtype if dtype is not None else self.np.float64 @@ -823,6 +764,10 @@ def transpose(self, #until `tensordot` or `find_diagonal_sparsenn_blocks`, at the cost of #maintaining two lookup tables for sparse-to-dense positions and dense-to-sparse #positions + if (transposed_linear_positions is + not None) and (len(transposed_linear_positions) != len(tensor.data)): + raise ValueError("len(transposed_linear_positions) != len(tensor.data).") + if len(order) != self.rank: raise ValueError( "`len(order)={}` is different form `self.rank={}`".format( @@ -830,7 +775,7 @@ def transpose(self, #transpose is the only 
function using self.dense_to_sparse_table #so we can initialize it here. This will change if we are implementing #lazy shuffling of data. In this case, `find_diagonal_sparse_blocks` - #also needs + #also needs this. #we use elementary indices here because it is #more efficient to get the fused charges using @@ -860,7 +805,11 @@ def transpose(self, #of `left_charges` and `right_charges` that have `0` #total charge (those are the only non-zero elements). linear_positions = find_dense_positions( - left_charges, 1, right_charges, 1, target_charge=0) + left_charges, + 1, + right_charges, + 1, + target_charge=flat_charges[0].zero_charge) self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( len(self.data)), (linear_positions, @@ -874,13 +823,12 @@ def transpose(self, tr_left_charges, tr_right_charges, _ = _find_best_partition( flat_tr_charges, flat_tr_flows) #FIXME: this should be done without fully fusing the strides - tr_dense_linear_positions = fuse_charges([ + tr_dense_linear_positions = fuse_ndarrays([ np.arange(flat_tr_dims[n]) * flat_tr_strides[n] for n in range(len(flat_tr_dims)) - ], - flows=[1] * len(flat_tr_dims)) - tr_linear_positions = find_dense_positions(tr_left_charges, 1, - tr_right_charges, 1, 0) + ]) + tr_linear_positions = find_dense_positions( + tr_left_charges, 1, tr_right_charges, 1, tr_left_charges.zero_charge) inds = np.squeeze(self.dense_to_sparse_table[ tr_dense_linear_positions[tr_linear_positions], 0].toarray()) @@ -889,143 +837,6 @@ def transpose(self, self.data = self.data[inds] return inds - def transpose_intersect1d( - self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": - """ - Transpose the tensor into the new order `order` - Args: pp - order: The new order of indices. - Returns: - BlockSparseTensor: The transposed tensor. - """ - #FIXME: this implementation uses scipy.sparse.csr_matrix to generate the - #lookup-table from dense to sparse indices. 
According to some quick - #testing, the final lookup is currently the bottleneck. - #FIXME: transpose currently shuffles data. This can in principle be postponed - #until `tensordot` or `find_diagonal_sparse_blocks` - if len(order) != self.rank: - raise ValueError(len(order), self.rank) - charges = self.charges #call only once in case some of the indices are merged indices - dims = [len(c) for c in charges] - - strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) - #find the best partition into left and right charges - left_charges, right_charges, _ = _find_best_partition(charges, self.flows) - #find the index-positions of the elements in the fusion - #of `left_charges` and `right_charges` that have `0` - #total charge (those are the only non-zero elements). - linear_positions = find_dense_positions( - left_charges, 1, right_charges, 1, target_charge=0) - - tr_charges = [charges[n] for n in order] - tr_flows = [self.flows[n] for n in order] - tr_strides = [strides[n] for n in order] - tr_dims = [dims[n] for n in order] - tr_left_charges, tr_right_charges, _ = _find_best_partition( - tr_charges, tr_flows) - - tr_dense_linear_positions = fuse_charges( - [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], - flows=[1] * len(tr_dims)) - tr_linear_positions = find_dense_positions(tr_left_charges, 1, - tr_right_charges, 1, 0) - new_linear_positions = tr_dense_linear_positions[tr_linear_positions] - _, _, inds = np.intersect1d( - linear_positions, - new_linear_positions, - return_indices=True, - assume_unique=True) - self.data = self.data[inds] - - # def transpose_lookup(self, order: Union[List[int], np.ndarray] - # ) -> "BlockSparseTensor": - # """ - # Deprecated - - # Transpose the tensor into the new order `order`. Uses a simple cython std::map - # for the lookup - # Args: - # order: The new order of indices. - # Returns: - # BlockSparseTensor: The transposed tensor. 
- # """ - # if len(order) != self.rank: - # raise ValueError( - # "`len(order)={}` is different form `self.rank={}`".format( - # len(order), self.rank)) - # charges = self.charges #call only once in case some of the indices are merged indices - # dims = [len(c) for c in charges] - - # strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) - # #find the best partition into left and right charges - # left_charges, right_charges, _ = _find_best_partition(charges, self.flows) - # #find the index-positions of the elements in the fusion - # #of `left_charges` and `right_charges` that have `0` - # #total charge (those are the only non-zero elements). - # linear_positions = find_dense_positions( - # left_charges, 1, right_charges, 1, target_charge=0) - - # tr_charges = [charges[n] for n in order] - # tr_flows = [self.flows[n] for n in order] - # tr_strides = [strides[n] for n in order] - # tr_dims = [dims[n] for n in order] - # tr_left_charges, tr_right_charges, _ = _find_best_partition( - # tr_charges, tr_flows) - # #FIXME: this should be done without fully fusing the strides - # tr_dense_linear_positions = fuse_charges( - # [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], - # flows=[1] * len(tr_dims)) - # tr_linear_positions = find_dense_positions(tr_left_charges, 1, - # tr_right_charges, 1, 0) - # inds = lookup(linear_positions, - # tr_dense_linear_positions[tr_linear_positions]) - # self.data = self.data[inds] - - def transpose_searchsorted( - self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": - """ - Deprecated: - - Transpose the tensor into the new order `order`. Uses `np.searchsorted` - for the lookup. - Args: - order: The new order of indices. - Returns: - BlockSparseTensor: The transposed tensor. 
- """ - if len(order) != self.rank: - raise ValueError( - "`len(order)={}` is different form `self.rank={}`".format( - len(order), self.rank)) - charges = self.charges #call only once in case some of the indices are merged indices - dims = [len(c) for c in charges] - - strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) - #find the best partition into left and right charges - left_charges, right_charges, _ = _find_best_partition(charges, self.flows) - #find the index-positions of the elements in the fusion - #of `left_charges` and `right_charges` that have `0` - #total charge (those are the only non-zero elements). - linear_positions = find_dense_positions( - left_charges, 1, right_charges, 1, target_charge=0) - - tr_charges = [charges[n] for n in order] - tr_flows = [self.flows[n] for n in order] - tr_strides = [strides[n] for n in order] - tr_dims = [dims[n] for n in order] - tr_left_charges, tr_right_charges, _ = _find_best_partition( - tr_charges, tr_flows) - #FIXME: this should be done without fully fusing the strides - tr_dense_linear_positions = fuse_charges( - [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], - flows=[1] * len(tr_dims)) - tr_linear_positions = find_dense_positions(tr_left_charges, 1, - tr_right_charges, 1, 0) - - inds = np.searchsorted(linear_positions, - tr_dense_linear_positions[tr_linear_positions]) - self.data = self.data[inds] - def reset_shape(self) -> None: """ Bring the tensor back into its elementary shape. @@ -1162,72 +973,6 @@ def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_1( - self, return_data: Optional[bool] = True) -> Dict: - """ - Obtain the diagonal blocks of symmetric matrix. - BlockSparseTensor has to be a matrix. - For matrices with shape[0] << shape[1], this routine avoids explicit fusion - of column charges. 
- - Args: - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. - Returns: - dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) - - """ - if self.rank != 2: - raise ValueError( - "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" - .format(self.rank)) - - row_indices = self.indices[0].get_elementary_indices() - column_indices = self.indices[1].get_elementary_indices() - - return find_diagonal_sparse_blocks_deprecated_1( - data=self.data, - row_charges=[i.charges for i in row_indices], - column_charges=[i.charges for i in column_indices], - row_flows=[i.flow for i in row_indices], - column_flows=[i.flow for i in column_indices], - return_data=return_data) - - def get_diagonal_blocks_deprecated_0( - self, return_data: Optional[bool] = True) -> Dict: - """ - Deprecated - - Obtain the diagonal blocks of symmetric matrix. - BlockSparseTensor has to be a matrix. - Args: - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. 
- Returns: - dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) - - """ - if self.rank != 2: - raise ValueError( - "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" - .format(self.rank)) - - return find_diagonal_sparse_blocks_deprecated_0( - data=self.data, - charges=self.charges, - flows=self.flows, - return_data=return_data) - def reshape(tensor: BlockSparseTensor, shape: Union[Iterable[Index], Iterable[int]]) -> BlockSparseTensor: @@ -1252,14 +997,14 @@ def reshape(tensor: BlockSparseTensor, i2 = Index(charges=q2,flow=-1) i3 = Index(charges=q3,flow=1) A=BlockSparseTensor.randn(indices=[i1,i2,i3]) - print(A.shape) #prints (6,6,6) + print(nA.shape) #prints (6,6,6) reshape(A, (2,3,6,6)) #raises ValueError ``` raises a `ValueError` since (2,3,6,6) is incompatible with the elementary shape (6,6,6) of the tensor. Args: - tensor: A symmetric tensor. + tensopr: A symmetric tensor. shape: The new shape. Can either be a list of `Index` or a list of `int`. Returns: @@ -1269,3 +1014,60 @@ def reshape(tensor: BlockSparseTensor, data=tensor.data.copy(), indices=[i.copy() for i in tensor.indices]) result.reshape(shape) return result + + +def transpose( + tensor: BlockSparseTensor, + order: Union[List[int], np.ndarray], + transposed_linear_positions: Optional[np.ndarray] = None, + return_new_positions: Optional[bool] = False) -> "BlockSparseTensor": + """ + Transpose `tensor` into the new order `order`. This routine currently shuffles + data. + Args: + tensor: The tensor to be transposed. + order: The new order of indices. + transposed_linear_positions: An np.ndarray of int for reshuffling the data, + typically the output of a prior call to `transpose`. Passing `transposed_linear_positions` + can greatly speed up the transposition. + Returns: + BlockSparseTensor: The transposed tensor. 
+ """ + if (transposed_linear_positions is + not None) and (len(transposed_linear_positions) != len(tensor.data)): + raise ValueError("len(transposed_linear_positions) != len(tensor.data).") + result = BlockSparseTensor( + data=tensor.data.copy(), indices=[i.copy() for i in tensor.indices]) + inds = result.transpose(order, transposed_linear_positions) + if return_new_positions: + return result, inds + return result + + +def tensordot(tensor1: BlockSparseTensor, tensor2: BlockSparseTensor, + axes: Sequence[Sequence[int]]): + axes1 = axes[0] + axes2 = axes[1] + if not np.all(np.unique(axes1) == np.sort(axes1)): + raise ValueError( + "Some values in axes[0] = {} appear more than once!".format(axes1)) + if not np.all(np.unique(axes2) == np.sort(axes2)): + raise ValueError( + "Some values in axes[1] = {} appear more than once!".format(axes2n)) + + if max(axes1) >= len(tensor1.shape): + raise ValueError( + "rank of `tensor1` is smaller than `max(axes1) = {}.`".format( + max(axes1))) + if max(axes2) >= len(tensor2.shape): + raise ValueError( + "rank of `tensor2` is smaller than `max(axes2) = {}`".format( + max(axes1))) + free_axes_1 = sorted(set(np.arange(len(tensor1.shape))) - set(axes1)) + free_axes_2 = sorted(set(np.arange(len(tensor2.shape))) - set(axes2)) + new_order_1 = free_axes_1 + list(axes1) + new_order_2 = list(axes2) + free_axes_2 + #FIXME: currently this shuffles data, this is unnecessary! 
+ tmp1 = transpose(tensor1, new_order_1) + tmp2 = transpose(tensor2, new_order_2) + pass From a86020804cc6e5583ca8f273cca9a859fe77817e Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 8 Jan 2020 12:02:53 -0500 Subject: [PATCH 137/212] removed __rmul__ --- tensornetwork/block_tensor/charge.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 1faaea695..2d50f76fa 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -176,7 +176,7 @@ def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": def num_symmetries(self): """ The number of individual symmetries stored in this object. - """n + """ return len(self.shifts) def __len__(self) -> int: @@ -219,14 +219,14 @@ def __rmul__(self, number: Union[bool, int]) -> "BaseCharge": Returns: BaseCharge: The result of `number * self`. """ - + raise NotImplementedError("`__rmul__` is not implemented for `BaseCharge`") @property def dtype(self): return self.charges.dtype - def unique(self,n + def unique(self, return_index=False, return_inverse=False, return_counts=False @@ -294,7 +294,7 @@ def __contains__(self, target: Union[int, Iterable, "BaseCharge"]) -> bool: Returns: np.ndarray: An array of `bool` type holding the result of the comparison. 
""" - + if isinstance(target, type(self)): if not np.all(self.shifts == target.shifts): raise ValueError( @@ -481,12 +481,14 @@ def __mul__(self, number: Union[bool, int]) -> "U1Charge": #Note: the returned U1Charge shares its data with self return U1Charge(charges=[self.charges], shifts=self.shifts) - def __rmul__(self, number: Union[bool, int]) -> "U1Charge": - if number not in (True, False, 0, 1, -1): - raise ValueError( - "can only multiply by `True`, `False`, `1` or `0`, found {}".format( - number)) - return self.__mul__(number) + # def __rmul__(self, number: Union[bool, int]) -> "U1Charge": + # raise + # print(number not in (True, False, 0, 1, -1)) + # if number not in (True, False, 0, 1, -1): + # raise ValueError( + # "can only multiply by `True`, `False`, `1` or `0`, found {}".format( + # number)) + # return self.__mul__(number) @property def dual_charges(self) -> np.ndarray: @@ -799,7 +801,7 @@ def __mul__(self, number: Union[bool, int]) -> "Charge": raise ValueError( "can only multiply by `True`, `False`, `1` or `0`, found {}".format( number)) - return ChargeCollection(charges=[c * number for c in self.charges]) + return ChargeCollection(charges=[number * c for c in self.charges]) def __rmul__(self, number: Union[bool, int]) -> "Charge": if number not in (True, False, 0, 1, -1): @@ -957,7 +959,7 @@ def fuse_charges( len(charges), len(flows))) fused_charges = charges[0] * flows[0] for n in range(1, len(charges)): - fused_charges = fused_charges + flows[n] * charges[n] + fused_charges = fused_charges + charges[n] * flows[n] return fused_charges From 180895cc3de2b0159a66bb2791132e2c1e1b83a6 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 8 Jan 2020 12:20:35 -0500 Subject: [PATCH 138/212] removed some bugs inb transpose --- .../block_tensor/block_tensor_new.py | 143 +++++++++++++++--- 1 file changed, 121 insertions(+), 22 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py index 
37cbf5ac8..c62b5a3e2 100644 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -153,12 +153,12 @@ def compute_fused_charge_degeneracies( np.ndarray of integers: The degeneracies of each unqiue fused charge. """ if len(charges) == 1: - return (flows[0] * charges[0]).unique(return_counts=True) + return (charges[0] * flows[0]).unique(return_counts=True) # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". accumulated_charges, accumulated_degeneracies = ( - flows[0] * charges[0]).unique(return_counts=True) + charges[0] * flows[0]).unique(return_counts=True) for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor @@ -749,7 +749,7 @@ def transpose(self, """ Transpose the tensor into the new order `order`. This routine currently shuffles data. - Args: + Args: order: The new order of indices. transposed_linear_positions: An np.ndarray of int for reshuffling the data, typically the output of a prior call to `transpose`. Passing `transposed_linear_positions` @@ -797,23 +797,24 @@ def transpose(self, flat_flows = [i.flow for i in flat_elementary_indices] flat_dims = [len(c) for c in flat_charges] flat_strides = np.flip(np.append(1, np.cumprod(np.flip(flat_dims[1::])))) - if not hasattr(self, 'dense_to_sparse_table'): - #find the best partition into left and right charges - left_charges, right_charges, _ = _find_best_partition( - flat_charges, flat_flows) - #find the index-positions of the elements in the fusion - #of `left_charges` and `right_charges` that have `0` - #total charge (those are the only non-zero elements). 
- linear_positions = find_dense_positions( - left_charges, - 1, - right_charges, - 1, - target_charge=flat_charges[0].zero_charge) - - self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( - len(self.data)), (linear_positions, - np.zeros(len(self.data), dtype=np.int64)))) + #find the best partition into left and right charges + left_charges, right_charges, _ = _find_best_partition( + flat_charges, flat_flows) + #find the index-positions of the elements in the fusion + #of `left_charges` and `right_charges` that have `0` + #total charge (those are the only non-zero elements). + linear_positions = find_dense_positions( + left_charges, + 1, + right_charges, + 1, + target_charge=flat_charges[0].zero_charge) + + dense_to_sparse_table = sp.sparse.csr_matrix((np.arange(len(self.data)), + (linear_positions, + np.zeros( + len(self.data), + dtype=np.int64)))) flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] @@ -830,13 +831,111 @@ def transpose(self, tr_linear_positions = find_dense_positions( tr_left_charges, 1, tr_right_charges, 1, tr_left_charges.zero_charge) - inds = np.squeeze(self.dense_to_sparse_table[ - tr_dense_linear_positions[tr_linear_positions], 0].toarray()) + inds = np.squeeze( + dense_to_sparse_table[tr_dense_linear_positions[tr_linear_positions], + 0].toarray()) else: inds = transposed_linear_positions + self.indices = [self.indices[n] for n in order] self.data = self.data[inds] return inds + # def transpose(self, + # order: Union[List[int], np.ndarray], + # transposed_linear_positions: Optional[np.ndarray] = None + # ) -> "BlockSparseTensor": + # """ + # Transpose the tensor into the new order `order`. This routine currently shuffles + # data. + # Args: + # order: The new order of indices. + # transposed_linear_positions: An np.ndarray of int for reshuffling the data, + # typically the output of a prior call to `transpose`. 
Passing `transposed_linear_positions` + # can greatly speed up the transposition. + # Returns: + # BlockSparseTensor: The transposed tensor. + # """ + # #FIXME: this implementation uses scipy.sparse.csr_matrix to generate the + # #lookup-table from dense to sparse indices. According to some quick + # #testing, the final lookup is currently the bottleneck. + # #FIXME: transpose currently shuffles data. This can in principle be postponed + # #until `tensordot` or `find_diagonal_sparsenn_blocks`, at the cost of + # #maintaining two lookup tables for sparse-to-dense positions and dense-to-sparse + # #positions + # if (transposed_linear_positions is + # not None) and (len(transposed_linear_positions) != len(tensor.data)): + # raise ValueError("len(transposed_linear_positions) != len(tensor.data).") + + # if len(order) != self.rank: + # raise ValueError( + # "`len(order)={}` is different form `self.rank={}`".format( + # len(order), self.rank)) + # #transpose is the only function using self.dense_to_sparse_table + # #so we can initialize it here. This will change if we are implementing + # #lazy shuffling of data. In this case, `find_diagonal_sparse_blocks` + # #also needs this. 
+ + # #we use elementary indices here because it is + # #more efficient to get the fused charges using + # #the best partition + # if transposed_linear_positions is None: + # elementary_indices = {} + # flat_elementary_indices = [] + + # for n in range(self.rank): + # elementary_indices[n] = self.indices[n].get_elementary_indices() + # flat_elementary_indices.extend(elementary_indices[n]) + # flat_index_list = np.arange(len(flat_elementary_indices)) + # cum_num_legs = np.append( + # 0, np.cumsum([len(elementary_indices[n]) for n in range(self.rank)])) + # flat_order = np.concatenate( + # [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) + + # flat_charges = [i.charges for i in flat_elementary_indices] + # flat_flows = [i.flow for i in flat_elementary_indices] + # flat_dims = [len(c) for c in flat_charges] + # flat_strides = np.flip(np.append(1, np.cumprod(np.flip(flat_dims[1::])))) + # if not hasattr(self, 'dense_to_sparse_table'): + # #find the best partition into left and right charges + # left_charges, right_charges, _ = _find_best_partition( + # flat_charges, flat_flows) + # #find the index-positions of the elements in the fusion + # #of `left_charges` and `right_charges` that have `0` + # #total charge (those are the only non-zero elements). 
+ # linear_positions = find_dense_positions( + # left_charges, + # 1, + # right_charges, + # 1, + # target_charge=flat_charges[0].zero_charge) + + # self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( + # len(self.data)), (linear_positions, + # np.zeros(len(self.data), dtype=np.int64)))) + + # flat_tr_charges = [flat_charges[n] for n in flat_order] + # flat_tr_flows = [flat_flows[n] for n in flat_order] + # flat_tr_strides = [flat_strides[n] for n in flat_order] + # flat_tr_dims = [flat_dims[n] for n in flat_order] + + # tr_left_charges, tr_right_charges, _ = _find_best_partition( + # flat_tr_charges, flat_tr_flows) + # #FIXME: this should be done without fully fusing the strides + # tr_dense_linear_positions = fuse_ndarrays([ + # np.arange(flat_tr_dims[n]) * flat_tr_strides[n] + # for n in range(len(flat_tr_dims)) + # ]) + # tr_linear_positions = find_dense_positions( + # tr_left_charges, 1, tr_right_charges, 1, tr_left_charges.zero_charge) + + # inds = np.squeeze(self.dense_to_sparse_table[ + # tr_dense_linear_positions[tr_linear_positions], 0].toarray()) + # else: + # inds = transposed_linear_positions + # self.indices = [self.indices[n] for n in order] + # self.data = self.data[inds] + # return inds + def reset_shape(self) -> None: """ Bring the tensor back into its elementary shape. 
From 9b30651c4cb70580450774a65c9aa40da1a7248e Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 8 Jan 2020 13:23:57 -0500 Subject: [PATCH 139/212] broken commit --- .../block_tensor/block_tensor_new.py | 44 +++++----- .../block_tensor/block_tensor_new_test.py | 84 +++++++++++++++---- tensornetwork/block_tensor/charge.py | 2 +- tensornetwork/block_tensor/charge_test.py | 4 +- tensornetwork/block_tensor/index_new.py | 1 - 5 files changed, 92 insertions(+), 43 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py index c62b5a3e2..a42576876 100644 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -797,24 +797,23 @@ def transpose(self, flat_flows = [i.flow for i in flat_elementary_indices] flat_dims = [len(c) for c in flat_charges] flat_strides = np.flip(np.append(1, np.cumprod(np.flip(flat_dims[1::])))) - #find the best partition into left and right charges - left_charges, right_charges, _ = _find_best_partition( - flat_charges, flat_flows) - #find the index-positions of the elements in the fusion - #of `left_charges` and `right_charges` that have `0` - #total charge (those are the only non-zero elements). - linear_positions = find_dense_positions( - left_charges, - 1, - right_charges, - 1, - target_charge=flat_charges[0].zero_charge) - - dense_to_sparse_table = sp.sparse.csr_matrix((np.arange(len(self.data)), - (linear_positions, - np.zeros( - len(self.data), - dtype=np.int64)))) + if not hasattr(self, 'dense_to_sparse_table'): + #find the best partition into left and right charges + left_charges, right_charges, _ = _find_best_partition( + flat_charges, flat_flows) + #find the index-positions of the elements in the fusion + #of `left_charges` and `right_charges` that have `0` + #total charge (those are the only non-zero elements). 
+ linear_positions = find_dense_positions( + left_charges, + 1, + right_charges, + 1, + target_charge=flat_charges[0].zero_charge) + + self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( + len(self.data)), (linear_positions, + np.zeros(len(self.data), dtype=np.int64)))) flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] @@ -831,14 +830,15 @@ def transpose(self, tr_linear_positions = find_dense_positions( tr_left_charges, 1, tr_right_charges, 1, tr_left_charges.zero_charge) - inds = np.squeeze( + self.element_order = np.squeeze( dense_to_sparse_table[tr_dense_linear_positions[tr_linear_positions], 0].toarray()) else: - inds = transposed_linear_positions + self.element_order = transposed_linear_positions self.indices = [self.indices[n] for n in order] - self.data = self.data[inds] - return inds + + #self.data = self.data[self.element_order] + return self.element_order # def transpose(self, # order: Union[List[int], np.ndarray], diff --git a/tensornetwork/block_tensor/block_tensor_new_test.py b/tensornetwork/block_tensor/block_tensor_new_test.py index 13cdff13d..0ed921b0d 100644 --- a/tensornetwork/block_tensor/block_tensor_new_test.py +++ b/tensornetwork/block_tensor/block_tensor_new_test.py @@ -2,7 +2,8 @@ import pytest from tensornetwork.block_tensor.charge import U1Charge, ChargeCollection -from tensornetwork.block_tensor.block_tensor_new import find_diagonal_sparse_blocks, compute_num_nonzero, find_sparse_positions +from tensornetwork.block_tensor.index_new import Index +from tensornetwork.block_tensor.block_tensor_new import find_diagonal_sparse_blocks, compute_num_nonzero, find_sparse_positions, find_dense_positions, BlockSparseTensor np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128] @@ -75,9 +76,52 @@ def test_find_sparse_positions_consistency(): assert np.all(nz1 == nz3) +def test_find_dense_positions_consistency(): + B = 5 + D = 20 + rank = 4 + + qs = [[ + 
np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + for _ in range(2) + ] + for _ in range(rank)] + charges1 = [U1Charge(qs[n]) for n in range(rank)] + charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] + charges3 = [ + ChargeCollection([U1Charge(qs[n][m]) + for m in range(2)]) + for n in range(rank) + ] + flows = [1, 1, 1, -1] + data1 = find_dense_positions( + left_charges=charges1[0] * flows[0] + charges1[1] * flows[0], + left_flow=1, + right_charges=charges1[2] * flows[2] + charges1[3] * flows[3], + right_flow=1, + target_charge=charges1[0].zero_charge) + data2 = find_dense_positions( + left_charges=charges2[0] * flows[0] + charges2[1] * flows[1], + left_flow=1, + right_charges=charges2[2] * flows[2] + charges2[3] * flows[3], + right_flow=1, + target_charge=charges2[0].zero_charge) + data3 = find_dense_positions( + left_charges=charges3[0] * flows[0] + charges3[1] * flows[1], + left_flow=1, + right_charges=charges3[2] * flows[2] + charges3[3] * flows[3], + right_flow=1, + target_charge=charges3[0].zero_charge) + + nz = compute_num_nonzero(charges1, flows) + assert nz == len(data1) + assert len(data1) == len(data2) + assert len(data1) == len(data3) + + def test_find_diagonal_sparse_blocks_consistency(): - B = 4 - D = 100 + B = 5 + D = 20 rank = 4 qs = [[ @@ -267,17 +311,23 @@ def test_find_diagonal_sparse_blocks_consistency(): # right_flow=1, # target_charges=common_charges) -# def test_dense_transpose(): -# Ds = [10, 11, 12] #bond dimension -# rank = len(Ds) -# flows = np.asarray([1 for _ in range(rank)]) -# flows[-2::] = -1 -# charges = [np.zeros(Ds[n], dtype=np.int16) for n in range(rank)] -# indices = [ -# Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) -# for n in range(rank) -# ] -# A = BlockSparseTensor.random(indices=indices, dtype=np.float64) -# B = np.transpose(np.reshape(A.data.copy(), Ds), (1, 0, 2)) -# A.transpose((1, 0, 2)) -# np.testing.assert_allclose(A.data, B.flat) + +def test_dense_transpose(): + Ds = 
[10, 11, 12] #bond dimension + rank = len(Ds) + flows = np.asarray([1 for _ in range(rank)]) + flows[-2::] = -1 + charges = [U1Charge(np.zeros(Ds[n], dtype=np.int16)) for n in range(rank)] + indices = [ + Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) + for n in range(rank) + ] + A = BlockSparseTensor.random(indices=indices, dtype=np.float64) + B = np.transpose(np.reshape(A.data.copy(), Ds), (1, 0, 2)) + A.transpose((1, 0, 2)) + np.testing.assert_allclose(A.data, B.flat) + + B = np.transpose(np.reshape(A.data.copy(), [11, 10, 12]), (1, 0, 2)) + A.transpose((1, 0, 2)) + + np.testing.assert_allclose(A.data, B.flat) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 2d50f76fa..690bc7441 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -801,7 +801,7 @@ def __mul__(self, number: Union[bool, int]) -> "Charge": raise ValueError( "can only multiply by `True`, `False`, `1` or `0`, found {}".format( number)) - return ChargeCollection(charges=[number * c for c in self.charges]) + return ChargeCollection(charges=[c * number for c in self.charges]) def __rmul__(self, number: Union[bool, int]) -> "Charge": if number not in (True, False, 0, 1, -1): diff --git a/tensornetwork/block_tensor/charge_test.py b/tensornetwork/block_tensor/charge_test.py index d2590a45f..fe75b2962 100644 --- a/tensornetwork/block_tensor/charge_test.py +++ b/tensornetwork/block_tensor/charge_test.py @@ -185,7 +185,7 @@ def run_test(): q3 = U1Charge([O3, P3, Q3]) target = np.random.randint(-B // 2, B // 2 + 1, 3) - q123 = q1 + (-1) * q2 + q3 + q123 = q1 + q2 * (-1) + q3 nz_1 = np.nonzero(q123.equals(target))[0] i1 = fused_1 == target[0] @@ -223,7 +223,7 @@ def run_test(): q2 = U1Charge([O2, P2, Q2]) target = np.random.randint(-B // 2, B // 2 + 1, 3) - q12 = q1 + (-1) * q2 + q12 = q1 + q2 * (-1) nz_1 = np.nonzero(q12.equals(target))[0] i1 = fused_1 == target[0] diff --git 
a/tensornetwork/block_tensor/index_new.py b/tensornetwork/block_tensor/index_new.py index 18271aeff..39afb061c 100644 --- a/tensornetwork/block_tensor/index_new.py +++ b/tensornetwork/block_tensor/index_new.py @@ -60,7 +60,6 @@ def _copy_helper(self, index: "Index", copied_index: "Index") -> None: """ Helper function for copy """ - print(index.left_child, index.right_child) if index.left_child != None: left_copy = Index( charges=copy.deepcopy(index.left_child.charges), From 4fea3239f86bf971b311cc03dba8bb068de17fc5 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 8 Jan 2020 16:46:49 -0500 Subject: [PATCH 140/212] broken commit --- .../block_tensor/block_tensor_new.py | 253 +++++++++++++----- tensornetwork/block_tensor/index_new.py | 18 +- 2 files changed, 189 insertions(+), 82 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py index a42576876..17b3706e8 100644 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -63,6 +63,23 @@ def fuse_ndarrays(arrays: List[Union[List, np.ndarray]]) -> np.ndarray: return fused_arrays +def fuse_ndarrays(arrays: List[Union[List, np.ndarray]]) -> np.ndarray: + """ + Fuse all `arrays` by simple kronecker addition. + Arrays are fused from "right to left", + Args: + arrays: A list of arrays to be fused. + Returns: + np.ndarray: The result of fusing `charges`. 
+ """ + if len(arrays) == 1: + return arrays[0] + fused_arrays = arrays[0] + for n in range(1, len(arrays)): + fused_arrays = fuse_ndarray_pair(array1=fused_arrays, array2=arrays[n]) + return fused_arrays + + def unfuse(fused_indices: np.ndarray, len_left: int, len_right: int) -> Tuple[np.ndarray, np.ndarray]: """ @@ -116,15 +133,32 @@ def _check_flows(flows: List[int]) -> None: def _find_best_partition(charges: List[Union[BaseCharge, ChargeCollection]], flows: List[int]): + #FIXME: fusing charges with dims (N,M) with M>~N is faster than fusing charges + # with dims (M,N). Thus, it is not always best to fuse at the minimum cut. + #for example, for dims (1000, 4, 1002), its better to fuse at the cut + #(1000, 4008) than at (4000, 1002), even though the difference between the + #dimensions is minimal for the latter case. We should implement some heuristic + #to find these cuts. if len(charges) == 1: raise ValueError( '_expecting `charges` with a length of at least 2, got `len(charges)={}`' .format(len(charges))) dims = np.asarray([len(c) for c in charges]) - min_ind = np.argmin([ + diffs = [ np.abs(np.prod(dims[0:n]) - np.prod(dims[n::])) for n in range(1, len(charges)) - ]) + ] + min_inds = np.nonzero(diffs == np.min(diffs))[0] + if len(min_inds) > 1: + right_dims = [np.prod(len(charges[min_ind + 1::])) for min_ind in min_inds] + min_ind = min_inds[np.argmax(right_dims)] + else: + min_ind = min_inds[0] + # min_ind = np.argmin([ + # np.abs(np.prod(dims[0:n]) - np.prod(dims[n::])) + # for n in range(1, len(charges)) + # ]) + fused_left_charges = fuse_charges(charges[0:min_ind + 1], flows[0:min_ind + 1]) fused_right_charges = fuse_charges(charges[min_ind + 1::], @@ -157,8 +191,9 @@ def compute_fused_charge_degeneracies( # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". 
- accumulated_charges, accumulated_degeneracies = ( - charges[0] * flows[0]).unique(return_counts=True) + accumulated_charges, accumulated_degeneracies = (charges[0] * + flows[0]).unique( + return_counts=True) for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor @@ -507,8 +542,9 @@ def find_sparse_positions( target_charge = target_charges[n] right_indices[( left_charge.get_item(0), target_charge.get_item(0))] = np.nonzero( - tmp_relevant_right_charges == (target_charge + left_charge * ( - (-1) * left_flow)) * right_flow)[0] + tmp_relevant_right_charges == (target_charge + left_charge * + ((-1) * left_flow)) * + right_flow)[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy @@ -608,6 +644,16 @@ class BlockSparseTensor: The tensor data is stored in self.data, a 1d np.ndarray. """ + def copy(self): + new = self.__new__(type(self)) + new._data = self._data.copy() + new.data_permutation = self.data_permutation + if hasattr(self, 'dense_to_sparse_table'): + new.dense_to_sparse_table = self.dense_to_sparse_table.copy() + new.initial_index_order = [i.copy() for i in self.initial_index_order] + new.indices = [i.copy() for i in self.indices] + return new + def __init__(self, data: np.ndarray, indices: List[Index]) -> None: """ Args: @@ -616,6 +662,21 @@ def __init__(self, data: np.ndarray, indices: List[Index]) -> None: and `flows` indices: List of `Index` objecst, one for each leg. """ + for n in range(len(indices)): + if indices[n].name is None: + indices[n].name = 'index_{}'.format(n) + + inital_index_names = [ + indices[n].name if indices[n].name else 'index_{}'.format(n) + for n in range(len(indices)) + ] + unique, cnts = np.unique(inital_index_names, return_counts=True) + if np.any(cnts > 1): + raise ValueError("Index names {} appeared multiple times. 
" + "Please rename indices uniquely.".format( + unique[cnts > 1])) + + self.initial_index_order = [i.copy() for i in indices] self.indices = indices _check_flows(self.flows) num_non_zero_elements = compute_num_nonzero(self.charges, self.flows) @@ -625,7 +686,14 @@ def __init__(self, data: np.ndarray, indices: List[Index]) -> None: "by `charges` is different from" " len(data)={}".format(len(data.flat))) - self.data = np.asarray(data.flat) #do not copy data + self._data = np.asarray(data.flat) #do not copy data + self.data_permutation = None + + @property + def data(self): + if self.data_permutation is not None: + return self._data[self.data_permutation] + return self._data @classmethod def randn(cls, indices: List[Index], @@ -744,15 +812,15 @@ def charges(self): def transpose(self, order: Union[List[int], np.ndarray], - transposed_linear_positions: Optional[np.ndarray] = None + permutation: Optional[np.ndarray] = None ) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order`. This routine currently shuffles data. Args: order: The new order of indices. - transposed_linear_positions: An np.ndarray of int for reshuffling the data, - typically the output of a prior call to `transpose`. Passing `transposed_linear_positions` + permutation: An np.ndarray of int for reshuffling the data, + typically the output of a prior call to `transpose`. Passing `permutation` can greatly speed up the transposition. Returns: BlockSparseTensor: The transposed tensor. @@ -760,43 +828,73 @@ def transpose(self, #FIXME: this implementation uses scipy.sparse.csr_matrix to generate the #lookup-table from dense to sparse indices. According to some quick #testing, the final lookup is currently the bottleneck. - #FIXME: transpose currently shuffles data. 
This can in principle be postponed - #until `tensordot` or `find_diagonal_sparsenn_blocks`, at the cost of - #maintaining two lookup tables for sparse-to-dense positions and dense-to-sparse - #positions - if (transposed_linear_positions is - not None) and (len(transposed_linear_positions) != len(tensor.data)): - raise ValueError("len(transposed_linear_positions) != len(tensor.data).") + + if (permutation is not None) and (permutation is not 'trivial') and ( + len(permutation) != len(self.data)): + raise ValueError("len(permutation) != len(tensor.data).") if len(order) != self.rank: raise ValueError( "`len(order)={}` is different form `self.rank={}`".format( len(order), self.rank)) - #transpose is the only function using self.dense_to_sparse_table - #so we can initialize it here. This will change if we are implementing - #lazy shuffling of data. In this case, `find_diagonal_sparse_blocks` - #also needs this. + + #check for trivial permutation + if np.all(order == np.arange(len(order))): + #nothing to do + if self.data_permutation is None: + return 'trivial' + return self.data_permutation + + new_index_order = [self.indices[n] for n in order] + if np.all([ + new_index_order[n].name == self.initial_index_order[n].name + for n in range(len(self.indices)) + ]): + #`order' permutes into the original ordering + self.data_permutation = None + self.indices = [self.indices[n] for n in order] + return 'trivial' #we use elementary indices here because it is #more efficient to get the fused charges using #the best partition - if transposed_linear_positions is None: - elementary_indices = {} - flat_elementary_indices = [] - - for n in range(self.rank): - elementary_indices[n] = self.indices[n].get_elementary_indices() - flat_elementary_indices.extend(elementary_indices[n]) - flat_index_list = np.arange(len(flat_elementary_indices)) - cum_num_legs = np.append( - 0, np.cumsum([len(elementary_indices[n]) for n in range(self.rank)])) - flat_order = np.concatenate( - 
[flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) - - flat_charges = [i.charges for i in flat_elementary_indices] - flat_flows = [i.flow for i in flat_elementary_indices] - flat_dims = [len(c) for c in flat_charges] - flat_strides = np.flip(np.append(1, np.cumprod(np.flip(flat_dims[1::])))) + if permutation is None: + + def get_flattened_data(indices): + elementary_indices = {} + flat_elementary_indices = [] + for n in range(len(indices)): + elementary_indices[n] = indices[n].get_elementary_indices() + flat_elementary_indices.extend(elementary_indices[n]) + flat_index_list = np.arange(len(flat_elementary_indices)) + cum_num_legs = np.append( + 0, + np.cumsum([len(elementary_indices[n]) for n in range(len(indices)) + ])) + + flat_charges = [i.charges for i in flat_elementary_indices] + flat_flows = [i.flow for i in flat_elementary_indices] + flat_dims = [len(c) for c in flat_charges] + flat_strides = np.flip( + np.append(1, np.cumprod(np.flip(flat_dims[1::])))) + return flat_charges, flat_flows, flat_dims, flat_strides, flat_index_list, cum_num_legs + + #find the absolute order, i.e. the requested order with respect to the initial + #ordering of the indices + absolute_order = [] + for o in order: + for n in range(len(self.initial_index_order)): + if self.initial_index_order[n].name == self.indices[o].name: + absolute_order.append(n) + + flat_charges, flat_flows, flat_dims, flat_strides, flat_index_list, cum_num_legs = get_flattened_data( + self.initial_index_order) + + flat_order = np.concatenate([ + flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] + for n in absolute_order + ]) + if not hasattr(self, 'dense_to_sparse_table'): #find the best partition into left and right charges left_charges, right_charges, _ = _find_best_partition( @@ -804,22 +902,24 @@ def transpose(self, #find the index-positions of the elements in the fusion #of `left_charges` and `right_charges` that have `0` #total charge (those are the only non-zero elements). 
+ t1 = time.time() linear_positions = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=flat_charges[0].zero_charge) - - self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( - len(self.data)), (linear_positions, - np.zeros(len(self.data), dtype=np.int64)))) + print(len(left_charges), len(right_charges)) + print('first finding dense positions {}s'.format(time.time() - t1)) + data = np.arange(len(self.data)) + zeros = np.zeros(len(self.data), dtype=np.int64) + self.dense_to_sparse_table = sp.sparse.csc_matrix( + (data, (linear_positions, zeros))) flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] flat_tr_strides = [flat_strides[n] for n in flat_order] flat_tr_dims = [flat_dims[n] for n in flat_order] - tr_left_charges, tr_right_charges, _ = _find_best_partition( flat_tr_charges, flat_tr_flows) #FIXME: this should be done without fully fusing the strides @@ -827,30 +927,38 @@ def transpose(self, np.arange(flat_tr_dims[n]) * flat_tr_strides[n] for n in range(len(flat_tr_dims)) ]) - tr_linear_positions = find_dense_positions( - tr_left_charges, 1, tr_right_charges, 1, tr_left_charges.zero_charge) - - self.element_order = np.squeeze( - dense_to_sparse_table[tr_dense_linear_positions[tr_linear_positions], - 0].toarray()) + t1 = time.time() + print(len(tr_left_charges), len(tr_right_charges)) + tr_linear_positions = find_dense_positions(tr_left_charges, 1, + tr_right_charges, 1, + tr_left_charges.zero_charge) + print(len(tr_left_charges), len(tr_right_charges)) + print('finding dense positions {}s'.format(time.time() - t1)) + t1 = time.time() + self.data_permutation = np.squeeze(self.dense_to_sparse_table[ + tr_dense_linear_positions[tr_linear_positions], 0].toarray()) + print('lookup took {}s'.format(time.time() - t1)) + self.indices = [self.indices[n] for n in order] + return self.data_permutation + + elif permutation is 'trivial': + self.data_permutation = None + self.indices = 
[self.indices[n] for n in order] + return permutation else: - self.element_order = transposed_linear_positions - self.indices = [self.indices[n] for n in order] - - #self.data = self.data[self.element_order] - return self.element_order + self.data_permutation = permutation # def transpose(self, # order: Union[List[int], np.ndarray], - # transposed_linear_positions: Optional[np.ndarray] = None + # permutation: Optional[np.ndarray] = None # ) -> "BlockSparseTensor": # """ # Transpose the tensor into the new order `order`. This routine currently shuffles # data. # Args: # order: The new order of indices. - # transposed_linear_positions: An np.ndarray of int for reshuffling the data, - # typically the output of a prior call to `transpose`. Passing `transposed_linear_positions` + # permutation: An np.ndarray of int for reshuffling the data, + # typically the output of a prior call to `transpose`. Passing `permutation` # can greatly speed up the transposition. # Returns: # BlockSparseTensor: The transposed tensor. 
@@ -862,9 +970,9 @@ def transpose(self, # #until `tensordot` or `find_diagonal_sparsenn_blocks`, at the cost of # #maintaining two lookup tables for sparse-to-dense positions and dense-to-sparse # #positions - # if (transposed_linear_positions is - # not None) and (len(transposed_linear_positions) != len(tensor.data)): - # raise ValueError("len(transposed_linear_positions) != len(tensor.data).") + # if (permutation is + # not None) and (len(permutation) != len(tensor.data)): + # raise ValueError("len(permutation) != len(tensor.data).") # if len(order) != self.rank: # raise ValueError( @@ -878,7 +986,7 @@ def transpose(self, # #we use elementary indices here because it is # #more efficient to get the fused charges using # #the best partition - # if transposed_linear_positions is None: + # if permutation is None: # elementary_indices = {} # flat_elementary_indices = [] @@ -931,7 +1039,7 @@ def transpose(self, # inds = np.squeeze(self.dense_to_sparse_table[ # tr_dense_linear_positions[tr_linear_positions], 0].toarray()) # else: - # inds = transposed_linear_positions + # inds = permutation # self.indices = [self.indices[n] for n in order] # self.data = self.data[inds] # return inds @@ -1115,29 +1223,28 @@ def reshape(tensor: BlockSparseTensor, return result -def transpose( - tensor: BlockSparseTensor, - order: Union[List[int], np.ndarray], - transposed_linear_positions: Optional[np.ndarray] = None, - return_new_positions: Optional[bool] = False) -> "BlockSparseTensor": +def transpose(tensor: BlockSparseTensor, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_new_positions: Optional[bool] = False + ) -> "BlockSparseTensor": """ Transpose `tensor` into the new order `order`. This routine currently shuffles data. Args: tensor: The tensor to be transposed. order: The new order of indices. - transposed_linear_positions: An np.ndarray of int for reshuffling the data, - typically the output of a prior call to `transpose`. 
Passing `transposed_linear_positions` + permutation: An np.ndarray of int for reshuffling the data, + typically the output of a prior call to `transpose`. Passing `permutation` can greatly speed up the transposition. Returns: BlockSparseTensor: The transposed tensor. """ - if (transposed_linear_positions is - not None) and (len(transposed_linear_positions) != len(tensor.data)): - raise ValueError("len(transposed_linear_positions) != len(tensor.data).") + if (permutation is not None) and (len(permutation) != len(tensor.data)): + raise ValueError("len(permutation) != len(tensor.data).") result = BlockSparseTensor( data=tensor.data.copy(), indices=[i.copy() for i in tensor.indices]) - inds = result.transpose(order, transposed_linear_positions) + inds = result.transpose(order, permutation) if return_new_positions: return result, inds return result diff --git a/tensornetwork/block_tensor/index_new.py b/tensornetwork/block_tensor/index_new.py index 39afb061c..cd64e7493 100644 --- a/tensornetwork/block_tensor/index_new.py +++ b/tensornetwork/block_tensor/index_new.py @@ -31,7 +31,7 @@ class Index: def __init__(self, charges: Union[ChargeCollection, BaseCharge], flow: int, - name: Optional[Text] = "index", + name: Optional[Text] = None, left_child: Optional["Index"] = None, right_child: Optional["Index"] = None): if isinstance(charges, BaseCharge): @@ -43,7 +43,7 @@ def __init__(self, self.flow = flow self.left_child = left_child self.right_child = right_child - self._name = name + self.name = name def __repr__(self): return str(self.dimension) @@ -123,13 +123,13 @@ def charges(self): return self._charges return self.left_child.charges * self.left_child.flow + self.right_child.charges * self.right_child.flow - @property - def name(self): - if self._name: - return self._name - if self.is_leave: - return self.name - return self.left_child.name + ' & ' + self.right_child.name + # @property + # def name(self): + # if self._name: + # return self._name + # if self.is_leave: + # 
return self.name + # return self.left_child.name + ' & ' + self.right_child.name def fuse_index_pair(left_index: Index, From 4a47dc6a7a2c3c55002c5897af5845b4327ff9ff Mon Sep 17 00:00:00 2001 From: mganahl Date: Thu, 9 Jan 2020 09:52:11 -0500 Subject: [PATCH 141/212] remove csr matrix, use search sorted --- .../block_tensor/block_tensor_new.py | 105 +++++++++--------- 1 file changed, 55 insertions(+), 50 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py index 17b3706e8..8097d0f84 100644 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -28,6 +28,16 @@ Tensor = Any +def find_values_in_fused(indices, left, right): + # inds = np.asarray(indices) + # left = np.asarray(left) + # right = np.asarray(right) + # right_inds = np.mod(inds, len(right)) + # left_inds = np.floor_divide(inds - right_inds, len(right)) + left_inds, right_inds = np.divmod(indices, len(right)) + return left[left_inds] + right[right_inds] + + def fuse_ndarray_pair(array1: Union[List, np.ndarray], array2: Union[List, np.ndarray]) -> np.ndarray: """ @@ -80,7 +90,7 @@ def fuse_ndarrays(arrays: List[Union[List, np.ndarray]]) -> np.ndarray: return fused_arrays -def unfuse(fused_indices: np.ndarray, len_left: int, +def unfuse(fused_indices: np.ndarray, len_right: int) -> Tuple[np.ndarray, np.ndarray]: """ Given an np.ndarray `fused_indices` of integers denoting @@ -115,14 +125,11 @@ def unfuse(fused_indices: np.ndarray, len_left: int, Args: fused_indices: A 1d np.ndarray of integers. - len_left: The length of the left np.ndarray. len_right: The length of the right np.ndarray. 
Returns: (np.ndarry, np.ndarray) """ - right = np.mod(fused_indices, len_right) - left = np.floor_divide(fused_indices - right, len_right) - return left, right + return np.divmod(fused_indices, len_right) def _check_flows(flows: List[int]) -> None: @@ -191,9 +198,8 @@ def compute_fused_charge_degeneracies( # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". - accumulated_charges, accumulated_degeneracies = (charges[0] * - flows[0]).unique( - return_counts=True) + accumulated_charges, accumulated_degeneracies = ( + charges[0] * flows[0]).unique(return_counts=True) for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor @@ -506,9 +512,7 @@ def find_sparse_positions( #target_charges left_inds, right_inds = [], [] for target_charge in target_charges: - li, ri = unfuse( - np.nonzero(fused == target_charge)[0], len(unique_left), - len(unique_right)) + li, ri = unfuse(np.nonzero(fused == target_charge)[0], len(unique_right)) left_inds.append(li) right_inds.append(ri) @@ -542,9 +546,8 @@ def find_sparse_positions( target_charge = target_charges[n] right_indices[( left_charge.get_item(0), target_charge.get_item(0))] = np.nonzero( - tmp_relevant_right_charges == (target_charge + left_charge * - ((-1) * left_flow)) * - right_flow)[0] + tmp_relevant_right_charges == (target_charge + left_charge * ( + (-1) * left_flow)) * right_flow)[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy @@ -648,8 +651,8 @@ def copy(self): new = self.__new__(type(self)) new._data = self._data.copy() new.data_permutation = self.data_permutation - if hasattr(self, 'dense_to_sparse_table'): - new.dense_to_sparse_table = self.dense_to_sparse_table.copy() + if hasattr(self, 'linear_positions'): + new.linear_positions = self.linear_positions new.initial_index_order = [i.copy() for i in self.initial_index_order] new.indices = [i.copy() for i in self.indices] 
return new @@ -810,10 +813,10 @@ def flows(self): def charges(self): return [i.charges for i in self.indices] - def transpose(self, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None - ) -> "BlockSparseTensor": + def transpose( + self, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order`. This routine currently shuffles data. @@ -869,8 +872,8 @@ def get_flattened_data(indices): flat_index_list = np.arange(len(flat_elementary_indices)) cum_num_legs = np.append( 0, - np.cumsum([len(elementary_indices[n]) for n in range(len(indices)) - ])) + np.cumsum( + [len(elementary_indices[n]) for n in range(len(indices))])) flat_charges = [i.charges for i in flat_elementary_indices] flat_flows = [i.flow for i in flat_elementary_indices] @@ -895,49 +898,51 @@ def get_flattened_data(indices): for n in absolute_order ]) - if not hasattr(self, 'dense_to_sparse_table'): + if not hasattr(self, 'linear_positions'): #find the best partition into left and right charges left_charges, right_charges, _ = _find_best_partition( flat_charges, flat_flows) #find the index-positions of the elements in the fusion #of `left_charges` and `right_charges` that have `0` #total charge (those are the only non-zero elements). 
- t1 = time.time() - linear_positions = find_dense_positions( + self.linear_positions = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=flat_charges[0].zero_charge) - print(len(left_charges), len(right_charges)) - print('first finding dense positions {}s'.format(time.time() - t1)) - data = np.arange(len(self.data)) - zeros = np.zeros(len(self.data), dtype=np.int64) - self.dense_to_sparse_table = sp.sparse.csc_matrix( - (data, (linear_positions, zeros))) + #print(len(left_charges), len(right_charges)) + #print('first finding dense positions {}s'.format(time.time() - t1)) + #data = np.arange(len(self.data)) + #zeros = np.zeros(len(self.data), dtype=np.int64) + #self.dense_to_sparse_table = sp.sparse.csr_matrix( + # (data, (linear_positions, zeros))) flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] flat_tr_strides = [flat_strides[n] for n in flat_order] flat_tr_dims = [flat_dims[n] for n in flat_order] - tr_left_charges, tr_right_charges, _ = _find_best_partition( + tr_left_charges, tr_right_charges, partition = _find_best_partition( flat_tr_charges, flat_tr_flows) #FIXME: this should be done without fully fusing the strides - tr_dense_linear_positions = fuse_ndarrays([ + # tr_dense_linear_positions = fuse_ndarrays([ + # np.arange(flat_tr_dims[n]) * flat_tr_strides[n] + # for n in range(len(flat_tr_dims)) + # ]) + tr_linear_positions = find_dense_positions( + tr_left_charges, 1, tr_right_charges, 1, tr_left_charges.zero_charge) + stride_arrays = [ np.arange(flat_tr_dims[n]) * flat_tr_strides[n] for n in range(len(flat_tr_dims)) - ]) - t1 = time.time() - print(len(tr_left_charges), len(tr_right_charges)) - tr_linear_positions = find_dense_positions(tr_left_charges, 1, - tr_right_charges, 1, - tr_left_charges.zero_charge) - print(len(tr_left_charges), len(tr_right_charges)) - print('finding dense positions {}s'.format(time.time() - t1)) - t1 = time.time() - self.data_permutation = 
np.squeeze(self.dense_to_sparse_table[ - tr_dense_linear_positions[tr_linear_positions], 0].toarray()) - print('lookup took {}s'.format(time.time() - t1)) + ] + dense_permutation = find_values_in_fused( + tr_linear_positions, fuse_ndarrays(stride_arrays[0:partition]), + fuse_ndarrays(stride_arrays[partition::])) + # data_permutation = np.squeeze( + # self.dense_to_sparse_table[dense_permutation, 0].toarray()) + self.data_permutation = np.searchsorted(self.linear_positions, + dense_permutation) + self.indices = [self.indices[n] for n in order] return self.data_permutation @@ -1223,11 +1228,11 @@ def reshape(tensor: BlockSparseTensor, return result -def transpose(tensor: BlockSparseTensor, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_new_positions: Optional[bool] = False - ) -> "BlockSparseTensor": +def transpose( + tensor: BlockSparseTensor, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_new_positions: Optional[bool] = False) -> "BlockSparseTensor": """ Transpose `tensor` into the new order `order`. This routine currently shuffles data. 
From e0bfba95fe518237489a0b3e14a2f23912736daa Mon Sep 17 00:00:00 2001 From: mganahl Date: Thu, 9 Jan 2020 09:54:22 -0500 Subject: [PATCH 142/212] remove unfuse, use divmod --- .../block_tensor/block_tensor_new.py | 81 +++++-------------- 1 file changed, 21 insertions(+), 60 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py index 8097d0f84..b58c610a0 100644 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -90,48 +90,6 @@ def fuse_ndarrays(arrays: List[Union[List, np.ndarray]]) -> np.ndarray: return fused_arrays -def unfuse(fused_indices: np.ndarray, - len_right: int) -> Tuple[np.ndarray, np.ndarray]: - """ - Given an np.ndarray `fused_indices` of integers denoting - index-positions of elements within a 1d array, `unfuse` - obtains the index-positions of the elements in the left and - right np.ndarrays `left`, `right` which, upon fusion, - are placed at the index-positions given by - `fused_indices` in the fused np.ndarray. - An example will help to illuminate this: - Given np.ndarrays `left`, `right` and the result - of their fusion (`fused`): - - ``` - left = [0,1,0,2] - right = [-1,3,-2] - fused = fuse_charges([left, right], flows=[1,1]) - print(fused) #[-1 3 -2 0 4 -1 -1 3 -2 1 5 0] - ``` - - we want to find which elements in `left` and `right` - fuse to a value of 0. In the above case, there are two - 0 in `fused`: one is obtained from fusing `left[1]` and - `right[0]`, the second one from fusing `left[3]` and `right[2]` - `unfuse` returns the index-positions of these values within - `left` and `right`, that is - - ``` - left_index_values, right_index_values = unfuse(np.nonzero(fused==0)[0], len(left), len(right)) - print(left_index_values) # [1,3] - print(right_index_values) # [0,2] - ``` - - Args: - fused_indices: A 1d np.ndarray of integers. - len_right: The length of the right np.ndarray. 
- Returns: - (np.ndarry, np.ndarray) - """ - return np.divmod(fused_indices, len_right) - - def _check_flows(flows: List[int]) -> None: if (set(flows) != {1}) and (set(flows) != {-1}) and (set(flows) != {-1, 1}): raise ValueError( @@ -198,8 +156,9 @@ def compute_fused_charge_degeneracies( # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". - accumulated_charges, accumulated_degeneracies = ( - charges[0] * flows[0]).unique(return_counts=True) + accumulated_charges, accumulated_degeneracies = (charges[0] * + flows[0]).unique( + return_counts=True) for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor @@ -512,7 +471,7 @@ def find_sparse_positions( #target_charges left_inds, right_inds = [], [] for target_charge in target_charges: - li, ri = unfuse(np.nonzero(fused == target_charge)[0], len(unique_right)) + li, ri = np.divmod(np.nonzero(fused == target_charge)[0], len(unique_right)) left_inds.append(li) right_inds.append(ri) @@ -546,8 +505,9 @@ def find_sparse_positions( target_charge = target_charges[n] right_indices[( left_charge.get_item(0), target_charge.get_item(0))] = np.nonzero( - tmp_relevant_right_charges == (target_charge + left_charge * ( - (-1) * left_flow)) * right_flow)[0] + tmp_relevant_right_charges == (target_charge + left_charge * + ((-1) * left_flow)) * + right_flow)[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy @@ -813,10 +773,10 @@ def flows(self): def charges(self): return [i.charges for i in self.indices] - def transpose( - self, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None) -> "BlockSparseTensor": + def transpose(self, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None + ) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order`. This routine currently shuffles data. 
@@ -872,8 +832,8 @@ def get_flattened_data(indices): flat_index_list = np.arange(len(flat_elementary_indices)) cum_num_legs = np.append( 0, - np.cumsum( - [len(elementary_indices[n]) for n in range(len(indices))])) + np.cumsum([len(elementary_indices[n]) for n in range(len(indices)) + ])) flat_charges = [i.charges for i in flat_elementary_indices] flat_flows = [i.flow for i in flat_elementary_indices] @@ -929,8 +889,9 @@ def get_flattened_data(indices): # np.arange(flat_tr_dims[n]) * flat_tr_strides[n] # for n in range(len(flat_tr_dims)) # ]) - tr_linear_positions = find_dense_positions( - tr_left_charges, 1, tr_right_charges, 1, tr_left_charges.zero_charge) + tr_linear_positions = find_dense_positions(tr_left_charges, 1, + tr_right_charges, 1, + tr_left_charges.zero_charge) stride_arrays = [ np.arange(flat_tr_dims[n]) * flat_tr_strides[n] for n in range(len(flat_tr_dims)) @@ -1228,11 +1189,11 @@ def reshape(tensor: BlockSparseTensor, return result -def transpose( - tensor: BlockSparseTensor, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_new_positions: Optional[bool] = False) -> "BlockSparseTensor": +def transpose(tensor: BlockSparseTensor, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_new_positions: Optional[bool] = False + ) -> "BlockSparseTensor": """ Transpose `tensor` into the new order `order`. This routine currently shuffles data. 
From 955b9003a48d1f2357fa0582c52b0e412979cdb5 Mon Sep 17 00:00:00 2001 From: mganahl Date: Thu, 9 Jan 2020 16:49:50 -0500 Subject: [PATCH 143/212] broken commit, working on tensordot --- .../block_tensor/block_tensor_new.py | 436 ++++++++---------- 1 file changed, 190 insertions(+), 246 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py index b58c610a0..bdda52431 100644 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -338,7 +338,7 @@ def find_diagonal_sparse_blocks(data: np.ndarray, # and `stop_positions[masks[0]] - column_degeneracies[0]` stop_positions = np.cumsum(degeneracy_vector) start_positions = stop_positions - degeneracy_vector - blocks = {} + blocks = [] for c in common_charges: #numpy broadcasting is substantially faster than kron! @@ -346,11 +346,12 @@ def find_diagonal_sparse_blocks(data: np.ndarray, b = np.expand_dims(np.arange(column_degeneracies[c]), 0) inds = np.reshape(a + b, len(row_locations[c]) * column_degeneracies[c]) if not return_data: - blocks[c] = [inds, (len(row_locations[c]), column_degeneracies[c])] + blocks.append([inds, (len(row_locations[c]), column_degeneracies[c])]) else: - blocks[c] = np.reshape(data[inds], - (len(row_locations[c]), column_degeneracies[c])) - return blocks + blocks.append( + np.reshape(data[inds], + (len(row_locations[c]), column_degeneracies[c]))) + return common_charges, common_charges, blocks def find_dense_positions( @@ -608,14 +609,7 @@ class BlockSparseTensor: """ def copy(self): - new = self.__new__(type(self)) - new._data = self._data.copy() - new.data_permutation = self.data_permutation - if hasattr(self, 'linear_positions'): - new.linear_positions = self.linear_positions - new.initial_index_order = [i.copy() for i in self.initial_index_order] - new.indices = [i.copy() for i in self.indices] - return new + return BlockSparseTensor(self.data.copy(), [i.copy() for i in 
self.indices]) def __init__(self, data: np.ndarray, indices: List[Index]) -> None: """ @@ -625,38 +619,31 @@ def __init__(self, data: np.ndarray, indices: List[Index]) -> None: and `flows` indices: List of `Index` objecst, one for each leg. """ - for n in range(len(indices)): - if indices[n].name is None: - indices[n].name = 'index_{}'.format(n) + for n, i in enumerate(indices): + if i is None: + i.name = 'index_{}'.format(n) - inital_index_names = [ - indices[n].name if indices[n].name else 'index_{}'.format(n) - for n in range(len(indices)) + index_names = [ + i.name if i.name else 'index_{}'.format(n) + for n, i in enumerate(indices) ] - unique, cnts = np.unique(inital_index_names, return_counts=True) + unique, cnts = np.unique(index_names, return_counts=True) if np.any(cnts > 1): raise ValueError("Index names {} appeared multiple times. " "Please rename indices uniquely.".format( unique[cnts > 1])) - self.initial_index_order = [i.copy() for i in indices] self.indices = indices _check_flows(self.flows) num_non_zero_elements = compute_num_nonzero(self.charges, self.flows) if num_non_zero_elements != len(data.flat): - raise ValueError("number of tensor elements defined " + raise ValueError("number of tensor elements {} defined " "by `charges` is different from" - " len(data)={}".format(len(data.flat))) + " len(data)={}".format(num_non_zero_elements, + len(data.flat))) - self._data = np.asarray(data.flat) #do not copy data - self.data_permutation = None - - @property - def data(self): - if self.data_permutation is not None: - return self._data[self.data_permutation] - return self._data + self.data = np.asarray(data.flat) #do not copy data @classmethod def randn(cls, indices: List[Index], @@ -775,7 +762,8 @@ def charges(self): def transpose(self, order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False ) -> "BlockSparseTensor": """ Transpose the tensor 
into the new order `order`. This routine currently shuffles @@ -788,12 +776,7 @@ def transpose(self, Returns: BlockSparseTensor: The transposed tensor. """ - #FIXME: this implementation uses scipy.sparse.csr_matrix to generate the - #lookup-table from dense to sparse indices. According to some quick - #testing, the final lookup is currently the bottleneck. - - if (permutation is not None) and (permutation is not 'trivial') and ( - len(permutation) != len(self.data)): + if (permutation is not None) and (len(permutation) != len(self.data)): raise ValueError("len(permutation) != len(tensor.data).") if len(order) != self.rank: @@ -803,92 +786,50 @@ def transpose(self, #check for trivial permutation if np.all(order == np.arange(len(order))): - #nothing to do - if self.data_permutation is None: - return 'trivial' - return self.data_permutation - - new_index_order = [self.indices[n] for n in order] - if np.all([ - new_index_order[n].name == self.initial_index_order[n].name - for n in range(len(self.indices)) - ]): - #`order' permutes into the original ordering - self.data_permutation = None - self.indices = [self.indices[n] for n in order] - return 'trivial' + if return_permutation: + return np.arange(len(self.data)) + return #we use elementary indices here because it is #more efficient to get the fused charges using #the best partition if permutation is None: - - def get_flattened_data(indices): - elementary_indices = {} - flat_elementary_indices = [] - for n in range(len(indices)): - elementary_indices[n] = indices[n].get_elementary_indices() - flat_elementary_indices.extend(elementary_indices[n]) - flat_index_list = np.arange(len(flat_elementary_indices)) - cum_num_legs = np.append( - 0, - np.cumsum([len(elementary_indices[n]) for n in range(len(indices)) - ])) - - flat_charges = [i.charges for i in flat_elementary_indices] - flat_flows = [i.flow for i in flat_elementary_indices] - flat_dims = [len(c) for c in flat_charges] - flat_strides = np.flip( - np.append(1, 
np.cumprod(np.flip(flat_dims[1::])))) - return flat_charges, flat_flows, flat_dims, flat_strides, flat_index_list, cum_num_legs - - #find the absolute order, i.e. the requested order with respect to the initial - #ordering of the indices - absolute_order = [] - for o in order: - for n in range(len(self.initial_index_order)): - if self.initial_index_order[n].name == self.indices[o].name: - absolute_order.append(n) - - flat_charges, flat_flows, flat_dims, flat_strides, flat_index_list, cum_num_legs = get_flattened_data( - self.initial_index_order) - - flat_order = np.concatenate([ - flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] - for n in absolute_order - ]) - - if not hasattr(self, 'linear_positions'): - #find the best partition into left and right charges - left_charges, right_charges, _ = _find_best_partition( - flat_charges, flat_flows) - #find the index-positions of the elements in the fusion - #of `left_charges` and `right_charges` that have `0` - #total charge (those are the only non-zero elements). 
- self.linear_positions = find_dense_positions( - left_charges, - 1, - right_charges, - 1, - target_charge=flat_charges[0].zero_charge) - #print(len(left_charges), len(right_charges)) - #print('first finding dense positions {}s'.format(time.time() - t1)) - #data = np.arange(len(self.data)) - #zeros = np.zeros(len(self.data), dtype=np.int64) - #self.dense_to_sparse_table = sp.sparse.csr_matrix( - # (data, (linear_positions, zeros))) - + elementary_indices = {} + flat_elementary_indices = [] + for n in range(len(self.indices)): + elementary_indices[n] = self.indices[n].get_elementary_indices() + flat_elementary_indices.extend(elementary_indices[n]) + flat_index_list = np.arange(len(flat_elementary_indices)) + cum_num_legs = np.append( + 0, + np.cumsum( + [len(elementary_indices[n]) for n in range(len(self.indices))])) + + flat_charges = [i.charges for i in flat_elementary_indices] + flat_flows = [i.flow for i in flat_elementary_indices] + flat_dims = [len(c) for c in flat_charges] + flat_strides = np.flip(np.append(1, np.cumprod(np.flip(flat_dims[1::])))) + flat_order = np.concatenate( + [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) + #find the best partition into left and right charges + left_charges, right_charges, _ = _find_best_partition( + flat_charges, flat_flows) + #find the index-positions of the elements in the fusion + #of `left_charges` and `right_charges` that have `0` + #total charge (those are the only non-zero elements). 
+ linear_positions = find_dense_positions( + left_charges, + 1, + right_charges, + 1, + target_charge=flat_charges[0].zero_charge) flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] flat_tr_strides = [flat_strides[n] for n in flat_order] flat_tr_dims = [flat_dims[n] for n in flat_order] + tr_left_charges, tr_right_charges, partition = _find_best_partition( flat_tr_charges, flat_tr_flows) - #FIXME: this should be done without fully fusing the strides - # tr_dense_linear_positions = fuse_ndarrays([ - # np.arange(flat_tr_dims[n]) * flat_tr_strides[n] - # for n in range(len(flat_tr_dims)) - # ]) tr_linear_positions = find_dense_positions(tr_left_charges, 1, tr_right_charges, 1, tr_left_charges.zero_charge) @@ -896,119 +837,17 @@ def get_flattened_data(indices): np.arange(flat_tr_dims[n]) * flat_tr_strides[n] for n in range(len(flat_tr_dims)) ] + dense_permutation = find_values_in_fused( tr_linear_positions, fuse_ndarrays(stride_arrays[0:partition]), fuse_ndarrays(stride_arrays[partition::])) - # data_permutation = np.squeeze( - # self.dense_to_sparse_table[dense_permutation, 0].toarray()) - self.data_permutation = np.searchsorted(self.linear_positions, - dense_permutation) + assert np.all(np.sort(dense_permutation) == linear_positions) + permutation = np.searchsorted(linear_positions, dense_permutation) - self.indices = [self.indices[n] for n in order] - return self.data_permutation - - elif permutation is 'trivial': - self.data_permutation = None - self.indices = [self.indices[n] for n in order] + self.indices = [self.indices[n] for n in order] + self.data = self.data[permutation] + if return_permutation: return permutation - else: - self.data_permutation = permutation - - # def transpose(self, - # order: Union[List[int], np.ndarray], - # permutation: Optional[np.ndarray] = None - # ) -> "BlockSparseTensor": - # """ - # Transpose the tensor into the new order `order`. This routine currently shuffles - # data. 
- # Args: - # order: The new order of indices. - # permutation: An np.ndarray of int for reshuffling the data, - # typically the output of a prior call to `transpose`. Passing `permutation` - # can greatly speed up the transposition. - # Returns: - # BlockSparseTensor: The transposed tensor. - # """ - # #FIXME: this implementation uses scipy.sparse.csr_matrix to generate the - # #lookup-table from dense to sparse indices. According to some quick - # #testing, the final lookup is currently the bottleneck. - # #FIXME: transpose currently shuffles data. This can in principle be postponed - # #until `tensordot` or `find_diagonal_sparsenn_blocks`, at the cost of - # #maintaining two lookup tables for sparse-to-dense positions and dense-to-sparse - # #positions - # if (permutation is - # not None) and (len(permutation) != len(tensor.data)): - # raise ValueError("len(permutation) != len(tensor.data).") - - # if len(order) != self.rank: - # raise ValueError( - # "`len(order)={}` is different form `self.rank={}`".format( - # len(order), self.rank)) - # #transpose is the only function using self.dense_to_sparse_table - # #so we can initialize it here. This will change if we are implementing - # #lazy shuffling of data. In this case, `find_diagonal_sparse_blocks` - # #also needs this. 
- - # #we use elementary indices here because it is - # #more efficient to get the fused charges using - # #the best partition - # if permutation is None: - # elementary_indices = {} - # flat_elementary_indices = [] - - # for n in range(self.rank): - # elementary_indices[n] = self.indices[n].get_elementary_indices() - # flat_elementary_indices.extend(elementary_indices[n]) - # flat_index_list = np.arange(len(flat_elementary_indices)) - # cum_num_legs = np.append( - # 0, np.cumsum([len(elementary_indices[n]) for n in range(self.rank)])) - # flat_order = np.concatenate( - # [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) - - # flat_charges = [i.charges for i in flat_elementary_indices] - # flat_flows = [i.flow for i in flat_elementary_indices] - # flat_dims = [len(c) for c in flat_charges] - # flat_strides = np.flip(np.append(1, np.cumprod(np.flip(flat_dims[1::])))) - # if not hasattr(self, 'dense_to_sparse_table'): - # #find the best partition into left and right charges - # left_charges, right_charges, _ = _find_best_partition( - # flat_charges, flat_flows) - # #find the index-positions of the elements in the fusion - # #of `left_charges` and `right_charges` that have `0` - # #total charge (those are the only non-zero elements). 
- # linear_positions = find_dense_positions( - # left_charges, - # 1, - # right_charges, - # 1, - # target_charge=flat_charges[0].zero_charge) - - # self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( - # len(self.data)), (linear_positions, - # np.zeros(len(self.data), dtype=np.int64)))) - - # flat_tr_charges = [flat_charges[n] for n in flat_order] - # flat_tr_flows = [flat_flows[n] for n in flat_order] - # flat_tr_strides = [flat_strides[n] for n in flat_order] - # flat_tr_dims = [flat_dims[n] for n in flat_order] - - # tr_left_charges, tr_right_charges, _ = _find_best_partition( - # flat_tr_charges, flat_tr_flows) - # #FIXME: this should be done without fully fusing the strides - # tr_dense_linear_positions = fuse_ndarrays([ - # np.arange(flat_tr_dims[n]) * flat_tr_strides[n] - # for n in range(len(flat_tr_dims)) - # ]) - # tr_linear_positions = find_dense_positions( - # tr_left_charges, 1, tr_right_charges, 1, tr_left_charges.zero_charge) - - # inds = np.squeeze(self.dense_to_sparse_table[ - # tr_dense_linear_positions[tr_linear_positions], 0].toarray()) - # else: - # inds = permutation - # self.indices = [self.indices[n] for n in order] - # self.data = self.data[inds] - # return inds def reset_shape(self) -> None: """ @@ -1113,21 +952,25 @@ def raise_error(): def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: """ - Obtain the diagonal blocks of symmetric matrix. + Obtain the diagonal blocks of a symmetric matrix. BlockSparseTensor has to be a matrix. - For matrices with shape[0] << shape[1], this routine avoids explicit fusion - of column charges. + This routine avoids explicit fusion of row or column charges. Args: - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. 
- If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with + return_data: If `True`, the returned dictionary maps quantum numbers `q` to + an actual `np.ndarray` containing the data of block `q`. + If `False`, the returned dict maps quantum numbers `q` to a list + `[locations, shape]`, where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within `self.data`, i.e. + `self.data[locations]` contains the elements belonging to the tensor with quantum numbers `(q,q). `shape` is the shape of the corresponding array. Returns: - dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) + dict: If `return_data=True`: Dictionary mapping charge `q` to an + np.ndarray of rank 2 (a matrix). + If `return_data=False`: Dictionary mapping charge `q` to a + list `[locations, shape]`, where `locations` is an np.ndarray of type + np.int64 containing the locations of the tensor elements within `self.data` + """ if self.rank != 2: @@ -1192,7 +1035,7 @@ def reshape(tensor: BlockSparseTensor, def transpose(tensor: BlockSparseTensor, order: Union[List[int], np.ndarray], permutation: Optional[np.ndarray] = None, - return_new_positions: Optional[bool] = False + return_permutation: Optional[bool] = False ) -> "BlockSparseTensor": """ Transpose `tensor` into the new order `order`. 
This routine currently shuffles @@ -1208,16 +1051,19 @@ def transpose(tensor: BlockSparseTensor, """ if (permutation is not None) and (len(permutation) != len(tensor.data)): raise ValueError("len(permutation) != len(tensor.data).") - result = BlockSparseTensor( - data=tensor.data.copy(), indices=[i.copy() for i in tensor.indices]) - inds = result.transpose(order, permutation) - if return_new_positions: + result = tensor.copy() + inds = result.transpose(order, permutation, return_permutation) + if return_permutation: return result, inds return result -def tensordot(tensor1: BlockSparseTensor, tensor2: BlockSparseTensor, - axes: Sequence[Sequence[int]]): +def tensordot(tensor1: BlockSparseTensor, + tensor2: BlockSparseTensor, + axes: Sequence[Sequence[int]], + permutation1: Optional[np.ndarray] = None, + permutation2: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False): axes1 = axes[0] axes2 = axes[1] if not np.all(np.unique(axes1) == np.sort(axes1)): @@ -1231,15 +1077,113 @@ def tensordot(tensor1: BlockSparseTensor, tensor2: BlockSparseTensor, raise ValueError( "rank of `tensor1` is smaller than `max(axes1) = {}.`".format( max(axes1))) + elementary_1, elementary_2 = [], [] + for a in axes1: + elementary_1.extend(tensor1.indices[a].get_elementary_indices()) + for a in axes2: + elementary_2.extend(tensor2.indices[a].get_elementary_indices()) + + if len(elementary_2) != len(elementary_1): + raise ValueError("axes1 and axes2 have incompatible elementary" + " shapes {} and {}".format(elementary_1, elementary_2)) + if not np.all( + np.array([i.flow for i in elementary_1]) == (-1) * + np.array([i.flow for i in elementary_2])): + raise ValueError("axes1 and axes2 have incompatible elementary" + " flows {} and {}".format( + np.array([i.flow for i in elementary_1]), + np.array([i.flow for i in elementary_2]))) + if max(axes2) >= len(tensor2.shape): raise ValueError( "rank of `tensor2` is smaller than `max(axes2) = {}`".format( max(axes1))) - free_axes_1 
= sorted(set(np.arange(len(tensor1.shape))) - set(axes1)) - free_axes_2 = sorted(set(np.arange(len(tensor2.shape))) - set(axes2)) - new_order_1 = free_axes_1 + list(axes1) - new_order_2 = list(axes2) + free_axes_2 - #FIXME: currently this shuffles data, this is unnecessary! - tmp1 = transpose(tensor1, new_order_1) - tmp2 = transpose(tensor2, new_order_2) - pass + free_axes1 = sorted(set(np.arange(len(tensor1.shape))) - set(axes1)) + free_axes2 = sorted(set(np.arange(len(tensor2.shape))) - set(axes2)) + new_order1 = free_axes1 + list(axes1) + new_order2 = list(axes2) + free_axes2 + + tr1 = transpose( + tensor=tensor1, + order=new_order1, + permutation=permutation1, + return_permutation=return_permutation) + trshape1 = tr1.dense_shape + Dl1 = np.prod([trshape1[n] for n in range(len(free_axes1))]) + Dr1 = np.prod([trshape1[n] for n in range(len(free_axes1), len(trshape1))]) + + tmp1 = reshape(tr1, (Dl1, Dr1)) + + tr2 = transpose( + tensor=tensor2, + order=new_order2, + permutation=permutation2, + return_permutation=return_permutation) + trshape2 = tr2.dense_shape + Dl2 = np.prod([trshape2[n] for n in range(len(axes2))]) + Dr2 = np.prod([trshape2[n] for n in range(len(axes2), len(trshape2))]) + + tmp2 = reshape(tr2, (Dl2, Dr2)) + + row_charges1, column_charges1, data1 = tmp1.get_diagonal_blocks( + return_data=True) + row_charges2, column_charges2, data2 = tmp2.get_diagonal_blocks( + return_data=True) + + fused1 = fuse_charges([i.charges for i in tensor1.indices], + [i.flow for i in tensor1.indices]) + #print(len(np.nonzero(fused1.charges == 0)[0])) + fused2 = fuse_charges([i.charges for i in tensor2.indices], + [i.flow for i in tensor2.indices]) + + # print( + # np.sum([np.prod(d.shape) for d in data1]), len(tensor1.data), + # len(np.nonzero(fused1.charges == 0)[0])) + # print( + # np.sum([np.prod(d.shape) for d in data2]), len(tensor2.data), + # len(np.nonzero(fused2.charges == 0)[0])) + tmp_charges, cnts = column_charges1.concatenate(row_charges2).unique( + 
return_counts=True) + common_charges = tmp_charges[cnts == 2] + container = [] + # print(row_charges1, column_charges1) + # print(row_charges2, column_charges2) + # print(len(common_charges)) + # print(len(row_charges1)) + # print(len(row_charges2)) + # for n in range(len(row_charges1)): + # print(row_charges1.get_item(n), column_charges1.get_item(n), data1[n].shape) + # print() + # for n in range(len(row_charges1)): + # print(row_charges1.get_item(n), column_charges1.get_item(n), data2[n].shape) + + for c in common_charges: + i1 = np.nonzero(column_charges1 == c)[0][0] + i2 = np.nonzero(row_charges2 == c)[0][0] + try: + container.append(np.matmul(data1[i1], data2[i2]).flat) + except ValueError: + raise ValueError("for quantum number {}, shapes {} and {} " + "of left and right blocks have " + "incompatible shapes".format(c, data1[i1].shape, + data2[i2].shape)) + #print('asdfasdf', len(container)) + data = np.concatenate(container) + indices = [] + indices.extend(tmp1.indices[0].get_elementary_indices()) + indices.extend(tmp2.indices[1].get_elementary_indices()) + fused = fuse_charges([i.charges for i in indices], [i.flow for i in indices]) + #print(len(np.nonzero(fused.charges == 0)[0])) + # print( + # compute_num_nonzero([i.charges for i in indices], + # [i.flow for i in indices])) + # print([i.flow for i in indices]) + # print([i.name for i in indices]) + for n, i in enumerate(indices): + i.name = 'index_{}'.format(n) + + out = BlockSparseTensor(data=data, indices=indices) + resulting_shape = [trshape1[n] for n in range(len(free_axes1)) + ] + [trshape2[n] for n in range(len(axes2), len(trshape2))] + out.reshape(resulting_shape) + return out From 05e731384c19900c256fb2373821738da08c257d Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 10 Jan 2020 10:22:48 -0500 Subject: [PATCH 144/212] tensordot implemented, not tested --- .../block_tensor/block_tensor_new.py | 248 ++++++++++-------- 1 file changed, 133 insertions(+), 115 deletions(-) diff --git 
a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py index bdda52431..f0fc333b5 100644 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ b/tensornetwork/block_tensor/block_tensor_new.py @@ -28,12 +28,12 @@ Tensor = Any -def find_values_in_fused(indices, left, right): - # inds = np.asarray(indices) - # left = np.asarray(left) - # right = np.asarray(right) - # right_inds = np.mod(inds, len(right)) - # left_inds = np.floor_divide(inds - right_inds, len(right)) +def _find_values_in_fused(indices: np.ndarray, left: np.ndarray, + right: np.ndarray) -> np.ndarray: + """ + Returns fuse(left,right)[indices], i.e. the elements + in the fusion of `left` and `right` at positions `indices'. + """ left_inds, right_inds = np.divmod(indices, len(right)) return left[left_inds] + right[right_inds] @@ -73,31 +73,24 @@ def fuse_ndarrays(arrays: List[Union[List, np.ndarray]]) -> np.ndarray: return fused_arrays -def fuse_ndarrays(arrays: List[Union[List, np.ndarray]]) -> np.ndarray: - """ - Fuse all `arrays` by simple kronecker addition. - Arrays are fused from "right to left", - Args: - arrays: A list of arrays to be fused. - Returns: - np.ndarray: The result of fusing `charges`. - """ - if len(arrays) == 1: - return arrays[0] - fused_arrays = arrays[0] - for n in range(1, len(arrays)): - fused_arrays = fuse_ndarray_pair(array1=fused_arrays, array2=arrays[n]) - return fused_arrays - - def _check_flows(flows: List[int]) -> None: if (set(flows) != {1}) and (set(flows) != {-1}) and (set(flows) != {-1, 1}): raise ValueError( "flows = {} contains values different from 1 and -1".format(flows)) -def _find_best_partition(charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[int]): +def _find_best_partition( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[int]) -> Tuple[Union[BaseCharge, ChargeCollection], + Union[BaseCharge, ChargeCollection], int]: + """ + compute the best partition for fusing `charges`, i.e. 
the integer `p` + such that fusing `len(fuse_charges(charges[0:p],flows[0:p]))` is + and `len(fuse_charges(charges[p::],flows[p::]))` are as close as possible. + Returns: + fused_left_charges, fused_right_charges, p + + """ #FIXME: fusing charges with dims (N,M) with M>~N is faster than fusing charges # with dims (M,N). Thus, it is not always best to fuse at the minimum cut. #for example, for dims (1000, 4, 1002), its better to fuse at the cut @@ -119,11 +112,6 @@ def _find_best_partition(charges: List[Union[BaseCharge, ChargeCollection]], min_ind = min_inds[np.argmax(right_dims)] else: min_ind = min_inds[0] - # min_ind = np.argmin([ - # np.abs(np.prod(dims[0:n]) - np.prod(dims[n::])) - # for n in range(1, len(charges)) - # ]) - fused_left_charges = fuse_charges(charges[0:min_ind + 1], flows[0:min_ind + 1]) fused_right_charges = fuse_charges(charges[min_ind + 1::], @@ -134,7 +122,8 @@ def _find_best_partition(charges: List[Union[BaseCharge, ChargeCollection]], def compute_fused_charge_degeneracies( charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]]) -> Dict: + flows: List[Union[bool, int]] +) -> Tuple[Union[BaseCharge, ChargeCollection], np.ndarray]: """ For a list of charges, compute all possible fused charges resulting from fusing `charges`, together with their respective degeneracies @@ -156,9 +145,8 @@ def compute_fused_charge_degeneracies( # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". 
- accumulated_charges, accumulated_degeneracies = (charges[0] * - flows[0]).unique( - return_counts=True) + accumulated_charges, accumulated_degeneracies = ( + charges[0] * flows[0]).unique(return_counts=True) for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor @@ -213,12 +201,14 @@ def compute_num_nonzero(charges: List[np.ndarray], return accumulated_degeneracies[res][0] -def find_diagonal_sparse_blocks(data: np.ndarray, - row_charges: List[Union[List, np.ndarray]], - column_charges: List[Union[List, np.ndarray]], - row_flows: List[Union[bool, int]], - column_flows: List[Union[bool, int]], - return_data: Optional[bool] = False) -> Dict: +def find_diagonal_sparse_blocks( + data: np.ndarray, + row_charges: List[Union[BaseCharge, ChargeCollection]], + column_charges: List[Union[BaseCharge, ChargeCollection]], + row_flows: List[Union[bool, int]], + column_flows: List[Union[bool, int]], + return_data: Optional[bool] = False +) -> Tuple[Union[BaseCharge, ChargeCollection], Dict, np.ndarray, Dict, Dict]: """ Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. @@ -342,22 +332,23 @@ def find_diagonal_sparse_blocks(data: np.ndarray, for c in common_charges: #numpy broadcasting is substantially faster than kron! 
- a = np.expand_dims(start_positions[np.sort(row_locations[c])], 1) - b = np.expand_dims(np.arange(column_degeneracies[c]), 0) - inds = np.reshape(a + b, len(row_locations[c]) * column_degeneracies[c]) + rlocs = row_locations[c] + rlocs.sort() #sort in place (we need it again later) + cdegs = column_degeneracies[c] + a = np.expand_dims(start_positions[rlocs], 1) + b = np.expand_dims(np.arange(cdegs), 0) + inds = np.reshape(a + b, len(rlocs) * cdegs) if not return_data: - blocks.append([inds, (len(row_locations[c]), column_degeneracies[c])]) + blocks.append([inds, (len(rlocs), cdegs)]) else: - blocks.append( - np.reshape(data[inds], - (len(row_locations[c]), column_degeneracies[c]))) - return common_charges, common_charges, blocks + blocks.append(np.reshape(data[inds], (len(rlocs), cdegs))) + return common_charges, blocks, start_positions, row_locations, column_degeneracies def find_dense_positions( left_charges: Union[BaseCharge, ChargeCollection], left_flow: int, right_charges: Union[BaseCharge, ChargeCollection], right_flow: int, - target_charge: Union[BaseCharge, ChargeCollection]) -> Dict: + target_charge: Union[BaseCharge, ChargeCollection]) -> np.ndarray: """ Find the dense locations of elements (i.e. the index-values within the DENSE tensor) in the vector `fused_charges` (resulting from fusing np.ndarrays @@ -389,7 +380,7 @@ def find_dense_positions( right_flow: The flow direction of the right charges. target_charge: The target charge. Returns: - dict: Mapping tuples of integers to np.ndarray of integers. + np.ndarray: The indices of the elements fusing to `target_charge`. 
""" _check_flows([left_flow, right_flow]) unique_left, left_degeneracies = left_charges.unique(return_counts=True) @@ -506,9 +497,8 @@ def find_sparse_positions( target_charge = target_charges[n] right_indices[( left_charge.get_item(0), target_charge.get_item(0))] = np.nonzero( - tmp_relevant_right_charges == (target_charge + left_charge * - ((-1) * left_flow)) * - right_flow)[0] + tmp_relevant_right_charges == (target_charge + left_charge * ( + (-1) * left_flow)) * right_flow)[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy @@ -534,7 +524,7 @@ def find_sparse_positions( def compute_dense_to_sparse_mapping( charges: List[Union[BaseCharge, ChargeCollection]], flows: List[Union[bool, int]], - target_charge: Union[BaseCharge, ChargeCollection]) -> int: + target_charge: Union[BaseCharge, ChargeCollection]) -> List[np.ndarray]: """ Compute the mapping from multi-index positions to the linear positions within the sparse data container, given the meta-data of a symmetric tensor. @@ -726,6 +716,10 @@ def init_random(): return cls(data=init_random(), indices=indices) + @property + def index_names(self): + return [i.name for i in self.indices] + @property def rank(self): return len(self.indices) @@ -760,11 +754,11 @@ def flows(self): def charges(self): return [i.charges for i in self.indices] - def transpose(self, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False - ) -> "BlockSparseTensor": + def transpose( + self, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order`. This routine currently shuffles data. @@ -773,6 +767,7 @@ def transpose(self, permutation: An np.ndarray of int for reshuffling the data, typically the output of a prior call to `transpose`. Passing `permutation` can greatly speed up the transposition. 
+ return_permutation: If `True`, return the the permutation data. Returns: BlockSparseTensor: The transposed tensor. """ @@ -830,15 +825,14 @@ def transpose(self, tr_left_charges, tr_right_charges, partition = _find_best_partition( flat_tr_charges, flat_tr_flows) - tr_linear_positions = find_dense_positions(tr_left_charges, 1, - tr_right_charges, 1, - tr_left_charges.zero_charge) + tr_linear_positions = find_dense_positions( + tr_left_charges, 1, tr_right_charges, 1, tr_left_charges.zero_charge) stride_arrays = [ np.arange(flat_tr_dims[n]) * flat_tr_strides[n] for n in range(len(flat_tr_dims)) ] - dense_permutation = find_values_in_fused( + dense_permutation = _find_values_in_fused( tr_linear_positions, fuse_ndarrays(stride_arrays[0:partition]), fuse_ndarrays(stride_arrays[partition::])) assert np.all(np.sort(dense_permutation) == linear_positions) @@ -950,7 +944,7 @@ def raise_error(): i2, i1 = self.indices.pop(), self.indices.pop() self.indices.append(fuse_index_pair(i1, i2)) - def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: + def _get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: """ Obtain the diagonal blocks of a symmetric matrix. BlockSparseTensor has to be a matrix. @@ -1032,11 +1026,11 @@ def reshape(tensor: BlockSparseTensor, return result -def transpose(tensor: BlockSparseTensor, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False - ) -> "BlockSparseTensor": +def transpose( + tensor: BlockSparseTensor, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False) -> "BlockSparseTensor": """ Transpose `tensor` into the new order `order`. This routine currently shuffles data. @@ -1046,8 +1040,14 @@ def transpose(tensor: BlockSparseTensor, permutation: An np.ndarray of int for reshuffling the data, typically the output of a prior call to `transpose`. 
Passing `permutation` can greatly speed up the transposition. + return_permutation: If `True`, return the the permutation data. Returns: - BlockSparseTensor: The transposed tensor. + if `return_permutation == False`: + BlockSparseTensor: The transposed tensor. + if `return_permutation == True`: + BlockSparseTensor, permutation: The transposed tensor + and the permutation data + """ if (permutation is not None) and (len(permutation) != len(tensor.data)): raise ValueError("len(permutation) != len(tensor.data).") @@ -1064,6 +1064,24 @@ def tensordot(tensor1: BlockSparseTensor, permutation1: Optional[np.ndarray] = None, permutation2: Optional[np.ndarray] = None, return_permutation: Optional[bool] = False): + """ + Contract two `BlockSparseTensor`s along `axes`. + Args: + tensor1: First tensor. + tensor2: Second tensor. + axes: The axes to contract. + permutation1: Permutation data for `tensor1`. + permutation2: Permutation data for `tensor2`. + return_permutation: If `True`, return the the permutation data. + Returns: + if `return_permutation == False`: + BlockSparseTensor: The result of contracting `tensor1` and `tensor2`. + if `return_permutation == True`: + BlockSparseTensor, np.ndarrays, np.ndarray: The result of + contracting `tensor1` and `tensor2`, together with their respective + permutation data. 
+ + """ axes1 = axes[0] axes2 = axes[1] if not np.all(np.unique(axes1) == np.sort(axes1)): @@ -1087,8 +1105,8 @@ def tensordot(tensor1: BlockSparseTensor, raise ValueError("axes1 and axes2 have incompatible elementary" " shapes {} and {}".format(elementary_1, elementary_2)) if not np.all( - np.array([i.flow for i in elementary_1]) == (-1) * - np.array([i.flow for i in elementary_2])): + np.array([i.flow for i in elementary_1]) == + (-1) * np.array([i.flow for i in elementary_2])): raise ValueError("axes1 and axes2 have incompatible elementary" " flows {} and {}".format( np.array([i.flow for i in elementary_1]), @@ -1108,6 +1126,10 @@ def tensordot(tensor1: BlockSparseTensor, order=new_order1, permutation=permutation1, return_permutation=return_permutation) + if return_permutation: + permutation1 = tr1[1] + tr1 = tr1[1] + trshape1 = tr1.dense_shape Dl1 = np.prod([trshape1[n] for n in range(len(free_axes1))]) Dr1 = np.prod([trshape1[n] for n in range(len(free_axes1), len(trshape1))]) @@ -1119,71 +1141,67 @@ def tensordot(tensor1: BlockSparseTensor, order=new_order2, permutation=permutation2, return_permutation=return_permutation) + if return_permutation: + permutation2 = tr2[1] + tr2 = tr2[1] trshape2 = tr2.dense_shape Dl2 = np.prod([trshape2[n] for n in range(len(axes2))]) Dr2 = np.prod([trshape2[n] for n in range(len(axes2), len(trshape2))]) tmp2 = reshape(tr2, (Dl2, Dr2)) - row_charges1, column_charges1, data1 = tmp1.get_diagonal_blocks( - return_data=True) - row_charges2, column_charges2, data2 = tmp2.get_diagonal_blocks( - return_data=True) - - fused1 = fuse_charges([i.charges for i in tensor1.indices], - [i.flow for i in tensor1.indices]) - #print(len(np.nonzero(fused1.charges == 0)[0])) - fused2 = fuse_charges([i.charges for i in tensor2.indices], - [i.flow for i in tensor2.indices]) - - # print( - # np.sum([np.prod(d.shape) for d in data1]), len(tensor1.data), - # len(np.nonzero(fused1.charges == 0)[0])) - # print( - # np.sum([np.prod(d.shape) for d in 
data2]), len(tensor2.data), - # len(np.nonzero(fused2.charges == 0)[0])) + #avoid data-copying here by setting `return_data=False` + column_charges1, data1, start_positions, row_locations, _ = tmp1._get_diagonal_blocks( + return_data=False) + row_charges2, data2, _, _, column_degeneracies = tmp2._get_diagonal_blocks( + return_data=False) + + #get common charges between rows and columns tmp_charges, cnts = column_charges1.concatenate(row_charges2).unique( return_counts=True) common_charges = tmp_charges[cnts == 2] - container = [] - # print(row_charges1, column_charges1) - # print(row_charges2, column_charges2) - # print(len(common_charges)) - # print(len(row_charges1)) - # print(len(row_charges2)) - # for n in range(len(row_charges1)): - # print(row_charges1.get_item(n), column_charges1.get_item(n), data1[n].shape) - # print() - # for n in range(len(row_charges1)): - # print(row_charges1.get_item(n), column_charges1.get_item(n), data2[n].shape) + + #get the flattened indices for the output tensor + indices = [] + indices.extend(tmp1.indices[0].get_elementary_indices()) + indices.extend(tmp2.indices[1].get_elementary_indices()) + index_names = [i.name for i in indices] + unique = np.unique(index_names) + #rename indices if they are not unique + if len(unique) < len(index_names): + for n, i in enumerate(indices): + i.name = 'index_{}'.format(n) + + #initialize the data-vector of the output with zeros + num_nonzero_elements = compute_num_nonzero([i.charges for i in indices], + [i.flow for i in indices]) + data = np.zeros( + num_nonzero_elements, dtype=np.result_type(tensor1.dtype, tensor2.dtype)) for c in common_charges: + rlocs = row_locations[c] + cdegs = column_degeneracies[c] + a = np.expand_dims(start_positions[rlocs], 1) + b = np.expand_dims(np.arange(cdegs), 0) + new_locations = np.reshape(a + b, len(rlocs) * cdegs) i1 = np.nonzero(column_charges1 == c)[0][0] i2 = np.nonzero(row_charges2 == c)[0][0] try: - container.append(np.matmul(data1[i1], data2[i2]).flat) 
+ #place the result of the block-matrix multiplication + #into the new data-vector + data[new_locations] = np.matmul( + np.reshape(tensor1.data[data1[i1][0]], data1[i1][1]), + np.reshape(tensor2.data[data2[i2][0]], data2[i2][1])).flat except ValueError: raise ValueError("for quantum number {}, shapes {} and {} " "of left and right blocks have " "incompatible shapes".format(c, data1[i1].shape, data2[i2].shape)) - #print('asdfasdf', len(container)) - data = np.concatenate(container) - indices = [] - indices.extend(tmp1.indices[0].get_elementary_indices()) - indices.extend(tmp2.indices[1].get_elementary_indices()) - fused = fuse_charges([i.charges for i in indices], [i.flow for i in indices]) - #print(len(np.nonzero(fused.charges == 0)[0])) - # print( - # compute_num_nonzero([i.charges for i in indices], - # [i.flow for i in indices])) - # print([i.flow for i in indices]) - # print([i.name for i in indices]) - for n, i in enumerate(indices): - i.name = 'index_{}'.format(n) out = BlockSparseTensor(data=data, indices=indices) resulting_shape = [trshape1[n] for n in range(len(free_axes1)) ] + [trshape2[n] for n in range(len(axes2), len(trshape2))] out.reshape(resulting_shape) + if return_permutation: + return out, permutation1, permutation2 return out From 8e9f53eff488d38d701e6219c2971425e2858d12 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 10 Jan 2020 10:24:08 -0500 Subject: [PATCH 145/212] removed commented codex --- tensornetwork/block_tensor/index_new.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/tensornetwork/block_tensor/index_new.py b/tensornetwork/block_tensor/index_new.py index cd64e7493..b5e8ec339 100644 --- a/tensornetwork/block_tensor/index_new.py +++ b/tensornetwork/block_tensor/index_new.py @@ -123,14 +123,6 @@ def charges(self): return self._charges return self.left_child.charges * self.left_child.flow + self.right_child.charges * self.right_child.flow - # @property - # def name(self): - # if self._name: - # return self._name - # if 
self.is_leave: - # return self.name - # return self.left_child.name + ' & ' + self.right_child.name - def fuse_index_pair(left_index: Index, right_index: Index, From 4102c767e05be5296641177565ef381e855ca058 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 10 Jan 2020 10:34:38 -0500 Subject: [PATCH 146/212] fix tests --- tensornetwork/block_tensor/index_test.py | 104 +++++++++-------------- 1 file changed, 42 insertions(+), 62 deletions(-) diff --git a/tensornetwork/block_tensor/index_test.py b/tensornetwork/block_tensor/index_test.py index 293b37bd8..3d2e1c391 100644 --- a/tensornetwork/block_tensor/index_test.py +++ b/tensornetwork/block_tensor/index_test.py @@ -1,67 +1,55 @@ import numpy as np # pylint: disable=line-too-long -from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies, fuse_charge_pair, fuse_indices, unfuse +from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_indices +from tensornetwork.block_tensor.charge import U1Charge, Z2Charge, ChargeCollection def test_index_fusion_mul(): D = 10 B = 4 dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 1 - q2 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 2 - i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 - i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 + q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 - i12 = i1 * i2 - assert i12.left_child is i1 - assert i12.right_child is i2 - assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) - - -def test_fuse_index_pair(): - D = 10 - B = 4 - dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 1 - q2 = np.random.randint(-B // 2, B // 2 + 1, - 
D).astype(dtype) #quantum numbers on leg 2 i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 - i12 = fuse_index_pair(i1, i2) + i12 = i1 * i2 assert i12.left_child is i1 assert i12.right_child is i2 - assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) + for n in range(len(i12.charges.charges)): + assert np.all(i12.charges == (q1 + q2).charges) def test_fuse_indices(): D = 10 B = 4 dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 1 - q2 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 2 + q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 + q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 i12 = fuse_indices([i1, i2]) assert i12.left_child is i1 assert i12.right_child is i2 - assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) + for n in range(len(i12.charges.charges)): + assert np.all(i12.charges == (q1 + q2).charges) def test_split_index(): D = 10 B = 4 dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 1 - q2 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 2 + q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 + q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 + i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 @@ -69,10 +57,10 @@ def test_split_index(): i1_, i2_ = split_index(i12) assert i1 is i1_ assert i2 is i2_ - np.testing.assert_allclose(q1, i1.charges) - np.testing.assert_allclose(q2, 
i2.charges) - np.testing.assert_allclose(q1, i1_.charges) - np.testing.assert_allclose(q2, i2_.charges) + np.testing.assert_allclose(q1.charges, i1.charges) + np.testing.assert_allclose(q2.charges, i2.charges) + np.testing.assert_allclose(q1.charges, i1_.charges) + np.testing.assert_allclose(q2.charges, i2_.charges) assert i1_.name == 'index1' assert i2_.name == 'index2' assert i1_.flow == i1.flow @@ -83,10 +71,10 @@ def test_elementary_indices(): D = 10 B = 4 dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - q3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - q4 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)]) + q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)]) + q3 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)]) + q4 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)]) i1 = Index(charges=q1, flow=1, name='index1') i2 = Index(charges=q2, flow=1, name='index2') i3 = Index(charges=q3, flow=1, name='index3') @@ -113,18 +101,21 @@ def test_elementary_indices(): assert elmt1234[2].flow == i3.flow assert elmt1234[3].flow == i4.flow - np.testing.assert_allclose(q1, i1.charges) - np.testing.assert_allclose(q2, i2.charges) - np.testing.assert_allclose(q3, i3.charges) - np.testing.assert_allclose(q4, i4.charges) + np.testing.assert_allclose(q1.charges, i1.charges) + np.testing.assert_allclose(q2.charges, i2.charges) + np.testing.assert_allclose(q3.charges, i3.charges) + np.testing.assert_allclose(q4.charges, i4.charges) def test_leave(): D = 10 B = 4 dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 + q2 = 
U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 + i1 = Index(charges=q1, flow=1, name='index1') i2 = Index(charges=q2, flow=1, name='index2') assert i1.is_leave @@ -138,8 +129,11 @@ def test_copy(): D = 10 B = 4 dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 + q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)]) #quantum numbers on leg 1 + i1 = Index(charges=q1, flow=1, name='index1') i2 = Index(charges=q2, flow=1, name='index2') i3 = Index(charges=q1, flow=-1, name='index3') @@ -155,17 +149,3 @@ def test_copy(): assert elmt1234[1] is not i2 assert elmt1234[2] is not i3 assert elmt1234[3] is not i4 - - -def test_unfuse(): - q1 = np.random.randint(-4, 5, 10).astype(np.int16) - q2 = np.random.randint(-4, 5, 4).astype(np.int16) - q3 = np.random.randint(-4, 5, 4).astype(np.int16) - q12 = fuse_charges([q1, q2], [1, 1]) - q123 = fuse_charges([q12, q3], [1, 1]) - nz = np.nonzero(q123 == 0)[0] - q12_inds, q3_inds = unfuse(nz, len(q12), len(q3)) - - q1_inds, q2_inds = unfuse(q12_inds, len(q1), len(q2)) - np.testing.assert_allclose(q1[q1_inds] + q2[q2_inds] + q3[q3_inds], - np.zeros(len(q1_inds), dtype=np.int16)) From e23e628f7b2071debcf63d00cdd024a8259d1842 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 10 Jan 2020 11:07:49 -0500 Subject: [PATCH 147/212] fix tests --- tensornetwork/block_tensor/charge_test.py | 70 +++++++++-------------- 1 file changed, 26 insertions(+), 44 deletions(-) diff --git a/tensornetwork/block_tensor/charge_test.py b/tensornetwork/block_tensor/charge_test.py index fe75b2962..2094a68b4 100644 --- a/tensornetwork/block_tensor/charge_test.py +++ b/tensornetwork/block_tensor/charge_test.py @@ -1,26 +1,8 @@ import numpy as np import pytest # pylint: disable=line-too-long -from 
tensornetwork.block_tensor.charge import ChargeCollection, BaseCharge, U1Charge, Z2Charge -from tensornetwork.block_tensor.index import fuse_charges, fuse_degeneracies, fuse_charge_pair - - -def test_fuse_charge_pair(): - q1 = np.asarray([0, 1]) - q2 = np.asarray([2, 3, 4]) - fused_charges = fuse_charge_pair(q1, 1, q2, 1) - assert np.all(fused_charges == np.asarray([2, 3, 4, 3, 4, 5])) - fused_charges = fuse_charge_pair(q1, 1, q2, -1) - assert np.all(fused_charges == np.asarray([-2, -3, -4, -1, -2, -3])) - - -def test_fuse_charges(): - q1 = np.asarray([0, 1]) - q2 = np.asarray([2, 3, 4]) - fused_charges = fuse_charges([q1, q2], flows=[1, 1]) - assert np.all(fused_charges == np.asarray([2, 3, 4, 3, 4, 5])) - fused_charges = fuse_charges([q1, q2], flows=[1, -1]) - assert np.all(fused_charges == np.asarray([-2, -3, -4, -1, -2, -3])) +from tensornetwork.block_tensor.charge import ChargeCollection, BaseCharge, U1Charge, Z2Charge, fuse_degeneracies +from tensornetwork.block_tensor.block_tensor import fuse_ndarrays def test_fuse_degeneracies(): @@ -93,9 +75,9 @@ def run_test(): charges_2 = [P1, P2] charges_3 = [Q1, Q2] - fused_1 = fuse_charges(charges_1, [1, 1]) - fused_2 = fuse_charges(charges_2, [1, 1]) - fused_3 = fuse_charges(charges_3, [1, 1]) + fused_1 = fuse_ndarrays(charges_1) + fused_2 = fuse_ndarrays(charges_2) + fused_3 = fuse_ndarrays(charges_3) q1 = U1Charge([O1, P1, Q1]) q2 = U1Charge([O2, P2, Q2]) @@ -135,9 +117,9 @@ def run_test(): charges_2 = [P1, P2, P3] charges_3 = [Q1, Q2, Q3] - fused_1 = fuse_charges(charges_1, [1, 1, 1]) - fused_2 = fuse_charges(charges_2, [1, 1, 1]) - fused_3 = fuse_charges(charges_3, [1, 1, 1]) + fused_1 = fuse_ndarrays(charges_1) + fused_2 = fuse_ndarrays(charges_2) + fused_3 = fuse_ndarrays(charges_3) q1 = U1Charge([O1, P1, Q1]) q2 = U1Charge([O2, P2, Q2]) q3 = U1Charge([O3, P3, Q3]) @@ -173,13 +155,13 @@ def run_test(): Q2 = np.random.randint(0, B + 1, D).astype(np.int8) Q3 = np.random.randint(-B // 2, B // 2 + 1, 
D).astype(np.int8) - charges_1 = [O1, O2, O3] - charges_2 = [P1, P2, P3] - charges_3 = [Q1, Q2, Q3] + charges_1 = [O1, -O2, O3] + charges_2 = [P1, -P2, P3] + charges_3 = [Q1, -Q2, Q3] - fused_1 = fuse_charges(charges_1, [1, -1, 1]) - fused_2 = fuse_charges(charges_2, [1, -1, 1]) - fused_3 = fuse_charges(charges_3, [1, -1, 1]) + fused_1 = fuse_ndarrays(charges_1) + fused_2 = fuse_ndarrays(charges_2) + fused_3 = fuse_ndarrays(charges_3) q1 = U1Charge([O1, P1, Q1]) q2 = U1Charge([O2, P2, Q2]) q3 = U1Charge([O3, P3, Q3]) @@ -212,13 +194,13 @@ def run_test(): Q1 = np.random.randint(1, B + 1, D).astype(np.int8) Q2 = np.random.randint(1, B + 1, D).astype(np.int8) - charges_1 = [O1, O2] - charges_2 = [P1, P2] - charges_3 = [Q1, Q2] + charges_1 = [O1, -O2] + charges_2 = [P1, -P2] + charges_3 = [Q1, -Q2] - fused_1 = fuse_charges(charges_1, [1, -1]) - fused_2 = fuse_charges(charges_2, [1, -1]) - fused_3 = fuse_charges(charges_3, [1, -1]) + fused_1 = fuse_ndarrays(charges_1) + fused_2 = fuse_ndarrays(charges_2) + fused_3 = fuse_ndarrays(charges_3) q1 = U1Charge([O1, P1, Q1]) q2 = U1Charge([O2, P2, Q2]) @@ -250,13 +232,13 @@ def run_test(): Q1 = np.random.randint(1, B + 1, D).astype(np.int8) Q2 = np.random.randint(1, B + 1, D).astype(np.int8) - charges_1 = [O1, O2] - charges_2 = [P1, P2] - charges_3 = [Q1, Q2] + charges_1 = [O1, -O2] + charges_2 = [P1, -P2] + charges_3 = [Q1, -Q2] - fused_1 = fuse_charges(charges_1, [1, -1]) - fused_2 = fuse_charges(charges_2, [1, -1]) - fused_3 = fuse_charges(charges_3, [1, -1]) + fused_1 = fuse_ndarrays(charges_1) + fused_2 = fuse_ndarrays(charges_2) + fused_3 = fuse_ndarrays(charges_3) q1 = U1Charge([O1, P1, Q1]) q2 = U1Charge([O2, P2, Q2]) From cb6d55a5bee05d47dd8a8813bbd3a51a74ed2c07 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 10 Jan 2020 11:28:42 -0500 Subject: [PATCH 148/212] added test for BlockSparseTensor back --- .../block_tensor/block_tensor_test.py | 227 ++++++++++++++---- 1 file changed, 185 insertions(+), 42 deletions(-) 
diff --git a/tensornetwork/block_tensor/block_tensor_test.py b/tensornetwork/block_tensor/block_tensor_test.py index 9f11bec6e..805ed2256 100644 --- a/tensornetwork/block_tensor/block_tensor_test.py +++ b/tensornetwork/block_tensor/block_tensor_test.py @@ -1,12 +1,169 @@ import numpy as np import pytest -# pylint: disable=line-too-long -from tensornetwork.block_tensor.block_tensor import BlockSparseTensor, compute_num_nonzero, find_sparse_positions, find_dense_positions -from index import Index, fuse_charges + +from tensornetwork.block_tensor.charge import U1Charge, ChargeCollection, fuse_charges +from tensornetwork.block_tensor.index import Index +from tensornetwork.block_tensor.block_tensor import _find_diagonal_sparse_blocks, compute_num_nonzero, find_sparse_positions, find_dense_positions, BlockSparseTensor, fuse_ndarrays np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128] +def test_test_num_nonzero_consistency(): + B = 4 + D = 100 + rank = 4 + + qs = [[ + np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + for _ in range(2) + ] + for _ in range(rank)] + charges1 = [U1Charge(qs[n]) for n in range(rank)] + charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] + charges3 = [ + ChargeCollection([U1Charge(qs[n][m]) + for m in range(2)]) + for n in range(rank) + ] + flows = [1, 1, 1, -1] + n1 = compute_num_nonzero(charges1, flows) + n2 = compute_num_nonzero(charges3, flows) + n3 = compute_num_nonzero(charges3, flows) + assert n1 == n2 + + +def test_find_sparse_positions_consistency(): + B = 4 + D = 100 + rank = 4 + + qs = [[ + np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + for _ in range(2) + ] + for _ in range(rank)] + charges1 = [U1Charge(qs[n]) for n in range(rank)] + charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] + charges3 = [ + ChargeCollection([U1Charge(qs[n][m]) + for m in range(2)]) + for n in range(rank) + ] + + data1 = find_sparse_positions( + left_charges=charges1[0] + 
charges1[1], + left_flow=1, + right_charges=charges1[2] + charges1[3], + right_flow=1, + target_charges=charges1[0].zero_charge) + data2 = find_sparse_positions( + left_charges=charges2[0] + charges2[1], + left_flow=1, + right_charges=charges2[2] + charges2[3], + right_flow=1, + target_charges=charges2[0].zero_charge) + data3 = find_sparse_positions( + left_charges=charges3[0] + charges3[1], + left_flow=1, + right_charges=charges3[2] + charges3[3], + right_flow=1, + target_charges=charges3[0].zero_charge) + + nz1 = np.asarray(list(data1.values())[0]) + nz2 = np.asarray(list(data2.values())[0]) + nz3 = np.asarray(list(data3.values())[0]) + assert np.all(nz1 == nz2) + assert np.all(nz1 == nz3) + + +def test_find_dense_positions_consistency(): + B = 5 + D = 20 + rank = 4 + + qs = [[ + np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + for _ in range(2) + ] + for _ in range(rank)] + charges1 = [U1Charge(qs[n]) for n in range(rank)] + charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] + charges3 = [ + ChargeCollection([U1Charge(qs[n][m]) + for m in range(2)]) + for n in range(rank) + ] + flows = [1, 1, 1, -1] + data1 = find_dense_positions( + left_charges=charges1[0] * flows[0] + charges1[1] * flows[0], + left_flow=1, + right_charges=charges1[2] * flows[2] + charges1[3] * flows[3], + right_flow=1, + target_charge=charges1[0].zero_charge) + data2 = find_dense_positions( + left_charges=charges2[0] * flows[0] + charges2[1] * flows[1], + left_flow=1, + right_charges=charges2[2] * flows[2] + charges2[3] * flows[3], + right_flow=1, + target_charge=charges2[0].zero_charge) + data3 = find_dense_positions( + left_charges=charges3[0] * flows[0] + charges3[1] * flows[1], + left_flow=1, + right_charges=charges3[2] * flows[2] + charges3[3] * flows[3], + right_flow=1, + target_charge=charges3[0].zero_charge) + + nz = compute_num_nonzero(charges1, flows) + assert nz == len(data1) + assert len(data1) == len(data2) + assert len(data1) == len(data3) + + +def 
test_find_diagonal_sparse_blocks_consistency(): + B = 5 + D = 20 + rank = 4 + + qs = [[ + np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + for _ in range(2) + ] + for _ in range(rank)] + charges1 = [U1Charge(qs[n]) for n in range(rank)] + charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] + charges3 = [ + ChargeCollection([U1Charge(qs[n][m]) + for m in range(2)]) + for n in range(rank) + ] + + _, _, start_positions1, _, _ = _find_diagonal_sparse_blocks( + data=[], + row_charges=[charges1[0], charges1[1]], + column_charges=[charges1[2], charges1[3]], + row_flows=[1, 1], + column_flows=[1, -1], + return_data=False) + + _, _, start_positions2, _, _ = _find_diagonal_sparse_blocks( + data=[], + row_charges=[charges2[0], charges2[1]], + column_charges=[charges2[2], charges2[3]], + row_flows=[1, 1], + column_flows=[1, -1], + return_data=False) + + _, _, start_positions3, _, _ = _find_diagonal_sparse_blocks( + data=[], + row_charges=[charges3[0], charges3[1]], + column_charges=[charges3[2], charges3[3]], + row_flows=[1, 1], + column_flows=[1, -1], + return_data=False) + assert np.all(start_positions1 == start_positions2) + assert np.all(start_positions1 == start_positions3) + + @pytest.mark.parametrize("dtype", np_dtypes) def test_block_sparse_init(dtype): D = 10 #bond dimension @@ -15,7 +172,7 @@ def test_block_sparse_init(dtype): flows = np.asarray([1 for _ in range(rank)]) flows[-2::] = -1 charges = [ - np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16)) for _ in range(rank) ] indices = [ @@ -36,9 +193,10 @@ def test_find_dense_positions(): left_charges = np.asarray([-2, 0, 1, 0, 0]).astype(np.int16) right_charges = np.asarray([-1, 0, 2, 1]).astype(np.int16) target_charge = 0 - fused_charges = fuse_charges([left_charges, right_charges], [1, 1]) - dense_positions = find_dense_positions(left_charges, 1, right_charges, 1, - target_charge) + fused_charges = 
fuse_ndarrays([left_charges, right_charges]) + dense_positions = find_dense_positions( + U1Charge(left_charges), 1, U1Charge(right_charges), 1, + U1Charge(np.asarray([target_charge]))) np.testing.assert_allclose(dense_positions, np.nonzero(fused_charges == target_charge)[0]) @@ -55,7 +213,8 @@ def test_find_dense_positions_2(): for _ in range(rank) ] indices = [ - Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) + Index( + charges=U1Charge(charges[n]), flow=flows[n], name='index{}'.format(n)) for n in range(rank) ] n1 = compute_num_nonzero([i.charges for i in indices], @@ -68,7 +227,8 @@ def test_find_dense_positions_2(): i01 = indices[0] * indices[1] i23 = indices[2] * indices[3] - positions = find_dense_positions(i01.charges, 1, i23.charges, 1, 0) + positions = find_dense_positions(i01.charges, 1, i23.charges, 1, + U1Charge(np.asarray([0]))) assert len(positions) == n1 @@ -84,7 +244,8 @@ def test_find_sparse_positions(): for _ in range(rank) ] indices = [ - Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) + Index( + charges=U1Charge(charges[n]), flow=flows[n], name='index{}'.format(n)) for n in range(rank) ] n1 = compute_num_nonzero([i.charges for i in indices], @@ -97,12 +258,12 @@ def test_find_sparse_positions(): i01 = indices[0] * indices[1] i23 = indices[2] * indices[3] - unique_row_charges = np.unique(i01.charges) - unique_column_charges = np.unique(i23.charges) + unique_row_charges = np.unique(i01.charges.charges) + unique_column_charges = np.unique(i23.charges.charges) common_charges = np.intersect1d( unique_row_charges, -unique_column_charges, assume_unique=True) blocks = find_sparse_positions( - i01.charges, 1, i23.charges, 1, target_charges=[0]) + i01.charges, 1, i23.charges, 1, target_charges=U1Charge(np.asarray([0]))) assert sum([len(v) for v in blocks.values()]) == n1 np.testing.assert_allclose(np.sort(blocks[0]), np.arange(n1)) @@ -119,53 +280,30 @@ def test_find_sparse_positions_2(): for _ in range(rank) ] 
indices = [ - Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) + Index( + charges=U1Charge(charges[n]), flow=flows[n], name='index{}'.format(n)) for n in range(rank) ] i1, i2 = indices - common_charges = np.intersect1d(i1.charges, i2.charges) + common_charges = np.intersect1d(i1.charges.charges, i2.charges.charges) row_locations = find_sparse_positions( left_charges=i1.charges, left_flow=flows[0], right_charges=i2.charges, right_flow=flows[1], - target_charges=common_charges) + target_charges=U1Charge(common_charges)) fused = (i1 * i2).charges - relevant = fused[np.isin(fused, common_charges)] + relevant = fused.charges[np.isin(fused.charges, common_charges)] for k, v in row_locations.items(): np.testing.assert_allclose(np.nonzero(relevant == k)[0], np.sort(v)) -def test_get_diagonal_blocks(): - D = 40 #bond dimension - B = 4 #number of blocks - dtype = np.int16 #the dtype of the quantum numbers - rank = 4 - flows = np.asarray([1 for _ in range(rank)]) - flows[-2::] = -1 - charges = [ - np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - for _ in range(rank) - ] - indices = [ - Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) - for n in range(rank) - ] - common_charges = np.intersect1d(indices[0].charges, indices[1].charges) - row_locations = find_sparse_positions( - left_charges=indices[0].charges, - left_flow=1, - right_charges=indices[1].charges, - right_flow=1, - target_charges=common_charges) - - def test_dense_transpose(): Ds = [10, 11, 12] #bond dimension rank = len(Ds) flows = np.asarray([1 for _ in range(rank)]) flows[-2::] = -1 - charges = [np.zeros(Ds[n], dtype=np.int16) for n in range(rank)] + charges = [U1Charge(np.zeros(Ds[n], dtype=np.int16)) for n in range(rank)] indices = [ Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) for n in range(rank) @@ -174,3 +312,8 @@ def test_dense_transpose(): B = np.transpose(np.reshape(A.data.copy(), Ds), (1, 0, 2)) A.transpose((1, 0, 2)) 
np.testing.assert_allclose(A.data, B.flat) + + B = np.transpose(np.reshape(A.data.copy(), [11, 10, 12]), (1, 0, 2)) + A.transpose((1, 0, 2)) + + np.testing.assert_allclose(A.data, B.flat) From 93dbc011f47da2108754f91459a86fafd1666c07 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 10 Jan 2020 11:28:54 -0500 Subject: [PATCH 149/212] renaming files --- tensornetwork/block_tensor/block_tensor.py | 1487 +++++---------- .../block_tensor/block_tensor_new.py | 1207 ------------ .../block_tensor/block_tensor_new_test.py | 333 ---- .../block_tensor/block_tensor_old.py | 1675 +++++++++++++++++ .../block_tensor/block_tensor_old_test.py | 176 ++ tensornetwork/block_tensor/index.py | 159 +- tensornetwork/block_tensor/index_new.py | 175 -- tensornetwork/block_tensor/index_new_test.py | 151 -- tensornetwork/block_tensor/index_old.py | 294 +++ tensornetwork/block_tensor/index_old_test.py | 171 ++ 10 files changed, 2846 insertions(+), 2982 deletions(-) delete mode 100644 tensornetwork/block_tensor/block_tensor_new.py delete mode 100644 tensornetwork/block_tensor/block_tensor_new_test.py create mode 100644 tensornetwork/block_tensor/block_tensor_old.py create mode 100644 tensornetwork/block_tensor/block_tensor_old_test.py delete mode 100644 tensornetwork/block_tensor/index_new.py delete mode 100644 tensornetwork/block_tensor/index_new_test.py create mode 100644 tensornetwork/block_tensor/index_old.py create mode 100644 tensornetwork/block_tensor/index_old_test.py diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index c552a184a..0c60284b5 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -18,34 +18,100 @@ import numpy as np #from tensornetwork.block_tensor.lookup import lookup # pylint: disable=line-too-long -from tensornetwork.network_components import Node, contract, contract_between -from tensornetwork.backends import backend_factory -# pylint: disable=line-too-long 
-from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charge_pair, fuse_degeneracies, fuse_charges, unfuse +from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index +from tensornetwork.block_tensor.charge import fuse_degeneracies, fuse_charges, fuse_degeneracies, BaseCharge, ChargeCollection import numpy as np import scipy as sp import itertools import time -from typing import List, Union, Any, Tuple, Type, Optional, Dict, Iterable +from typing import List, Union, Any, Tuple, Type, Optional, Dict, Iterable, Sequence Tensor = Any -def _check_flows(flows) -> None: +def _find_values_in_fused(indices: np.ndarray, left: np.ndarray, + right: np.ndarray) -> np.ndarray: + """ + Returns fuse(left,right)[indices], i.e. the elements + in the fusion of `left` and `right` at positions `indices'. + """ + left_inds, right_inds = np.divmod(indices, len(right)) + return left[left_inds] + right[right_inds] + + +def fuse_ndarray_pair(array1: Union[List, np.ndarray], + array2: Union[List, np.ndarray]) -> np.ndarray: + """ + Fuse ndarrays `array1` and `array2` by kronecker-addition. + Given `array1 = [0,1,2]` and `array2 = [10,100]`, this returns + `[10, 100, 11, 101, 12, 102]`. + + Args: + array1: np.ndarray + array2: np.ndarray + Returns: + np.ndarray: The result of adding `array1` and `array2` + """ + return np.reshape( + np.asarray(array1)[:, None] + np.asarray(array2)[None, :], + len(array1) * len(array2)) + + +def fuse_ndarrays(arrays: List[Union[List, np.ndarray]]) -> np.ndarray: + """ + Fuse all `arrays` by simple kronecker addition. + Arrays are fused from "right to left", + Args: + arrays: A list of arrays to be fused. + Returns: + np.ndarray: The result of fusing `charges`. 
+ """ + if len(arrays) == 1: + return arrays[0] + fused_arrays = arrays[0] + for n in range(1, len(arrays)): + fused_arrays = fuse_ndarray_pair(array1=fused_arrays, array2=arrays[n]) + return fused_arrays + + +def _check_flows(flows: List[int]) -> None: if (set(flows) != {1}) and (set(flows) != {-1}) and (set(flows) != {-1, 1}): raise ValueError( "flows = {} contains values different from 1 and -1".format(flows)) -def _find_best_partition(charges, flows): +def _find_best_partition( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[int]) -> Tuple[Union[BaseCharge, ChargeCollection], + Union[BaseCharge, ChargeCollection], int]: + """ + compute the best partition for fusing `charges`, i.e. the integer `p` + such that fusing `len(fuse_charges(charges[0:p],flows[0:p]))` is + and `len(fuse_charges(charges[p::],flows[p::]))` are as close as possible. + Returns: + fused_left_charges, fused_right_charges, p + + """ + #FIXME: fusing charges with dims (N,M) with M>~N is faster than fusing charges + # with dims (M,N). Thus, it is not always best to fuse at the minimum cut. + #for example, for dims (1000, 4, 1002), its better to fuse at the cut + #(1000, 4008) than at (4000, 1002), even though the difference between the + #dimensions is minimal for the latter case. We should implement some heuristic + #to find these cuts. 
if len(charges) == 1: raise ValueError( '_expecting `charges` with a length of at least 2, got `len(charges)={}`' .format(len(charges))) dims = np.asarray([len(c) for c in charges]) - min_ind = np.argmin([ + diffs = [ np.abs(np.prod(dims[0:n]) - np.prod(dims[n::])) for n in range(1, len(charges)) - ]) + ] + min_inds = np.nonzero(diffs == np.min(diffs))[0] + if len(min_inds) > 1: + right_dims = [np.prod(len(charges[min_ind + 1::])) for min_ind in min_inds] + min_ind = min_inds[np.argmax(right_dims)] + else: + min_ind = min_inds[0] fused_left_charges = fuse_charges(charges[0:min_ind + 1], flows[0:min_ind + 1]) fused_right_charges = fuse_charges(charges[min_ind + 1::], @@ -54,36 +120,13 @@ def _find_best_partition(charges, flows): return fused_left_charges, fused_right_charges, min_ind + 1 -def map_to_integer(dims: Union[List, np.ndarray], - table: np.ndarray, - dtype: Optional[Type[np.number]] = np.int64): - """ - Map a `table` of integers of shape (N, r) bijectively into - an np.ndarray `integers` of length N of unique numbers. - The mapping is done using - ``` - `integers[n] = table[n,0] * np.prod(dims[1::]) + table[n,1] * np.prod(dims[2::]) + ... + table[n,r-1] * 1` - - Args: - dims: An iterable of integers. - table: An array of shape (N,r) of integers. - dtype: An optional dtype used for the conversion. - Care should be taken when choosing this to avoid overflow issues. - Returns: - np.ndarray: An array of integers. 
- """ - converter_table = np.expand_dims( - np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))), 0) - tmp = table * converter_table - integers = np.sum(tmp, axis=1) - return integers - - -def compute_fused_charge_degeneracies(charges: List[np.ndarray], - flows: List[Union[bool, int]]) -> Dict: +def compute_fused_charge_degeneracies( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[bool, int]] +) -> Tuple[Union[BaseCharge, ChargeCollection], np.ndarray]: """ For a list of charges, compute all possible fused charges resulting - from fusing `charges`, together with their respective degeneracyn + from fusing `charges`, together with their respective degeneracies Args: charges: List of np.ndarray of int, one for each leg of the underlying tensor. Each np.ndarray `charges[leg]` @@ -94,42 +137,39 @@ def compute_fused_charge_degeneracies(charges: List[np.ndarray], of the charges on each leg. `1` is inflowing, `-1` is outflowing charge. Returns: - dict: Mapping fused charges (int) to degeneracies (int) + Union[BaseCharge, ChargeCollection]: The unique fused charges. + np.ndarray of integers: The degeneracies of each unqiue fused charge. """ if len(charges) == 1: - return np.unique(flows[0] * charges[0], return_counts=True) + return (charges[0] * flows[0]).unique(return_counts=True) # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". 
- accumulated_charges, accumulated_degeneracies = np.unique( - charges[0], return_counts=True) - #multiply the flow into the charges of first leg - accumulated_charges *= flows[0] + accumulated_charges, accumulated_degeneracies = ( + charges[0] * flows[0]).unique(return_counts=True) for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor - leg_charges, leg_degeneracies = np.unique(charges[n], return_counts=True) - + leg_charges, leg_degeneracies = charges[n].unique(return_counts=True) #fuse the unique charges #Note: entries in `fused_charges` are not unique anymore. #flow1 = 1 because the flow of leg 0 has already been #mulitplied above - fused_charges = fuse_charge_pair( - q1=accumulated_charges, flow1=1, q2=leg_charges, flow2=flows[n]) + fused_charges = accumulated_charges + leg_charges * flows[n] #compute the degeneracies of `fused_charges` charges #`fused_degeneracies` is a list of degeneracies such that # `fused_degeneracies[n]` is the degeneracy of of # charge `c = fused_charges[n]`. 
fused_degeneracies = fuse_degeneracies(accumulated_degeneracies, leg_degeneracies) - #compute the new degeneracies resulting from fusing - #`accumulated_charges` and `leg_charges_2` - accumulated_charges = np.unique(fused_charges) + accumulated_charges = fused_charges.unique() accumulated_degeneracies = np.empty( len(accumulated_charges), dtype=np.int64) + for n in range(len(accumulated_charges)): accumulated_degeneracies[n] = np.sum( fused_degeneracies[fused_charges == accumulated_charges[n]]) + return accumulated_charges, accumulated_degeneracies @@ -152,65 +192,23 @@ def compute_num_nonzero(charges: List[np.ndarray], """ accumulated_charges, accumulated_degeneracies = compute_fused_charge_degeneracies( charges, flows) - if len(np.nonzero(accumulated_charges == 0)[0]) == 0: + res = accumulated_charges == accumulated_charges.zero_charge + + if len(np.nonzero(res)[0]) == 0: raise ValueError( "given leg-charges `charges` and flows `flows` are incompatible " "with a symmetric tensor") - return accumulated_degeneracies[accumulated_charges == 0][0] + return accumulated_degeneracies[res][0] -def compute_nonzero_block_shapes(charges: List[np.ndarray], - flows: List[Union[bool, int]]) -> Dict: - """ - Compute the blocks and their respective shapes of a symmetric tensor, - given its meta-data. - Args: - charges: List of np.ndarray, one for each leg. - Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - Returns: - dict: Dictionary mapping a tuple of charges to a shape tuple. - Each element corresponds to a non-zero valued block of the tensor. 
- """ - #FIXME: this routine is slow - _check_flows(flows) - degeneracies = [] - unique_charges = [] - rank = len(charges) - #find the unique quantum numbers and their degeneracy on each leg - for leg in range(rank): - c, d = np.unique(charges[leg], return_counts=True) - unique_charges.append(c) - degeneracies.append(dict(zip(c, d))) - - #find all possible combination of leg charges c0, c1, ... - #(with one charge per leg 0, 1, ...) - #such that sum([flows[0] * c0, flows[1] * c1, ...]) = 0 - charge_combinations = list( - itertools.product(*[ - unique_charges[leg] * flows[leg] - for leg in range(len(unique_charges)) - ])) - net_charges = np.array([np.sum(c) for c in charge_combinations]) - zero_idxs = np.nonzero(net_charges == 0)[0] - charge_shape_dict = {} - for idx in zero_idxs: - c = charge_combinations[idx] - shapes = [degeneracies[leg][flows[leg] * c[leg]] for leg in range(rank)] - charge_shape_dict[c] = shapes - return charge_shape_dict - - -def find_diagonal_sparse_blocks(data: np.ndarray, - row_charges: List[Union[List, np.ndarray]], - column_charges: List[Union[List, np.ndarray]], - row_flows: List[Union[bool, int]], - column_flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: +def _find_diagonal_sparse_blocks( + data: np.ndarray, + row_charges: List[Union[BaseCharge, ChargeCollection]], + column_charges: List[Union[BaseCharge, ChargeCollection]], + row_flows: List[Union[bool, int]], + column_flows: List[Union[bool, int]], + return_data: Optional[bool] = False +) -> Tuple[Union[BaseCharge, ChargeCollection], List, np.ndarray, Dict, Dict]: """ Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. @@ -245,8 +243,9 @@ def find_diagonal_sparse_blocks(data: np.ndarray, quantum numbers `(q,q). `shape` is the shape of the corresponding array. 
Returns: - dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray - or a python list of locations and shapes, depending on the value of `return_data`. + List[Union[BaseCharge, ChargeCollection]]: A list of unique charges, one per block. + List[np.ndarray]: A list containing the blocks. + """ flows = row_flows.copy() flows.extend(column_flows) @@ -259,23 +258,25 @@ def find_diagonal_sparse_blocks(data: np.ndarray, #get the unique column-charges #we only care about their degeneracies, not their order; that's much faster #to compute since we don't have to fuse all charges explicitly + #`compute_fused_charge_degeneracies` multiplies flows into the column_charges unique_column_charges, column_dims = compute_fused_charge_degeneracies( column_charges, column_flows) #convenience container for storing the degeneracies of each #column charge - column_degeneracies = dict(zip(unique_column_charges, column_dims)) - + #column_degeneracies = dict(zip(unique_column_charges, column_dims)) + column_degeneracies = dict(zip(unique_column_charges * (-1), column_dims)) if len(row_charges) > 1: left_row_charges, right_row_charges, _ = _find_best_partition( row_charges, row_flows) - unique_left = np.unique(left_row_charges) - unique_right = np.unique(right_row_charges) - unique_row_charges = np.unique( - fuse_charges(charges=[unique_left, unique_right], flows=[1, 1])) + unique_left = left_row_charges.unique() + unique_right = right_row_charges.unique() + unique_row_charges = (unique_left + unique_right).unique() #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) + concatenated = unique_row_charges.concatenate(unique_column_charges * (-1)) + tmp_unique, counts = concatenated.unique(return_counts=True) + common_charges = tmp_unique[ + counts == 2] #common_charges is a BaseCharge or ChargeCollection row_locations = find_sparse_positions( 
left_charges=left_row_charges, @@ -283,30 +284,35 @@ def find_diagonal_sparse_blocks(data: np.ndarray, right_charges=right_row_charges, right_flow=1, target_charges=common_charges) + elif len(row_charges) == 1: fused_row_charges = fuse_charges(row_charges, row_flows) #get the unique row-charges - unique_row_charges, row_dims = np.unique( - fused_row_charges, return_counts=True) + unique_row_charges, row_dims = fused_row_charges.unique(return_counts=True) #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - relevant_fused_row_charges = fused_row_charges[np.isin( - fused_row_charges, common_charges)] + #get the charges common to rows and columns (only those matter) + concatenated = unique_row_charges.concatenate(unique_column_charges * (-1)) + tmp_unique, counts = concatenated.unique(return_counts=True) + common_charges = tmp_unique[ + counts == 2] #common_charges is a BaseCharge or ChargeCollection + + relevant_fused_row_charges = fused_row_charges[fused_row_charges.isin( + common_charges)] row_locations = {} for c in common_charges: + #c = common_charges.get_item(n) row_locations[c] = np.nonzero(relevant_fused_row_charges == c)[0] else: raise ValueError('Found an empty sequence for `row_charges`') - #some numpy magic to get the index locations of the blocks + degeneracy_vector = np.empty( np.sum([len(v) for v in row_locations.values()]), dtype=np.int64) #for each charge `c` in `common_charges` we generate a boolean mask #for indexing the positions where `relevant_column_charges` has a value of `c`. 
masks = {} for c in common_charges: - degeneracy_vector[row_locations[c]] = column_degeneracies[-c] + degeneracy_vector[row_locations[c]] = column_degeneracies[c] # the result of the cumulative sum is a vector containing # the stop positions of the non-zero values of each row @@ -323,421 +329,27 @@ def find_diagonal_sparse_blocks(data: np.ndarray, # and `stop_positions[masks[0]] - column_degeneracies[0]` stop_positions = np.cumsum(degeneracy_vector) start_positions = stop_positions - degeneracy_vector - blocks = {} + blocks = [] for c in common_charges: #numpy broadcasting is substantially faster than kron! - a = np.expand_dims(start_positions[np.sort(row_locations[c])], 1) - b = np.expand_dims(np.arange(column_degeneracies[-c]), 0) - inds = np.reshape(a + b, len(row_locations[c]) * column_degeneracies[-c]) + rlocs = row_locations[c] + rlocs.sort() #sort in place (we need it again later) + cdegs = column_degeneracies[c] + a = np.expand_dims(start_positions[rlocs], 1) + b = np.expand_dims(np.arange(cdegs), 0) + inds = np.reshape(a + b, len(rlocs) * cdegs) if not return_data: - blocks[c] = [inds, (len(row_locations[c]), column_degeneracies[-c])] + blocks.append([inds, (len(rlocs), cdegs)]) else: - blocks[c] = np.reshape(data[inds], - (len(row_locations[c]), column_degeneracies[-c])) - return blocks - - -def find_diagonal_sparse_blocks_depreacated_1( - data: np.ndarray, - row_charges: List[Union[List, np.ndarray]], - column_charges: List[Union[List, np.ndarray]], - row_flows: List[Union[bool, int]], - column_flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: - """ - Deprecated - - This version is slow for matrices with shape[0] >> shape[1], but fast otherwise. - - Given the meta data and underlying data of a symmetric matrix, compute - all diagonal blocks and return them in a dict. - `row_charges` and `column_charges` are lists of np.ndarray. 
The tensor - is viewed as a matrix with rows given by fusing `row_charges` and - columns given by fusing `column_charges`. Note that `column_charges` - are never explicitly fused (`row_charges` are). - Args: - data: An np.ndarray of the data. The number of elements in `data` - has to match the number of non-zero elements defined by `charges` - and `flows` - row_charges: List of np.ndarray, one for each leg of the row-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - column_charges: List of np.ndarray, one for each leg of the column-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - row_flows: A list of integers, one for each entry in `row_charges`. - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - column_flows: A list of integers, one for each entry in `column_charges`. - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the sparse locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. + blocks.append(np.reshape(data[inds], (len(rlocs), cdegs))) + return common_charges, blocks, start_positions, row_locations, column_degeneracies - Returns: - dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray - or a python list of locations and shapes, depending on the value of `return_data`. 
- """ - flows = row_flows.copy() - flows.extend(column_flows) - _check_flows(flows) - if len(flows) != (len(row_charges) + len(column_charges)): - raise ValueError( - "`len(flows)` is different from `len(row_charges) + len(column_charges)`" - ) - - #since we are using row-major we have to fuse the row charges anyway. - fused_row_charges = fuse_charges(row_charges, row_flows) - #get the unique row-charges - unique_row_charges, row_dims = np.unique( - fused_row_charges, return_counts=True) - - #get the unique column-charges - #we only care about their degeneracies, not their order; that's much faster - #to compute since we don't have to fuse all charges explicitly - unique_column_charges, column_dims = compute_fused_charge_degeneracies( - column_charges, column_flows) - #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - #convenience container for storing the degeneracies of each - #row and column charge - row_degeneracies = dict(zip(unique_row_charges, row_dims)) - column_degeneracies = dict(zip(unique_column_charges, column_dims)) - - # we only care about charges common to row and columns - mask = np.isin(fused_row_charges, common_charges) - relevant_row_charges = fused_row_charges[mask] - #some numpy magic to get the index locations of the blocks - #we generate a vector of `len(relevant_row_charges) which, - #for each charge `c` in `relevant_row_charges` holds the - #column-degeneracy of charge `c` - degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64) - #for each charge `c` in `common_charges` we generate a boolean mask - #for indexing the positions where `relevant_column_charges` has a value of `c`. 
- masks = {} - for c in common_charges: - mask = relevant_row_charges == c - masks[c] = mask - degeneracy_vector[mask] = column_degeneracies[-c] - - # the result of the cumulative sum is a vector containing - # the stop positions of the non-zero values of each row - # within the data vector. - # E.g. for `relevant_row_charges` = [0,1,0,0,3], and - # column_degeneracies[0] = 10 - # column_degeneracies[1] = 20 - # column_degeneracies[3] = 30 - # we have - # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] - # The starting positions of consecutive elements (in row-major order) in - # each row with charge `c=0` within the data vector are then simply obtained using - # masks[0] = [True, False, True, True, False] - # and `stop_positions[masks[0]] - column_degeneracies[0]` - stop_positions = np.cumsum(degeneracy_vector) - start_positions = stop_positions - degeneracy_vector - blocks = {} - - for c in common_charges: - #numpy broadcasting is substantially faster than kron! - a = np.expand_dims(start_positions[masks[c]], 1) - b = np.expand_dims(np.arange(column_degeneracies[-c]), 0) - if not return_data: - blocks[c] = [ - np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), - (row_degeneracies[c], column_degeneracies[-c]) - ] - else: - blocks[c] = np.reshape( - data[np.reshape(a + b, - row_degeneracies[c] * column_degeneracies[-c])], - (row_degeneracies[c], column_degeneracies[-c])) - return blocks - - -def find_diagonal_sparse_blocks_deprecated_0( - data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: - """ - Deprecated: this version is about 2 times slower (worst case) than the current used - implementation - Given the meta data and underlying data of a symmetric matrix, compute - all diagonal blocks and return them in a dict. - Args: - data: An np.ndarray of the data. 
The number of elements in `data` - has to match the number of non-zero elements defined by `charges` - and `flows` - charges: List of np.ndarray, one for each leg. - Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. - - Returns: - dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray - or a python list of locations and shapes, depending on the value of `return_data`. 
- """ - if len(charges) != 2: - raise ValueError("input has to be a two-dimensional symmetric matrix") - _check_flows(flows) - if len(flows) != len(charges): - raise ValueError("`len(flows)` is different from `len(charges)`") - - #we multiply the flows into the charges - row_charges = flows[0] * charges[0] # a list of charges on each row - column_charges = flows[1] * charges[1] # a list of charges on each column - - #get the unique charges - unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) - unique_column_charges, column_dims = np.unique( - column_charges, return_counts=True) - #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - - #convenience container for storing the degeneracies of each - #row and column charge - row_degeneracies = dict(zip(unique_row_charges, row_dims)) - column_degeneracies = dict(zip(unique_column_charges, column_dims)) - - # we only care about charges common to row and columns - mask = np.isin(row_charges, common_charges) - relevant_row_charges = row_charges[mask] - - #some numpy magic to get the index locations of the blocks - #we generate a vector of `len(relevant_row_charges) which, - #for each charge `c` in `relevant_row_charges` holds the - #column-degeneracy of charge `c` - degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64) - #for each charge `c` in `common_charges` we generate a boolean mask - #for indexing the positions where `relevant_column_charges` has a value of `c`. - masks = {} - for c in common_charges: - mask = relevant_row_charges == c - masks[c] = mask - degeneracy_vector[mask] = column_degeneracies[-c] - - # the result of the cumulative sum is a vector containing - # the stop positions of the non-zero values of each row - # within the data vector. - # E.g. 
for `relevant_row_charges` = [0,1,0,0,3], and - # column_degeneracies[0] = 10 - # column_degeneracies[1] = 20 - # column_degeneracies[3] = 30 - # we have - # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] - # The starting positions of consecutive elements (in row-major order) in - # each row with charge `c=0` within the data vector are then simply obtained using - # masks[0] = [True, False, True, True, False] - # and `stop_positions[masks[0]] - column_degeneracies[0]` - stop_positions = np.cumsum(degeneracy_vector) - blocks = {} - - for c in common_charges: - #numpy broadcasting is substantially faster than kron! - a = np.expand_dims(stop_positions[masks[c]] - column_degeneracies[-c], 0) - b = np.expand_dims(np.arange(column_degeneracies[-c]), 1) - if not return_data: - blocks[c] = [ - np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), - (row_degeneracies[c], column_degeneracies[-c]) - ] - else: - blocks[c] = np.reshape( - data[np.reshape(a + b, - row_degeneracies[c] * column_degeneracies[-c])], - (row_degeneracies[c], column_degeneracies[-c])) - return blocks - - -def find_diagonal_sparse_blocks_column_major( - data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: - """ - Deprecated - - Given the meta data and underlying data of a symmetric matrix, compute - all diagonal blocks and return them in a dict, assuming column-major - ordering. - Args: - data: An np.ndarray of the data. The number of elements in `data` - has to match the number of non-zero elements defined by `charges` - and `flows` - charges: List of np.ndarray, one for each leg. - Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. 
- return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. - - Returns: - dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray - or a python list of locations and shapes, depending on the value of `return_data`. - """ - if len(charges) != 2: - raise ValueError("input has to be a two-dimensional symmetric matrix") - _check_flows(flows) - if len(flows) != len(charges): - raise ValueError("`len(flows)` is different from `len(charges)`") - - #we multiply the flows into the charges - row_charges = flows[0] * charges[0] # a list of charges on each row - column_charges = flows[1] * charges[1] # a list of charges on each column - - #get the unique charges - unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) - unique_column_charges, column_dims = np.unique( - column_charges, return_counts=True) - #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - - #convenience container for storing the degeneracies of each - #row and column charge - row_degeneracies = dict(zip(unique_row_charges, row_dims)) - column_degeneracies = dict(zip(unique_column_charges, column_dims)) - - # we only care about charges common to row and columns - mask = np.isin(column_charges, -common_charges) - relevant_column_charges = column_charges[mask] - - #some numpy magic to get the index locations of the blocks - #we generate a vector of `len(relevant_column_charges) which, - #for each charge `c` in 
`relevant_column_charges` holds the - #row-degeneracy of charge `c` - degeneracy_vector = np.empty(len(relevant_column_charges), dtype=np.int64) - #for each charge `c` in `common_charges` we generate a boolean mask - #for indexing the positions where `relevant_column_charges` has a value of `c`. - masks = {} - for c in common_charges: - mask = relevant_column_charges == -c - masks[c] = mask - degeneracy_vector[mask] = row_degeneracies[c] - - # the result of the cumulative sum is a vector containing - # the stop positions of the non-zero values of each column - # within the data vector. - # E.g. for `relevant_column_charges` = [0,1,0,0,3], and - # row_degeneracies[0] = 10 - # row_degeneracies[1] = 20 - # row_degeneracies[3] = 30 - # we have - # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] - # The starting positions of consecutive elements (in column-major order) in - # each column with charge `c=0` within the data vector are then simply obtained using - # masks[0] = [True, False, True, True, False] - # and `stop_positions[masks[0]] - row_degeneracies[0]` - stop_positions = np.cumsum(degeneracy_vector) - blocks = {} - - for c in common_charges: - #numpy broadcasting is substantially faster than kron! - a = np.expand_dims(stop_positions[masks[c]] - row_degeneracies[c], 0) - b = np.expand_dims(np.arange(row_degeneracies[c]), 1) - if not return_data: - blocks[c] = [ - np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), - (row_degeneracies[c], column_degeneracies[-c]) - ] - else: - blocks[c] = np.reshape( - data[np.reshape(a + b, - row_degeneracies[c] * column_degeneracies[-c])], - (row_degeneracies[c], column_degeneracies[-c])) - return blocks - - -def find_dense_positions_deprecated(left_charges: np.ndarray, left_flow: int, - right_charges: np.ndarray, right_flow: int, - target_charge: int) -> Dict: - """ - Find the dense locations of elements (i.e. 
the index-values within the DENSE tensor) - in the vector `fused_charges` (resulting from fusing np.ndarrays - `left_charges` and `right_charges`) that have a value of `target_charge`. - For example, given - ``` - left_charges = [-2,0,1,0,0] - right_charges = [-1,0,2,1] - target_charge = 0 - fused_charges = fuse_charges([left_charges, right_charges],[1,1]) - print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] - ``` - we want to find the all different blocks - that fuse to `target_charge=0`, i.e. where `fused_charges==0`, - together with their corresponding index-values of the data in the dense array. - `find_dense_blocks` returns a dict mapping tuples `(left_charge, right_charge)` - to an array of integers. - For the above example, we get: - * for `left_charge` = -2 and `right_charge` = 2 we get an array [2]. Thus, `fused_charges[2]` - was obtained from fusing -2 and 2. - * for `left_charge` = 0 and `right_charge` = 0 we get an array [5, 13, 17]. Thus, - `fused_charges[5,13,17]` were obtained from fusing 0 and 0. - * for `left_charge` = 1 and `right_charge` = -1 we get an array [8]. Thus, `fused_charges[8]` - was obtained from fusing 1 and -1. - Args: - left_charges: An np.ndarray of integer charges. - left_flow: The flow direction of the left charges. - right_charges: An np.ndarray of integer charges. - right_flow: The flow direction of the right charges. - target_charge: The target charge. - Returns: - dict: Mapping tuples of integers to np.ndarray of integers. 
- """ - _check_flows([left_flow, right_flow]) - unique_left = np.unique(left_charges) - unique_right = np.unique(right_charges) - fused = fuse_charges([unique_left, unique_right], [left_flow, right_flow]) - left_inds, right_inds = unfuse( - np.nonzero(fused == target_charge)[0], len(unique_left), - len(unique_right)) - left_c = unique_left[left_inds] - right_c = unique_right[right_inds] - len_right_charges = len(right_charges) - linear_positions = {} - for left_charge, right_charge in zip(left_c, right_c): - left_positions = np.nonzero(left_charges == left_charge)[0] - left_offsets = np.expand_dims(left_positions * len_right_charges, 1) - right_offsets = np.expand_dims( - np.nonzero(right_charges == right_charge)[0], 0) - linear_positions[(left_charge, right_charge)] = np.reshape( - left_offsets + right_offsets, - left_offsets.shape[0] * right_offsets.shape[1]) - return np.sort(np.concatenate(list(linear_positions.values()))) - - -def find_dense_positions(left_charges: np.ndarray, left_flow: int, - right_charges: np.ndarray, right_flow: int, - target_charge: int) -> Dict: +def find_dense_positions( + left_charges: Union[BaseCharge, ChargeCollection], left_flow: int, + right_charges: Union[BaseCharge, ChargeCollection], right_flow: int, + target_charge: Union[BaseCharge, ChargeCollection]) -> np.ndarray: """ Find the dense locations of elements (i.e. the index-values within the DENSE tensor) in the vector `fused_charges` (resulting from fusing np.ndarrays @@ -769,41 +381,49 @@ def find_dense_positions(left_charges: np.ndarray, left_flow: int, right_flow: The flow direction of the right charges. target_charge: The target charge. Returns: - dict: Mapping tuples of integers to np.ndarray of integers. + np.ndarray: The indices of the elements fusing to `target_charge`. 
""" _check_flows([left_flow, right_flow]) - unique_left, left_degeneracies = np.unique(left_charges, return_counts=True) - unique_right, right_degeneracies = np.unique( - right_charges, return_counts=True) - - common_charges = np.intersect1d( - unique_left, (target_charge - right_flow * unique_right) * left_flow, - assume_unique=True) + unique_left, left_degeneracies = left_charges.unique(return_counts=True) + unique_right, right_degeneracies = right_charges.unique(return_counts=True) + + tmp_charges = (target_charge + (unique_right * right_flow * (-1))) * left_flow + concatenated = unique_left.concatenate(tmp_charges) + tmp_unique, counts = concatenated.unique(return_counts=True) + common_charges = tmp_unique[ + counts == 2] #common_charges is a BaseCharge or ChargeCollection right_locations = {} - for c in common_charges: + for n in range(len(common_charges)): + c = common_charges[n] - right_locations[(target_charge - left_flow * c) * right_flow] = np.nonzero( - right_charges == (target_charge - left_flow * c) * right_flow)[0] + right_charge = (target_charge + (c * left_flow * (-1))) * right_flow + right_locations[right_charge.get_item(0)] = np.nonzero( + right_charges == right_charge)[0] len_right_charges = len(right_charges) indices = [] for n in range(len(left_charges)): c = left_charges[n] + right_charge = (target_charge + (c * left_flow * (-1))) * right_flow + if c not in common_charges: continue - indices.append(n * len_right_charges + right_locations[ - (target_charge - left_flow * c) * right_flow]) + indices.append(n * len_right_charges + + right_locations[right_charge.get_item(0)]) + return np.concatenate(indices) -def find_sparse_positions(left_charges: np.ndarray, left_flow: int, - right_charges: np.ndarray, right_flow: int, - target_charges: Union[List[int], np.ndarray]) -> Dict: +def find_sparse_positions( + left_charges: Union[BaseCharge, ChargeCollection], left_flow: int, + right_charges: Union[BaseCharge, ChargeCollection], right_flow: int, + 
target_charges: Union[BaseCharge, ChargeCollection]) -> Dict: """ - Find the sparse locations of elements (i.e. the index-values within the SPARSE tensor) - in the vector `fused_charges` (resulting from fusing np.ndarrays - `left_charges` and `right_charges`) that have a value of `target_charge`, - assuming that all elements different from `target_charges` are `0`. + Find the sparse locations of elements (i.e. the index-values within + the SPARSE tensor) in the vector `fused_charges` (resulting from + fusing `left_charges` and `right_charges`) + that have a value of `target_charges`, assuming that all elements + different from `target_charges` are `0`. For example, given ``` left_charges = [-2,0,1,0,0] @@ -835,18 +455,16 @@ def find_sparse_positions(left_charges: np.ndarray, left_flow: int, #FIXME: this is probably still not optimal _check_flows([left_flow, right_flow]) - target_charges = np.unique(target_charges) - unique_left = np.unique(left_charges) - unique_right = np.unique(right_charges) - fused = fuse_charges([unique_left, unique_right], [left_flow, right_flow]) + target_charges = target_charges.unique() + unique_left = left_charges.unique() + unique_right = right_charges.unique() + fused = unique_left * left_flow + unique_right * right_flow #compute all unique charges that can add up to #target_charges left_inds, right_inds = [], [] for target_charge in target_charges: - li, ri = unfuse( - np.nonzero(fused == target_charge)[0], len(unique_left), - len(unique_right)) + li, ri = np.divmod(np.nonzero(fused == target_charge)[0], len(unique_right)) left_inds.append(li) right_inds.append(ri) @@ -855,37 +473,41 @@ def find_sparse_positions(left_charges: np.ndarray, left_flow: int, unique_right_charges = unique_right[np.unique(np.concatenate(right_inds))] #only keep those charges that are relevant - relevant_left_charges = left_charges[np.isin(left_charges, - unique_left_charges)] - relevant_right_charges = right_charges[np.isin(right_charges, - 
unique_right_charges)] + relevant_left_charges = left_charges[left_charges.isin(unique_left_charges)] + relevant_right_charges = right_charges[right_charges.isin( + unique_right_charges)] - unique_right_charges, right_dims = np.unique( - relevant_right_charges, return_counts=True) + unique_right_charges, right_dims = relevant_right_charges.unique( + return_counts=True) right_degeneracies = dict(zip(unique_right_charges, right_dims)) #generate a degeneracy vector which for each value r in relevant_right_charges #holds the corresponding number of non-zero elements `relevant_right_charges` #that can add up to `target_charges`. degeneracy_vector = np.empty(len(relevant_left_charges), dtype=np.int64) right_indices = {} - for left_charge in unique_left_charges: - total_degeneracy = np.sum(right_dims[np.isin( - left_flow * left_charge + right_flow * unique_right_charges, - target_charges)]) - tmp_relevant_right_charges = relevant_right_charges[np.isin( - relevant_right_charges, - (target_charges - left_flow * left_charge) * right_flow)] - for target_charge in target_charges: - right_indices[(left_charge, target_charge)] = np.nonzero( - tmp_relevant_right_charges == - (target_charge - left_flow * left_charge) * right_flow)[0] + for n in range(len(unique_left_charges)): + left_charge = unique_left_charges[n] + total_charge = left_charge * left_flow + unique_right_charges * right_flow + total_degeneracy = np.sum(right_dims[total_charge.isin(target_charges)]) + tmp_relevant_right_charges = relevant_right_charges[ + relevant_right_charges.isin( + (target_charges + left_charge * ((-1) * left_flow)) * right_flow)] + + for n in range(len(target_charges)): + target_charge = target_charges[n] + right_indices[( + left_charge.get_item(0), target_charge.get_item(0))] = np.nonzero( + tmp_relevant_right_charges == (target_charge + left_charge * ( + (-1) * left_flow)) * right_flow)[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy stop_positions = 
np.cumsum(degeneracy_vector) start_positions = stop_positions - degeneracy_vector blocks = {t: [] for t in target_charges} + # iterator returns tuple of `int` for ChargeCollection objects + # and `int` for Ba seCharge objects (both hashable) for left_charge in unique_left_charges: a = np.expand_dims(start_positions[relevant_left_charges == left_charge], 0) for target_charge in target_charges: @@ -900,141 +522,10 @@ def find_sparse_positions(left_charges: np.ndarray, left_flow: int, return out -def compute_dense_to_sparse_mapping_deprecated(charges: List[np.ndarray], - flows: List[Union[bool, int]], - target_charge: int) -> int: - """ - Compute the mapping from multi-index positions to the linear positions - within the sparse data container, given the meta-data of a symmetric tensor. - This function returns a list of np.ndarray `index_positions`, with - `len(index_positions)=len(charges)` (equal to the rank of the tensor). - When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. - ` - multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) - ` - with `r` the rank of the tensor and `N` the number of non-zero elements of - the symmetric tensor, then the element at position `n` within the linear - data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], - i.e. `data[n]` has the multi-index `multi_indices[n,:]`, and the total charges - can for example be obtained using - ``` - index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) - total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) - for n in range(len(charges)): - total_charges += flows[n]*charges[n][index_positions[n]] - np.testing.assert_allclose(total_charges, 0) - ``` - Args: - charges: List of np.ndarray of int, one for each leg of the - underlying tensor. Each np.ndarray `charges[leg]` - is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. 
- flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - target_charge: The total target charge of the blocks to be calculated. - Returns: - np.ndarray: An (N, r) np.ndarray of dtype np.int16, - with `N` the number of non-zero elements, and `r` - the rank of the tensor. - """ - #find the best partition (the one where left and right dimensions are - #closest - dims = np.asarray([len(c) for c in charges]) - t1 = time.time() - fused_charges = fuse_charges(charges, flows) - nz_indices = np.nonzero(fused_charges == target_charge)[0] - if len(nz_indices) == 0: - raise ValueError( - "`charges` do not add up to a total charge {}".format(target_charge)) - - index_locations = [] - for n in reversed(range(len(charges))): - t1 = time.time() - nz_indices, right_indices = unfuse(nz_indices, np.prod(dims[0:n]), dims[n]) - index_locations.insert(0, right_indices) - print(time.time() - t1) - return index_locations - - -def compute_dense_to_sparse_mapping_2(charges: List[np.ndarray], - flows: List[Union[bool, int]], - target_charge: int) -> int: - """ - Compute the mapping from multi-index positions to the linear positions - within the sparse data container, given the meta-data of a symmetric tensor. - This function returns a list of np.ndarray `index_positions`, with - `len(index_positions)=len(charges)` (equal to the rank of the tensor). - When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. - ` - multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) - ` - with `r` the rank of the tensor and `N` the number of non-zero elements of - the symmetric tensor, then the element at position `n` within the linear - data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], - i.e. 
`data[n]` has the multi-index `multi_indices[n,:]`, and the total charges - can for example be obtained using - ``` - index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) - total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) - for n in range(len(charges)): - total_charges += flows[n]*charges[n][index_positions[n]] - np.testing.assert_allclose(total_charges, 0) - ``` - Args: - charges: List of np.ndarray of int, one for each leg of the - underlying tensor. Each np.ndarray `charges[leg]` - is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - target_charge: The total target charge of the blocks to be calculated. - Returns: - np.ndarray: An (N, r) np.ndarray of dtype np.int16, - with `N` the number of non-zero elements, and `r` - the rank of the tensor. 
- """ - #find the best partition (the one where left and right dimensions are - #closest - dims = np.asarray([len(c) for c in charges]) - - #note: left_charges and right_charges have been fused from RIGHT to LEFT - left_charges, right_charges, partition = _find_best_partition(charges, flows) - t1 = time.time() - nz_indices = find_dense_positions( - left_charges, 1, right_charges, 1, target_charge=target_charge) - print(time.time() - t1) - if len(nz_indices) == 0: - raise ValueError( - "`charges` do not add up to a total charge {}".format(target_charge)) - t1 = time.time() - nz_left_indices, nz_right_indices = unfuse(nz_indices, len(left_charges), - len(right_charges)) - print(time.time() - t1) - index_locations = [] - #first unfuse left charges - for n in range(partition): - t1 = time.time() - indices, nz_left_indices = unfuse(nz_left_indices, dims[n], - np.prod(dims[n + 1:partition])) - index_locations.append(indices) - print(time.time() - t1) - for n in range(partition, len(dims)): - t1 = time.time() - indices, nz_right_indices = unfuse(nz_right_indices, dims[n], - np.prod(dims[n + 1::])) - index_locations.append(indices) - print(time.time() - t1) - - return index_locations - - -def compute_dense_to_sparse_mapping(charges: List[np.ndarray], - flows: List[Union[bool, int]], - target_charge: int) -> int: +def compute_dense_to_sparse_mapping( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[bool, int]], + target_charge: Union[BaseCharge, ChargeCollection]) -> List[np.ndarray]: """ Compute the mapping from multi-index positions to the linear positions within the sparse data container, given the meta-data of a symmetric tensor. @@ -1108,6 +599,9 @@ class BlockSparseTensor: The tensor data is stored in self.data, a 1d np.ndarray. 
""" + def copy(self): + return BlockSparseTensor(self.data.copy(), [i.copy() for i in self.indices]) + def __init__(self, data: np.ndarray, indices: List[Index]) -> None: """ Args: @@ -1116,14 +610,29 @@ def __init__(self, data: np.ndarray, indices: List[Index]) -> None: and `flows` indices: List of `Index` objecst, one for each leg. """ + for n, i in enumerate(indices): + if i is None: + i.name = 'index_{}'.format(n) + + index_names = [ + i.name if i.name else 'index_{}'.format(n) + for n, i in enumerate(indices) + ] + unique, cnts = np.unique(index_names, return_counts=True) + if np.any(cnts > 1): + raise ValueError("Index names {} appeared multiple times. " + "Please rename indices uniquely.".format( + unique[cnts > 1])) + self.indices = indices _check_flows(self.flows) num_non_zero_elements = compute_num_nonzero(self.charges, self.flows) if num_non_zero_elements != len(data.flat): - raise ValueError("number of tensor elements defined " + raise ValueError("number of tensor elements {} defined " "by `charges` is different from" - " len(data)={}".format(len(data.flat))) + " len(data)={}".format(num_non_zero_elements, + len(data.flat))) self.data = np.asarray(data.flat) #do not copy data @@ -1145,6 +654,42 @@ def randn(cls, indices: List[Index], data = backend.randn((num_non_zero_elements,), dtype=dtype) return cls(data=data, indices=indices) + @classmethod + def ones(cls, indices: List[Index], + dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": + """ + Initialize a symmetric tensor with ones. + Args: + indices: List of `Index` objecst, one for each leg. + dtype: An optional numpy dtype. 
The dtype of the tensor + Returns: + BlockSparseTensor + """ + charges = [i.charges for i in indices] + flows = [i.flow for i in indices] + num_non_zero_elements = compute_num_nonzero(charges, flows) + backend = backend_factory.get_backend('numpy') + data = backend.ones((num_non_zero_elements,), dtype=dtype) + return cls(data=data, indices=indices) + + @classmethod + def zeros(cls, indices: List[Index], + dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": + """ + Initialize a symmetric tensor with zeros. + Args: + indices: List of `Index` objecst, one for each leg. + dtype: An optional numpy dtype. The dtype of the tensor + Returns: + BlockSparseTensor + """ + charges = [i.charges for i in indices] + flows = [i.flow for i in indices] + num_non_zero_elements = compute_num_nonzero(charges, flows) + backend = backend_factory.get_backend('numpy') + data = backend.zeros((num_non_zero_elements,), dtype=dtype) + return cls(data=data, indices=indices) + @classmethod def random(cls, indices: List[Index], dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": @@ -1158,6 +703,7 @@ def random(cls, indices: List[Index], """ charges = [i.charges for i in indices] flows = [i.flow for i in indices] + num_non_zero_elements = compute_num_nonzero(charges, flows) dtype = dtype if dtype is not None else self.np.float64 @@ -1171,6 +717,10 @@ def init_random(): return cls(data=init_random(), indices=indices) + @property + def index_names(self): + return [i.name for i in self.indices] + @property def rank(self): return len(self.indices) @@ -1205,230 +755,94 @@ def flows(self): def charges(self): return [i.charges for i in self.indices] - def transpose(self, - order: Union[List[int], np.ndarray], - transposed_linear_positions: Optional[np.ndarray] = None - ) -> "BlockSparseTensor": + def transpose( + self, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False) -> "BlockSparseTensor": """ 
Transpose the tensor into the new order `order`. This routine currently shuffles data. - Args: + Args: order: The new order of indices. - transposed_linear_positions: An np.ndarray of int for reshuffling the data, - typically the output of a prior call to `transpose`. Passing `transposed_linear_positions` + permutation: An np.ndarray of int for reshuffling the data, + typically the output of a prior call to `transpose`. Passing `permutation` can greatly speed up the transposition. + return_permutation: If `True`, return the the permutation data. Returns: BlockSparseTensor: The transposed tensor. """ - #FIXME: this implementation uses scipy.sparse.csr_matrix to generate the - #lookup-table from dense to sparse indices. According to some quick - #testing, the final lookup is currently the bottleneck. - #FIXME: transpose currently shuffles data. This can in principle be postponed - #until `tensordot` or `find_diagonal_sparsenn_blocks`, at the cost of - #maintaining two lookup tables for sparse-to-dense positions and dense-to-sparse - #positions + if (permutation is not None) and (len(permutation) != len(self.data)): + raise ValueError("len(permutation) != len(tensor.data).") + if len(order) != self.rank: raise ValueError( "`len(order)={}` is different form `self.rank={}`".format( len(order), self.rank)) - #transpose is the only function using self.dense_to_sparse_table - #so we can initialize it here. This will change if we are implementing - #lazy shuffling of data. 
In this case, `find_diagonal_sparse_blocks` - #also needs + + #check for trivial permutation + if np.all(order == np.arange(len(order))): + if return_permutation: + return np.arange(len(self.data)) + return #we use elementary indices here because it is #more efficient to get the fused charges using #the best partition - if transposed_linear_positions is None: + if permutation is None: elementary_indices = {} flat_elementary_indices = [] - - for n in range(self.rank): + for n in range(len(self.indices)): elementary_indices[n] = self.indices[n].get_elementary_indices() flat_elementary_indices.extend(elementary_indices[n]) flat_index_list = np.arange(len(flat_elementary_indices)) cum_num_legs = np.append( - 0, np.cumsum([len(elementary_indices[n]) for n in range(self.rank)])) - flat_order = np.concatenate( - [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) + 0, + np.cumsum( + [len(elementary_indices[n]) for n in range(len(self.indices))])) flat_charges = [i.charges for i in flat_elementary_indices] flat_flows = [i.flow for i in flat_elementary_indices] flat_dims = [len(c) for c in flat_charges] flat_strides = np.flip(np.append(1, np.cumprod(np.flip(flat_dims[1::])))) - if not hasattr(self, 'dense_to_sparse_table'): - #find the best partition into left and right charges - left_charges, right_charges, _ = _find_best_partition( - flat_charges, flat_flows) - #find the index-positions of the elements in the fusion - #of `left_charges` and `right_charges` that have `0` - #total charge (those are the only non-zero elements). 
- linear_positions = find_dense_positions( - left_charges, 1, right_charges, 1, target_charge=0) - - self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( - len(self.data)), (linear_positions, - np.zeros(len(self.data), dtype=np.int64)))) - + flat_order = np.concatenate( + [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) + #find the best partition into left and right charges + left_charges, right_charges, _ = _find_best_partition( + flat_charges, flat_flows) + #find the index-positions of the elements in the fusion + #of `left_charges` and `right_charges` that have `0` + #total charge (those are the only non-zero elements). + linear_positions = find_dense_positions( + left_charges, + 1, + right_charges, + 1, + target_charge=flat_charges[0].zero_charge) flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] flat_tr_strides = [flat_strides[n] for n in flat_order] flat_tr_dims = [flat_dims[n] for n in flat_order] - tr_left_charges, tr_right_charges, _ = _find_best_partition( + tr_left_charges, tr_right_charges, partition = _find_best_partition( flat_tr_charges, flat_tr_flows) - #FIXME: this should be done without fully fusing the strides - tr_dense_linear_positions = fuse_charges([ + tr_linear_positions = find_dense_positions( + tr_left_charges, 1, tr_right_charges, 1, tr_left_charges.zero_charge) + stride_arrays = [ np.arange(flat_tr_dims[n]) * flat_tr_strides[n] for n in range(len(flat_tr_dims)) - ], - flows=[1] * len(flat_tr_dims)) - tr_linear_positions = find_dense_positions(tr_left_charges, 1, - tr_right_charges, 1, 0) + ] - inds = np.squeeze(self.dense_to_sparse_table[ - tr_dense_linear_positions[tr_linear_positions], 0].toarray()) - else: - inds = transposed_linear_positions - self.data = self.data[inds] - return inds + dense_permutation = _find_values_in_fused( + tr_linear_positions, fuse_ndarrays(stride_arrays[0:partition]), + fuse_ndarrays(stride_arrays[partition::])) + assert 
np.all(np.sort(dense_permutation) == linear_positions) + permutation = np.searchsorted(linear_positions, dense_permutation) - def transpose_intersect1d( - self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": - """ - Transpose the tensor into the new order `order` - Args: pp - order: The new order of indices. - Returns: - BlockSparseTensor: The transposed tensor. - """ - #FIXME: this implementation uses scipy.sparse.csr_matrix to generate the - #lookup-table from dense to sparse indices. According to some quick - #testing, the final lookup is currently the bottleneck. - #FIXME: transpose currently shuffles data. This can in principle be postponed - #until `tensordot` or `find_diagonal_sparse_blocks` - if len(order) != self.rank: - raise ValueError(len(order), self.rank) - charges = self.charges #call only once in case some of the indices are merged indices - dims = [len(c) for c in charges] - - strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) - #find the best partition into left and right charges - left_charges, right_charges, _ = _find_best_partition(charges, self.flows) - #find the index-positions of the elements in the fusion - #of `left_charges` and `right_charges` that have `0` - #total charge (those are the only non-zero elements). 
- linear_positions = find_dense_positions( - left_charges, 1, right_charges, 1, target_charge=0) - - tr_charges = [charges[n] for n in order] - tr_flows = [self.flows[n] for n in order] - tr_strides = [strides[n] for n in order] - tr_dims = [dims[n] for n in order] - tr_left_charges, tr_right_charges, _ = _find_best_partition( - tr_charges, tr_flows) - - tr_dense_linear_positions = fuse_charges( - [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], - flows=[1] * len(tr_dims)) - tr_linear_positions = find_dense_positions(tr_left_charges, 1, - tr_right_charges, 1, 0) - new_linear_positions = tr_dense_linear_positions[tr_linear_positions] - _, _, inds = np.intersect1d( - linear_positions, - new_linear_positions, - return_indices=True, - assume_unique=True) - self.data = self.data[inds] - - # def transpose_lookup(self, order: Union[List[int], np.ndarray] - # ) -> "BlockSparseTensor": - # """ - # Deprecated - - # Transpose the tensor into the new order `order`. Uses a simple cython std::map - # for the lookup - # Args: - # order: The new order of indices. - # Returns: - # BlockSparseTensor: The transposed tensor. - # """ - # if len(order) != self.rank: - # raise ValueError( - # "`len(order)={}` is different form `self.rank={}`".format( - # len(order), self.rank)) - # charges = self.charges #call only once in case some of the indices are merged indices - # dims = [len(c) for c in charges] - - # strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) - # #find the best partition into left and right charges - # left_charges, right_charges, _ = _find_best_partition(charges, self.flows) - # #find the index-positions of the elements in the fusion - # #of `left_charges` and `right_charges` that have `0` - # #total charge (those are the only non-zero elements). 
- # linear_positions = find_dense_positions( - # left_charges, 1, right_charges, 1, target_charge=0) - - # tr_charges = [charges[n] for n in order] - # tr_flows = [self.flows[n] for n in order] - # tr_strides = [strides[n] for n in order] - # tr_dims = [dims[n] for n in order] - # tr_left_charges, tr_right_charges, _ = _find_best_partition( - # tr_charges, tr_flows) - # #FIXME: this should be done without fully fusing the strides - # tr_dense_linear_positions = fuse_charges( - # [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], - # flows=[1] * len(tr_dims)) - # tr_linear_positions = find_dense_positions(tr_left_charges, 1, - # tr_right_charges, 1, 0) - # inds = lookup(linear_positions, - # tr_dense_linear_positions[tr_linear_positions]) - # self.data = self.data[inds] - - def transpose_searchsorted( - self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": - """ - Deprecated: - - Transpose the tensor into the new order `order`. Uses `np.searchsorted` - for the lookup. - Args: - order: The new order of indices. - Returns: - BlockSparseTensor: The transposed tensor. - """ - if len(order) != self.rank: - raise ValueError( - "`len(order)={}` is different form `self.rank={}`".format( - len(order), self.rank)) - charges = self.charges #call only once in case some of the indices are merged indices - dims = [len(c) for c in charges] - - strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) - #find the best partition into left and right charges - left_charges, right_charges, _ = _find_best_partition(charges, self.flows) - #find the index-positions of the elements in the fusion - #of `left_charges` and `right_charges` that have `0` - #total charge (those are the only non-zero elements). 
- linear_positions = find_dense_positions( - left_charges, 1, right_charges, 1, target_charge=0) - - tr_charges = [charges[n] for n in order] - tr_flows = [self.flows[n] for n in order] - tr_strides = [strides[n] for n in order] - tr_dims = [dims[n] for n in order] - tr_left_charges, tr_right_charges, _ = _find_best_partition( - tr_charges, tr_flows) - #FIXME: this should be done without fully fusing the strides - tr_dense_linear_positions = fuse_charges( - [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], - flows=[1] * len(tr_dims)) - tr_linear_positions = find_dense_positions(tr_left_charges, 1, - tr_right_charges, 1, 0) - - inds = np.searchsorted(linear_positions, - tr_dense_linear_positions[tr_linear_positions]) - self.data = self.data[inds] + self.indices = [self.indices[n] for n in order] + self.data = self.data[permutation] + if return_permutation: + return permutation def reset_shape(self) -> None: """ @@ -1531,59 +945,27 @@ def raise_error(): i2, i1 = self.indices.pop(), self.indices.pop() self.indices.append(fuse_index_pair(i1, i2)) - def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: + def _get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: """ - Obtain the diagonal blocks of symmetric matrix. + Obtain the diagonal blocks of a symmetric matrix. BlockSparseTensor has to be a matrix. - For matrices with shape[0] << shape[1], this routine avoids explicit fusion - of column charges. + This routine avoids explicit fusion of row or column charges. Args: - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within A.data, i.e. 
- `A.data[locations]` contains the elements belonging to the tensor with + return_data: If `True`, the returned dictionary maps quantum numbers `q` to + an actual `np.ndarray` containing the data of block `q`. + If `False`, the returned dict maps quantum numbers `q` to a list + `[locations, shape]`, where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within `self.data`, i.e. + `self.data[locations]` contains the elements belonging to the tensor with quantum numbers `(q,q). `shape` is the shape of the corresponding array. Returns: - dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) - - """ - if self.rank != 2: - raise ValueError( - "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" - .format(self.rank)) - - row_indices = self.indices[0].get_elementary_indices() - column_indices = self.indices[1].get_elementary_indices() - - return find_diagonal_sparse_blocks( - data=self.data, - row_charges=[i.charges for i in row_indices], - column_charges=[i.charges for i in column_indices], - row_flows=[i.flow for i in row_indices], - column_flows=[i.flow for i in column_indices], - return_data=return_data) + dict: If `return_data=True`: Dictionary mapping charge `q` to an + np.ndarray of rank 2 (a matrix). + If `return_data=False`: Dictionary mapping charge `q` to a + list `[locations, shape]`, where `locations` is an np.ndarray of type + np.int64 containing the locations of the tensor elements within `self.data` - def get_diagonal_blocks_deprecated_1( - self, return_data: Optional[bool] = True) -> Dict: - """ - Obtain the diagonal blocks of symmetric matrix. - BlockSparseTensor has to be a matrix. - For matrices with shape[0] << shape[1], this routine avoids explicit fusion - of column charges. - - Args: - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. 
- If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. - Returns: - dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) """ if self.rank != 2: @@ -1594,7 +976,7 @@ def get_diagonal_blocks_deprecated_1( row_indices = self.indices[0].get_elementary_indices() column_indices = self.indices[1].get_elementary_indices() - return find_diagonal_sparse_blocks_deprecated_1( + return _find_diagonal_sparse_blocks( data=self.data, row_charges=[i.charges for i in row_indices], column_charges=[i.charges for i in column_indices], @@ -1602,36 +984,6 @@ def get_diagonal_blocks_deprecated_1( column_flows=[i.flow for i in column_indices], return_data=return_data) - def get_diagonal_blocks_deprecated_0( - self, return_data: Optional[bool] = True) -> Dict: - """ - Deprecated - - Obtain the diagonal blocks of symmetric matrix. - BlockSparseTensor has to be a matrix. - Args: - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. 
- Returns: - dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) - - """ - if self.rank != 2: - raise ValueError( - "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" - .format(self.rank)) - - return find_diagonal_sparse_blocks_deprecated_0( - data=self.data, - charges=self.charges, - flows=self.flows, - return_data=return_data) - def reshape(tensor: BlockSparseTensor, shape: Union[Iterable[Index], Iterable[int]]) -> BlockSparseTensor: @@ -1656,14 +1008,14 @@ def reshape(tensor: BlockSparseTensor, i2 = Index(charges=q2,flow=-1) i3 = Index(charges=q3,flow=1) A=BlockSparseTensor.randn(indices=[i1,i2,i3]) - print(A.shape) #prints (6,6,6) + print(nA.shape) #prints (6,6,6) reshape(A, (2,3,6,6)) #raises ValueError ``` raises a `ValueError` since (2,3,6,6) is incompatible with the elementary shape (6,6,6) of the tensor. Args: - tensor: A symmetric tensor. + tensopr: A symmetric tensor. shape: The new shape. Can either be a list of `Index` or a list of `int`. Returns: @@ -1673,3 +1025,184 @@ def reshape(tensor: BlockSparseTensor, data=tensor.data.copy(), indices=[i.copy() for i in tensor.indices]) result.reshape(shape) return result + + +def transpose( + tensor: BlockSparseTensor, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False) -> "BlockSparseTensor": + """ + Transpose `tensor` into the new order `order`. This routine currently shuffles + data. + Args: + tensor: The tensor to be transposed. + order: The new order of indices. + permutation: An np.ndarray of int for reshuffling the data, + typically the output of a prior call to `transpose`. Passing `permutation` + can greatly speed up the transposition. + return_permutation: If `True`, return the the permutation data. + Returns: + if `return_permutation == False`: + BlockSparseTensor: The transposed tensor. 
+ if `return_permutation == True`: + BlockSparseTensor, permutation: The transposed tensor + and the permutation data + + """ + if (permutation is not None) and (len(permutation) != len(tensor.data)): + raise ValueError("len(permutation) != len(tensor.data).") + result = tensor.copy() + inds = result.transpose(order, permutation, return_permutation) + if return_permutation: + return result, inds + return result + + +def tensordot(tensor1: BlockSparseTensor, + tensor2: BlockSparseTensor, + axes: Sequence[Sequence[int]], + permutation1: Optional[np.ndarray] = None, + permutation2: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False): + """ + Contract two `BlockSparseTensor`s along `axes`. + Args: + tensor1: First tensor. + tensor2: Second tensor. + axes: The axes to contract. + permutation1: Permutation data for `tensor1`. + permutation2: Permutation data for `tensor2`. + return_permutation: If `True`, return the the permutation data. + Returns: + if `return_permutation == False`: + BlockSparseTensor: The result of contracting `tensor1` and `tensor2`. + if `return_permutation == True`: + BlockSparseTensor, np.ndarrays, np.ndarray: The result of + contracting `tensor1` and `tensor2`, together with their respective + permutation data. 
+ + """ + axes1 = axes[0] + axes2 = axes[1] + if not np.all(np.unique(axes1) == np.sort(axes1)): + raise ValueError( + "Some values in axes[0] = {} appear more than once!".format(axes1)) + if not np.all(np.unique(axes2) == np.sort(axes2)): + raise ValueError( + "Some values in axes[1] = {} appear more than once!".format(axes2n)) + + if max(axes1) >= len(tensor1.shape): + raise ValueError( + "rank of `tensor1` is smaller than `max(axes1) = {}.`".format( + max(axes1))) + elementary_1, elementary_2 = [], [] + for a in axes1: + elementary_1.extend(tensor1.indices[a].get_elementary_indices()) + for a in axes2: + elementary_2.extend(tensor2.indices[a].get_elementary_indices()) + + if len(elementary_2) != len(elementary_1): + raise ValueError("axes1 and axes2 have incompatible elementary" + " shapes {} and {}".format(elementary_1, elementary_2)) + if not np.all( + np.array([i.flow for i in elementary_1]) == + (-1) * np.array([i.flow for i in elementary_2])): + raise ValueError("axes1 and axes2 have incompatible elementary" + " flows {} and {}".format( + np.array([i.flow for i in elementary_1]), + np.array([i.flow for i in elementary_2]))) + + if max(axes2) >= len(tensor2.shape): + raise ValueError( + "rank of `tensor2` is smaller than `max(axes2) = {}`".format( + max(axes1))) + free_axes1 = sorted(set(np.arange(len(tensor1.shape))) - set(axes1)) + free_axes2 = sorted(set(np.arange(len(tensor2.shape))) - set(axes2)) + new_order1 = free_axes1 + list(axes1) + new_order2 = list(axes2) + free_axes2 + + tr1 = transpose( + tensor=tensor1, + order=new_order1, + permutation=permutation1, + return_permutation=return_permutation) + if return_permutation: + permutation1 = tr1[1] + tr1 = tr1[1] + + trshape1 = tr1.dense_shape + Dl1 = np.prod([trshape1[n] for n in range(len(free_axes1))]) + Dr1 = np.prod([trshape1[n] for n in range(len(free_axes1), len(trshape1))]) + + tmp1 = reshape(tr1, (Dl1, Dr1)) + + tr2 = transpose( + tensor=tensor2, + order=new_order2, + permutation=permutation2, 
+ return_permutation=return_permutation) + if return_permutation: + permutation2 = tr2[1] + tr2 = tr2[1] + trshape2 = tr2.dense_shape + Dl2 = np.prod([trshape2[n] for n in range(len(axes2))]) + Dr2 = np.prod([trshape2[n] for n in range(len(axes2), len(trshape2))]) + + tmp2 = reshape(tr2, (Dl2, Dr2)) + + #avoid data-copying here by setting `return_data=False` + column_charges1, data1, start_positions, row_locations, _ = tmp1._get_diagonal_blocks( + return_data=False) + row_charges2, data2, _, _, column_degeneracies = tmp2._get_diagonal_blocks( + return_data=False) + + #get common charges between rows and columns + tmp_charges, cnts = column_charges1.concatenate(row_charges2).unique( + return_counts=True) + common_charges = tmp_charges[cnts == 2] + + #get the flattened indices for the output tensor + indices = [] + indices.extend(tmp1.indices[0].get_elementary_indices()) + indices.extend(tmp2.indices[1].get_elementary_indices()) + index_names = [i.name for i in indices] + unique = np.unique(index_names) + #rename indices if they are not unique + if len(unique) < len(index_names): + for n, i in enumerate(indices): + i.name = 'index_{}'.format(n) + + #initialize the data-vector of the output with zeros + num_nonzero_elements = compute_num_nonzero([i.charges for i in indices], + [i.flow for i in indices]) + data = np.zeros( + num_nonzero_elements, dtype=np.result_type(tensor1.dtype, tensor2.dtype)) + + for c in common_charges: + rlocs = row_locations[c] + cdegs = column_degeneracies[c] + a = np.expand_dims(start_positions[rlocs], 1) + b = np.expand_dims(np.arange(cdegs), 0) + new_locations = np.reshape(a + b, len(rlocs) * cdegs) + i1 = np.nonzero(column_charges1 == c)[0][0] + i2 = np.nonzero(row_charges2 == c)[0][0] + try: + #place the result of the block-matrix multiplication + #into the new data-vector + data[new_locations] = np.matmul( + np.reshape(tensor1.data[data1[i1][0]], data1[i1][1]), + np.reshape(tensor2.data[data2[i2][0]], data2[i2][1])).flat + except 
ValueError: + raise ValueError("for quantum number {}, shapes {} and {} " + "of left and right blocks have " + "incompatible shapes".format(c, data1[i1].shape, + data2[i2].shape)) + + out = BlockSparseTensor(data=data, indices=indices) + resulting_shape = [trshape1[n] for n in range(len(free_axes1)) + ] + [trshape2[n] for n in range(len(axes2), len(trshape2))] + out.reshape(resulting_shape) + if return_permutation: + return out, permutation1, permutation2 + return out diff --git a/tensornetwork/block_tensor/block_tensor_new.py b/tensornetwork/block_tensor/block_tensor_new.py deleted file mode 100644 index f0fc333b5..000000000 --- a/tensornetwork/block_tensor/block_tensor_new.py +++ /dev/null @@ -1,1207 +0,0 @@ -# Copyright 2019 The TensorNetwork Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -import numpy as np -#from tensornetwork.block_tensor.lookup import lookup -# pylint: disable=line-too-long -from tensornetwork.block_tensor.index_new import Index, fuse_index_pair, split_index -from tensornetwork.block_tensor.charge import fuse_degeneracies, fuse_charges, fuse_degeneracies, BaseCharge, ChargeCollection -import numpy as np -import scipy as sp -import itertools -import time -from typing import List, Union, Any, Tuple, Type, Optional, Dict, Iterable, Sequence -Tensor = Any - - -def _find_values_in_fused(indices: np.ndarray, left: np.ndarray, - right: np.ndarray) -> np.ndarray: - """ - Returns fuse(left,right)[indices], i.e. the elements - in the fusion of `left` and `right` at positions `indices'. - """ - left_inds, right_inds = np.divmod(indices, len(right)) - return left[left_inds] + right[right_inds] - - -def fuse_ndarray_pair(array1: Union[List, np.ndarray], - array2: Union[List, np.ndarray]) -> np.ndarray: - """ - Fuse ndarrays `array1` and `array2` by kronecker-addition. - Given `array1 = [0,1,2]` and `array2 = [10,100]`, this returns - `[10, 100, 11, 101, 12, 102]`. - - Args: - array1: np.ndarray - array2: np.ndarray - Returns: - np.ndarray: The result of adding `array1` and `array2` - """ - return np.reshape( - np.asarray(array1)[:, None] + np.asarray(array2)[None, :], - len(array1) * len(array2)) - - -def fuse_ndarrays(arrays: List[Union[List, np.ndarray]]) -> np.ndarray: - """ - Fuse all `arrays` by simple kronecker addition. - Arrays are fused from "right to left", - Args: - arrays: A list of arrays to be fused. - Returns: - np.ndarray: The result of fusing `charges`. 
- """ - if len(arrays) == 1: - return arrays[0] - fused_arrays = arrays[0] - for n in range(1, len(arrays)): - fused_arrays = fuse_ndarray_pair(array1=fused_arrays, array2=arrays[n]) - return fused_arrays - - -def _check_flows(flows: List[int]) -> None: - if (set(flows) != {1}) and (set(flows) != {-1}) and (set(flows) != {-1, 1}): - raise ValueError( - "flows = {} contains values different from 1 and -1".format(flows)) - - -def _find_best_partition( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[int]) -> Tuple[Union[BaseCharge, ChargeCollection], - Union[BaseCharge, ChargeCollection], int]: - """ - compute the best partition for fusing `charges`, i.e. the integer `p` - such that fusing `len(fuse_charges(charges[0:p],flows[0:p]))` is - and `len(fuse_charges(charges[p::],flows[p::]))` are as close as possible. - Returns: - fused_left_charges, fused_right_charges, p - - """ - #FIXME: fusing charges with dims (N,M) with M>~N is faster than fusing charges - # with dims (M,N). Thus, it is not always best to fuse at the minimum cut. - #for example, for dims (1000, 4, 1002), its better to fuse at the cut - #(1000, 4008) than at (4000, 1002), even though the difference between the - #dimensions is minimal for the latter case. We should implement some heuristic - #to find these cuts. 
- if len(charges) == 1: - raise ValueError( - '_expecting `charges` with a length of at least 2, got `len(charges)={}`' - .format(len(charges))) - dims = np.asarray([len(c) for c in charges]) - diffs = [ - np.abs(np.prod(dims[0:n]) - np.prod(dims[n::])) - for n in range(1, len(charges)) - ] - min_inds = np.nonzero(diffs == np.min(diffs))[0] - if len(min_inds) > 1: - right_dims = [np.prod(len(charges[min_ind + 1::])) for min_ind in min_inds] - min_ind = min_inds[np.argmax(right_dims)] - else: - min_ind = min_inds[0] - fused_left_charges = fuse_charges(charges[0:min_ind + 1], - flows[0:min_ind + 1]) - fused_right_charges = fuse_charges(charges[min_ind + 1::], - flows[min_ind + 1::]) - - return fused_left_charges, fused_right_charges, min_ind + 1 - - -def compute_fused_charge_degeneracies( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]] -) -> Tuple[Union[BaseCharge, ChargeCollection], np.ndarray]: - """ - For a list of charges, compute all possible fused charges resulting - from fusing `charges`, together with their respective degeneracies - Args: - charges: List of np.ndarray of int, one for each leg of the - underlying tensor. Each np.ndarray `charges[leg]` - is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - Returns: - Union[BaseCharge, ChargeCollection]: The unique fused charges. - np.ndarray of integers: The degeneracies of each unqiue fused charge. - """ - if len(charges) == 1: - return (charges[0] * flows[0]).unique(return_counts=True) - - # get unique charges and their degeneracies on the first leg. - # We are fusing from "left" to "right". 
- accumulated_charges, accumulated_degeneracies = ( - charges[0] * flows[0]).unique(return_counts=True) - for n in range(1, len(charges)): - #list of unique charges and list of their degeneracies - #on the next unfused leg of the tensor - leg_charges, leg_degeneracies = charges[n].unique(return_counts=True) - #fuse the unique charges - #Note: entries in `fused_charges` are not unique anymore. - #flow1 = 1 because the flow of leg 0 has already been - #mulitplied above - fused_charges = accumulated_charges + leg_charges * flows[n] - #compute the degeneracies of `fused_charges` charges - #`fused_degeneracies` is a list of degeneracies such that - # `fused_degeneracies[n]` is the degeneracy of of - # charge `c = fused_charges[n]`. - fused_degeneracies = fuse_degeneracies(accumulated_degeneracies, - leg_degeneracies) - accumulated_charges = fused_charges.unique() - accumulated_degeneracies = np.empty( - len(accumulated_charges), dtype=np.int64) - - for n in range(len(accumulated_charges)): - accumulated_degeneracies[n] = np.sum( - fused_degeneracies[fused_charges == accumulated_charges[n]]) - - return accumulated_charges, accumulated_degeneracies - - -def compute_num_nonzero(charges: List[np.ndarray], - flows: List[Union[bool, int]]) -> int: - """ - Compute the number of non-zero elements, given the meta-data of - a symmetric tensor. - Args: - charges: List of np.ndarray of int, one for each leg of the - underlying tensor. Each np.ndarray `charges[leg]` - is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - Returns: - int: The number of non-zero elements. 
- """ - accumulated_charges, accumulated_degeneracies = compute_fused_charge_degeneracies( - charges, flows) - res = accumulated_charges == accumulated_charges.zero_charge - - if len(np.nonzero(res)[0]) == 0: - raise ValueError( - "given leg-charges `charges` and flows `flows` are incompatible " - "with a symmetric tensor") - return accumulated_degeneracies[res][0] - - -def find_diagonal_sparse_blocks( - data: np.ndarray, - row_charges: List[Union[BaseCharge, ChargeCollection]], - column_charges: List[Union[BaseCharge, ChargeCollection]], - row_flows: List[Union[bool, int]], - column_flows: List[Union[bool, int]], - return_data: Optional[bool] = False -) -> Tuple[Union[BaseCharge, ChargeCollection], Dict, np.ndarray, Dict, Dict]: - """ - Given the meta data and underlying data of a symmetric matrix, compute - all diagonal blocks and return them in a dict. - `row_charges` and `column_charges` are lists of np.ndarray. The tensor - is viewed as a matrix with rows given by fusing `row_charges` and - columns given by fusing `column_charges`. Note that `column_charges` - are never explicitly fused (`row_charges` are). - Args: - data: An np.ndarray of the data. The number of elements in `data` - has to match the number of non-zero elements defined by `charges` - and `flows` - row_charges: List of np.ndarray, one for each leg of the row-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - column_charges: List of np.ndarray, one for each leg of the column-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - row_flows: A list of integers, one for each entry in `row_charges`. - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - column_flows: A list of integers, one for each entry in `column_charges`. 
- with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the sparse locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. - - Returns: - dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray - or a python list of locations and shapes, depending on the value of `return_data`. - """ - flows = row_flows.copy() - flows.extend(column_flows) - _check_flows(flows) - if len(flows) != (len(row_charges) + len(column_charges)): - raise ValueError( - "`len(flows)` is different from `len(row_charges) + len(column_charges)`" - ) - - #get the unique column-charges - #we only care about their degeneracies, not their order; that's much faster - #to compute since we don't have to fuse all charges explicitly - #`compute_fused_charge_degeneracies` multiplies flows into the column_charges - unique_column_charges, column_dims = compute_fused_charge_degeneracies( - column_charges, column_flows) - #convenience container for storing the degeneracies of each - #column charge - #column_degeneracies = dict(zip(unique_column_charges, column_dims)) - column_degeneracies = dict(zip(unique_column_charges * (-1), column_dims)) - if len(row_charges) > 1: - left_row_charges, right_row_charges, _ = _find_best_partition( - row_charges, row_flows) - unique_left = left_row_charges.unique() - unique_right = right_row_charges.unique() - unique_row_charges = (unique_left + unique_right).unique() - - #get the charges common to rows and columns (only those matter) - concatenated = 
unique_row_charges.concatenate(unique_column_charges * (-1)) - tmp_unique, counts = concatenated.unique(return_counts=True) - common_charges = tmp_unique[ - counts == 2] #common_charges is a BaseCharge or ChargeCollection - - row_locations = find_sparse_positions( - left_charges=left_row_charges, - left_flow=1, - right_charges=right_row_charges, - right_flow=1, - target_charges=common_charges) - - elif len(row_charges) == 1: - fused_row_charges = fuse_charges(row_charges, row_flows) - - #get the unique row-charges - unique_row_charges, row_dims = fused_row_charges.unique(return_counts=True) - #get the charges common to rows and columns (only those matter) - #get the charges common to rows and columns (only those matter) - concatenated = unique_row_charges.concatenate(unique_column_charges * (-1)) - tmp_unique, counts = concatenated.unique(return_counts=True) - common_charges = tmp_unique[ - counts == 2] #common_charges is a BaseCharge or ChargeCollection - - relevant_fused_row_charges = fused_row_charges[fused_row_charges.isin( - common_charges)] - row_locations = {} - for c in common_charges: - #c = common_charges.get_item(n) - row_locations[c] = np.nonzero(relevant_fused_row_charges == c)[0] - else: - raise ValueError('Found an empty sequence for `row_charges`') - - degeneracy_vector = np.empty( - np.sum([len(v) for v in row_locations.values()]), dtype=np.int64) - #for each charge `c` in `common_charges` we generate a boolean mask - #for indexing the positions where `relevant_column_charges` has a value of `c`. - masks = {} - for c in common_charges: - degeneracy_vector[row_locations[c]] = column_degeneracies[c] - - # the result of the cumulative sum is a vector containing - # the stop positions of the non-zero values of each row - # within the data vector. - # E.g. 
for `relevant_row_charges` = [0,1,0,0,3], and - # column_degeneracies[0] = 10 - # column_degeneracies[1] = 20 - # column_degeneracies[3] = 30 - # we have - # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] - # The starting positions of consecutive elements (in row-major order) in - # each row with charge `c=0` within the data vector are then simply obtained using - # masks[0] = [True, False, True, True, False] - # and `stop_positions[masks[0]] - column_degeneracies[0]` - stop_positions = np.cumsum(degeneracy_vector) - start_positions = stop_positions - degeneracy_vector - blocks = [] - - for c in common_charges: - #numpy broadcasting is substantially faster than kron! - rlocs = row_locations[c] - rlocs.sort() #sort in place (we need it again later) - cdegs = column_degeneracies[c] - a = np.expand_dims(start_positions[rlocs], 1) - b = np.expand_dims(np.arange(cdegs), 0) - inds = np.reshape(a + b, len(rlocs) * cdegs) - if not return_data: - blocks.append([inds, (len(rlocs), cdegs)]) - else: - blocks.append(np.reshape(data[inds], (len(rlocs), cdegs))) - return common_charges, blocks, start_positions, row_locations, column_degeneracies - - -def find_dense_positions( - left_charges: Union[BaseCharge, ChargeCollection], left_flow: int, - right_charges: Union[BaseCharge, ChargeCollection], right_flow: int, - target_charge: Union[BaseCharge, ChargeCollection]) -> np.ndarray: - """ - Find the dense locations of elements (i.e. the index-values within the DENSE tensor) - in the vector `fused_charges` (resulting from fusing np.ndarrays - `left_charges` and `right_charges`) that have a value of `target_charge`. - For example, given - ``` - left_charges = [-2,0,1,0,0] - right_charges = [-1,0,2,1] - target_charge = 0 - fused_charges = fuse_charges([left_charges, right_charges],[1,1]) - print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] - ``` - we want to find the all different blocks - that fuse to `target_charge=0`, i.e. 
where `fused_charges==0`, - together with their corresponding index-values of the data in the dense array. - `find_dense_blocks` returns a dict mapping tuples `(left_charge, right_charge)` - to an array of integers. - For the above example, we get: - * for `left_charge` = -2 and `right_charge` = 2 we get an array [2]. Thus, `fused_charges[2]` - was obtained from fusing -2 and 2. - * for `left_charge` = 0 and `right_charge` = 0 we get an array [5, 13, 17]. Thus, - `fused_charges[5,13,17]` were obtained from fusing 0 and 0. - * for `left_charge` = 1 and `right_charge` = -1 we get an array [8]. Thus, `fused_charges[8]` - was obtained from fusing 1 and -1. - Args: - left_charges: An np.ndarray of integer charges. - left_flow: The flow direction of the left charges. - right_charges: An np.ndarray of integer charges. - right_flow: The flow direction of the right charges. - target_charge: The target charge. - Returns: - np.ndarray: The indices of the elements fusing to `target_charge`. - """ - _check_flows([left_flow, right_flow]) - unique_left, left_degeneracies = left_charges.unique(return_counts=True) - unique_right, right_degeneracies = right_charges.unique(return_counts=True) - - tmp_charges = (target_charge + (unique_right * right_flow * (-1))) * left_flow - concatenated = unique_left.concatenate(tmp_charges) - tmp_unique, counts = concatenated.unique(return_counts=True) - common_charges = tmp_unique[ - counts == 2] #common_charges is a BaseCharge or ChargeCollection - right_locations = {} - for n in range(len(common_charges)): - c = common_charges[n] - - right_charge = (target_charge + (c * left_flow * (-1))) * right_flow - right_locations[right_charge.get_item(0)] = np.nonzero( - right_charges == right_charge)[0] - - len_right_charges = len(right_charges) - indices = [] - for n in range(len(left_charges)): - c = left_charges[n] - right_charge = (target_charge + (c * left_flow * (-1))) * right_flow - - if c not in common_charges: - continue - indices.append(n * 
len_right_charges + - right_locations[right_charge.get_item(0)]) - - return np.concatenate(indices) - - -def find_sparse_positions( - left_charges: Union[BaseCharge, ChargeCollection], left_flow: int, - right_charges: Union[BaseCharge, ChargeCollection], right_flow: int, - target_charges: Union[BaseCharge, ChargeCollection]) -> Dict: - """ - Find the sparse locations of elements (i.e. the index-values within - the SPARSE tensor) in the vector `fused_charges` (resulting from - fusing `left_charges` and `right_charges`) - that have a value of `target_charges`, assuming that all elements - different from `target_charges` are `0`. - For example, given - ``` - left_charges = [-2,0,1,0,0] - right_charges = [-1,0,2,1] - target_charges = [0,1] - fused_charges = fuse_charges([left_charges, right_charges],[1,1]) - print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] - ``` 0 1 2 3 4 5 6 7 8 - we want to find the all different blocks - that fuse to `target_charges=[0,1]`, i.e. where `fused_charges==0` or `1`, - together with their corresponding sparse index-values of the data in the sparse array, - assuming that all elements in `fused_charges` different from `target_charges` are 0. - - `find_sparse_blocks` returns a dict mapping integers `target_charge` - to an array of integers denoting the sparse locations of elements within - `fused_charges`. - For the above example, we get: - * `target_charge=0`: [0,1,3,5,7] - * `target_charge=1`: [2,4,6,8] - Args: - left_charges: An np.ndarray of integer charges. - left_flow: The flow direction of the left charges. - right_charges: An np.ndarray of integer charges. - right_flow: The flow direction of the right charges. - target_charge: The target charge. - Returns: - dict: Mapping integers to np.ndarray of integers. 
- """ - #FIXME: this is probably still not optimal - - _check_flows([left_flow, right_flow]) - target_charges = target_charges.unique() - unique_left = left_charges.unique() - unique_right = right_charges.unique() - fused = unique_left * left_flow + unique_right * right_flow - - #compute all unique charges that can add up to - #target_charges - left_inds, right_inds = [], [] - for target_charge in target_charges: - li, ri = np.divmod(np.nonzero(fused == target_charge)[0], len(unique_right)) - left_inds.append(li) - right_inds.append(ri) - - #now compute the relevant unique left and right charges - unique_left_charges = unique_left[np.unique(np.concatenate(left_inds))] - unique_right_charges = unique_right[np.unique(np.concatenate(right_inds))] - - #only keep those charges that are relevant - relevant_left_charges = left_charges[left_charges.isin(unique_left_charges)] - relevant_right_charges = right_charges[right_charges.isin( - unique_right_charges)] - - unique_right_charges, right_dims = relevant_right_charges.unique( - return_counts=True) - right_degeneracies = dict(zip(unique_right_charges, right_dims)) - #generate a degeneracy vector which for each value r in relevant_right_charges - #holds the corresponding number of non-zero elements `relevant_right_charges` - #that can add up to `target_charges`. 
- degeneracy_vector = np.empty(len(relevant_left_charges), dtype=np.int64) - right_indices = {} - - for n in range(len(unique_left_charges)): - left_charge = unique_left_charges[n] - total_charge = left_charge * left_flow + unique_right_charges * right_flow - total_degeneracy = np.sum(right_dims[total_charge.isin(target_charges)]) - tmp_relevant_right_charges = relevant_right_charges[ - relevant_right_charges.isin( - (target_charges + left_charge * ((-1) * left_flow)) * right_flow)] - - for n in range(len(target_charges)): - target_charge = target_charges[n] - right_indices[( - left_charge.get_item(0), target_charge.get_item(0))] = np.nonzero( - tmp_relevant_right_charges == (target_charge + left_charge * ( - (-1) * left_flow)) * right_flow)[0] - - degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy - - stop_positions = np.cumsum(degeneracy_vector) - start_positions = stop_positions - degeneracy_vector - blocks = {t: [] for t in target_charges} - # iterator returns tuple of `int` for ChargeCollection objects - # and `int` for Ba seCharge objects (both hashable) - for left_charge in unique_left_charges: - a = np.expand_dims(start_positions[relevant_left_charges == left_charge], 0) - for target_charge in target_charges: - ri = right_indices[(left_charge, target_charge)] - if len(ri) != 0: - b = np.expand_dims(ri, 1) - tmp = a + b - blocks[target_charge].append(np.reshape(tmp, np.prod(tmp.shape))) - out = {} - for target_charge in target_charges: - out[target_charge] = np.concatenate(blocks[target_charge]) - return out - - -def compute_dense_to_sparse_mapping( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]], - target_charge: Union[BaseCharge, ChargeCollection]) -> List[np.ndarray]: - """ - Compute the mapping from multi-index positions to the linear positions - within the sparse data container, given the meta-data of a symmetric tensor. 
- This function returns a list of np.ndarray `index_positions`, with - `len(index_positions)=len(charges)` (equal to the rank of the tensor). - When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. - ` - multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) - ` - with `r` the rank of the tensor and `N` the number of non-zero elements of - the symmetric tensor, then the element at position `n` within the linear - data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], - i.e. `data[n]` has the multi-index `multi_indices[n,:]`, and the total charges - can for example be obtained using - ``` - index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) - total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) - for n in range(len(charges)): - total_charges += flows[n]*charges[n][index_positions[n]] - np.testing.assert_allclose(total_charges, 0) - ``` - Args: - charges: List of np.ndarray of int, one for each leg of the - underlying tensor. Each np.ndarray `charges[leg]` - is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - target_charge: The total target charge of the blocks to be calculated. - Returns: - list of np.ndarray: A list of length `r`, with `r` the rank of the tensor. - Each element in the list is an N-dimensional np.ndarray of int, - with `N` the number of non-zero elements. 
- """ - #find the best partition (the one where left and right dimensions are - #closest - dims = np.asarray([len(c) for c in charges]) - #note: left_charges and right_charges have been fused from RIGHT to LEFT - left_charges, right_charges, partition = _find_best_partition(charges, flows) - nz_indices = find_dense_positions( - left_charges, 1, right_charges, 1, target_charge=target_charge) - - if len(nz_indices) == 0: - raise ValueError( - "`charges` do not add up to a total charge {}".format(target_charge)) - return np.unravel_index(nz_indices, dims) - - -class BlockSparseTensor: - """ - Minimal class implementation of block sparsity. - The class design follows Glen's proposal (Design 0). - The class currently only supports a single U(1) symmetry - and only numpy.ndarray. - - Attributes: - * self.data: A 1d np.ndarray storing the underlying - data of the tensor - * self.charges: A list of `np.ndarray` of shape - (D,), where D is the bond dimension. Once we go beyond - a single U(1) symmetry, this has to be updated. - - * self.flows: A list of integers of length `k`. - `self.flows` determines the flows direction of charges - on each leg of the tensor. A value of `-1` denotes - outflowing charge, a value of `1` denotes inflowing - charge. - - The tensor data is stored in self.data, a 1d np.ndarray. - """ - - def copy(self): - return BlockSparseTensor(self.data.copy(), [i.copy() for i in self.indices]) - - def __init__(self, data: np.ndarray, indices: List[Index]) -> None: - """ - Args: - data: An np.ndarray of the data. The number of elements in `data` - has to match the number of non-zero elements defined by `charges` - and `flows` - indices: List of `Index` objecst, one for each leg. 
- """ - for n, i in enumerate(indices): - if i is None: - i.name = 'index_{}'.format(n) - - index_names = [ - i.name if i.name else 'index_{}'.format(n) - for n, i in enumerate(indices) - ] - unique, cnts = np.unique(index_names, return_counts=True) - if np.any(cnts > 1): - raise ValueError("Index names {} appeared multiple times. " - "Please rename indices uniquely.".format( - unique[cnts > 1])) - - self.indices = indices - _check_flows(self.flows) - num_non_zero_elements = compute_num_nonzero(self.charges, self.flows) - - if num_non_zero_elements != len(data.flat): - raise ValueError("number of tensor elements {} defined " - "by `charges` is different from" - " len(data)={}".format(num_non_zero_elements, - len(data.flat))) - - self.data = np.asarray(data.flat) #do not copy data - - @classmethod - def randn(cls, indices: List[Index], - dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": - """ - Initialize a random symmetric tensor from random normal distribution. - Args: - indices: List of `Index` objecst, one for each leg. - dtype: An optional numpy dtype. The dtype of the tensor - Returns: - BlockSparseTensor - """ - charges = [i.charges for i in indices] - flows = [i.flow for i in indices] - num_non_zero_elements = compute_num_nonzero(charges, flows) - backend = backend_factory.get_backend('numpy') - data = backend.randn((num_non_zero_elements,), dtype=dtype) - return cls(data=data, indices=indices) - - @classmethod - def ones(cls, indices: List[Index], - dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": - """ - Initialize a symmetric tensor with ones. - Args: - indices: List of `Index` objecst, one for each leg. - dtype: An optional numpy dtype. 
The dtype of the tensor - Returns: - BlockSparseTensor - """ - charges = [i.charges for i in indices] - flows = [i.flow for i in indices] - num_non_zero_elements = compute_num_nonzero(charges, flows) - backend = backend_factory.get_backend('numpy') - data = backend.ones((num_non_zero_elements,), dtype=dtype) - return cls(data=data, indices=indices) - - @classmethod - def zeros(cls, indices: List[Index], - dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": - """ - Initialize a symmetric tensor with zeros. - Args: - indices: List of `Index` objecst, one for each leg. - dtype: An optional numpy dtype. The dtype of the tensor - Returns: - BlockSparseTensor - """ - charges = [i.charges for i in indices] - flows = [i.flow for i in indices] - num_non_zero_elements = compute_num_nonzero(charges, flows) - backend = backend_factory.get_backend('numpy') - data = backend.zeros((num_non_zero_elements,), dtype=dtype) - return cls(data=data, indices=indices) - - @classmethod - def random(cls, indices: List[Index], - dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": - """ - Initialize a random symmetric tensor from random normal distribution. - Args: - indices: List of `Index` objecst, one for each leg. - dtype: An optional numpy dtype. 
The dtype of the tensor - Returns: - BlockSparseTensor - """ - charges = [i.charges for i in indices] - flows = [i.flow for i in indices] - - num_non_zero_elements = compute_num_nonzero(charges, flows) - dtype = dtype if dtype is not None else self.np.float64 - - def init_random(): - if ((np.dtype(dtype) is np.dtype(np.complex128)) or - (np.dtype(dtype) is np.dtype(np.complex64))): - return np.random.rand(num_non_zero_elements).astype( - dtype) - 0.5 + 1j * ( - np.random.rand(num_non_zero_elements).astype(dtype) - 0.5) - return np.random.randn(num_non_zero_elements).astype(dtype) - 0.5 - - return cls(data=init_random(), indices=indices) - - @property - def index_names(self): - return [i.name for i in self.indices] - - @property - def rank(self): - return len(self.indices) - - @property - def dense_shape(self) -> Tuple: - """ - The dense shape of the tensor. - Returns: - Tuple: A tuple of `int`. - """ - return tuple([i.dimension for i in self.indices]) - - @property - def shape(self) -> Tuple: - """ - The sparse shape of the tensor. - Returns: - Tuple: A tuple of `Index` objects. - """ - return tuple(self.indices) - - @property - def dtype(self) -> Type[np.number]: - return self.data.dtype - - @property - def flows(self): - return [i.flow for i in self.indices] - - @property - def charges(self): - return [i.charges for i in self.indices] - - def transpose( - self, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False) -> "BlockSparseTensor": - """ - Transpose the tensor into the new order `order`. This routine currently shuffles - data. - Args: - order: The new order of indices. - permutation: An np.ndarray of int for reshuffling the data, - typically the output of a prior call to `transpose`. Passing `permutation` - can greatly speed up the transposition. - return_permutation: If `True`, return the the permutation data. - Returns: - BlockSparseTensor: The transposed tensor. 
- """ - if (permutation is not None) and (len(permutation) != len(self.data)): - raise ValueError("len(permutation) != len(tensor.data).") - - if len(order) != self.rank: - raise ValueError( - "`len(order)={}` is different form `self.rank={}`".format( - len(order), self.rank)) - - #check for trivial permutation - if np.all(order == np.arange(len(order))): - if return_permutation: - return np.arange(len(self.data)) - return - - #we use elementary indices here because it is - #more efficient to get the fused charges using - #the best partition - if permutation is None: - elementary_indices = {} - flat_elementary_indices = [] - for n in range(len(self.indices)): - elementary_indices[n] = self.indices[n].get_elementary_indices() - flat_elementary_indices.extend(elementary_indices[n]) - flat_index_list = np.arange(len(flat_elementary_indices)) - cum_num_legs = np.append( - 0, - np.cumsum( - [len(elementary_indices[n]) for n in range(len(self.indices))])) - - flat_charges = [i.charges for i in flat_elementary_indices] - flat_flows = [i.flow for i in flat_elementary_indices] - flat_dims = [len(c) for c in flat_charges] - flat_strides = np.flip(np.append(1, np.cumprod(np.flip(flat_dims[1::])))) - flat_order = np.concatenate( - [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) - #find the best partition into left and right charges - left_charges, right_charges, _ = _find_best_partition( - flat_charges, flat_flows) - #find the index-positions of the elements in the fusion - #of `left_charges` and `right_charges` that have `0` - #total charge (those are the only non-zero elements). 
- linear_positions = find_dense_positions( - left_charges, - 1, - right_charges, - 1, - target_charge=flat_charges[0].zero_charge) - flat_tr_charges = [flat_charges[n] for n in flat_order] - flat_tr_flows = [flat_flows[n] for n in flat_order] - flat_tr_strides = [flat_strides[n] for n in flat_order] - flat_tr_dims = [flat_dims[n] for n in flat_order] - - tr_left_charges, tr_right_charges, partition = _find_best_partition( - flat_tr_charges, flat_tr_flows) - tr_linear_positions = find_dense_positions( - tr_left_charges, 1, tr_right_charges, 1, tr_left_charges.zero_charge) - stride_arrays = [ - np.arange(flat_tr_dims[n]) * flat_tr_strides[n] - for n in range(len(flat_tr_dims)) - ] - - dense_permutation = _find_values_in_fused( - tr_linear_positions, fuse_ndarrays(stride_arrays[0:partition]), - fuse_ndarrays(stride_arrays[partition::])) - assert np.all(np.sort(dense_permutation) == linear_positions) - permutation = np.searchsorted(linear_positions, dense_permutation) - - self.indices = [self.indices[n] for n in order] - self.data = self.data[permutation] - if return_permutation: - return permutation - - def reset_shape(self) -> None: - """ - Bring the tensor back into its elementary shape. - """ - self.indices = self.get_elementary_indices() - - def get_elementary_indices(self) -> List: - """ - Compute the elementary indices of the array. - """ - elementary_indices = [] - for i in self.indices: - elementary_indices.extend(i.get_elementary_indices()) - - return elementary_indices - - def reshape(self, shape: Union[Iterable[Index], Iterable[int]]) -> None: - """ - Reshape `tensor` into `shape` in place. - `BlockSparseTensor.reshape` works essentially the same as the dense - version, with the notable exception that the tensor can only be - reshaped into a form compatible with its elementary indices. - The elementary indices are the indices at the leaves of the `Index` - objects `tensors.indices`. 
- For example, while the following reshaping is possible for regular - dense numpy tensor, - ``` - A = np.random.rand(6,6,6) - np.reshape(A, (2,3,6,6)) - ``` - the same code for BlockSparseTensor - ``` - q1 = np.random.randint(0,10,6) - q2 = np.random.randint(0,10,6) - q3 = np.random.randint(0,10,6) - i1 = Index(charges=q1,flow=1) - i2 = Index(charges=q2,flow=-1) - i3 = Index(charges=q3,flow=1) - A=BlockSparseTensor.randn(indices=[i1,i2,i3]) - print(A.shape) #prints (6,6,6) - A.reshape((2,3,6,6)) #raises ValueError - ``` - raises a `ValueError` since (2,3,6,6) - is incompatible with the elementary shape (6,6,6) of the tensor. - - Args: - tensor: A symmetric tensor. - shape: The new shape. Can either be a list of `Index` - or a list of `int`. - Returns: - BlockSparseTensor: A new tensor reshaped into `shape` - """ - dense_shape = [] - for s in shape: - if isinstance(s, Index): - dense_shape.append(s.dimension) - else: - dense_shape.append(s) - # a few simple checks - if np.prod(dense_shape) != np.prod(self.dense_shape): - raise ValueError("A tensor with {} elements cannot be " - "reshaped into a tensor with {} elements".format( - np.prod(self.shape), np.prod(dense_shape))) - - #keep a copy of the old indices for the case where reshaping fails - #FIXME: this is pretty hacky! - index_copy = [i.copy() for i in self.indices] - - def raise_error(): - #if this error is raised then `shape` is incompatible - #with the elementary indices. We then reset the shape - #to what is was before the call to `reshape`. 
- self.indices = index_copy - elementary_indices = [] - for i in self.indices: - elementary_indices.extend(i.get_elementary_indices()) - raise ValueError("The shape {} is incompatible with the " - "elementary shape {} of the tensor.".format( - dense_shape, - tuple([e.dimension for e in elementary_indices]))) - - self.reset_shape() #bring tensor back into its elementary shape - for n in range(len(dense_shape)): - if dense_shape[n] > self.dense_shape[n]: - while dense_shape[n] > self.dense_shape[n]: - #fuse indices - i1, i2 = self.indices.pop(n), self.indices.pop(n) - #note: the resulting flow is set to one since the flow - #is multiplied into the charges. As a result the tensor - #will then be invariant in any case. - self.indices.insert(n, fuse_index_pair(i1, i2)) - if self.dense_shape[n] > dense_shape[n]: - raise_error() - elif dense_shape[n] < self.dense_shape[n]: - raise_error() - #at this point the first len(dense_shape) indices of the tensor - #match the `dense_shape`. - while len(dense_shape) < len(self.indices): - i2, i1 = self.indices.pop(), self.indices.pop() - self.indices.append(fuse_index_pair(i1, i2)) - - def _get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: - """ - Obtain the diagonal blocks of a symmetric matrix. - BlockSparseTensor has to be a matrix. - This routine avoids explicit fusion of row or column charges. - - Args: - return_data: If `True`, the returned dictionary maps quantum numbers `q` to - an actual `np.ndarray` containing the data of block `q`. - If `False`, the returned dict maps quantum numbers `q` to a list - `[locations, shape]`, where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within `self.data`, i.e. - `self.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. 
- Returns: - dict: If `return_data=True`: Dictionary mapping charge `q` to an - np.ndarray of rank 2 (a matrix). - If `return_data=False`: Dictionary mapping charge `q` to a - list `[locations, shape]`, where `locations` is an np.ndarray of type - np.int64 containing the locations of the tensor elements within `self.data` - - - """ - if self.rank != 2: - raise ValueError( - "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" - .format(self.rank)) - - row_indices = self.indices[0].get_elementary_indices() - column_indices = self.indices[1].get_elementary_indices() - - return find_diagonal_sparse_blocks( - data=self.data, - row_charges=[i.charges for i in row_indices], - column_charges=[i.charges for i in column_indices], - row_flows=[i.flow for i in row_indices], - column_flows=[i.flow for i in column_indices], - return_data=return_data) - - -def reshape(tensor: BlockSparseTensor, - shape: Union[Iterable[Index], Iterable[int]]) -> BlockSparseTensor: - """ - Reshape `tensor` into `shape`. - `reshape` works essentially the same as the dense version, with the - notable exception that the tensor can only be reshaped into a form - compatible with its elementary indices. The elementary indices are - the indices at the leaves of the `Index` objects `tensors.indices`. - For example, while the following reshaping is possible for regular - dense numpy tensor, - ``` - A = np.random.rand(6,6,6) - np.reshape(A, (2,3,6,6)) - ``` - the same code for BlockSparseTensor - ``` - q1 = np.random.randint(0,10,6) - q2 = np.random.randint(0,10,6) - q3 = np.random.randint(0,10,6) - i1 = Index(charges=q1,flow=1) - i2 = Index(charges=q2,flow=-1) - i3 = Index(charges=q3,flow=1) - A=BlockSparseTensor.randn(indices=[i1,i2,i3]) - print(nA.shape) #prints (6,6,6) - reshape(A, (2,3,6,6)) #raises ValueError - ``` - raises a `ValueError` since (2,3,6,6) - is incompatible with the elementary shape (6,6,6) of the tensor. - - Args: - tensopr: A symmetric tensor. 
- shape: The new shape. Can either be a list of `Index` - or a list of `int`. - Returns: - BlockSparseTensor: A new tensor reshaped into `shape` - """ - result = BlockSparseTensor( - data=tensor.data.copy(), indices=[i.copy() for i in tensor.indices]) - result.reshape(shape) - return result - - -def transpose( - tensor: BlockSparseTensor, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False) -> "BlockSparseTensor": - """ - Transpose `tensor` into the new order `order`. This routine currently shuffles - data. - Args: - tensor: The tensor to be transposed. - order: The new order of indices. - permutation: An np.ndarray of int for reshuffling the data, - typically the output of a prior call to `transpose`. Passing `permutation` - can greatly speed up the transposition. - return_permutation: If `True`, return the the permutation data. - Returns: - if `return_permutation == False`: - BlockSparseTensor: The transposed tensor. - if `return_permutation == True`: - BlockSparseTensor, permutation: The transposed tensor - and the permutation data - - """ - if (permutation is not None) and (len(permutation) != len(tensor.data)): - raise ValueError("len(permutation) != len(tensor.data).") - result = tensor.copy() - inds = result.transpose(order, permutation, return_permutation) - if return_permutation: - return result, inds - return result - - -def tensordot(tensor1: BlockSparseTensor, - tensor2: BlockSparseTensor, - axes: Sequence[Sequence[int]], - permutation1: Optional[np.ndarray] = None, - permutation2: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False): - """ - Contract two `BlockSparseTensor`s along `axes`. - Args: - tensor1: First tensor. - tensor2: Second tensor. - axes: The axes to contract. - permutation1: Permutation data for `tensor1`. - permutation2: Permutation data for `tensor2`. - return_permutation: If `True`, return the the permutation data. 
- Returns: - if `return_permutation == False`: - BlockSparseTensor: The result of contracting `tensor1` and `tensor2`. - if `return_permutation == True`: - BlockSparseTensor, np.ndarrays, np.ndarray: The result of - contracting `tensor1` and `tensor2`, together with their respective - permutation data. - - """ - axes1 = axes[0] - axes2 = axes[1] - if not np.all(np.unique(axes1) == np.sort(axes1)): - raise ValueError( - "Some values in axes[0] = {} appear more than once!".format(axes1)) - if not np.all(np.unique(axes2) == np.sort(axes2)): - raise ValueError( - "Some values in axes[1] = {} appear more than once!".format(axes2n)) - - if max(axes1) >= len(tensor1.shape): - raise ValueError( - "rank of `tensor1` is smaller than `max(axes1) = {}.`".format( - max(axes1))) - elementary_1, elementary_2 = [], [] - for a in axes1: - elementary_1.extend(tensor1.indices[a].get_elementary_indices()) - for a in axes2: - elementary_2.extend(tensor2.indices[a].get_elementary_indices()) - - if len(elementary_2) != len(elementary_1): - raise ValueError("axes1 and axes2 have incompatible elementary" - " shapes {} and {}".format(elementary_1, elementary_2)) - if not np.all( - np.array([i.flow for i in elementary_1]) == - (-1) * np.array([i.flow for i in elementary_2])): - raise ValueError("axes1 and axes2 have incompatible elementary" - " flows {} and {}".format( - np.array([i.flow for i in elementary_1]), - np.array([i.flow for i in elementary_2]))) - - if max(axes2) >= len(tensor2.shape): - raise ValueError( - "rank of `tensor2` is smaller than `max(axes2) = {}`".format( - max(axes1))) - free_axes1 = sorted(set(np.arange(len(tensor1.shape))) - set(axes1)) - free_axes2 = sorted(set(np.arange(len(tensor2.shape))) - set(axes2)) - new_order1 = free_axes1 + list(axes1) - new_order2 = list(axes2) + free_axes2 - - tr1 = transpose( - tensor=tensor1, - order=new_order1, - permutation=permutation1, - return_permutation=return_permutation) - if return_permutation: - permutation1 = tr1[1] - tr1 
= tr1[1] - - trshape1 = tr1.dense_shape - Dl1 = np.prod([trshape1[n] for n in range(len(free_axes1))]) - Dr1 = np.prod([trshape1[n] for n in range(len(free_axes1), len(trshape1))]) - - tmp1 = reshape(tr1, (Dl1, Dr1)) - - tr2 = transpose( - tensor=tensor2, - order=new_order2, - permutation=permutation2, - return_permutation=return_permutation) - if return_permutation: - permutation2 = tr2[1] - tr2 = tr2[1] - trshape2 = tr2.dense_shape - Dl2 = np.prod([trshape2[n] for n in range(len(axes2))]) - Dr2 = np.prod([trshape2[n] for n in range(len(axes2), len(trshape2))]) - - tmp2 = reshape(tr2, (Dl2, Dr2)) - - #avoid data-copying here by setting `return_data=False` - column_charges1, data1, start_positions, row_locations, _ = tmp1._get_diagonal_blocks( - return_data=False) - row_charges2, data2, _, _, column_degeneracies = tmp2._get_diagonal_blocks( - return_data=False) - - #get common charges between rows and columns - tmp_charges, cnts = column_charges1.concatenate(row_charges2).unique( - return_counts=True) - common_charges = tmp_charges[cnts == 2] - - #get the flattened indices for the output tensor - indices = [] - indices.extend(tmp1.indices[0].get_elementary_indices()) - indices.extend(tmp2.indices[1].get_elementary_indices()) - index_names = [i.name for i in indices] - unique = np.unique(index_names) - #rename indices if they are not unique - if len(unique) < len(index_names): - for n, i in enumerate(indices): - i.name = 'index_{}'.format(n) - - #initialize the data-vector of the output with zeros - num_nonzero_elements = compute_num_nonzero([i.charges for i in indices], - [i.flow for i in indices]) - data = np.zeros( - num_nonzero_elements, dtype=np.result_type(tensor1.dtype, tensor2.dtype)) - - for c in common_charges: - rlocs = row_locations[c] - cdegs = column_degeneracies[c] - a = np.expand_dims(start_positions[rlocs], 1) - b = np.expand_dims(np.arange(cdegs), 0) - new_locations = np.reshape(a + b, len(rlocs) * cdegs) - i1 = np.nonzero(column_charges1 == 
c)[0][0] - i2 = np.nonzero(row_charges2 == c)[0][0] - try: - #place the result of the block-matrix multiplication - #into the new data-vector - data[new_locations] = np.matmul( - np.reshape(tensor1.data[data1[i1][0]], data1[i1][1]), - np.reshape(tensor2.data[data2[i2][0]], data2[i2][1])).flat - except ValueError: - raise ValueError("for quantum number {}, shapes {} and {} " - "of left and right blocks have " - "incompatible shapes".format(c, data1[i1].shape, - data2[i2].shape)) - - out = BlockSparseTensor(data=data, indices=indices) - resulting_shape = [trshape1[n] for n in range(len(free_axes1)) - ] + [trshape2[n] for n in range(len(axes2), len(trshape2))] - out.reshape(resulting_shape) - if return_permutation: - return out, permutation1, permutation2 - return out diff --git a/tensornetwork/block_tensor/block_tensor_new_test.py b/tensornetwork/block_tensor/block_tensor_new_test.py deleted file mode 100644 index 0ed921b0d..000000000 --- a/tensornetwork/block_tensor/block_tensor_new_test.py +++ /dev/null @@ -1,333 +0,0 @@ -import numpy as np -import pytest - -from tensornetwork.block_tensor.charge import U1Charge, ChargeCollection -from tensornetwork.block_tensor.index_new import Index -from tensornetwork.block_tensor.block_tensor_new import find_diagonal_sparse_blocks, compute_num_nonzero, find_sparse_positions, find_dense_positions, BlockSparseTensor - -np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128] - - -def test_test_num_nonzero_consistency(): - B = 4 - D = 100 - rank = 4 - - qs = [[ - np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - for _ in range(2) - ] - for _ in range(rank)] - charges1 = [U1Charge(qs[n]) for n in range(rank)] - charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] - charges3 = [ - ChargeCollection([U1Charge(qs[n][m]) - for m in range(2)]) - for n in range(rank) - ] - flows = [1, 1, 1, -1] - n1 = compute_num_nonzero(charges1, flows) - n2 = compute_num_nonzero(charges3, flows) - n3 = 
compute_num_nonzero(charges3, flows) - assert n1 == n2 - - -def test_find_sparse_positions_consistency(): - B = 4 - D = 100 - rank = 4 - - qs = [[ - np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - for _ in range(2) - ] - for _ in range(rank)] - charges1 = [U1Charge(qs[n]) for n in range(rank)] - charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] - charges3 = [ - ChargeCollection([U1Charge(qs[n][m]) - for m in range(2)]) - for n in range(rank) - ] - - data1 = find_sparse_positions( - left_charges=charges1[0] + charges1[1], - left_flow=1, - right_charges=charges1[2] + charges1[3], - right_flow=1, - target_charges=charges1[0].zero_charge) - data2 = find_sparse_positions( - left_charges=charges2[0] + charges2[1], - left_flow=1, - right_charges=charges2[2] + charges2[3], - right_flow=1, - target_charges=charges2[0].zero_charge) - data3 = find_sparse_positions( - left_charges=charges3[0] + charges3[1], - left_flow=1, - right_charges=charges3[2] + charges3[3], - right_flow=1, - target_charges=charges3[0].zero_charge) - - nz1 = np.asarray(list(data1.values())[0]) - nz2 = np.asarray(list(data2.values())[0]) - nz3 = np.asarray(list(data3.values())[0]) - assert np.all(nz1 == nz2) - assert np.all(nz1 == nz3) - - -def test_find_dense_positions_consistency(): - B = 5 - D = 20 - rank = 4 - - qs = [[ - np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - for _ in range(2) - ] - for _ in range(rank)] - charges1 = [U1Charge(qs[n]) for n in range(rank)] - charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] - charges3 = [ - ChargeCollection([U1Charge(qs[n][m]) - for m in range(2)]) - for n in range(rank) - ] - flows = [1, 1, 1, -1] - data1 = find_dense_positions( - left_charges=charges1[0] * flows[0] + charges1[1] * flows[0], - left_flow=1, - right_charges=charges1[2] * flows[2] + charges1[3] * flows[3], - right_flow=1, - target_charge=charges1[0].zero_charge) - data2 = find_dense_positions( - left_charges=charges2[0] * flows[0] + 
charges2[1] * flows[1], - left_flow=1, - right_charges=charges2[2] * flows[2] + charges2[3] * flows[3], - right_flow=1, - target_charge=charges2[0].zero_charge) - data3 = find_dense_positions( - left_charges=charges3[0] * flows[0] + charges3[1] * flows[1], - left_flow=1, - right_charges=charges3[2] * flows[2] + charges3[3] * flows[3], - right_flow=1, - target_charge=charges3[0].zero_charge) - - nz = compute_num_nonzero(charges1, flows) - assert nz == len(data1) - assert len(data1) == len(data2) - assert len(data1) == len(data3) - - -def test_find_diagonal_sparse_blocks_consistency(): - B = 5 - D = 20 - rank = 4 - - qs = [[ - np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - for _ in range(2) - ] - for _ in range(rank)] - charges1 = [U1Charge(qs[n]) for n in range(rank)] - charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] - charges3 = [ - ChargeCollection([U1Charge(qs[n][m]) - for m in range(2)]) - for n in range(rank) - ] - - data1 = find_diagonal_sparse_blocks( - data=[], - row_charges=[charges1[0], charges1[1]], - column_charges=[charges1[2], charges1[3]], - row_flows=[1, 1], - column_flows=[1, -1]) - - data2 = find_diagonal_sparse_blocks( - data=[], - row_charges=[charges2[0], charges2[1]], - column_charges=[charges2[2], charges2[3]], - row_flows=[1, 1], - column_flows=[1, -1]) - data3 = find_diagonal_sparse_blocks( - data=[], - row_charges=[charges3[0], charges3[1]], - column_charges=[charges3[2], charges3[3]], - row_flows=[1, 1], - column_flows=[1, -1]) - keys1 = np.sort(np.asarray(list(data1.keys()))) - keys2 = np.squeeze(np.sort(np.asarray(list(data2.keys())))) - keys3 = np.sort([np.left_shift(c[0], 16) + c[1] for c in data3.keys()]) - - assert np.all(keys1 == keys2) - assert np.all(keys1 == keys3) - - -# @pytest.mark.parametrize("dtype", np_dtypes) -# def test_block_sparse_init(dtype): -# D = 10 #bond dimension -# B = 10 #number of blocks -# rank = 4 -# flows = np.asarray([1 for _ in range(rank)]) -# flows[-2::] = -1 -# charges = 
[ -# np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) -# for _ in range(rank) -# ] -# indices = [ -# Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) -# for n in range(rank) -# ] -# num_elements = compute_num_nonzero([i.charges for i in indices], -# [i.flow for i in indices]) -# A = BlockSparseTensor.random(indices=indices, dtype=dtype) -# assert A.dtype == dtype -# for r in range(rank): -# assert A.indices[r].name == 'index{}'.format(r) -# assert A.dense_shape == tuple([D] * rank) -# assert len(A.data) == num_elements - -# def test_find_dense_positions(): -# left_charges = np.asarray([-2, 0, 1, 0, 0]).astype(np.int16) -# right_charges = np.asarray([-1, 0, 2, 1]).astype(np.int16) -# target_charge = 0 -# fused_charges = fuse_charges([left_charges, right_charges], [1, 1]) -# dense_positions = find_dense_positions(left_charges, 1, right_charges, 1, -# target_charge) -# np.testing.assert_allclose(dense_positions, -# np.nonzero(fused_charges == target_charge)[0]) - -# def test_find_dense_positions_2(): -# D = 40 #bond dimension -# B = 4 #number of blocks -# dtype = np.int16 #the dtype of the quantum numbers -# rank = 4 -# flows = np.asarray([1 for _ in range(rank)]) -# flows[-2::] = -1 -# charges = [ -# np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) -# for _ in range(rank) -# ] -# indices = [ -# Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) -# for n in range(rank) -# ] -# n1 = compute_num_nonzero([i.charges for i in indices], -# [i.flow for i in indices]) -# row_charges = fuse_charges([indices[n].charges for n in range(rank // 2)], -# [1 for _ in range(rank // 2)]) -# column_charges = fuse_charges( -# [indices[n].charges for n in range(rank // 2, rank)], -# [1 for _ in range(rank // 2, rank)]) - -# i01 = indices[0] * indices[1] -# i23 = indices[2] * indices[3] -# positions = find_dense_positions(i01.charges, 1, i23.charges, 1, 0) -# assert len(positions) == n1 - -# def test_find_sparse_positions(): -# D = 40 
#bond dimension -# B = 4 #number of blocks -# dtype = np.int16 #the dtype of the quantum numbers -# rank = 4 -# flows = np.asarray([1 for _ in range(rank)]) -# flows[-2::] = -1 -# charges = [ -# np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) -# for _ in range(rank) -# ] -# indices = [ -# Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) -# for n in range(rank) -# ] -# n1 = compute_num_nonzero([i.charges for i in indices], -# [i.flow for i in indices]) -# row_charges = fuse_charges([indices[n].charges for n in range(rank // 2)], -# [1 for _ in range(rank // 2)]) -# column_charges = fuse_charges( -# [indices[n].charges for n in range(rank // 2, rank)], -# [1 for _ in range(rank // 2, rank)]) - -# i01 = indices[0] * indices[1] -# i23 = indices[2] * indices[3] -# unique_row_charges = np.unique(i01.charges) -# unique_column_charges = np.unique(i23.charges) -# common_charges = np.intersect1d( -# unique_row_charges, -unique_column_charges, assume_unique=True) -# blocks = find_sparse_positions( -# i01.charges, 1, i23.charges, 1, target_charges=[0]) -# assert sum([len(v) for v in blocks.values()]) == n1 -# np.testing.assert_allclose(np.sort(blocks[0]), np.arange(n1)) - -# def test_find_sparse_positions_2(): -# D = 40 #bond dimension -# B = 4 #number of blocks -# dtype = np.int16 #the dtype of the quantum numbers -# flows = [1, -1] - -# rank = len(flows) -# charges = [ -# np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) -# for _ in range(rank) -# ] -# indices = [ -# Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) -# for n in range(rank) -# ] -# i1, i2 = indices -# common_charges = np.intersect1d(i1.charges, i2.charges) -# row_locations = find_sparse_positions( -# left_charges=i1.charges, -# left_flow=flows[0], -# right_charges=i2.charges, -# right_flow=flows[1], -# target_charges=common_charges) -# fused = (i1 * i2).charges -# relevant = fused[np.isin(fused, common_charges)] -# for k, v in row_locations.items(): -# 
np.testing.assert_allclose(np.nonzero(relevant == k)[0], np.sort(v)) - -# def test_get_diagonal_blocks(): -# D = 40 #bond dimension -# B = 4 #number of blocks -# dtype = np.int16 #the dtype of the quantum numbers -# rank = 4 -# flows = np.asarray([1 for _ in range(rank)]) -# flows[-2::] = -1 -# charges = [ -# np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) -# for _ in range(rank) -# ] -# indices = [ -# Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) -# for n in range(rank) -# ] -# common_charges = np.intersect1d(indices[0].charges, indices[1].charges) -# row_locations = find_sparse_positions( -# left_charges=indices[0].charges, -# left_flow=1, -# right_charges=indices[1].charges, -# right_flow=1, -# target_charges=common_charges) - - -def test_dense_transpose(): - Ds = [10, 11, 12] #bond dimension - rank = len(Ds) - flows = np.asarray([1 for _ in range(rank)]) - flows[-2::] = -1 - charges = [U1Charge(np.zeros(Ds[n], dtype=np.int16)) for n in range(rank)] - indices = [ - Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) - for n in range(rank) - ] - A = BlockSparseTensor.random(indices=indices, dtype=np.float64) - B = np.transpose(np.reshape(A.data.copy(), Ds), (1, 0, 2)) - A.transpose((1, 0, 2)) - np.testing.assert_allclose(A.data, B.flat) - - B = np.transpose(np.reshape(A.data.copy(), [11, 10, 12]), (1, 0, 2)) - A.transpose((1, 0, 2)) - - np.testing.assert_allclose(A.data, B.flat) diff --git a/tensornetwork/block_tensor/block_tensor_old.py b/tensornetwork/block_tensor/block_tensor_old.py new file mode 100644 index 000000000..c552a184a --- /dev/null +++ b/tensornetwork/block_tensor/block_tensor_old.py @@ -0,0 +1,1675 @@ +# Copyright 2019 The TensorNetwork Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +#from tensornetwork.block_tensor.lookup import lookup +# pylint: disable=line-too-long +from tensornetwork.network_components import Node, contract, contract_between +from tensornetwork.backends import backend_factory +# pylint: disable=line-too-long +from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charge_pair, fuse_degeneracies, fuse_charges, unfuse +import numpy as np +import scipy as sp +import itertools +import time +from typing import List, Union, Any, Tuple, Type, Optional, Dict, Iterable +Tensor = Any + + +def _check_flows(flows) -> None: + if (set(flows) != {1}) and (set(flows) != {-1}) and (set(flows) != {-1, 1}): + raise ValueError( + "flows = {} contains values different from 1 and -1".format(flows)) + + +def _find_best_partition(charges, flows): + if len(charges) == 1: + raise ValueError( + '_expecting `charges` with a length of at least 2, got `len(charges)={}`' + .format(len(charges))) + dims = np.asarray([len(c) for c in charges]) + min_ind = np.argmin([ + np.abs(np.prod(dims[0:n]) - np.prod(dims[n::])) + for n in range(1, len(charges)) + ]) + fused_left_charges = fuse_charges(charges[0:min_ind + 1], + flows[0:min_ind + 1]) + fused_right_charges = fuse_charges(charges[min_ind + 1::], + flows[min_ind + 1::]) + + return fused_left_charges, fused_right_charges, min_ind + 1 + + +def map_to_integer(dims: Union[List, np.ndarray], + table: np.ndarray, + dtype: 
Optional[Type[np.number]] = np.int64): + """ + Map a `table` of integers of shape (N, r) bijectively into + an np.ndarray `integers` of length N of unique numbers. + The mapping is done using + ``` + `integers[n] = table[n,0] * np.prod(dims[1::]) + table[n,1] * np.prod(dims[2::]) + ... + table[n,r-1] * 1` + + Args: + dims: An iterable of integers. + table: An array of shape (N,r) of integers. + dtype: An optional dtype used for the conversion. + Care should be taken when choosing this to avoid overflow issues. + Returns: + np.ndarray: An array of integers. + """ + converter_table = np.expand_dims( + np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))), 0) + tmp = table * converter_table + integers = np.sum(tmp, axis=1) + return integers + + +def compute_fused_charge_degeneracies(charges: List[np.ndarray], + flows: List[Union[bool, int]]) -> Dict: + """ + For a list of charges, compute all possible fused charges resulting + from fusing `charges`, together with their respective degeneracyn + Args: + charges: List of np.ndarray of int, one for each leg of the + underlying tensor. Each np.ndarray `charges[leg]` + is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + Returns: + dict: Mapping fused charges (int) to degeneracies (int) + """ + if len(charges) == 1: + return np.unique(flows[0] * charges[0], return_counts=True) + + # get unique charges and their degeneracies on the first leg. + # We are fusing from "left" to "right". 
+ accumulated_charges, accumulated_degeneracies = np.unique( + charges[0], return_counts=True) + #multiply the flow into the charges of first leg + accumulated_charges *= flows[0] + for n in range(1, len(charges)): + #list of unique charges and list of their degeneracies + #on the next unfused leg of the tensor + leg_charges, leg_degeneracies = np.unique(charges[n], return_counts=True) + + #fuse the unique charges + #Note: entries in `fused_charges` are not unique anymore. + #flow1 = 1 because the flow of leg 0 has already been + #mulitplied above + fused_charges = fuse_charge_pair( + q1=accumulated_charges, flow1=1, q2=leg_charges, flow2=flows[n]) + #compute the degeneracies of `fused_charges` charges + #`fused_degeneracies` is a list of degeneracies such that + # `fused_degeneracies[n]` is the degeneracy of of + # charge `c = fused_charges[n]`. + fused_degeneracies = fuse_degeneracies(accumulated_degeneracies, + leg_degeneracies) + #compute the new degeneracies resulting from fusing + #`accumulated_charges` and `leg_charges_2` + accumulated_charges = np.unique(fused_charges) + accumulated_degeneracies = np.empty( + len(accumulated_charges), dtype=np.int64) + for n in range(len(accumulated_charges)): + accumulated_degeneracies[n] = np.sum( + fused_degeneracies[fused_charges == accumulated_charges[n]]) + return accumulated_charges, accumulated_degeneracies + + +def compute_num_nonzero(charges: List[np.ndarray], + flows: List[Union[bool, int]]) -> int: + """ + Compute the number of non-zero elements, given the meta-data of + a symmetric tensor. + Args: + charges: List of np.ndarray of int, one for each leg of the + underlying tensor. Each np.ndarray `charges[leg]` + is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. 
+ Returns: + int: The number of non-zero elements. + """ + accumulated_charges, accumulated_degeneracies = compute_fused_charge_degeneracies( + charges, flows) + if len(np.nonzero(accumulated_charges == 0)[0]) == 0: + raise ValueError( + "given leg-charges `charges` and flows `flows` are incompatible " + "with a symmetric tensor") + return accumulated_degeneracies[accumulated_charges == 0][0] + + +def compute_nonzero_block_shapes(charges: List[np.ndarray], + flows: List[Union[bool, int]]) -> Dict: + """ + Compute the blocks and their respective shapes of a symmetric tensor, + given its meta-data. + Args: + charges: List of np.ndarray, one for each leg. + Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + Returns: + dict: Dictionary mapping a tuple of charges to a shape tuple. + Each element corresponds to a non-zero valued block of the tensor. + """ + #FIXME: this routine is slow + _check_flows(flows) + degeneracies = [] + unique_charges = [] + rank = len(charges) + #find the unique quantum numbers and their degeneracy on each leg + for leg in range(rank): + c, d = np.unique(charges[leg], return_counts=True) + unique_charges.append(c) + degeneracies.append(dict(zip(c, d))) + + #find all possible combination of leg charges c0, c1, ... + #(with one charge per leg 0, 1, ...) 
+ #such that sum([flows[0] * c0, flows[1] * c1, ...]) = 0 + charge_combinations = list( + itertools.product(*[ + unique_charges[leg] * flows[leg] + for leg in range(len(unique_charges)) + ])) + net_charges = np.array([np.sum(c) for c in charge_combinations]) + zero_idxs = np.nonzero(net_charges == 0)[0] + charge_shape_dict = {} + for idx in zero_idxs: + c = charge_combinations[idx] + shapes = [degeneracies[leg][flows[leg] * c[leg]] for leg in range(rank)] + charge_shape_dict[c] = shapes + return charge_shape_dict + + +def find_diagonal_sparse_blocks(data: np.ndarray, + row_charges: List[Union[List, np.ndarray]], + column_charges: List[Union[List, np.ndarray]], + row_flows: List[Union[bool, int]], + column_flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: + """ + Given the meta data and underlying data of a symmetric matrix, compute + all diagonal blocks and return them in a dict. + `row_charges` and `column_charges` are lists of np.ndarray. The tensor + is viewed as a matrix with rows given by fusing `row_charges` and + columns given by fusing `column_charges`. Note that `column_charges` + are never explicitly fused (`row_charges` are). + Args: + data: An np.ndarray of the data. The number of elements in `data` + has to match the number of non-zero elements defined by `charges` + and `flows` + row_charges: List of np.ndarray, one for each leg of the row-indices. + Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + column_charges: List of np.ndarray, one for each leg of the column-indices. + Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + row_flows: A list of integers, one for each entry in `row_charges`. + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + column_flows: A list of integers, one for each entry in `column_charges`. 
+ with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the sparse locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. + + Returns: + dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray + or a python list of locations and shapes, depending on the value of `return_data`. + """ + flows = row_flows.copy() + flows.extend(column_flows) + _check_flows(flows) + if len(flows) != (len(row_charges) + len(column_charges)): + raise ValueError( + "`len(flows)` is different from `len(row_charges) + len(column_charges)`" + ) + + #get the unique column-charges + #we only care about their degeneracies, not their order; that's much faster + #to compute since we don't have to fuse all charges explicitly + unique_column_charges, column_dims = compute_fused_charge_degeneracies( + column_charges, column_flows) + #convenience container for storing the degeneracies of each + #column charge + column_degeneracies = dict(zip(unique_column_charges, column_dims)) + + if len(row_charges) > 1: + left_row_charges, right_row_charges, _ = _find_best_partition( + row_charges, row_flows) + unique_left = np.unique(left_row_charges) + unique_right = np.unique(right_row_charges) + unique_row_charges = np.unique( + fuse_charges(charges=[unique_left, unique_right], flows=[1, 1])) + + #get the charges common to rows and columns (only those matter) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + + row_locations = 
find_sparse_positions( + left_charges=left_row_charges, + left_flow=1, + right_charges=right_row_charges, + right_flow=1, + target_charges=common_charges) + elif len(row_charges) == 1: + fused_row_charges = fuse_charges(row_charges, row_flows) + + #get the unique row-charges + unique_row_charges, row_dims = np.unique( + fused_row_charges, return_counts=True) + #get the charges common to rows and columns (only those matter) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + relevant_fused_row_charges = fused_row_charges[np.isin( + fused_row_charges, common_charges)] + row_locations = {} + for c in common_charges: + row_locations[c] = np.nonzero(relevant_fused_row_charges == c)[0] + else: + raise ValueError('Found an empty sequence for `row_charges`') + #some numpy magic to get the index locations of the blocks + degeneracy_vector = np.empty( + np.sum([len(v) for v in row_locations.values()]), dtype=np.int64) + #for each charge `c` in `common_charges` we generate a boolean mask + #for indexing the positions where `relevant_column_charges` has a value of `c`. + masks = {} + for c in common_charges: + degeneracy_vector[row_locations[c]] = column_degeneracies[-c] + + # the result of the cumulative sum is a vector containing + # the stop positions of the non-zero values of each row + # within the data vector. + # E.g. 
for `relevant_row_charges` = [0,1,0,0,3], and + # column_degeneracies[0] = 10 + # column_degeneracies[1] = 20 + # column_degeneracies[3] = 30 + # we have + # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] + # The starting positions of consecutive elements (in row-major order) in + # each row with charge `c=0` within the data vector are then simply obtained using + # masks[0] = [True, False, True, True, False] + # and `stop_positions[masks[0]] - column_degeneracies[0]` + stop_positions = np.cumsum(degeneracy_vector) + start_positions = stop_positions - degeneracy_vector + blocks = {} + + for c in common_charges: + #numpy broadcasting is substantially faster than kron! + a = np.expand_dims(start_positions[np.sort(row_locations[c])], 1) + b = np.expand_dims(np.arange(column_degeneracies[-c]), 0) + inds = np.reshape(a + b, len(row_locations[c]) * column_degeneracies[-c]) + if not return_data: + blocks[c] = [inds, (len(row_locations[c]), column_degeneracies[-c])] + else: + blocks[c] = np.reshape(data[inds], + (len(row_locations[c]), column_degeneracies[-c])) + return blocks + + +def find_diagonal_sparse_blocks_depreacated_1( + data: np.ndarray, + row_charges: List[Union[List, np.ndarray]], + column_charges: List[Union[List, np.ndarray]], + row_flows: List[Union[bool, int]], + column_flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: + """ + Deprecated + + This version is slow for matrices with shape[0] >> shape[1], but fast otherwise. + + Given the meta data and underlying data of a symmetric matrix, compute + all diagonal blocks and return them in a dict. + `row_charges` and `column_charges` are lists of np.ndarray. The tensor + is viewed as a matrix with rows given by fusing `row_charges` and + columns given by fusing `column_charges`. Note that `column_charges` + are never explicitly fused (`row_charges` are). + Args: + data: An np.ndarray of the data. 
The number of elements in `data` + has to match the number of non-zero elements defined by `charges` + and `flows` + row_charges: List of np.ndarray, one for each leg of the row-indices. + Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + column_charges: List of np.ndarray, one for each leg of the column-indices. + Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + row_flows: A list of integers, one for each entry in `row_charges`. + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + column_flows: A list of integers, one for each entry in `column_charges`. + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the sparse locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. + + Returns: + dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray + or a python list of locations and shapes, depending on the value of `return_data`. + """ + flows = row_flows.copy() + flows.extend(column_flows) + _check_flows(flows) + if len(flows) != (len(row_charges) + len(column_charges)): + raise ValueError( + "`len(flows)` is different from `len(row_charges) + len(column_charges)`" + ) + + #since we are using row-major we have to fuse the row charges anyway. 
+ fused_row_charges = fuse_charges(row_charges, row_flows) + #get the unique row-charges + unique_row_charges, row_dims = np.unique( + fused_row_charges, return_counts=True) + + #get the unique column-charges + #we only care about their degeneracies, not their order; that's much faster + #to compute since we don't have to fuse all charges explicitly + unique_column_charges, column_dims = compute_fused_charge_degeneracies( + column_charges, column_flows) + #get the charges common to rows and columns (only those matter) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + + #convenience container for storing the degeneracies of each + #row and column charge + row_degeneracies = dict(zip(unique_row_charges, row_dims)) + column_degeneracies = dict(zip(unique_column_charges, column_dims)) + + # we only care about charges common to row and columns + mask = np.isin(fused_row_charges, common_charges) + relevant_row_charges = fused_row_charges[mask] + #some numpy magic to get the index locations of the blocks + #we generate a vector of `len(relevant_row_charges) which, + #for each charge `c` in `relevant_row_charges` holds the + #column-degeneracy of charge `c` + degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64) + #for each charge `c` in `common_charges` we generate a boolean mask + #for indexing the positions where `relevant_column_charges` has a value of `c`. + masks = {} + for c in common_charges: + mask = relevant_row_charges == c + masks[c] = mask + degeneracy_vector[mask] = column_degeneracies[-c] + + # the result of the cumulative sum is a vector containing + # the stop positions of the non-zero values of each row + # within the data vector. + # E.g. 
for `relevant_row_charges` = [0,1,0,0,3], and + # column_degeneracies[0] = 10 + # column_degeneracies[1] = 20 + # column_degeneracies[3] = 30 + # we have + # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] + # The starting positions of consecutive elements (in row-major order) in + # each row with charge `c=0` within the data vector are then simply obtained using + # masks[0] = [True, False, True, True, False] + # and `stop_positions[masks[0]] - column_degeneracies[0]` + stop_positions = np.cumsum(degeneracy_vector) + start_positions = stop_positions - degeneracy_vector + blocks = {} + + for c in common_charges: + #numpy broadcasting is substantially faster than kron! + a = np.expand_dims(start_positions[masks[c]], 1) + b = np.expand_dims(np.arange(column_degeneracies[-c]), 0) + if not return_data: + blocks[c] = [ + np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), + (row_degeneracies[c], column_degeneracies[-c]) + ] + else: + blocks[c] = np.reshape( + data[np.reshape(a + b, + row_degeneracies[c] * column_degeneracies[-c])], + (row_degeneracies[c], column_degeneracies[-c])) + return blocks + + +def find_diagonal_sparse_blocks_deprecated_0( + data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: + """ + Deprecated: this version is about 2 times slower (worst case) than the current used + implementation + Given the meta data and underlying data of a symmetric matrix, compute + all diagonal blocks and return them in a dict. + Args: + data: An np.ndarray of the data. The number of elements in `data` + has to match the number of non-zero elements defined by `charges` + and `flows` + charges: List of np.ndarray, one for each leg. + Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. 
+ flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. + + Returns: + dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray + or a python list of locations and shapes, depending on the value of `return_data`. + """ + if len(charges) != 2: + raise ValueError("input has to be a two-dimensional symmetric matrix") + _check_flows(flows) + if len(flows) != len(charges): + raise ValueError("`len(flows)` is different from `len(charges)`") + + #we multiply the flows into the charges + row_charges = flows[0] * charges[0] # a list of charges on each row + column_charges = flows[1] * charges[1] # a list of charges on each column + + #get the unique charges + unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) + unique_column_charges, column_dims = np.unique( + column_charges, return_counts=True) + #get the charges common to rows and columns (only those matter) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + + #convenience container for storing the degeneracies of each + #row and column charge + row_degeneracies = dict(zip(unique_row_charges, row_dims)) + column_degeneracies = dict(zip(unique_column_charges, column_dims)) + + # we only care about charges common to row and columns + mask = np.isin(row_charges, common_charges) + relevant_row_charges = row_charges[mask] + + 
#some numpy magic to get the index locations of the blocks + #we generate a vector of `len(relevant_row_charges) which, + #for each charge `c` in `relevant_row_charges` holds the + #column-degeneracy of charge `c` + degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64) + #for each charge `c` in `common_charges` we generate a boolean mask + #for indexing the positions where `relevant_column_charges` has a value of `c`. + masks = {} + for c in common_charges: + mask = relevant_row_charges == c + masks[c] = mask + degeneracy_vector[mask] = column_degeneracies[-c] + + # the result of the cumulative sum is a vector containing + # the stop positions of the non-zero values of each row + # within the data vector. + # E.g. for `relevant_row_charges` = [0,1,0,0,3], and + # column_degeneracies[0] = 10 + # column_degeneracies[1] = 20 + # column_degeneracies[3] = 30 + # we have + # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] + # The starting positions of consecutive elements (in row-major order) in + # each row with charge `c=0` within the data vector are then simply obtained using + # masks[0] = [True, False, True, True, False] + # and `stop_positions[masks[0]] - column_degeneracies[0]` + stop_positions = np.cumsum(degeneracy_vector) + blocks = {} + + for c in common_charges: + #numpy broadcasting is substantially faster than kron! 
+ a = np.expand_dims(stop_positions[masks[c]] - column_degeneracies[-c], 0) + b = np.expand_dims(np.arange(column_degeneracies[-c]), 1) + if not return_data: + blocks[c] = [ + np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), + (row_degeneracies[c], column_degeneracies[-c]) + ] + else: + blocks[c] = np.reshape( + data[np.reshape(a + b, + row_degeneracies[c] * column_degeneracies[-c])], + (row_degeneracies[c], column_degeneracies[-c])) + return blocks + + +def find_diagonal_sparse_blocks_column_major( + data: np.ndarray, + charges: List[np.ndarray], + flows: List[Union[bool, int]], + return_data: Optional[bool] = True) -> Dict: + """ + Deprecated + + Given the meta data and underlying data of a symmetric matrix, compute + all diagonal blocks and return them in a dict, assuming column-major + ordering. + Args: + data: An np.ndarray of the data. The number of elements in `data` + has to match the number of non-zero elements defined by `charges` + and `flows` + charges: List of np.ndarray, one for each leg. + Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. + + Returns: + dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray + or a python list of locations and shapes, depending on the value of `return_data`. 
+ """ + if len(charges) != 2: + raise ValueError("input has to be a two-dimensional symmetric matrix") + _check_flows(flows) + if len(flows) != len(charges): + raise ValueError("`len(flows)` is different from `len(charges)`") + + #we multiply the flows into the charges + row_charges = flows[0] * charges[0] # a list of charges on each row + column_charges = flows[1] * charges[1] # a list of charges on each column + + #get the unique charges + unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) + unique_column_charges, column_dims = np.unique( + column_charges, return_counts=True) + #get the charges common to rows and columns (only those matter) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + + #convenience container for storing the degeneracies of each + #row and column charge + row_degeneracies = dict(zip(unique_row_charges, row_dims)) + column_degeneracies = dict(zip(unique_column_charges, column_dims)) + + # we only care about charges common to row and columns + mask = np.isin(column_charges, -common_charges) + relevant_column_charges = column_charges[mask] + + #some numpy magic to get the index locations of the blocks + #we generate a vector of `len(relevant_column_charges) which, + #for each charge `c` in `relevant_column_charges` holds the + #row-degeneracy of charge `c` + degeneracy_vector = np.empty(len(relevant_column_charges), dtype=np.int64) + #for each charge `c` in `common_charges` we generate a boolean mask + #for indexing the positions where `relevant_column_charges` has a value of `c`. + masks = {} + for c in common_charges: + mask = relevant_column_charges == -c + masks[c] = mask + degeneracy_vector[mask] = row_degeneracies[c] + + # the result of the cumulative sum is a vector containing + # the stop positions of the non-zero values of each column + # within the data vector. + # E.g. 
for `relevant_column_charges` = [0,1,0,0,3], and + # row_degeneracies[0] = 10 + # row_degeneracies[1] = 20 + # row_degeneracies[3] = 30 + # we have + # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] + # The starting positions of consecutive elements (in column-major order) in + # each column with charge `c=0` within the data vector are then simply obtained using + # masks[0] = [True, False, True, True, False] + # and `stop_positions[masks[0]] - row_degeneracies[0]` + stop_positions = np.cumsum(degeneracy_vector) + blocks = {} + + for c in common_charges: + #numpy broadcasting is substantially faster than kron! + a = np.expand_dims(stop_positions[masks[c]] - row_degeneracies[c], 0) + b = np.expand_dims(np.arange(row_degeneracies[c]), 1) + if not return_data: + blocks[c] = [ + np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), + (row_degeneracies[c], column_degeneracies[-c]) + ] + else: + blocks[c] = np.reshape( + data[np.reshape(a + b, + row_degeneracies[c] * column_degeneracies[-c])], + (row_degeneracies[c], column_degeneracies[-c])) + return blocks + + +def find_dense_positions_deprecated(left_charges: np.ndarray, left_flow: int, + right_charges: np.ndarray, right_flow: int, + target_charge: int) -> Dict: + """ + Find the dense locations of elements (i.e. the index-values within the DENSE tensor) + in the vector `fused_charges` (resulting from fusing np.ndarrays + `left_charges` and `right_charges`) that have a value of `target_charge`. + For example, given + ``` + left_charges = [-2,0,1,0,0] + right_charges = [-1,0,2,1] + target_charge = 0 + fused_charges = fuse_charges([left_charges, right_charges],[1,1]) + print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] + ``` + we want to find the all different blocks + that fuse to `target_charge=0`, i.e. where `fused_charges==0`, + together with their corresponding index-values of the data in the dense array. 
+ `find_dense_blocks` returns a dict mapping tuples `(left_charge, right_charge)` + to an array of integers. + For the above example, we get: + * for `left_charge` = -2 and `right_charge` = 2 we get an array [2]. Thus, `fused_charges[2]` + was obtained from fusing -2 and 2. + * for `left_charge` = 0 and `right_charge` = 0 we get an array [5, 13, 17]. Thus, + `fused_charges[5,13,17]` were obtained from fusing 0 and 0. + * for `left_charge` = 1 and `right_charge` = -1 we get an array [8]. Thus, `fused_charges[8]` + was obtained from fusing 1 and -1. + Args: + left_charges: An np.ndarray of integer charges. + left_flow: The flow direction of the left charges. + right_charges: An np.ndarray of integer charges. + right_flow: The flow direction of the right charges. + target_charge: The target charge. + Returns: + dict: Mapping tuples of integers to np.ndarray of integers. + """ + _check_flows([left_flow, right_flow]) + unique_left = np.unique(left_charges) + unique_right = np.unique(right_charges) + fused = fuse_charges([unique_left, unique_right], [left_flow, right_flow]) + left_inds, right_inds = unfuse( + np.nonzero(fused == target_charge)[0], len(unique_left), + len(unique_right)) + left_c = unique_left[left_inds] + right_c = unique_right[right_inds] + len_right_charges = len(right_charges) + linear_positions = {} + for left_charge, right_charge in zip(left_c, right_c): + left_positions = np.nonzero(left_charges == left_charge)[0] + left_offsets = np.expand_dims(left_positions * len_right_charges, 1) + right_offsets = np.expand_dims( + np.nonzero(right_charges == right_charge)[0], 0) + linear_positions[(left_charge, right_charge)] = np.reshape( + left_offsets + right_offsets, + left_offsets.shape[0] * right_offsets.shape[1]) + return np.sort(np.concatenate(list(linear_positions.values()))) + + +def find_dense_positions(left_charges: np.ndarray, left_flow: int, + right_charges: np.ndarray, right_flow: int, + target_charge: int) -> Dict: + """ + Find the dense 
locations of elements (i.e. the index-values within the DENSE tensor) + in the vector `fused_charges` (resulting from fusing np.ndarrays + `left_charges` and `right_charges`) that have a value of `target_charge`. + For example, given + ``` + left_charges = [-2,0,1,0,0] + right_charges = [-1,0,2,1] + target_charge = 0 + fused_charges = fuse_charges([left_charges, right_charges],[1,1]) + print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] + ``` + we want to find the all different blocks + that fuse to `target_charge=0`, i.e. where `fused_charges==0`, + together with their corresponding index-values of the data in the dense array. + `find_dense_blocks` returns a dict mapping tuples `(left_charge, right_charge)` + to an array of integers. + For the above example, we get: + * for `left_charge` = -2 and `right_charge` = 2 we get an array [2]. Thus, `fused_charges[2]` + was obtained from fusing -2 and 2. + * for `left_charge` = 0 and `right_charge` = 0 we get an array [5, 13, 17]. Thus, + `fused_charges[5,13,17]` were obtained from fusing 0 and 0. + * for `left_charge` = 1 and `right_charge` = -1 we get an array [8]. Thus, `fused_charges[8]` + was obtained from fusing 1 and -1. + Args: + left_charges: An np.ndarray of integer charges. + left_flow: The flow direction of the left charges. + right_charges: An np.ndarray of integer charges. + right_flow: The flow direction of the right charges. + target_charge: The target charge. + Returns: + dict: Mapping tuples of integers to np.ndarray of integers. 
+ """ + _check_flows([left_flow, right_flow]) + unique_left, left_degeneracies = np.unique(left_charges, return_counts=True) + unique_right, right_degeneracies = np.unique( + right_charges, return_counts=True) + + common_charges = np.intersect1d( + unique_left, (target_charge - right_flow * unique_right) * left_flow, + assume_unique=True) + right_locations = {} + for c in common_charges: + + right_locations[(target_charge - left_flow * c) * right_flow] = np.nonzero( + right_charges == (target_charge - left_flow * c) * right_flow)[0] + + len_right_charges = len(right_charges) + indices = [] + for n in range(len(left_charges)): + c = left_charges[n] + if c not in common_charges: + continue + indices.append(n * len_right_charges + right_locations[ + (target_charge - left_flow * c) * right_flow]) + return np.concatenate(indices) + + +def find_sparse_positions(left_charges: np.ndarray, left_flow: int, + right_charges: np.ndarray, right_flow: int, + target_charges: Union[List[int], np.ndarray]) -> Dict: + """ + Find the sparse locations of elements (i.e. the index-values within the SPARSE tensor) + in the vector `fused_charges` (resulting from fusing np.ndarrays + `left_charges` and `right_charges`) that have a value of `target_charge`, + assuming that all elements different from `target_charges` are `0`. + For example, given + ``` + left_charges = [-2,0,1,0,0] + right_charges = [-1,0,2,1] + target_charges = [0,1] + fused_charges = fuse_charges([left_charges, right_charges],[1,1]) + print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] + ``` 0 1 2 3 4 5 6 7 8 + we want to find the all different blocks + that fuse to `target_charges=[0,1]`, i.e. where `fused_charges==0` or `1`, + together with their corresponding sparse index-values of the data in the sparse array, + assuming that all elements in `fused_charges` different from `target_charges` are 0. 
+ + `find_sparse_blocks` returns a dict mapping integers `target_charge` + to an array of integers denoting the sparse locations of elements within + `fused_charges`. + For the above example, we get: + * `target_charge=0`: [0,1,3,5,7] + * `target_charge=1`: [2,4,6,8] + Args: + left_charges: An np.ndarray of integer charges. + left_flow: The flow direction of the left charges. + right_charges: An np.ndarray of integer charges. + right_flow: The flow direction of the right charges. + target_charge: The target charge. + Returns: + dict: Mapping integers to np.ndarray of integers. + """ + #FIXME: this is probably still not optimal + + _check_flows([left_flow, right_flow]) + target_charges = np.unique(target_charges) + unique_left = np.unique(left_charges) + unique_right = np.unique(right_charges) + fused = fuse_charges([unique_left, unique_right], [left_flow, right_flow]) + + #compute all unique charges that can add up to + #target_charges + left_inds, right_inds = [], [] + for target_charge in target_charges: + li, ri = unfuse( + np.nonzero(fused == target_charge)[0], len(unique_left), + len(unique_right)) + left_inds.append(li) + right_inds.append(ri) + + #now compute the relevant unique left and right charges + unique_left_charges = unique_left[np.unique(np.concatenate(left_inds))] + unique_right_charges = unique_right[np.unique(np.concatenate(right_inds))] + + #only keep those charges that are relevant + relevant_left_charges = left_charges[np.isin(left_charges, + unique_left_charges)] + relevant_right_charges = right_charges[np.isin(right_charges, + unique_right_charges)] + + unique_right_charges, right_dims = np.unique( + relevant_right_charges, return_counts=True) + right_degeneracies = dict(zip(unique_right_charges, right_dims)) + #generate a degeneracy vector which for each value r in relevant_right_charges + #holds the corresponding number of non-zero elements `relevant_right_charges` + #that can add up to `target_charges`. 
+ degeneracy_vector = np.empty(len(relevant_left_charges), dtype=np.int64) + right_indices = {} + for left_charge in unique_left_charges: + total_degeneracy = np.sum(right_dims[np.isin( + left_flow * left_charge + right_flow * unique_right_charges, + target_charges)]) + tmp_relevant_right_charges = relevant_right_charges[np.isin( + relevant_right_charges, + (target_charges - left_flow * left_charge) * right_flow)] + + for target_charge in target_charges: + right_indices[(left_charge, target_charge)] = np.nonzero( + tmp_relevant_right_charges == + (target_charge - left_flow * left_charge) * right_flow)[0] + + degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy + + stop_positions = np.cumsum(degeneracy_vector) + start_positions = stop_positions - degeneracy_vector + blocks = {t: [] for t in target_charges} + for left_charge in unique_left_charges: + a = np.expand_dims(start_positions[relevant_left_charges == left_charge], 0) + for target_charge in target_charges: + ri = right_indices[(left_charge, target_charge)] + if len(ri) != 0: + b = np.expand_dims(ri, 1) + tmp = a + b + blocks[target_charge].append(np.reshape(tmp, np.prod(tmp.shape))) + out = {} + for target_charge in target_charges: + out[target_charge] = np.concatenate(blocks[target_charge]) + return out + + +def compute_dense_to_sparse_mapping_deprecated(charges: List[np.ndarray], + flows: List[Union[bool, int]], + target_charge: int) -> int: + """ + Compute the mapping from multi-index positions to the linear positions + within the sparse data container, given the meta-data of a symmetric tensor. + This function returns a list of np.ndarray `index_positions`, with + `len(index_positions)=len(charges)` (equal to the rank of the tensor). + When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. 
+ ` + multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) + ` + with `r` the rank of the tensor and `N` the number of non-zero elements of + the symmetric tensor, then the element at position `n` within the linear + data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], + i.e. `data[n]` has the multi-index `multi_indices[n,:]`, and the total charges + can for example be obtained using + ``` + index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) + total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) + for n in range(len(charges)): + total_charges += flows[n]*charges[n][index_positions[n]] + np.testing.assert_allclose(total_charges, 0) + ``` + Args: + charges: List of np.ndarray of int, one for each leg of the + underlying tensor. Each np.ndarray `charges[leg]` + is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + target_charge: The total target charge of the blocks to be calculated. + Returns: + np.ndarray: An (N, r) np.ndarray of dtype np.int16, + with `N` the number of non-zero elements, and `r` + the rank of the tensor. 
+ """ + #find the best partition (the one where left and right dimensions are + #closest + dims = np.asarray([len(c) for c in charges]) + t1 = time.time() + fused_charges = fuse_charges(charges, flows) + nz_indices = np.nonzero(fused_charges == target_charge)[0] + if len(nz_indices) == 0: + raise ValueError( + "`charges` do not add up to a total charge {}".format(target_charge)) + + index_locations = [] + for n in reversed(range(len(charges))): + t1 = time.time() + nz_indices, right_indices = unfuse(nz_indices, np.prod(dims[0:n]), dims[n]) + index_locations.insert(0, right_indices) + print(time.time() - t1) + return index_locations + + +def compute_dense_to_sparse_mapping_2(charges: List[np.ndarray], + flows: List[Union[bool, int]], + target_charge: int) -> int: + """ + Compute the mapping from multi-index positions to the linear positions + within the sparse data container, given the meta-data of a symmetric tensor. + This function returns a list of np.ndarray `index_positions`, with + `len(index_positions)=len(charges)` (equal to the rank of the tensor). + When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. + ` + multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) + ` + with `r` the rank of the tensor and `N` the number of non-zero elements of + the symmetric tensor, then the element at position `n` within the linear + data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], + i.e. `data[n]` has the multi-index `multi_indices[n,:]`, and the total charges + can for example be obtained using + ``` + index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) + total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) + for n in range(len(charges)): + total_charges += flows[n]*charges[n][index_positions[n]] + np.testing.assert_allclose(total_charges, 0) + ``` + Args: + charges: List of np.ndarray of int, one for each leg of the + underlying tensor. 
Each np.ndarray `charges[leg]` + is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + target_charge: The total target charge of the blocks to be calculated. + Returns: + np.ndarray: An (N, r) np.ndarray of dtype np.int16, + with `N` the number of non-zero elements, and `r` + the rank of the tensor. + """ + #find the best partition (the one where left and right dimensions are + #closest + dims = np.asarray([len(c) for c in charges]) + + #note: left_charges and right_charges have been fused from RIGHT to LEFT + left_charges, right_charges, partition = _find_best_partition(charges, flows) + t1 = time.time() + nz_indices = find_dense_positions( + left_charges, 1, right_charges, 1, target_charge=target_charge) + print(time.time() - t1) + if len(nz_indices) == 0: + raise ValueError( + "`charges` do not add up to a total charge {}".format(target_charge)) + t1 = time.time() + nz_left_indices, nz_right_indices = unfuse(nz_indices, len(left_charges), + len(right_charges)) + print(time.time() - t1) + index_locations = [] + #first unfuse left charges + for n in range(partition): + t1 = time.time() + indices, nz_left_indices = unfuse(nz_left_indices, dims[n], + np.prod(dims[n + 1:partition])) + index_locations.append(indices) + print(time.time() - t1) + for n in range(partition, len(dims)): + t1 = time.time() + indices, nz_right_indices = unfuse(nz_right_indices, dims[n], + np.prod(dims[n + 1::])) + index_locations.append(indices) + print(time.time() - t1) + + return index_locations + + +def compute_dense_to_sparse_mapping(charges: List[np.ndarray], + flows: List[Union[bool, int]], + target_charge: int) -> int: + """ + Compute the mapping from multi-index positions to the linear positions + within the sparse data container, given the meta-data of a symmetric tensor. 
+ This function returns a list of np.ndarray `index_positions`, with + `len(index_positions)=len(charges)` (equal to the rank of the tensor). + When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. + ` + multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) + ` + with `r` the rank of the tensor and `N` the number of non-zero elements of + the symmetric tensor, then the element at position `n` within the linear + data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], + i.e. `data[n]` has the multi-index `multi_indices[n,:]`, and the total charges + can for example be obtained using + ``` + index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) + total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) + for n in range(len(charges)): + total_charges += flows[n]*charges[n][index_positions[n]] + np.testing.assert_allclose(total_charges, 0) + ``` + Args: + charges: List of np.ndarray of int, one for each leg of the + underlying tensor. Each np.ndarray `charges[leg]` + is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + target_charge: The total target charge of the blocks to be calculated. + Returns: + list of np.ndarray: A list of length `r`, with `r` the rank of the tensor. + Each element in the list is an N-dimensional np.ndarray of int, + with `N` the number of non-zero elements. 
+ """ + #find the best partition (the one where left and right dimensions are + #closest + dims = np.asarray([len(c) for c in charges]) + #note: left_charges and right_charges have been fused from RIGHT to LEFT + left_charges, right_charges, partition = _find_best_partition(charges, flows) + nz_indices = find_dense_positions( + left_charges, 1, right_charges, 1, target_charge=target_charge) + + if len(nz_indices) == 0: + raise ValueError( + "`charges` do not add up to a total charge {}".format(target_charge)) + return np.unravel_index(nz_indices, dims) + + +class BlockSparseTensor: + """ + Minimal class implementation of block sparsity. + The class design follows Glen's proposal (Design 0). + The class currently only supports a single U(1) symmetry + and only numpy.ndarray. + + Attributes: + * self.data: A 1d np.ndarray storing the underlying + data of the tensor + * self.charges: A list of `np.ndarray` of shape + (D,), where D is the bond dimension. Once we go beyond + a single U(1) symmetry, this has to be updated. + + * self.flows: A list of integers of length `k`. + `self.flows` determines the flows direction of charges + on each leg of the tensor. A value of `-1` denotes + outflowing charge, a value of `1` denotes inflowing + charge. + + The tensor data is stored in self.data, a 1d np.ndarray. + """ + + def __init__(self, data: np.ndarray, indices: List[Index]) -> None: + """ + Args: + data: An np.ndarray of the data. The number of elements in `data` + has to match the number of non-zero elements defined by `charges` + and `flows` + indices: List of `Index` objecst, one for each leg. 
+    Raises: +      ValueError: If the number of non-zero elements determined by `charges` +        and `flows` does not match `len(data)`. +    """ +    self.indices = indices +    _check_flows(self.flows) +    num_non_zero_elements = compute_num_nonzero(self.charges, self.flows) + +    if num_non_zero_elements != len(data.flat): +      raise ValueError("number of tensor elements defined " +                       "by `charges` is different from" +                       " len(data)={}".format(len(data.flat))) + +    self.data = np.asarray(data.flat)  #NOTE(review): intended to avoid a copy, but `np.asarray` on a flatiter may copy -- verify + +  @classmethod +  def randn(cls, indices: List[Index], +            dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": +    """ +    Initialize a random symmetric tensor from random normal distribution. +    Args: +      indices: List of `Index` objects, one for each leg. +      dtype: An optional numpy dtype. The dtype of the tensor +    Returns: +      BlockSparseTensor +    """ +    charges = [i.charges for i in indices] +    flows = [i.flow for i in indices] +    num_non_zero_elements = compute_num_nonzero(charges, flows) +    backend = backend_factory.get_backend('numpy') +    data = backend.randn((num_non_zero_elements,), dtype=dtype) +    return cls(data=data, indices=indices) + +  @classmethod +  def random(cls, indices: List[Index], +             dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": +    """ +    Initialize a random symmetric tensor from random normal distribution. +    Args: +      indices: List of `Index` objects, one for each leg. +      dtype: An optional numpy dtype. 
The dtype of the tensor + Returns: + BlockSparseTensor + """ + charges = [i.charges for i in indices] + flows = [i.flow for i in indices] + num_non_zero_elements = compute_num_nonzero(charges, flows) + dtype = dtype if dtype is not None else self.np.float64 + + def init_random(): + if ((np.dtype(dtype) is np.dtype(np.complex128)) or + (np.dtype(dtype) is np.dtype(np.complex64))): + return np.random.rand(num_non_zero_elements).astype( + dtype) - 0.5 + 1j * ( + np.random.rand(num_non_zero_elements).astype(dtype) - 0.5) + return np.random.randn(num_non_zero_elements).astype(dtype) - 0.5 + + return cls(data=init_random(), indices=indices) + + @property + def rank(self): + return len(self.indices) + + @property + def dense_shape(self) -> Tuple: + """ + The dense shape of the tensor. + Returns: + Tuple: A tuple of `int`. + """ + return tuple([i.dimension for i in self.indices]) + + @property + def shape(self) -> Tuple: + """ + The sparse shape of the tensor. + Returns: + Tuple: A tuple of `Index` objects. + """ + return tuple(self.indices) + + @property + def dtype(self) -> Type[np.number]: + return self.data.dtype + + @property + def flows(self): + return [i.flow for i in self.indices] + + @property + def charges(self): + return [i.charges for i in self.indices] + + def transpose(self, + order: Union[List[int], np.ndarray], + transposed_linear_positions: Optional[np.ndarray] = None + ) -> "BlockSparseTensor": + """ + Transpose the tensor into the new order `order`. This routine currently shuffles + data. + Args: + order: The new order of indices. + transposed_linear_positions: An np.ndarray of int for reshuffling the data, + typically the output of a prior call to `transpose`. Passing `transposed_linear_positions` + can greatly speed up the transposition. + Returns: + BlockSparseTensor: The transposed tensor. + """ + #FIXME: this implementation uses scipy.sparse.csr_matrix to generate the + #lookup-table from dense to sparse indices. 
According to some quick + #testing, the final lookup is currently the bottleneck. + #FIXME: transpose currently shuffles data. This can in principle be postponed + #until `tensordot` or `find_diagonal_sparsenn_blocks`, at the cost of + #maintaining two lookup tables for sparse-to-dense positions and dense-to-sparse + #positions + if len(order) != self.rank: + raise ValueError( + "`len(order)={}` is different form `self.rank={}`".format( + len(order), self.rank)) + #transpose is the only function using self.dense_to_sparse_table + #so we can initialize it here. This will change if we are implementing + #lazy shuffling of data. In this case, `find_diagonal_sparse_blocks` + #also needs + + #we use elementary indices here because it is + #more efficient to get the fused charges using + #the best partition + if transposed_linear_positions is None: + elementary_indices = {} + flat_elementary_indices = [] + + for n in range(self.rank): + elementary_indices[n] = self.indices[n].get_elementary_indices() + flat_elementary_indices.extend(elementary_indices[n]) + flat_index_list = np.arange(len(flat_elementary_indices)) + cum_num_legs = np.append( + 0, np.cumsum([len(elementary_indices[n]) for n in range(self.rank)])) + flat_order = np.concatenate( + [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) + + flat_charges = [i.charges for i in flat_elementary_indices] + flat_flows = [i.flow for i in flat_elementary_indices] + flat_dims = [len(c) for c in flat_charges] + flat_strides = np.flip(np.append(1, np.cumprod(np.flip(flat_dims[1::])))) + if not hasattr(self, 'dense_to_sparse_table'): + #find the best partition into left and right charges + left_charges, right_charges, _ = _find_best_partition( + flat_charges, flat_flows) + #find the index-positions of the elements in the fusion + #of `left_charges` and `right_charges` that have `0` + #total charge (those are the only non-zero elements). 
+ linear_positions = find_dense_positions( + left_charges, 1, right_charges, 1, target_charge=0) + + self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( + len(self.data)), (linear_positions, + np.zeros(len(self.data), dtype=np.int64)))) + + flat_tr_charges = [flat_charges[n] for n in flat_order] + flat_tr_flows = [flat_flows[n] for n in flat_order] + flat_tr_strides = [flat_strides[n] for n in flat_order] + flat_tr_dims = [flat_dims[n] for n in flat_order] + + tr_left_charges, tr_right_charges, _ = _find_best_partition( + flat_tr_charges, flat_tr_flows) + #FIXME: this should be done without fully fusing the strides + tr_dense_linear_positions = fuse_charges([ + np.arange(flat_tr_dims[n]) * flat_tr_strides[n] + for n in range(len(flat_tr_dims)) + ], + flows=[1] * len(flat_tr_dims)) + tr_linear_positions = find_dense_positions(tr_left_charges, 1, + tr_right_charges, 1, 0) + + inds = np.squeeze(self.dense_to_sparse_table[ + tr_dense_linear_positions[tr_linear_positions], 0].toarray()) + else: + inds = transposed_linear_positions + self.data = self.data[inds] + return inds + + def transpose_intersect1d( + self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": + """ + Transpose the tensor into the new order `order` + Args: pp + order: The new order of indices. + Returns: + BlockSparseTensor: The transposed tensor. + """ + #FIXME: this implementation uses scipy.sparse.csr_matrix to generate the + #lookup-table from dense to sparse indices. According to some quick + #testing, the final lookup is currently the bottleneck. + #FIXME: transpose currently shuffles data. 
This can in principle be postponed + #until `tensordot` or `find_diagonal_sparse_blocks` + if len(order) != self.rank: + raise ValueError(len(order), self.rank) + charges = self.charges #call only once in case some of the indices are merged indices + dims = [len(c) for c in charges] + + strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) + #find the best partition into left and right charges + left_charges, right_charges, _ = _find_best_partition(charges, self.flows) + #find the index-positions of the elements in the fusion + #of `left_charges` and `right_charges` that have `0` + #total charge (those are the only non-zero elements). + linear_positions = find_dense_positions( + left_charges, 1, right_charges, 1, target_charge=0) + + tr_charges = [charges[n] for n in order] + tr_flows = [self.flows[n] for n in order] + tr_strides = [strides[n] for n in order] + tr_dims = [dims[n] for n in order] + tr_left_charges, tr_right_charges, _ = _find_best_partition( + tr_charges, tr_flows) + + tr_dense_linear_positions = fuse_charges( + [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], + flows=[1] * len(tr_dims)) + tr_linear_positions = find_dense_positions(tr_left_charges, 1, + tr_right_charges, 1, 0) + new_linear_positions = tr_dense_linear_positions[tr_linear_positions] + _, _, inds = np.intersect1d( + linear_positions, + new_linear_positions, + return_indices=True, + assume_unique=True) + self.data = self.data[inds] + + # def transpose_lookup(self, order: Union[List[int], np.ndarray] + # ) -> "BlockSparseTensor": + # """ + # Deprecated + + # Transpose the tensor into the new order `order`. Uses a simple cython std::map + # for the lookup + # Args: + # order: The new order of indices. + # Returns: + # BlockSparseTensor: The transposed tensor. 
+ # """ + # if len(order) != self.rank: + # raise ValueError( + # "`len(order)={}` is different form `self.rank={}`".format( + # len(order), self.rank)) + # charges = self.charges #call only once in case some of the indices are merged indices + # dims = [len(c) for c in charges] + + # strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) + # #find the best partition into left and right charges + # left_charges, right_charges, _ = _find_best_partition(charges, self.flows) + # #find the index-positions of the elements in the fusion + # #of `left_charges` and `right_charges` that have `0` + # #total charge (those are the only non-zero elements). + # linear_positions = find_dense_positions( + # left_charges, 1, right_charges, 1, target_charge=0) + + # tr_charges = [charges[n] for n in order] + # tr_flows = [self.flows[n] for n in order] + # tr_strides = [strides[n] for n in order] + # tr_dims = [dims[n] for n in order] + # tr_left_charges, tr_right_charges, _ = _find_best_partition( + # tr_charges, tr_flows) + # #FIXME: this should be done without fully fusing the strides + # tr_dense_linear_positions = fuse_charges( + # [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], + # flows=[1] * len(tr_dims)) + # tr_linear_positions = find_dense_positions(tr_left_charges, 1, + # tr_right_charges, 1, 0) + # inds = lookup(linear_positions, + # tr_dense_linear_positions[tr_linear_positions]) + # self.data = self.data[inds] + + def transpose_searchsorted( + self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": + """ + Deprecated: + + Transpose the tensor into the new order `order`. Uses `np.searchsorted` + for the lookup. + Args: + order: The new order of indices. + Returns: + BlockSparseTensor: The transposed tensor. 
+ """ + if len(order) != self.rank: + raise ValueError( + "`len(order)={}` is different form `self.rank={}`".format( + len(order), self.rank)) + charges = self.charges #call only once in case some of the indices are merged indices + dims = [len(c) for c in charges] + + strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) + #find the best partition into left and right charges + left_charges, right_charges, _ = _find_best_partition(charges, self.flows) + #find the index-positions of the elements in the fusion + #of `left_charges` and `right_charges` that have `0` + #total charge (those are the only non-zero elements). + linear_positions = find_dense_positions( + left_charges, 1, right_charges, 1, target_charge=0) + + tr_charges = [charges[n] for n in order] + tr_flows = [self.flows[n] for n in order] + tr_strides = [strides[n] for n in order] + tr_dims = [dims[n] for n in order] + tr_left_charges, tr_right_charges, _ = _find_best_partition( + tr_charges, tr_flows) + #FIXME: this should be done without fully fusing the strides + tr_dense_linear_positions = fuse_charges( + [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], + flows=[1] * len(tr_dims)) + tr_linear_positions = find_dense_positions(tr_left_charges, 1, + tr_right_charges, 1, 0) + + inds = np.searchsorted(linear_positions, + tr_dense_linear_positions[tr_linear_positions]) + self.data = self.data[inds] + + def reset_shape(self) -> None: + """ + Bring the tensor back into its elementary shape. + """ + self.indices = self.get_elementary_indices() + + def get_elementary_indices(self) -> List: + """ + Compute the elementary indices of the array. + """ + elementary_indices = [] + for i in self.indices: + elementary_indices.extend(i.get_elementary_indices()) + + return elementary_indices + + def reshape(self, shape: Union[Iterable[Index], Iterable[int]]) -> None: + """ + Reshape `tensor` into `shape` in place. 
+ `BlockSparseTensor.reshape` works essentially the same as the dense + version, with the notable exception that the tensor can only be + reshaped into a form compatible with its elementary indices. + The elementary indices are the indices at the leaves of the `Index` + objects `tensors.indices`. + For example, while the following reshaping is possible for regular + dense numpy tensor, + ``` + A = np.random.rand(6,6,6) + np.reshape(A, (2,3,6,6)) + ``` + the same code for BlockSparseTensor + ``` + q1 = np.random.randint(0,10,6) + q2 = np.random.randint(0,10,6) + q3 = np.random.randint(0,10,6) + i1 = Index(charges=q1,flow=1) + i2 = Index(charges=q2,flow=-1) + i3 = Index(charges=q3,flow=1) + A=BlockSparseTensor.randn(indices=[i1,i2,i3]) + print(A.shape) #prints (6,6,6) + A.reshape((2,3,6,6)) #raises ValueError + ``` + raises a `ValueError` since (2,3,6,6) + is incompatible with the elementary shape (6,6,6) of the tensor. + + Args: + tensor: A symmetric tensor. + shape: The new shape. Can either be a list of `Index` + or a list of `int`. + Returns: + BlockSparseTensor: A new tensor reshaped into `shape` + """ + dense_shape = [] + for s in shape: + if isinstance(s, Index): + dense_shape.append(s.dimension) + else: + dense_shape.append(s) + # a few simple checks + if np.prod(dense_shape) != np.prod(self.dense_shape): + raise ValueError("A tensor with {} elements cannot be " + "reshaped into a tensor with {} elements".format( + np.prod(self.shape), np.prod(dense_shape))) + + #keep a copy of the old indices for the case where reshaping fails + #FIXME: this is pretty hacky! + index_copy = [i.copy() for i in self.indices] + + def raise_error(): + #if this error is raised then `shape` is incompatible + #with the elementary indices. We then reset the shape + #to what is was before the call to `reshape`. 
+ self.indices = index_copy + elementary_indices = [] + for i in self.indices: + elementary_indices.extend(i.get_elementary_indices()) + raise ValueError("The shape {} is incompatible with the " + "elementary shape {} of the tensor.".format( + dense_shape, + tuple([e.dimension for e in elementary_indices]))) + + self.reset_shape() #bring tensor back into its elementary shape + for n in range(len(dense_shape)): + if dense_shape[n] > self.dense_shape[n]: + while dense_shape[n] > self.dense_shape[n]: + #fuse indices + i1, i2 = self.indices.pop(n), self.indices.pop(n) + #note: the resulting flow is set to one since the flow + #is multiplied into the charges. As a result the tensor + #will then be invariant in any case. + self.indices.insert(n, fuse_index_pair(i1, i2)) + if self.dense_shape[n] > dense_shape[n]: + raise_error() + elif dense_shape[n] < self.dense_shape[n]: + raise_error() + #at this point the first len(dense_shape) indices of the tensor + #match the `dense_shape`. + while len(dense_shape) < len(self.indices): + i2, i1 = self.indices.pop(), self.indices.pop() + self.indices.append(fuse_index_pair(i1, i2)) + + def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: + """ + Obtain the diagonal blocks of symmetric matrix. + BlockSparseTensor has to be a matrix. + For matrices with shape[0] << shape[1], this routine avoids explicit fusion + of column charges. + + Args: + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. 
+ Returns: + dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) + + """ + if self.rank != 2: + raise ValueError( + "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" + .format(self.rank)) + + row_indices = self.indices[0].get_elementary_indices() + column_indices = self.indices[1].get_elementary_indices() + + return find_diagonal_sparse_blocks( + data=self.data, + row_charges=[i.charges for i in row_indices], + column_charges=[i.charges for i in column_indices], + row_flows=[i.flow for i in row_indices], + column_flows=[i.flow for i in column_indices], + return_data=return_data) + + def get_diagonal_blocks_deprecated_1( + self, return_data: Optional[bool] = True) -> Dict: + """ + Obtain the diagonal blocks of symmetric matrix. + BlockSparseTensor has to be a matrix. + For matrices with shape[0] << shape[1], this routine avoids explicit fusion + of column charges. + + Args: + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. 
+ Returns: + dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) + + """ + if self.rank != 2: + raise ValueError( + "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" + .format(self.rank)) + + row_indices = self.indices[0].get_elementary_indices() + column_indices = self.indices[1].get_elementary_indices() + + return find_diagonal_sparse_blocks_deprecated_1( + data=self.data, + row_charges=[i.charges for i in row_indices], + column_charges=[i.charges for i in column_indices], + row_flows=[i.flow for i in row_indices], + column_flows=[i.flow for i in column_indices], + return_data=return_data) + + def get_diagonal_blocks_deprecated_0( + self, return_data: Optional[bool] = True) -> Dict: + """ + Deprecated + + Obtain the diagonal blocks of symmetric matrix. + BlockSparseTensor has to be a matrix. + Args: + return_data: If `True`, the return dictionary maps quantum numbers `q` to + actual `np.ndarray` with the data. This involves a copy of data. + If `False`, the returned dict maps quantum numbers of a list + [locations, shape], where `locations` is an np.ndarray of type np.int64 + containing the locations of the tensor elements within A.data, i.e. + `A.data[locations]` contains the elements belonging to the tensor with + quantum numbers `(q,q). `shape` is the shape of the corresponding array. + Returns: + dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) + + """ + if self.rank != 2: + raise ValueError( + "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" + .format(self.rank)) + + return find_diagonal_sparse_blocks_deprecated_0( + data=self.data, + charges=self.charges, + flows=self.flows, + return_data=return_data) + + +def reshape(tensor: BlockSparseTensor, + shape: Union[Iterable[Index], Iterable[int]]) -> BlockSparseTensor: + """ + Reshape `tensor` into `shape`. 
+ `reshape` works essentially the same as the dense version, with the + notable exception that the tensor can only be reshaped into a form + compatible with its elementary indices. The elementary indices are + the indices at the leaves of the `Index` objects `tensors.indices`. + For example, while the following reshaping is possible for regular + dense numpy tensor, + ``` + A = np.random.rand(6,6,6) + np.reshape(A, (2,3,6,6)) + ``` + the same code for BlockSparseTensor + ``` + q1 = np.random.randint(0,10,6) + q2 = np.random.randint(0,10,6) + q3 = np.random.randint(0,10,6) + i1 = Index(charges=q1,flow=1) + i2 = Index(charges=q2,flow=-1) + i3 = Index(charges=q3,flow=1) + A=BlockSparseTensor.randn(indices=[i1,i2,i3]) + print(A.shape) #prints (6,6,6) + reshape(A, (2,3,6,6)) #raises ValueError + ``` + raises a `ValueError` since (2,3,6,6) + is incompatible with the elementary shape (6,6,6) of the tensor. + + Args: + tensor: A symmetric tensor. + shape: The new shape. Can either be a list of `Index` + or a list of `int`. 
+ Returns: + BlockSparseTensor: A new tensor reshaped into `shape` + """ + result = BlockSparseTensor( + data=tensor.data.copy(), indices=[i.copy() for i in tensor.indices]) + result.reshape(shape) + return result diff --git a/tensornetwork/block_tensor/block_tensor_old_test.py b/tensornetwork/block_tensor/block_tensor_old_test.py new file mode 100644 index 000000000..9f11bec6e --- /dev/null +++ b/tensornetwork/block_tensor/block_tensor_old_test.py @@ -0,0 +1,176 @@ +import numpy as np +import pytest +# pylint: disable=line-too-long +from tensornetwork.block_tensor.block_tensor import BlockSparseTensor, compute_num_nonzero, find_sparse_positions, find_dense_positions +from index import Index, fuse_charges + +np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128] + + +@pytest.mark.parametrize("dtype", np_dtypes) +def test_block_sparse_init(dtype): + D = 10 #bond dimension + B = 10 #number of blocks + rank = 4 + flows = np.asarray([1 for _ in range(rank)]) + flows[-2::] = -1 + charges = [ + np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + for _ in range(rank) + ] + indices = [ + Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) + for n in range(rank) + ] + num_elements = compute_num_nonzero([i.charges for i in indices], + [i.flow for i in indices]) + A = BlockSparseTensor.random(indices=indices, dtype=dtype) + assert A.dtype == dtype + for r in range(rank): + assert A.indices[r].name == 'index{}'.format(r) + assert A.dense_shape == tuple([D] * rank) + assert len(A.data) == num_elements + + +def test_find_dense_positions(): + left_charges = np.asarray([-2, 0, 1, 0, 0]).astype(np.int16) + right_charges = np.asarray([-1, 0, 2, 1]).astype(np.int16) + target_charge = 0 + fused_charges = fuse_charges([left_charges, right_charges], [1, 1]) + dense_positions = find_dense_positions(left_charges, 1, right_charges, 1, + target_charge) + np.testing.assert_allclose(dense_positions, + np.nonzero(fused_charges == 
target_charge)[0]) + + +def test_find_dense_positions_2(): + D = 40 #bond dimension + B = 4 #number of blocks + dtype = np.int16 #the dtype of the quantum numbers + rank = 4 + flows = np.asarray([1 for _ in range(rank)]) + flows[-2::] = -1 + charges = [ + np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + for _ in range(rank) + ] + indices = [ + Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) + for n in range(rank) + ] + n1 = compute_num_nonzero([i.charges for i in indices], + [i.flow for i in indices]) + row_charges = fuse_charges([indices[n].charges for n in range(rank // 2)], + [1 for _ in range(rank // 2)]) + column_charges = fuse_charges( + [indices[n].charges for n in range(rank // 2, rank)], + [1 for _ in range(rank // 2, rank)]) + + i01 = indices[0] * indices[1] + i23 = indices[2] * indices[3] + positions = find_dense_positions(i01.charges, 1, i23.charges, 1, 0) + assert len(positions) == n1 + + +def test_find_sparse_positions(): + D = 40 #bond dimension + B = 4 #number of blocks + dtype = np.int16 #the dtype of the quantum numbers + rank = 4 + flows = np.asarray([1 for _ in range(rank)]) + flows[-2::] = -1 + charges = [ + np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + for _ in range(rank) + ] + indices = [ + Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) + for n in range(rank) + ] + n1 = compute_num_nonzero([i.charges for i in indices], + [i.flow for i in indices]) + row_charges = fuse_charges([indices[n].charges for n in range(rank // 2)], + [1 for _ in range(rank // 2)]) + column_charges = fuse_charges( + [indices[n].charges for n in range(rank // 2, rank)], + [1 for _ in range(rank // 2, rank)]) + + i01 = indices[0] * indices[1] + i23 = indices[2] * indices[3] + unique_row_charges = np.unique(i01.charges) + unique_column_charges = np.unique(i23.charges) + common_charges = np.intersect1d( + unique_row_charges, -unique_column_charges, assume_unique=True) + blocks = find_sparse_positions( + 
i01.charges, 1, i23.charges, 1, target_charges=[0]) + assert sum([len(v) for v in blocks.values()]) == n1 + np.testing.assert_allclose(np.sort(blocks[0]), np.arange(n1)) + + +def test_find_sparse_positions_2(): + D = 40 #bond dimension + B = 4 #number of blocks + dtype = np.int16 #the dtype of the quantum numbers + flows = [1, -1] + + rank = len(flows) + charges = [ + np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + for _ in range(rank) + ] + indices = [ + Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) + for n in range(rank) + ] + i1, i2 = indices + common_charges = np.intersect1d(i1.charges, i2.charges) + row_locations = find_sparse_positions( + left_charges=i1.charges, + left_flow=flows[0], + right_charges=i2.charges, + right_flow=flows[1], + target_charges=common_charges) + fused = (i1 * i2).charges + relevant = fused[np.isin(fused, common_charges)] + for k, v in row_locations.items(): + np.testing.assert_allclose(np.nonzero(relevant == k)[0], np.sort(v)) + + +def test_get_diagonal_blocks(): + D = 40 #bond dimension + B = 4 #number of blocks + dtype = np.int16 #the dtype of the quantum numbers + rank = 4 + flows = np.asarray([1 for _ in range(rank)]) + flows[-2::] = -1 + charges = [ + np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + for _ in range(rank) + ] + indices = [ + Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) + for n in range(rank) + ] + common_charges = np.intersect1d(indices[0].charges, indices[1].charges) + row_locations = find_sparse_positions( + left_charges=indices[0].charges, + left_flow=1, + right_charges=indices[1].charges, + right_flow=1, + target_charges=common_charges) + + +def test_dense_transpose(): + Ds = [10, 11, 12] #bond dimension + rank = len(Ds) + flows = np.asarray([1 for _ in range(rank)]) + flows[-2::] = -1 + charges = [np.zeros(Ds[n], dtype=np.int16) for n in range(rank)] + indices = [ + Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) + for n in 
range(rank) + ] + A = BlockSparseTensor.random(indices=indices, dtype=np.float64) + B = np.transpose(np.reshape(A.data.copy(), Ds), (1, 0, 2)) + A.transpose((1, 0, 2)) + np.testing.assert_allclose(A.data, B.flat) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index 378760e1c..b5e8ec339 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -16,125 +16,11 @@ from __future__ import division from __future__ import print_function import numpy as np -from tensornetwork.network_components import Node, contract, contract_between -# pylint: disable=line-too-long -from tensornetwork.backends import backend_factory +from tensornetwork.block_tensor.charge import BaseCharge, ChargeCollection import copy from typing import List, Union, Any, Optional, Tuple, Text -def fuse_charge_pair(q1: Union[List, np.ndarray], flow1: int, - q2: Union[List, np.ndarray], flow2: int) -> np.ndarray: - """ - Fuse charges `q1` with charges `q2` by simple addition (valid - for U(1) charges). `q1` and `q2` typically belong to two consecutive - legs of `BlockSparseTensor`. - Given `q1 = [0,1,2]` and `q2 = [10,100]`, this returns - `[10, 100, 11, 101, 12, 102]`. - When using row-major ordering of indices in `BlockSparseTensor`, - the position of q1 should be "to the left" of the position of q2. - - Args: - q1: Iterable of integers - flow1: Flow direction of charge `q1`. - q2: Iterable of integers - flow2: Flow direction of charge `q2`. - Returns: - np.ndarray: The result of fusing `q1` with `q2`. - """ - return np.reshape( - flow1 * np.asarray(q1)[:, None] + flow2 * np.asarray(q2)[None, :], - len(q1) * len(q2)) - - -def fuse_charges(charges: List[Union[List, np.ndarray]], - flows: List[int]) -> np.ndarray: - """ - Fuse all `charges` by simple addition (valid - for U(1) charges). Charges are fused from "right to left", - in accordance with row-major order (see `fuse_charges_pair`). 
- - Args: - chargs: A list of charges to be fused. - flows: A list of flows, one for each element in `charges`. - Returns: - np.ndarray: The result of fusing `charges`. - """ - if len(charges) == 1: - #nothing to do - return flows[0] * charges[0] - fused_charges = charges[0] * flows[0] - for n in range(1, len(charges)): - fused_charges = fuse_charge_pair( - q1=fused_charges, flow1=1, q2=charges[n], flow2=flows[n]) - return fused_charges - - -def fuse_degeneracies(degen1: Union[List, np.ndarray], - degen2: Union[List, np.ndarray]) -> np.ndarray: - """ - Fuse degeneracies `degen1` and `degen2` of two leg-charges - by simple kronecker product. `degen1` and `degen2` typically belong to two - consecutive legs of `BlockSparseTensor`. - Given `degen1 = [1, 2, 3]` and `degen2 = [10, 100]`, this returns - `[10, 100, 20, 200, 30, 300]`. - When using row-major ordering of indices in `BlockSparseTensor`, - the position of `degen1` should be "to the left" of the position of `degen2`. - Args: - degen1: Iterable of integers - degen2: Iterable of integers - Returns: - np.ndarray: The result of fusing `dege1` with `degen2`. - """ - return np.reshape(degen1[:, None] * degen2[None, :], - len(degen1) * len(degen2)) - - -def unfuse(fused_indices: np.ndarray, len_left: int, - len_right: int) -> Tuple[np.ndarray, np.ndarray]: - """ - Given an np.ndarray `fused_indices` of integers denoting - index-positions of elements within a 1d array, `unfuse` - obtains the index-positions of the elements in the left and - right np.ndarrays `left`, `right` which, upon fusion, - are placed at the index-positions given by - `fused_indices` in the fused np.ndarray. 
- An example will help to illuminate this: - Given np.ndarrays `left`, `right` and the result - of their fusion (`fused`): - - ``` - left = [0,1,0,2] - right = [-1,3,-2] - fused = fuse_charges([left, right], flows=[1,1]) - print(fused) #[-1 3 -2 0 4 -1 -1 3 -2 1 5 0] - ``` - - we want to find which elements in `left` and `right` - fuse to a value of 0. In the above case, there are two - 0 in `fused`: one is obtained from fusing `left[1]` and - `right[0]`, the second one from fusing `left[3]` and `right[2]` - `unfuse` returns the index-positions of these values within - `left` and `right`, that is - - ``` - left_index_values, right_index_values = unfuse(np.nonzero(fused==0)[0], len(left), len(right)) - print(left_index_values) # [1,3] - print(right_index_values) # [0,2] - ``` - - Args: - fused_indices: A 1d np.ndarray of integers. - len_left: The length of the left np.ndarray. - len_right: The length of the right np.ndarray. - Returns: - (np.ndarry, np.ndarray) - """ - right = np.mod(fused_indices, len_right) - left = np.floor_divide(fused_indices - right, len_right) - return left, right - - class Index: """ An index class to store indices of a symmetric tensor. 
@@ -143,16 +29,21 @@ class Index: """ def __init__(self, - charges: Union[List, np.ndarray], + charges: Union[ChargeCollection, BaseCharge], flow: int, - name: Optional[Text] = "index", + name: Optional[Text] = None, left_child: Optional["Index"] = None, right_child: Optional["Index"] = None): - self._charges = np.asarray(charges) + if isinstance(charges, BaseCharge): + self._charges = charges #ChargeCollection([charges]) + elif isinstance(charges, ChargeCollection) or (charges is None): + self._charges = charges + else: + raise TypeError("Unknown type {}".format(type(charges))) self.flow = flow self.left_child = left_child self.right_child = right_child - self._name = name + self.name = name def __repr__(self): return str(self.dimension) @@ -171,17 +62,17 @@ def _copy_helper(self, index: "Index", copied_index: "Index") -> None: """ if index.left_child != None: left_copy = Index( - charges=copy.copy(index.left_child.charges), - flow=copy.copy(index.left_child.flow), - name=copy.copy(index.left_child.name)) + charges=copy.deepcopy(index.left_child.charges), + flow=copy.deepcopy(index.left_child.flow), + name=copy.deepcopy(index.left_child.name)) copied_index.left_child = left_copy self._copy_helper(index.left_child, left_copy) if index.right_child != None: right_copy = Index( - charges=copy.copy(index.right_child.charges), - flow=copy.copy(index.right_child.flow), - name=copy.copy(index.right_child.name)) + charges=copy.deepcopy(index.right_child.charges), + flow=copy.deepcopy(index.right_child.flow), + name=copy.deepcopy(index.right_child.name)) copied_index.right_child = right_copy self._copy_helper(index.right_child, right_copy) @@ -192,7 +83,9 @@ def copy(self): `Index` are copied as well. 
""" index_copy = Index( - charges=self._charges.copy(), flow=copy.copy(self.flow), name=self.name) + charges=copy.deepcopy(self._charges), + flow=copy.deepcopy(self.flow), + name=self.name) self._copy_helper(self, index_copy) return index_copy @@ -228,19 +121,7 @@ def __mul__(self, index: "Index") -> "Index": def charges(self): if self.is_leave: return self._charges - fused_charges = fuse_charge_pair( - self.left_child.charges, self.left_child.flow, self.right_child.charges, - self.right_child.flow) - - return fused_charges - - @property - def name(self): - if self._name: - return self._name - if self.is_leave: - return self.name - return self.left_child.name + ' & ' + self.right_child.name + return self.left_child.charges * self.left_child.flow + self.right_child.charges * self.right_child.flow def fuse_index_pair(left_index: Index, diff --git a/tensornetwork/block_tensor/index_new.py b/tensornetwork/block_tensor/index_new.py deleted file mode 100644 index b5e8ec339..000000000 --- a/tensornetwork/block_tensor/index_new.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright 2019 The TensorNetwork Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -import numpy as np -from tensornetwork.block_tensor.charge import BaseCharge, ChargeCollection -import copy -from typing import List, Union, Any, Optional, Tuple, Text - - -class Index: - """ - An index class to store indices of a symmetric tensor. - An index keeps track of all its childs by storing references - to them (i.e. it is a binary tree). - """ - - def __init__(self, - charges: Union[ChargeCollection, BaseCharge], - flow: int, - name: Optional[Text] = None, - left_child: Optional["Index"] = None, - right_child: Optional["Index"] = None): - if isinstance(charges, BaseCharge): - self._charges = charges #ChargeCollection([charges]) - elif isinstance(charges, ChargeCollection) or (charges is None): - self._charges = charges - else: - raise TypeError("Unknown type {}".format(type(charges))) - self.flow = flow - self.left_child = left_child - self.right_child = right_child - self.name = name - - def __repr__(self): - return str(self.dimension) - - @property - def is_leave(self): - return (self.left_child is None) and (self.right_child is None) - - @property - def dimension(self): - return np.prod([len(i.charges) for i in self.get_elementary_indices()]) - - def _copy_helper(self, index: "Index", copied_index: "Index") -> None: - """ - Helper function for copy - """ - if index.left_child != None: - left_copy = Index( - charges=copy.deepcopy(index.left_child.charges), - flow=copy.deepcopy(index.left_child.flow), - name=copy.deepcopy(index.left_child.name)) - - copied_index.left_child = left_copy - self._copy_helper(index.left_child, left_copy) - if index.right_child != None: - right_copy = Index( - charges=copy.deepcopy(index.right_child.charges), - flow=copy.deepcopy(index.right_child.flow), - name=copy.deepcopy(index.right_child.name)) - copied_index.right_child = right_copy - self._copy_helper(index.right_child, right_copy) - - def copy(self): - """ 
- Returns: - Index: A deep copy of `Index`. Note that all children of - `Index` are copied as well. - """ - index_copy = Index( - charges=copy.deepcopy(self._charges), - flow=copy.deepcopy(self.flow), - name=self.name) - - self._copy_helper(self, index_copy) - return index_copy - - def _leave_helper(self, index: "Index", leave_list: List) -> None: - if index.left_child: - self._leave_helper(index.left_child, leave_list) - if index.right_child: - self._leave_helper(index.right_child, leave_list) - if (index.left_child is None) and (index.right_child is None): - leave_list.append(index) - - def get_elementary_indices(self) -> List: - """ - Returns: - List: A list containing the elementary indices (the leaves) - of `Index`. - """ - leave_list = [] - self._leave_helper(self, leave_list) - return leave_list - - def __mul__(self, index: "Index") -> "Index": - """ - Merge `index` and self into a single larger index. - The flow of the resulting index is set to 1. - Flows of `self` and `index` are multiplied into - the charges upon fusing.n - """ - return fuse_index_pair(self, index) - - @property - def charges(self): - if self.is_leave: - return self._charges - return self.left_child.charges * self.left_child.flow + self.right_child.charges * self.right_child.flow - - -def fuse_index_pair(left_index: Index, - right_index: Index, - flow: Optional[int] = 1) -> Index: - """ - Fuse two consecutive indices (legs) of a symmetric tensor. - Args: - left_index: A tensor Index. - right_index: A tensor Index. - flow: An optional flow of the resulting `Index` object. - Returns: - Index: The result of fusing `index1` and `index2`. - """ - #Fuse the charges of the two indices - if left_index is right_index: - raise ValueError( - "index1 and index2 are the same object. 
Can only fuse distinct objects") - - return Index( - charges=None, flow=flow, left_child=left_index, right_child=right_index) - - -def fuse_indices(indices: List[Index], flow: Optional[int] = 1) -> Index: - """ - Fuse a list of indices (legs) of a symmetric tensor. - Args: - indices: A list of tensor Index objects - flow: An optional flow of the resulting `Index` object. - Returns: - Index: The result of fusing `indices`. - """ - - index = indices[0] - for n in range(1, len(indices)): - index = fuse_index_pair(index, indices[n], flow=flow) - return index - - -def split_index(index: Index) -> Tuple[Index, Index]: - """ - Split an index (leg) of a symmetric tensor into two legs. - Args: - index: A tensor Index. - Returns: - Tuple[Index, Index]: The result of splitting `index`. - """ - if index.is_leave: - raise ValueError("cannot split an elementary index") - - return index.left_child, index.right_child diff --git a/tensornetwork/block_tensor/index_new_test.py b/tensornetwork/block_tensor/index_new_test.py deleted file mode 100644 index 97ef38a4d..000000000 --- a/tensornetwork/block_tensor/index_new_test.py +++ /dev/null @@ -1,151 +0,0 @@ -import numpy as np -# pylint: disable=line-too-long -from tensornetwork.block_tensor.index_new import Index, fuse_index_pair, split_index, fuse_indices -from tensornetwork.block_tensor.charge import U1Charge, Z2Charge, ChargeCollection - - -def test_index_fusion_mul(): - D = 10 - B = 4 - dtype = np.int16 - q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 - q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 - - i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 - i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 - - i12 = i1 * i2 - assert i12.left_child is i1 - assert i12.right_child is i2 - for n in range(len(i12.charges.charges)): - assert np.all(i12.charges[n].charges == (q1 + q2).charges) - - -def 
test_fuse_indices(): - D = 10 - B = 4 - dtype = np.int16 - q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 - q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 - i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 - i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 - - i12 = fuse_indices([i1, i2]) - assert i12.left_child is i1 - assert i12.right_child is i2 - for n in range(len(i12.charges.charges)): - assert np.all(i12.charges[n].charges == (q1 + q2).charges) - - -def test_split_index(): - D = 10 - B = 4 - dtype = np.int16 - q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 - q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 - - i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 - i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 - - i12 = i1 * i2 - i1_, i2_ = split_index(i12) - assert i1 is i1_ - assert i2 is i2_ - np.testing.assert_allclose(q1.charges, i1.charges[0].charges) - np.testing.assert_allclose(q2.charges, i2.charges[0].charges) - np.testing.assert_allclose(q1.charges, i1_.charges[0].charges) - np.testing.assert_allclose(q2.charges, i2_.charges[0].charges) - assert i1_.name == 'index1' - assert i2_.name == 'index2' - assert i1_.flow == i1.flow - assert i2_.flow == i2.flow - - -def test_elementary_indices(): - D = 10 - B = 4 - dtype = np.int16 - q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)]) - q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)]) - q3 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)]) - q4 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)]) - i1 = Index(charges=q1, flow=1, name='index1') - i2 = Index(charges=q2, flow=1, name='index2') - i3 = Index(charges=q3, flow=1, name='index3') - i4 = Index(charges=q4, 
flow=1, name='index4') - - i12 = i1 * i2 - i34 = i3 * i4 - elmt12 = i12.get_elementary_indices() - assert elmt12[0] is i1 - assert elmt12[1] is i2 - - i1234 = i12 * i34 - elmt1234 = i1234.get_elementary_indices() - assert elmt1234[0] is i1 - assert elmt1234[1] is i2 - assert elmt1234[2] is i3 - assert elmt1234[3] is i4 - assert elmt1234[0].name == 'index1' - assert elmt1234[1].name == 'index2' - assert elmt1234[2].name == 'index3' - assert elmt1234[3].name == 'index4' - assert elmt1234[0].flow == i1.flow - assert elmt1234[1].flow == i2.flow - assert elmt1234[2].flow == i3.flow - assert elmt1234[3].flow == i4.flow - - np.testing.assert_allclose(q1.charges, i1.charges[0].charges) - np.testing.assert_allclose(q2.charges, i2.charges[0].charges) - np.testing.assert_allclose(q3.charges, i3.charges[0].charges) - np.testing.assert_allclose(q4.charges, i4.charges[0].charges) - - -def test_leave(): - D = 10 - B = 4 - dtype = np.int16 - q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 - q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 - - i1 = Index(charges=q1, flow=1, name='index1') - i2 = Index(charges=q2, flow=1, name='index2') - assert i1.is_leave - assert i2.is_leave - - i12 = i1 * i2 - assert not i12.is_leave - - -def test_copy(): - D = 10 - B = 4 - dtype = np.int16 - q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 - q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 - - i1 = Index(charges=q1, flow=1, name='index1') - i2 = Index(charges=q2, flow=1, name='index2') - i3 = Index(charges=q1, flow=-1, name='index3') - i4 = Index(charges=q2, flow=-1, name='index4') - - i12 = i1 * i2 - i34 = i3 * i4 - i1234 = i12 * i34 - i1234_copy = i1234.copy() - - elmt1234 = i1234_copy.get_elementary_indices() - assert elmt1234[0] is not i1 - assert elmt1234[1] is not i2 - assert 
elmt1234[2] is not i3 - assert elmt1234[3] is not i4 diff --git a/tensornetwork/block_tensor/index_old.py b/tensornetwork/block_tensor/index_old.py new file mode 100644 index 000000000..378760e1c --- /dev/null +++ b/tensornetwork/block_tensor/index_old.py @@ -0,0 +1,294 @@ +# Copyright 2019 The TensorNetwork Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +from tensornetwork.network_components import Node, contract, contract_between +# pylint: disable=line-too-long +from tensornetwork.backends import backend_factory +import copy +from typing import List, Union, Any, Optional, Tuple, Text + + +def fuse_charge_pair(q1: Union[List, np.ndarray], flow1: int, + q2: Union[List, np.ndarray], flow2: int) -> np.ndarray: + """ + Fuse charges `q1` with charges `q2` by simple addition (valid + for U(1) charges). `q1` and `q2` typically belong to two consecutive + legs of `BlockSparseTensor`. + Given `q1 = [0,1,2]` and `q2 = [10,100]`, this returns + `[10, 100, 11, 101, 12, 102]`. + When using row-major ordering of indices in `BlockSparseTensor`, + the position of q1 should be "to the left" of the position of q2. + + Args: + q1: Iterable of integers + flow1: Flow direction of charge `q1`. + q2: Iterable of integers + flow2: Flow direction of charge `q2`. + Returns: + np.ndarray: The result of fusing `q1` with `q2`. 
+ """ + return np.reshape( + flow1 * np.asarray(q1)[:, None] + flow2 * np.asarray(q2)[None, :], + len(q1) * len(q2)) + + +def fuse_charges(charges: List[Union[List, np.ndarray]], + flows: List[int]) -> np.ndarray: + """ + Fuse all `charges` by simple addition (valid + for U(1) charges). Charges are fused from "right to left", + in accordance with row-major order (see `fuse_charges_pair`). + + Args: + chargs: A list of charges to be fused. + flows: A list of flows, one for each element in `charges`. + Returns: + np.ndarray: The result of fusing `charges`. + """ + if len(charges) == 1: + #nothing to do + return flows[0] * charges[0] + fused_charges = charges[0] * flows[0] + for n in range(1, len(charges)): + fused_charges = fuse_charge_pair( + q1=fused_charges, flow1=1, q2=charges[n], flow2=flows[n]) + return fused_charges + + +def fuse_degeneracies(degen1: Union[List, np.ndarray], + degen2: Union[List, np.ndarray]) -> np.ndarray: + """ + Fuse degeneracies `degen1` and `degen2` of two leg-charges + by simple kronecker product. `degen1` and `degen2` typically belong to two + consecutive legs of `BlockSparseTensor`. + Given `degen1 = [1, 2, 3]` and `degen2 = [10, 100]`, this returns + `[10, 100, 20, 200, 30, 300]`. + When using row-major ordering of indices in `BlockSparseTensor`, + the position of `degen1` should be "to the left" of the position of `degen2`. + Args: + degen1: Iterable of integers + degen2: Iterable of integers + Returns: + np.ndarray: The result of fusing `dege1` with `degen2`. 
+ """ + return np.reshape(degen1[:, None] * degen2[None, :], + len(degen1) * len(degen2)) + + +def unfuse(fused_indices: np.ndarray, len_left: int, + len_right: int) -> Tuple[np.ndarray, np.ndarray]: + """ + Given an np.ndarray `fused_indices` of integers denoting + index-positions of elements within a 1d array, `unfuse` + obtains the index-positions of the elements in the left and + right np.ndarrays `left`, `right` which, upon fusion, + are placed at the index-positions given by + `fused_indices` in the fused np.ndarray. + An example will help to illuminate this: + Given np.ndarrays `left`, `right` and the result + of their fusion (`fused`): + + ``` + left = [0,1,0,2] + right = [-1,3,-2] + fused = fuse_charges([left, right], flows=[1,1]) + print(fused) #[-1 3 -2 0 4 -1 -1 3 -2 1 5 0] + ``` + + we want to find which elements in `left` and `right` + fuse to a value of 0. In the above case, there are two + 0 in `fused`: one is obtained from fusing `left[1]` and + `right[0]`, the second one from fusing `left[3]` and `right[2]` + `unfuse` returns the index-positions of these values within + `left` and `right`, that is + + ``` + left_index_values, right_index_values = unfuse(np.nonzero(fused==0)[0], len(left), len(right)) + print(left_index_values) # [1,3] + print(right_index_values) # [0,2] + ``` + + Args: + fused_indices: A 1d np.ndarray of integers. + len_left: The length of the left np.ndarray. + len_right: The length of the right np.ndarray. + Returns: + (np.ndarry, np.ndarray) + """ + right = np.mod(fused_indices, len_right) + left = np.floor_divide(fused_indices - right, len_right) + return left, right + + +class Index: + """ + An index class to store indices of a symmetric tensor. + An index keeps track of all its childs by storing references + to them (i.e. it is a binary tree). 
+ """ + + def __init__(self, + charges: Union[List, np.ndarray], + flow: int, + name: Optional[Text] = "index", + left_child: Optional["Index"] = None, + right_child: Optional["Index"] = None): + self._charges = np.asarray(charges) + self.flow = flow + self.left_child = left_child + self.right_child = right_child + self._name = name + + def __repr__(self): + return str(self.dimension) + + @property + def is_leave(self): + return (self.left_child is None) and (self.right_child is None) + + @property + def dimension(self): + return np.prod([len(i.charges) for i in self.get_elementary_indices()]) + + def _copy_helper(self, index: "Index", copied_index: "Index") -> None: + """ + Helper function for copy + """ + if index.left_child != None: + left_copy = Index( + charges=copy.copy(index.left_child.charges), + flow=copy.copy(index.left_child.flow), + name=copy.copy(index.left_child.name)) + + copied_index.left_child = left_copy + self._copy_helper(index.left_child, left_copy) + if index.right_child != None: + right_copy = Index( + charges=copy.copy(index.right_child.charges), + flow=copy.copy(index.right_child.flow), + name=copy.copy(index.right_child.name)) + copied_index.right_child = right_copy + self._copy_helper(index.right_child, right_copy) + + def copy(self): + """ + Returns: + Index: A deep copy of `Index`. Note that all children of + `Index` are copied as well. 
+ """ + index_copy = Index( + charges=self._charges.copy(), flow=copy.copy(self.flow), name=self.name) + + self._copy_helper(self, index_copy) + return index_copy + + def _leave_helper(self, index: "Index", leave_list: List) -> None: + if index.left_child: + self._leave_helper(index.left_child, leave_list) + if index.right_child: + self._leave_helper(index.right_child, leave_list) + if (index.left_child is None) and (index.right_child is None): + leave_list.append(index) + + def get_elementary_indices(self) -> List: + """ + Returns: + List: A list containing the elementary indices (the leaves) + of `Index`. + """ + leave_list = [] + self._leave_helper(self, leave_list) + return leave_list + + def __mul__(self, index: "Index") -> "Index": + """ + Merge `index` and self into a single larger index. + The flow of the resulting index is set to 1. + Flows of `self` and `index` are multiplied into + the charges upon fusing.n + """ + return fuse_index_pair(self, index) + + @property + def charges(self): + if self.is_leave: + return self._charges + fused_charges = fuse_charge_pair( + self.left_child.charges, self.left_child.flow, self.right_child.charges, + self.right_child.flow) + + return fused_charges + + @property + def name(self): + if self._name: + return self._name + if self.is_leave: + return self.name + return self.left_child.name + ' & ' + self.right_child.name + + +def fuse_index_pair(left_index: Index, + right_index: Index, + flow: Optional[int] = 1) -> Index: + """ + Fuse two consecutive indices (legs) of a symmetric tensor. + Args: + left_index: A tensor Index. + right_index: A tensor Index. + flow: An optional flow of the resulting `Index` object. + Returns: + Index: The result of fusing `index1` and `index2`. + """ + #Fuse the charges of the two indices + if left_index is right_index: + raise ValueError( + "index1 and index2 are the same object. 
Can only fuse distinct objects") + + return Index( + charges=None, flow=flow, left_child=left_index, right_child=right_index) + + +def fuse_indices(indices: List[Index], flow: Optional[int] = 1) -> Index: + """ + Fuse a list of indices (legs) of a symmetric tensor. + Args: + indices: A list of tensor Index objects + flow: An optional flow of the resulting `Index` object. + Returns: + Index: The result of fusing `indices`. + """ + + index = indices[0] + for n in range(1, len(indices)): + index = fuse_index_pair(index, indices[n], flow=flow) + return index + + +def split_index(index: Index) -> Tuple[Index, Index]: + """ + Split an index (leg) of a symmetric tensor into two legs. + Args: + index: A tensor Index. + Returns: + Tuple[Index, Index]: The result of splitting `index`. + """ + if index.is_leave: + raise ValueError("cannot split an elementary index") + + return index.left_child, index.right_child diff --git a/tensornetwork/block_tensor/index_old_test.py b/tensornetwork/block_tensor/index_old_test.py new file mode 100644 index 000000000..293b37bd8 --- /dev/null +++ b/tensornetwork/block_tensor/index_old_test.py @@ -0,0 +1,171 @@ +import numpy as np +# pylint: disable=line-too-long +from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies, fuse_charge_pair, fuse_indices, unfuse + + +def test_index_fusion_mul(): + D = 10 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 1 + q2 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 2 + i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + + i12 = i1 * i2 + assert i12.left_child is i1 + assert i12.right_child is i2 + assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) + + +def test_fuse_index_pair(): + D = 10 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, + 
D).astype(dtype) #quantum numbers on leg 1 + q2 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 2 + i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + + i12 = fuse_index_pair(i1, i2) + assert i12.left_child is i1 + assert i12.right_child is i2 + assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) + + +def test_fuse_indices(): + D = 10 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 1 + q2 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 2 + i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + + i12 = fuse_indices([i1, i2]) + assert i12.left_child is i1 + assert i12.right_child is i2 + assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) + + +def test_split_index(): + D = 10 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 1 + q2 = np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype) #quantum numbers on leg 2 + i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + + i12 = i1 * i2 + i1_, i2_ = split_index(i12) + assert i1 is i1_ + assert i2 is i2_ + np.testing.assert_allclose(q1, i1.charges) + np.testing.assert_allclose(q2, i2.charges) + np.testing.assert_allclose(q1, i1_.charges) + np.testing.assert_allclose(q2, i2_.charges) + assert i1_.name == 'index1' + assert i2_.name == 'index2' + assert i1_.flow == i1.flow + assert i2_.flow == i2.flow + + +def test_elementary_indices(): + D = 10 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + q3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + q4 = 
np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + i1 = Index(charges=q1, flow=1, name='index1') + i2 = Index(charges=q2, flow=1, name='index2') + i3 = Index(charges=q3, flow=1, name='index3') + i4 = Index(charges=q4, flow=1, name='index4') + + i12 = i1 * i2 + i34 = i3 * i4 + elmt12 = i12.get_elementary_indices() + assert elmt12[0] is i1 + assert elmt12[1] is i2 + + i1234 = i12 * i34 + elmt1234 = i1234.get_elementary_indices() + assert elmt1234[0] is i1 + assert elmt1234[1] is i2 + assert elmt1234[2] is i3 + assert elmt1234[3] is i4 + assert elmt1234[0].name == 'index1' + assert elmt1234[1].name == 'index2' + assert elmt1234[2].name == 'index3' + assert elmt1234[3].name == 'index4' + assert elmt1234[0].flow == i1.flow + assert elmt1234[1].flow == i2.flow + assert elmt1234[2].flow == i3.flow + assert elmt1234[3].flow == i4.flow + + np.testing.assert_allclose(q1, i1.charges) + np.testing.assert_allclose(q2, i2.charges) + np.testing.assert_allclose(q3, i3.charges) + np.testing.assert_allclose(q4, i4.charges) + + +def test_leave(): + D = 10 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + i1 = Index(charges=q1, flow=1, name='index1') + i2 = Index(charges=q2, flow=1, name='index2') + assert i1.is_leave + assert i2.is_leave + + i12 = i1 * i2 + assert not i12.is_leave + + +def test_copy(): + D = 10 + B = 4 + dtype = np.int16 + q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + i1 = Index(charges=q1, flow=1, name='index1') + i2 = Index(charges=q2, flow=1, name='index2') + i3 = Index(charges=q1, flow=-1, name='index3') + i4 = Index(charges=q2, flow=-1, name='index4') + + i12 = i1 * i2 + i34 = i3 * i4 + i1234 = i12 * i34 + i1234_copy = i1234.copy() + + elmt1234 = i1234_copy.get_elementary_indices() + assert elmt1234[0] is not i1 + assert elmt1234[1] is not i2 + assert elmt1234[2] is not i3 
+ assert elmt1234[3] is not i4 + + +def test_unfuse(): + q1 = np.random.randint(-4, 5, 10).astype(np.int16) + q2 = np.random.randint(-4, 5, 4).astype(np.int16) + q3 = np.random.randint(-4, 5, 4).astype(np.int16) + q12 = fuse_charges([q1, q2], [1, 1]) + q123 = fuse_charges([q12, q3], [1, 1]) + nz = np.nonzero(q123 == 0)[0] + q12_inds, q3_inds = unfuse(nz, len(q12), len(q3)) + + q1_inds, q2_inds = unfuse(q12_inds, len(q1), len(q2)) + np.testing.assert_allclose(q1[q1_inds] + q2[q2_inds] + q3[q3_inds], + np.zeros(len(q1_inds), dtype=np.int16)) From 0b7f64b0691195c7bf0ec6247310c314c2983fc7 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 10 Jan 2020 11:35:29 -0500 Subject: [PATCH 150/212] fix tutorial, fix import --- tensornetwork/block_tensor/block_tensor.py | 2 ++ tensornetwork/block_tensor/tutorial.py | 32 +++++++++++----------- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 0c60284b5..bace1d72b 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -17,8 +17,10 @@ from __future__ import print_function import numpy as np #from tensornetwork.block_tensor.lookup import lookup +from tensornetwork.backends import backend_factory # pylint: disable=line-too-long from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index +# pylint: disable=line-too-long from tensornetwork.block_tensor.charge import fuse_degeneracies, fuse_charges, fuse_degeneracies, BaseCharge, ChargeCollection import numpy as np import scipy as sp diff --git a/tensornetwork/block_tensor/tutorial.py b/tensornetwork/block_tensor/tutorial.py index 01e5eabf0..81d87bbe3 100644 --- a/tensornetwork/block_tensor/tutorial.py +++ b/tensornetwork/block_tensor/tutorial.py @@ -17,9 +17,9 @@ from __future__ import print_function import tensornetwork as tn import numpy as np -import tensornetwork.block_tensor.block_tensor as BT -import 
tensornetwork.block_tensor.index as IDX - +from tensornetwork.block_tensor.block_tensor import BlockSparseTensor, reshape +from tensornetwork.block_tensor.index import Index +from tensornetwork.block_tensor.charge import U1Charge B = 4 # possible charges on each leg can be between [0,B) ########################################################## ##### Generate a rank 4 symmetrix tensor ####### @@ -27,31 +27,31 @@ # generate random charges on each leg of the tensor D1, D2, D3, D4 = 4, 6, 8, 10 #bond dimensions on each leg -q1 = np.random.randint(0, B, D1) -q2 = np.random.randint(0, B, D2) -q3 = np.random.randint(0, B, D3) -q4 = np.random.randint(0, B, D4) +q1 = U1Charge(np.random.randint(-B, B + 1, D1)) +q2 = U1Charge(np.random.randint(-B, B + 1, D2)) +q3 = U1Charge(np.random.randint(-B, B + 1, D3)) +q4 = U1Charge(np.random.randint(-B, B + 1, D4)) # generate Index objects for each leg. neccessary for initialization of # BlockSparseTensor -i1 = IDX.Index(charges=q1, flow=1) -i2 = IDX.Index(charges=q2, flow=-1) -i3 = IDX.Index(charges=q3, flow=1) -i4 = IDX.Index(charges=q4, flow=-1) +i1 = Index(charges=q1, flow=1) +i2 = Index(charges=q2, flow=-1) +i3 = Index(charges=q3, flow=1) +i4 = Index(charges=q4, flow=-1) # initialize a random symmetric tensor -A = BT.BlockSparseTensor.randn(indices=[i1, i2, i3, i4], dtype=np.complex128) -B = BT.reshape(A, (4, 48, 10)) #creates a new tensor (copy) -shape_A = A.shape #returns the dense shape of A +A = BlockSparseTensor.randn(indices=[i1, i2, i3, i4], dtype=np.complex128) +B = reshape(A, (4, 48, 10)) #creates a new tensor (copy) +shape_A = A.dense_shape #returns the dense shape of A A.reshape([shape_A[0] * shape_A[1], shape_A[2], shape_A[3]]) #in place reshaping A.reshape(shape_A) #reshape back into original shape -sparse_shape = A.sparse_shape #returns a deep copy of `A.indices`. +sparse_shape = A.shape #returns a deep copy of `A.indices`. 
new_sparse_shape = [ sparse_shape[0] * sparse_shape[1], sparse_shape[2], sparse_shape[3] ] -B = BT.reshape(A, new_sparse_shape) #return a copy +B = reshape(A, new_sparse_shape) #return a copy A.reshape(new_sparse_shape) #in place reshaping A.reshape(sparse_shape) #bring A back into original shape From abb9cef71931bf1ee46f0102677730f1a119f0be Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 10 Jan 2020 14:28:33 -0500 Subject: [PATCH 151/212] faster find_dense_positions --- tensornetwork/block_tensor/block_tensor.py | 159 ++++++++++++++++++--- 1 file changed, 136 insertions(+), 23 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index bace1d72b..371527a79 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -386,34 +386,134 @@ def find_dense_positions( np.ndarray: The indices of the elements fusing to `target_charge`. """ _check_flows([left_flow, right_flow]) + + t1 = time.time() unique_left, left_degeneracies = left_charges.unique(return_counts=True) unique_right, right_degeneracies = right_charges.unique(return_counts=True) - tmp_charges = (target_charge + (unique_right * right_flow * (-1))) * left_flow - concatenated = unique_left.concatenate(tmp_charges) + t1 = time.time() + tmp_left_charges = (target_charge + + (unique_right * right_flow * (-1))) * left_flow + concatenated = unique_left.concatenate(tmp_left_charges) tmp_unique, counts = concatenated.unique(return_counts=True) - common_charges = tmp_unique[ + relevant_left_charges = tmp_unique[ counts == 2] #common_charges is a BaseCharge or ChargeCollection - right_locations = {} - for n in range(len(common_charges)): - c = common_charges[n] - - right_charge = (target_charge + (c * left_flow * (-1))) * right_flow - right_locations[right_charge.get_item(0)] = np.nonzero( - right_charges == right_charge)[0] + right_locations = {} + t1 = time.time() len_right_charges = len(right_charges) - 
indices = [] - for n in range(len(left_charges)): - c = left_charges[n] - right_charge = (target_charge + (c * left_flow * (-1))) * right_flow + dense_inds = [] + left_inds = [] + index_table = [] + start = 0 + for n in range(len(relevant_left_charges)): + c = relevant_left_charges[n] + left_ind = np.nonzero(left_charges == c)[0] + index_table.append( + np.stack([ + left_ind, + np.arange(len(left_ind)), + np.full(len(left_ind), n, dtype=np.int64) + ], + axis=1)) + start += len(left_ind) + left_inds.append(left_ind) + + dim_array = np.expand_dims(len_right_charges * left_ind, 1) - if c not in common_charges: - continue - indices.append(n * len_right_charges + - right_locations[right_charge.get_item(0)]) + right_charge = (target_charge + (c * left_flow * (-1))) * right_flow - return np.concatenate(indices) + right_inds = np.nonzero(right_charges == right_charge)[0] + mat = np.tile(right_inds, (len(dim_array), 1)) + + dense_inds.append(mat + dim_array) + + it = np.concatenate(index_table) + + ind_sort = np.argsort(it[:, 0]) + table = it[ind_sort, :] + + return np.concatenate( + [dense_inds[table[n, 2]][table[n, 1], :] for n in range(table.shape[0])]) + + +# def find_dense_positions( +# left_charges: Union[BaseCharge, ChargeCollection], left_flow: int, +# right_charges: Union[BaseCharge, ChargeCollection], right_flow: int, +# target_charge: Union[BaseCharge, ChargeCollection]) -> np.ndarray: +# """ +# Find the dense locations of elements (i.e. the index-values within the DENSE tensor) +# in the vector `fused_charges` (resulting from fusing np.ndarrays +# `left_charges` and `right_charges`) that have a value of `target_charge`. +# For example, given +# ``` +# left_charges = [-2,0,1,0,0] +# right_charges = [-1,0,2,1] +# target_charge = 0 +# fused_charges = fuse_charges([left_charges, right_charges],[1,1]) +# print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] +# ``` +# we want to find the all different blocks +# that fuse to `target_charge=0`, i.e. 
where `fused_charges==0`, +# together with their corresponding index-values of the data in the dense array. +# `find_dense_blocks` returns a dict mapping tuples `(left_charge, right_charge)` +# to an array of integers. +# For the above example, we get: +# * for `left_charge` = -2 and `right_charge` = 2 we get an array [2]. Thus, `fused_charges[2]` +# was obtained from fusing -2 and 2. +# * for `left_charge` = 0 and `right_charge` = 0 we get an array [5, 13, 17]. Thus, +# `fused_charges[5,13,17]` were obtained from fusing 0 and 0. +# * for `left_charge` = 1 and `right_charge` = -1 we get an array [8]. Thus, `fused_charges[8]` +# was obtained from fusing 1 and -1. +# Args: +# left_charges: An np.ndarray of integer charges. +# left_flow: The flow direction of the left charges. +# right_charges: An np.ndarray of integer charges. +# right_flow: The flow direction of the right charges. +# target_charge: The target charge. +# Returns: +# np.ndarray: The indices of the elements fusing to `target_charge`. 
+# """ +# _check_flows([left_flow, right_flow]) + +# t1 = time.time() +# unique_left, left_degeneracies = left_charges.unique(return_counts=True) +# unique_right, right_degeneracies = right_charges.unique(return_counts=True) +# print('finding unique values: {}s'.format(time.time() - t1)) + +# t1 = time.time() +# tmp_charges = (target_charge + (unique_right * right_flow * (-1))) * left_flow +# concatenated = unique_left.concatenate(tmp_charges) +# tmp_unique, counts = concatenated.unique(return_counts=True) +# common_charges = tmp_unique[ +# counts == 2] #common_charges is a BaseCharge or ChargeCollection +# print('finding common charges: {}s'.format(time.time() - t1)) + +# right_locations = {} +# t1 = time.time() +# for n in range(len(common_charges)): +# c = common_charges[n] + +# right_charge = (target_charge + (c * left_flow * (-1))) * right_flow +# right_locations[right_charge.get_item(0)] = np.nonzero( +# right_charges == right_charge)[0] +# print('finding right locations: {}s'.format(time.time() - t1)) +# len_right_charges = len(right_charges) +# indices = [] +# t1 = time.time() +# print(len(left_charges)) +# for n in range(len(left_charges)): +# c = left_charges[n] +# right_charge = (target_charge + (c * left_flow * (-1))) * right_flow +# #print(' fusing charges: {}s'.format(time.time() - t1)) +# if c not in common_charges: +# continue +# #t1 = time.time() +# indices.append(n * len_right_charges + +# right_locations[right_charge.get_item(0)]) +# #print(' appending indices: {}s'.format(time.time() - t1)) +# print('finding all indices: {}s'.format(time.time() - t1)) +# return np.concatenate(indices) def find_sparse_positions( @@ -707,7 +807,7 @@ def random(cls, indices: List[Index], flows = [i.flow for i in indices] num_non_zero_elements = compute_num_nonzero(charges, flows) - dtype = dtype if dtype is not None else self.np.float64 + dtype = dtype if dtype is not None else np.float64 def init_random(): if ((np.dtype(dtype) is np.dtype(np.complex128)) or @@ 
-815,12 +915,15 @@ def transpose( #find the index-positions of the elements in the fusion #of `left_charges` and `right_charges` that have `0` #total charge (those are the only non-zero elements). + t1 = time.time() linear_positions = find_dense_positions( left_charges, 1, right_charges, 1, target_charge=flat_charges[0].zero_charge) + print('finding dense positions in the original tensor: {}s'.format( + time.time() - t1)) flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] flat_tr_strides = [flat_strides[n] for n in flat_order] @@ -828,8 +931,12 @@ def transpose( tr_left_charges, tr_right_charges, partition = _find_best_partition( flat_tr_charges, flat_tr_flows) + t1 = time.time() tr_linear_positions = find_dense_positions( tr_left_charges, 1, tr_right_charges, 1, tr_left_charges.zero_charge) + print('finding dense positions in the transposed tensor: {}s'.format( + time.time() - t1)) + stride_arrays = [ np.arange(flat_tr_dims[n]) * flat_tr_strides[n] for n in range(len(flat_tr_dims)) @@ -838,8 +945,10 @@ def transpose( dense_permutation = _find_values_in_fused( tr_linear_positions, fuse_ndarrays(stride_arrays[0:partition]), fuse_ndarrays(stride_arrays[partition::])) - assert np.all(np.sort(dense_permutation) == linear_positions) + t1 = time.time() permutation = np.searchsorted(linear_positions, dense_permutation) + print( + 'finding the permutation with argsort: {}s'.format(time.time() - t1)) self.indices = [self.indices[n] for n in order] self.data = self.data[permutation] @@ -1123,7 +1232,7 @@ def tensordot(tensor1: BlockSparseTensor, free_axes2 = sorted(set(np.arange(len(tensor2.shape))) - set(axes2)) new_order1 = free_axes1 + list(axes1) new_order2 = list(axes2) + free_axes2 - + t1 = time.time() tr1 = transpose( tensor=tensor1, order=new_order1, @@ -1132,13 +1241,14 @@ def tensordot(tensor1: BlockSparseTensor, if return_permutation: permutation1 = tr1[1] tr1 = tr1[1] + print('transposing tensor1: 
{}s'.format(time.time() - t1)) trshape1 = tr1.dense_shape Dl1 = np.prod([trshape1[n] for n in range(len(free_axes1))]) Dr1 = np.prod([trshape1[n] for n in range(len(free_axes1), len(trshape1))]) tmp1 = reshape(tr1, (Dl1, Dr1)) - + t1 = time.time() tr2 = transpose( tensor=tensor2, order=new_order2, @@ -1147,6 +1257,7 @@ def tensordot(tensor1: BlockSparseTensor, if return_permutation: permutation2 = tr2[1] tr2 = tr2[1] + print('transposing tensor2: {}s'.format(time.time() - t1)) trshape2 = tr2.dense_shape Dl2 = np.prod([trshape2[n] for n in range(len(axes2))]) Dr2 = np.prod([trshape2[n] for n in range(len(axes2), len(trshape2))]) @@ -1154,11 +1265,13 @@ def tensordot(tensor1: BlockSparseTensor, tmp2 = reshape(tr2, (Dl2, Dr2)) #avoid data-copying here by setting `return_data=False` + t1 = time.time() column_charges1, data1, start_positions, row_locations, _ = tmp1._get_diagonal_blocks( return_data=False) row_charges2, data2, _, _, column_degeneracies = tmp2._get_diagonal_blocks( return_data=False) + print('finding diagonal blocks: {}s'.format(time.time() - t1)) #get common charges between rows and columns tmp_charges, cnts = column_charges1.concatenate(row_charges2).unique( return_counts=True) From a8de6ac1ba42af75b270c110795c51092af74343 Mon Sep 17 00:00:00 2001 From: Ori Alberton Date: Sun, 12 Jan 2020 02:42:57 +0100 Subject: [PATCH 152/212] compute reduced svd in `backends.numpy.decompositions.svd_decompostion` (#420) * compute reduced svd when calling np.linalg.svd from numpy backend * test SVD when max_singular_values>bond_dimension (numpy backend) --- tensornetwork/backends/numpy/decompositions.py | 2 +- tensornetwork/backends/numpy/decompositions_test.py | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/tensornetwork/backends/numpy/decompositions.py b/tensornetwork/backends/numpy/decompositions.py index 862689f61..0e72f2ac5 100644 --- a/tensornetwork/backends/numpy/decompositions.py +++ 
b/tensornetwork/backends/numpy/decompositions.py @@ -33,7 +33,7 @@ def svd_decomposition( right_dims = tensor.shape[split_axis:] tensor = np.reshape(tensor, [numpy.prod(left_dims), numpy.prod(right_dims)]) - u, s, vh = np.linalg.svd(tensor) + u, s, vh = np.linalg.svd(tensor, full_matrices=False) if max_singular_values is None: max_singular_values = np.size(s) diff --git a/tensornetwork/backends/numpy/decompositions_test.py b/tensornetwork/backends/numpy/decompositions_test.py index c16517391..cea0b712e 100644 --- a/tensornetwork/backends/numpy/decompositions_test.py +++ b/tensornetwork/backends/numpy/decompositions_test.py @@ -63,6 +63,18 @@ def test_max_singular_values(self): self.assertEqual(vh.shape, (7, 10)) self.assertAllClose(trun, np.arange(2, -1, -1)) + def test_max_singular_values_larger_than_bond_dimension(self): + random_matrix = np.random.rand(10, 6) + unitary1, _, unitary2 = np.linalg.svd(random_matrix, full_matrices=False) + singular_values = np.array(range(6)) + val = unitary1.dot(np.diag(singular_values).dot(unitary2.T)) + u, s, vh, _ = decompositions.svd_decomposition( + np, val, 1, max_singular_values=30) + self.assertEqual(u.shape, (10, 6)) + self.assertEqual(s.shape, (6,)) + self.assertEqual(vh.shape, (6, 6)) + + def test_max_truncation_error(self): random_matrix = np.random.rand(10, 10) unitary1, _, unitary2 = np.linalg.svd(random_matrix) From fd80aac4e14467aaf6db86ebab58f316343a7ff3 Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 11 Jan 2020 21:34:10 -0500 Subject: [PATCH 153/212] added intersect to BaseCharge --- tensornetwork/block_tensor/charge.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 690bc7441..3f9f81b1d 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -279,7 +279,7 @@ def isin(self, targets: Union[int, Iterable, "BaseCharge"]) -> np.ndarray: if not 
np.all(self.shifts == targets.shifts): raise ValueError( "Cannot compare charges with different shifts {} and {}".format( - self.shifts, tpargets.shifts)) + self.shifts, targets.shifts)) targets = targets.charges targets = np.asarray(targets) @@ -377,6 +377,18 @@ def zero_charge(self): def __iter__(self): return iter(self.charges) + def intersect(self, other: "BaseCharge") -> "BaseCharge": + if not np.all(self.shifts == other.shifts): + raise ValueError( + "Cannot intersect charges with different shifts {} and {}".format( + self.shifts, other.shifts)) + + obj = self.__new__(type(self)) + obj.__init__( + charges=[np.intersect1d(self.charges, other.charges)], + shifts=self.shifts) + return obj + class U1Charge(BaseCharge): """ @@ -589,7 +601,7 @@ def __mul__(self, number: Union[bool, int]) -> "Z2Charge": "can only multiply by `True`, `False`, `1` or `0`, found {}".format( number)) #Z2 is self-dual - return U1Charge(charges=[self.charges], shifts=self.shifts) + return Z2Charge(charges=[self.charges], shifts=self.shifts) def __rmul__(self, number: Union[bool, int]) -> "Z2Charge": if number not in (True, False, 0, 1, -1): From a0eff129dc2d3fcf0398ad15641dcebf51bdebf1 Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 13 Jan 2020 08:47:18 -0500 Subject: [PATCH 154/212] broken commmit (Apple sucks big time) --- tensornetwork/block_tensor/block_tensor.py | 449 ++++++++++++++---- .../block_tensor/block_tensor_test.py | 23 + tensornetwork/block_tensor/charge.py | 7 + 3 files changed, 390 insertions(+), 89 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 371527a79..fa2e576b5 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -175,6 +175,49 @@ def compute_fused_charge_degeneracies( return accumulated_charges, accumulated_degeneracies +def compute_unique_fused_charges( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[bool, int]] 
+) -> Tuple[Union[BaseCharge, ChargeCollection], np.ndarray]: + """ + For a list of charges, compute all possible fused charges resulting + from fusing `charges`. + Args: + charges: List of np.ndarray of int, one for each leg of the + underlying tensor. Each np.ndarray `charges[leg]` + is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + flows: A list of integers, one for each leg, + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + Returns: + Union[BaseCharge, ChargeCollection]: The unique fused charges. + np.ndarray of integers: The degeneracies of each unqiue fused charge. + """ + if len(charges) == 1: + return (charges[0] * flows[0]).unique() + + # get unique charges and their degeneracies on the first leg. + # We are fusing from "left" to "right". + accumulated_charges = (charges[0] * flows[0]).unique() + for n in range(1, len(charges)): + #list of unique charges and list of their degeneracies + #on the next unfused leg of the tensor + leg_charges = charges[n].unique() + #fuse the unique charges + #Note: entries in `fused_charges` are not unique anymore. + #flow1 = 1 because the flow of leg 0 has already been + #mulitplied above + fused_charges = accumulated_charges + leg_charges * flows[n] + #compute the degeneracies of `fused_charges` charges + #`fused_degeneracies` is a list of degeneracies such that + # `fused_degeneracies[n]` is the degeneracy of of + # charge `c = fused_charges[n]`. + accumulated_charges = fused_charges.unique() + return accumulated_charges + + def compute_num_nonzero(charges: List[np.ndarray], flows: List[Union[bool, int]]) -> int: """ @@ -216,8 +259,8 @@ def _find_diagonal_sparse_blocks( all diagonal blocks and return them in a dict. `row_charges` and `column_charges` are lists of np.ndarray. The tensor is viewed as a matrix with rows given by fusing `row_charges` and - columns given by fusing `column_charges`. 
Note that `column_charges` - are never explicitly fused (`row_charges` are). + columns given by fusing `column_charges`. + Args: data: An np.ndarray of the data. The number of elements in `data` has to match the number of non-zero elements defined by `charges` @@ -245,9 +288,14 @@ def _find_diagonal_sparse_blocks( quantum numbers `(q,q). `shape` is the shape of the corresponding array. Returns: + return common_charges, blocks, start_positions, row_locations, column_degeneracies List[Union[BaseCharge, ChargeCollection]]: A list of unique charges, one per block. List[np.ndarray]: A list containing the blocks. - + np.ndarray: The start position within the sparse data array of each row with non-zero + elements. + Dict: Dict mapping row-charges of each block to an np.ndarray of sparse positions + along the rows + Dict: Dict mapping row-charges of each block to its column-degeneracy """ flows = row_flows.copy() flows.extend(column_flows) @@ -263,6 +311,10 @@ def _find_diagonal_sparse_blocks( #`compute_fused_charge_degeneracies` multiplies flows into the column_charges unique_column_charges, column_dims = compute_fused_charge_degeneracies( column_charges, column_flows) + unique_row_charges = compute_unique_fused_charges(row_charges, row_flows) + #get the charges common to rows and columns (only those matter) + common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) + #convenience container for storing the degeneracies of each #column charge #column_degeneracies = dict(zip(unique_column_charges, column_dims)) @@ -270,16 +322,12 @@ def _find_diagonal_sparse_blocks( if len(row_charges) > 1: left_row_charges, right_row_charges, _ = _find_best_partition( row_charges, row_flows) - unique_left = left_row_charges.unique() - unique_right = right_row_charges.unique() - unique_row_charges = (unique_left + unique_right).unique() + # unique_left = left_row_charges.unique() + # unique_right = right_row_charges.unique() + # unique_row_charges = (unique_left + 
unique_right).unique() #get the charges common to rows and columns (only those matter) - concatenated = unique_row_charges.concatenate(unique_column_charges * (-1)) - tmp_unique, counts = concatenated.unique(return_counts=True) - common_charges = tmp_unique[ - counts == 2] #common_charges is a BaseCharge or ChargeCollection - + #common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) row_locations = find_sparse_positions( left_charges=left_row_charges, left_flow=1, @@ -291,14 +339,10 @@ def _find_diagonal_sparse_blocks( fused_row_charges = fuse_charges(row_charges, row_flows) #get the unique row-charges - unique_row_charges, row_dims = fused_row_charges.unique(return_counts=True) + #unique_row_charges = fused_row_charges.unique() #get the charges common to rows and columns (only those matter) #get the charges common to rows and columns (only those matter) - concatenated = unique_row_charges.concatenate(unique_column_charges * (-1)) - tmp_unique, counts = concatenated.unique(return_counts=True) - common_charges = tmp_unique[ - counts == 2] #common_charges is a BaseCharge or ChargeCollection - + #common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) relevant_fused_row_charges = fused_row_charges[fused_row_charges.isin( common_charges)] row_locations = {} @@ -312,7 +356,7 @@ def _find_diagonal_sparse_blocks( np.sum([len(v) for v in row_locations.values()]), dtype=np.int64) #for each charge `c` in `common_charges` we generate a boolean mask #for indexing the positions where `relevant_column_charges` has a value of `c`. 
- masks = {} + for c in common_charges: degeneracy_vector[row_locations[c]] = column_degeneracies[c] @@ -348,93 +392,227 @@ def _find_diagonal_sparse_blocks( return common_charges, blocks, start_positions, row_locations, column_degeneracies -def find_dense_positions( - left_charges: Union[BaseCharge, ChargeCollection], left_flow: int, - right_charges: Union[BaseCharge, ChargeCollection], right_flow: int, - target_charge: Union[BaseCharge, ChargeCollection]) -> np.ndarray: +def _find_diagonal_dense_blocks( + row_charges: List[Union[BaseCharge, ChargeCollection]], + column_charges: List[Union[BaseCharge, ChargeCollection]], + row_flows: List[Union[bool, int]], + column_flows: List[Union[bool, int]], + strides: Optional[np.ndarray] = None, +) -> Tuple[Union[BaseCharge, ChargeCollection], List, np.ndarray, Dict, Dict]: + """ + Given the meta data and underlying data of a symmetric matrix, compute the + dense positions of all diagonal blocks and return them in a dict. + `row_charges` and `column_charges` are lists of np.ndarray. The tensor + is viewed as a matrix with rows given by fusing `row_charges` and + columns given by fusing `column_charges`. + + Args: + data: An np.ndarray of the data. The number of elements in `data` + has to match the number of non-zero elements defined by `charges` + and `flows` + row_charges: List of np.ndarray, one for each leg of the row-indices. + Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + column_charges: List of np.ndarray, one for each leg of the column-indices. + Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + row_flows: A list of integers, one for each entry in `row_charges`. + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + column_flows: A list of integers, one for each entry in `column_charges`. 
+ with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + strides: An optional np.ndarray denoting the strides of the tensors. + If None, natural strides ordering is assumed. + + Returns: + List[Union[BaseCharge, ChargeCollection]]: A list of unique charges, one per block. + List[np.ndarray]: A list containing the blocks. + + """ + flows = row_flows.copy() + flows.extend(column_flows) + _check_flows(flows) + if len(flows) != (len(row_charges) + len(column_charges)): + raise ValueError( + "`len(flows)` is different from `len(row_charges) + len(column_charges)`" + ) + if strides is None: + strides = [len(c) for c in row_charges] + [len(c) for c in column_charges] + if len(strides) != len(row_charges) + len(column_charges): + raise ValueError("len(strides) = {} does not match tensor rank = {}".format( + len(strides), + len(row_charges) + len(column_charges))) + #get the unique column-charges + #we only care about their degeneracies, not their order; that's much faster + #to compute since we don't have to fuse all charges explicitly + #`compute_fused_charge_degeneracies` multiplies flows into the column_charges + unique_column_charges = compute_unique_fused_charges(column_charges, + column_flows) + unique_row_charges = compute_unique_fused_charges(row_charges, row_flows) + #get the charges common to rows and columns (only those matter) + common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) + + if len(row_charges) > 1: + left_row_charges, right_row_charges, _ = _find_best_partition( + row_charges, row_flows) + + row_locations = { + common_charges.get_item(n): find_dense_positions( + left_charges=[left_row_charges], + left_flow=[1], + right_charges=[right_row_charges], + right_flow=[1], + target_charge=common_charges[n]) + for n in range(len(common_charges)) + } + + elif len(row_charges) == 1: + fused_row_charges = fuse_charges(row_charges, row_flows) + row_locations = { + 
c: np.nonzero(fused_row_charges == c)[0] for c in common_charges + } + else: + raise ValueError('Found an empty sequence for `row_charges`') + + if len(column_charges) > 1: + left_column_charges, right_column_charges, _ = _find_best_partition( + column_charges, column_flows) + column_locations = { + common_charges.get_item(n): find_dense_positions( + left_charges=[left_column_charges], + left_flow=[1], + right_charges=[right_column_charges], + right_flow=[1], + target_charge=common_charges[n] * (-1)) + for n in range(len(common_charges)) + } + + elif len(column_charges) == 1: + fused_column_charges = fuse_charges(column_charges, column_flows) + column_locations = { + common_charges.get_item(n): + np.nonzero(fused_column_charges == common_charges[n] * (-1))[0] + for n in range(len(common_charges)) + } + else: + raise ValueError('Found an empty sequence for `column_charges`') + + column_dim = np.prod(strides[len(row_charges):]) + blocks = [] + + for c in common_charges: + #numpy broadcasting is substantially faster than kron! + rlocs = np.expand_dims(column_dim * row_locations[c], 1) + clocs = np.expand_dims(column_locations[c], 0) + inds = np.reshape(rlocs + clocs, rlocs.shape[0] * clocs.shape[1]) + blocks.append([inds, (rlocs.shape[0], clocs.shape[1])]) + return common_charges, blocks + + +def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[int, bool]], + target_charge: Union[BaseCharge, ChargeCollection], + order: Optional[np.ndarray] = None) -> np.ndarray: """ Find the dense locations of elements (i.e. the index-values within the DENSE tensor) - in the vector `fused_charges` (resulting from fusing np.ndarrays - `left_charges` and `right_charges`) that have a value of `target_charge`. + in the vector of `fused_charges` resulting from fusing all elements of `charges` + that have a value of `target_charge`. 
For example, given ``` - left_charges = [-2,0,1,0,0] - right_charges = [-1,0,2,1] + charges = [[-2,0,1,0,0],[-1,0,2,1]] target_charge = 0 - fused_charges = fuse_charges([left_charges, right_charges],[1,1]) + fused_charges = fuse_charges(charges,[1,1]) print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] ``` we want to find the all different blocks that fuse to `target_charge=0`, i.e. where `fused_charges==0`, together with their corresponding index-values of the data in the dense array. - `find_dense_blocks` returns a dict mapping tuples `(left_charge, right_charge)` - to an array of integers. + `find_dense_blocks` returns an np.ndarray containing the indices-positions of + these elements. For the above example, we get: - * for `left_charge` = -2 and `right_charge` = 2 we get an array [2]. Thus, `fused_charges[2]` + * for `charge[0]` = -2 and `charge[1]` = 2 we get an array [2]. Thus, `fused_charges[2]` was obtained from fusing -2 and 2. - * for `left_charge` = 0 and `right_charge` = 0 we get an array [5, 13, 17]. Thus, + * for `charge[0]` = 0 and `charge[1]` = 0 we get an array [5, 13, 17]. Thus, `fused_charges[5,13,17]` were obtained from fusing 0 and 0. - * for `left_charge` = 1 and `right_charge` = -1 we get an array [8]. Thus, `fused_charges[8]` + * for `charge[0]` = 1 and `charge[1]` = -1 we get an array [8]. Thus, `fused_charges[8]` was obtained from fusing 1 and -1. Args: - left_charges: An np.ndarray of integer charges. - left_flow: The flow direction of the left charges. - right_charges: An np.ndarray of integer charges. - right_flow: The flow direction of the right charges. + charges: A list of BaseCharge or ChargeCollection. + flows: The flow directions of the `charges`. target_charge: The target charge. Returns: np.ndarray: The indices of the elements fusing to `target_charge`. 
""" - _check_flows([left_flow, right_flow]) + if order is not None: + if len(order) != len(charges): + raise ValueError("len(order) ={} != len(charges) = {}".format( + len(order), len(charges))) + + if not np.all(np.sort(order) == np.arange(len(order))): + raise ValueError( + "order = {} is not a valid permutation of {}".format(order), + np.arange(len(order))) + + _check_flows(flows) + if order is not None: + left_charges, right_charges, partition = _find_best_partition( + [charges[n] for n in order], [flows[n] for n in order]) + + dims = [len(c) for c in charges] + strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) + stride_arrays = [np.arange(dims[n]) * strides[n] for n in order] + permuted_row_inds = fuse_ndarrays(stride_arrays[0:partition]) + permuted_column_inds = fuse_ndarrays(stride_arrays[partition:]) + #return fuse_ndarrays([permuted_row_inds, permuted_column_inds]) + else: + left_charges, right_charges, partition = _find_best_partition( + charges, flows) - t1 = time.time() unique_left, left_degeneracies = left_charges.unique(return_counts=True) unique_right, right_degeneracies = right_charges.unique(return_counts=True) - t1 = time.time() - tmp_left_charges = (target_charge + - (unique_right * right_flow * (-1))) * left_flow - concatenated = unique_left.concatenate(tmp_left_charges) - tmp_unique, counts = concatenated.unique(return_counts=True) - relevant_left_charges = tmp_unique[ - counts == 2] #common_charges is a BaseCharge or ChargeCollection - + tmp_left_charges = (target_charge + (unique_right * (-1))) + relevant_left_charges = unique_left.intersect(tmp_left_charges) right_locations = {} - t1 = time.time() len_right_charges = len(right_charges) dense_inds = [] left_inds = [] index_table = [] - start = 0 + for n in range(len(relevant_left_charges)): c = relevant_left_charges[n] left_ind = np.nonzero(left_charges == c)[0] index_table.append( np.stack([ - left_ind, np.arange(len(left_ind)), np.full(len(left_ind), n, dtype=np.int64) ], 
axis=1)) - start += len(left_ind) left_inds.append(left_ind) + right_charge = (target_charge + (c * (-1))) + if order is None: + dim_array = np.expand_dims(len_right_charges * left_ind, 1) + right_inds = np.nonzero(right_charges == right_charge)[0] + mat = np.tile(right_inds, (len(dim_array), 1)) - dim_array = np.expand_dims(len_right_charges * left_ind, 1) - - right_charge = (target_charge + (c * left_flow * (-1))) * right_flow - - right_inds = np.nonzero(right_charges == right_charge)[0] - mat = np.tile(right_inds, (len(dim_array), 1)) - + else: + dim_array = np.expand_dims(permuted_row_inds[left_ind], 1) + right_inds = permuted_column_inds[np.nonzero( + right_charges == right_charge)[0]] + mat = np.tile(right_inds, (len(dim_array), 1)) dense_inds.append(mat + dim_array) - it = np.concatenate(index_table) - - ind_sort = np.argsort(it[:, 0]) - table = it[ind_sort, :] - - return np.concatenate( - [dense_inds[table[n, 2]][table[n, 1], :] for n in range(table.shape[0])]) + if len(index_table) > 0: + it = np.concatenate(index_table) + ind_sort = np.argsort(np.concatenate(left_inds)) + table = it[ind_sort, :] + return np.concatenate([ + dense_inds[table[n, 1]][table[n, 0], :] for n in range(table.shape[0]) + ]) + return np.array([]) # def find_dense_positions( @@ -669,8 +847,8 @@ def compute_dense_to_sparse_mapping( dims = np.asarray([len(c) for c in charges]) #note: left_charges and right_charges have been fused from RIGHT to LEFT left_charges, right_charges, partition = _find_best_partition(charges, flows) - nz_indices = find_dense_positions( - left_charges, 1, right_charges, 1, target_charge=target_charge) + nz_indices = find_dense_positions([left_charges], [1], [right_charges], [1], + target_charge=target_charge) if len(nz_indices) == 0: raise ValueError( @@ -912,18 +1090,7 @@ def transpose( #find the best partition into left and right charges left_charges, right_charges, _ = _find_best_partition( flat_charges, flat_flows) - #find the index-positions of the 
elements in the fusion - #of `left_charges` and `right_charges` that have `0` - #total charge (those are the only non-zero elements). - t1 = time.time() - linear_positions = find_dense_positions( - left_charges, - 1, - right_charges, - 1, - target_charge=flat_charges[0].zero_charge) - print('finding dense positions in the original tensor: {}s'.format( - time.time() - t1)) + flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] flat_tr_strides = [flat_strides[n] for n in flat_order] @@ -932,8 +1099,9 @@ def transpose( tr_left_charges, tr_right_charges, partition = _find_best_partition( flat_tr_charges, flat_tr_flows) t1 = time.time() - tr_linear_positions = find_dense_positions( - tr_left_charges, 1, tr_right_charges, 1, tr_left_charges.zero_charge) + tr_linear_positions = find_dense_positions([tr_left_charges], [1], + [tr_right_charges], [1], + tr_left_charges.zero_charge) print('finding dense positions in the transposed tensor: {}s'.format( time.time() - t1)) @@ -946,6 +1114,7 @@ def transpose( tr_linear_positions, fuse_ndarrays(stride_arrays[0:partition]), fuse_ndarrays(stride_arrays[partition::])) t1 = time.time() + print(len(linear_positions), len(dense_permutation)) permutation = np.searchsorted(linear_positions, dense_permutation) print( 'finding the permutation with argsort: {}s'.format(time.time() - t1)) @@ -955,6 +1124,105 @@ def transpose( if return_permutation: return permutation + # def transpose( + # self, + # order: Union[List[int], np.ndarray], + # permutation: Optional[np.ndarray] = None, + # return_permutation: Optional[bool] = False) -> "BlockSparseTensor": + # """ + # Transpose the tensor into the new order `order`. This routine currently shuffles + # data. + # Args: + # order: The new order of indices. + # permutation: An np.ndarray of int for reshuffling the data, + # typically the output of a prior call to `transpose`. Passing `permutation` + # can greatly speed up the transposition. 
+ # return_permutation: If `True`, return the the permutation data. + # Returns: + # BlockSparseTensor: The transposed tensor. + # """ + # if (permutation is not None) and (len(permutation) != len(self.data)): + # raise ValueError("len(permutation) != len(tensor.data).") + + # if len(order) != self.rank: + # raise ValueError( + # "`len(order)={}` is different form `self.rank={}`".format( + # len(order), self.rank)) + + # #check for trivial permutation + # if np.all(order == np.arange(len(order))): + # if return_permutation: + # return np.arange(len(self.data)) + # return + + # #we use elementary indices here because it is + # #more efficient to get the fused charges using + # #the best partition + # if permutation is None: + # elementary_indices = {} + # flat_elementary_indices = [] + # for n in range(len(self.indices)): + # elementary_indices[n] = self.indices[n].get_elementary_indices() + # flat_elementary_indices.extend(elementary_indices[n]) + # flat_index_list = np.arange(len(flat_elementary_indices)) + # cum_num_legs = np.append( + # 0, + # np.cumsum( + # [len(elementary_indices[n]) for n in range(len(self.indices))])) + + # flat_charges = [i.charges for i in flat_elementary_indices] + # flat_flows = [i.flow for i in flat_elementary_indices] + # flat_dims = [len(c) for c in flat_charges] + # flat_strides = np.flip(np.append(1, np.cumprod(np.flip(flat_dims[1::])))) + # flat_order = np.concatenate( + # [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) + # #find the best partition into left and right charges + # left_charges, right_charges, _ = _find_best_partition( + # flat_charges, flat_flows) + # #find the index-positions of the elements in the fusion + # #of `left_charges` and `right_charges` that have `0` + # #total charge (those are the only non-zero elements). 
+ # t1 = time.time() + # linear_positions = find_dense_positions( + # left_charges, + # 1, + # right_charges, + # 1, + # target_charge=flat_charges[0].zero_charge) + # print('finding dense positions in the original tensor: {}s'.format( + # time.time() - t1)) + # flat_tr_charges = [flat_charges[n] for n in flat_order] + # flat_tr_flows = [flat_flows[n] for n in flat_order] + # flat_tr_strides = [flat_strides[n] for n in flat_order] + # flat_tr_dims = [flat_dims[n] for n in flat_order] + + # tr_left_charges, tr_right_charges, partition = _find_best_partition( + # flat_tr_charges, flat_tr_flows) + # t1 = time.time() + # tr_linear_positions = find_dense_positions( + # tr_left_charges, 1, tr_right_charges, 1, tr_left_charges.zero_charge) + # print('finding dense positions in the transposed tensor: {}s'.format( + # time.time() - t1)) + + # stride_arrays = [ + # np.arange(flat_tr_dims[n]) * flat_tr_strides[n] + # for n in range(len(flat_tr_dims)) + # ] + + # dense_permutation = _find_values_in_fused( + # tr_linear_positions, fuse_ndarrays(stride_arrays[0:partition]), + # fuse_ndarrays(stride_arrays[partition::])) + # t1 = time.time() + # print(len(linear_positions), len(dense_permutation)) + # permutation = np.searchsorted(linear_positions, dense_permutation) + # print( + # 'finding the permutation with argsort: {}s'.format(time.time() - t1)) + + # self.indices = [self.indices[n] for n in order] + # self.data = self.data[permutation] + # if return_permutation: + # return permutation + def reset_shape(self) -> None: """ Bring the tensor back into its elementary shape. @@ -1056,7 +1324,7 @@ def raise_error(): i2, i1 = self.indices.pop(), self.indices.pop() self.indices.append(fuse_index_pair(i1, i2)) - def _get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: + def _get_diagonal_blocks(self, return_data: Optional[bool] = False) -> Dict: """ Obtain the diagonal blocks of a symmetric matrix. BlockSparseTensor has to be a matrix. 
@@ -1240,7 +1508,7 @@ def tensordot(tensor1: BlockSparseTensor, return_permutation=return_permutation) if return_permutation: permutation1 = tr1[1] - tr1 = tr1[1] + tr1 = tr1[0] print('transposing tensor1: {}s'.format(time.time() - t1)) trshape1 = tr1.dense_shape @@ -1256,7 +1524,7 @@ def tensordot(tensor1: BlockSparseTensor, return_permutation=return_permutation) if return_permutation: permutation2 = tr2[1] - tr2 = tr2[1] + tr2 = tr2[0] print('transposing tensor2: {}s'.format(time.time() - t1)) trshape2 = tr2.dense_shape Dl2 = np.prod([trshape2[n] for n in range(len(axes2))]) @@ -1273,10 +1541,10 @@ def tensordot(tensor1: BlockSparseTensor, print('finding diagonal blocks: {}s'.format(time.time() - t1)) #get common charges between rows and columns - tmp_charges, cnts = column_charges1.concatenate(row_charges2).unique( - return_counts=True) - common_charges = tmp_charges[cnts == 2] - + # tmp_charges, cnts = column_charges1.concatenate(row_charges2).unique( + # return_counts=True) + # common_charges = tmp_charges[cnts == 2] + common_charges = column_charges1.intersect(row_charges2) #get the flattened indices for the output tensor indices = [] indices.extend(tmp1.indices[0].get_elementary_indices()) @@ -1293,7 +1561,7 @@ def tensordot(tensor1: BlockSparseTensor, [i.flow for i in indices]) data = np.zeros( num_nonzero_elements, dtype=np.result_type(tensor1.dtype, tensor2.dtype)) - + ts = [] for c in common_charges: rlocs = row_locations[c] cdegs = column_degeneracies[c] @@ -1302,18 +1570,21 @@ def tensordot(tensor1: BlockSparseTensor, new_locations = np.reshape(a + b, len(rlocs) * cdegs) i1 = np.nonzero(column_charges1 == c)[0][0] i2 = np.nonzero(row_charges2 == c)[0][0] + try: #place the result of the block-matrix multiplication #into the new data-vector + t1 = time.time() data[new_locations] = np.matmul( np.reshape(tensor1.data[data1[i1][0]], data1[i1][1]), np.reshape(tensor2.data[data2[i2][0]], data2[i2][1])).flat + ts.append(time.time() - t1) except ValueError: 
raise ValueError("for quantum number {}, shapes {} and {} " "of left and right blocks have " "incompatible shapes".format(c, data1[i1].shape, data2[i2].shape)) - + print('totalnumpy', np.sum(ts)) out = BlockSparseTensor(data=data, indices=indices) resulting_shape = [trshape1[n] for n in range(len(free_axes1)) ] + [trshape2[n] for n in range(len(axes2), len(trshape2))] diff --git a/tensornetwork/block_tensor/block_tensor_test.py b/tensornetwork/block_tensor/block_tensor_test.py index 805ed2256..338eeab8a 100644 --- a/tensornetwork/block_tensor/block_tensor_test.py +++ b/tensornetwork/block_tensor/block_tensor_test.py @@ -189,6 +189,29 @@ def test_block_sparse_init(dtype): assert len(A.data) == num_elements +@pytest.mark.parametrize("dtype", np_dtypes) +def test_get_diagonal_blocks(dtype): + D = 10 #bond dimension + B = 10 #number of blocks + rank = 4 + flows = np.asarray([1 for _ in range(rank)]) + flows[-2::] = -1 + charges = [ + U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16)) + for _ in range(rank) + ] + indices = [ + Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) + for n in range(rank) + ] + num_elements = compute_num_nonzero([i.charges for i in indices], + [i.flow for i in indices]) + A = BlockSparseTensor.random(indices=indices, dtype=dtype) + A.reshape((100, 100)) + _, blocks, _, _, _ = A._get_diagonal_blocks(return_data=False) + assert num_elements == np.sum([len(v[0]) for v in blocks]) + + def test_find_dense_positions(): left_charges = np.asarray([-2, 0, 1, 0, 0]).astype(np.int16) right_charges = np.asarray([-1, 0, 2, 1]).astype(np.int16) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 3f9f81b1d..1b542b813 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -950,6 +950,13 @@ def zero_charge(self): obj.__init__(charges=[c.zero_charge for c in self.charges]) return obj + def intersect(self, other: "ChargeCollection") -> 
"ChargeCollection": + self_unique = self.unique() + other_unique = other.unique() + concatenated = self_unique.concatenate(other_unique) + tmp_unique, counts = concatenated.unique(return_counts=True) + return tmp_unique[counts == 2] + def fuse_charges( charges: List[Union[BaseCharge, ChargeCollection]], From ca67919c662e46637951487f8effcac0706e5a43 Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 13 Jan 2020 15:53:39 -0500 Subject: [PATCH 155/212] broken commit --- tensornetwork/block_tensor/block_tensor.py | 398 ++++++++++++------ .../block_tensor/block_tensor_test.py | 180 +++++--- 2 files changed, 386 insertions(+), 192 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index fa2e576b5..520be05c3 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -30,6 +30,15 @@ Tensor = Any +def _get_strides(dims): + return np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) + + +def _get_stride_arrays(dims): + strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) + return [np.arange(dims[n]) * strides[n] for n in range(len(dims))] + + def _find_values_in_fused(indices: np.ndarray, left: np.ndarray, right: np.ndarray) -> np.ndarray: """ @@ -81,10 +90,10 @@ def _check_flows(flows: List[int]) -> None: "flows = {} contains values different from 1 and -1".format(flows)) -def _find_best_partition( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[int]) -> Tuple[Union[BaseCharge, ChargeCollection], - Union[BaseCharge, ChargeCollection], int]: +def _find_best_partition(charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[int] + ) -> Tuple[Union[BaseCharge, ChargeCollection], + Union[BaseCharge, ChargeCollection], int]: """ compute the best partition for fusing `charges`, i.e. 
the integer `p` such that fusing `len(fuse_charges(charges[0:p],flows[0:p]))` is @@ -147,8 +156,9 @@ def compute_fused_charge_degeneracies( # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". - accumulated_charges, accumulated_degeneracies = ( - charges[0] * flows[0]).unique(return_counts=True) + accumulated_charges, accumulated_degeneracies = (charges[0] * + flows[0]).unique( + return_counts=True) for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor @@ -319,38 +329,25 @@ def _find_diagonal_sparse_blocks( #column charge #column_degeneracies = dict(zip(unique_column_charges, column_dims)) column_degeneracies = dict(zip(unique_column_charges * (-1), column_dims)) - if len(row_charges) > 1: - left_row_charges, right_row_charges, _ = _find_best_partition( - row_charges, row_flows) - # unique_left = left_row_charges.unique() - # unique_right = right_row_charges.unique() - # unique_row_charges = (unique_left + unique_right).unique() - - #get the charges common to rows and columns (only those matter) - #common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) - row_locations = find_sparse_positions( - left_charges=left_row_charges, - left_flow=1, - right_charges=right_row_charges, - right_flow=1, - target_charges=common_charges) - - elif len(row_charges) == 1: - fused_row_charges = fuse_charges(row_charges, row_flows) - - #get the unique row-charges - #unique_row_charges = fused_row_charges.unique() - #get the charges common to rows and columns (only those matter) - #get the charges common to rows and columns (only those matter) - #common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) - relevant_fused_row_charges = fused_row_charges[fused_row_charges.isin( - common_charges)] - row_locations = {} - for c in common_charges: - #c = common_charges.get_item(n) - row_locations[c] = 
np.nonzero(relevant_fused_row_charges == c)[0] - else: - raise ValueError('Found an empty sequence for `row_charges`') + row_locations = find_sparse_positions( + charges=row_charges, flows=row_flows, target_charges=common_charges) + + # elif len(row_charges) == 1: + # fused_row_charges = fuse_charges(row_charges, row_flows) + + # #get the unique row-charges + # #unique_row_charges = fused_row_charges.unique() + # #get the charges common to rows and columns (only those matter) + # #get the charges common to rows and columns (only those matter) + # #common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) + # relevant_fused_row_charges = fused_row_charges[fused_row_charges.isin( + # common_charges)] + # row_locations = {} + # for c in common_charges: + # #c = common_charges.get_item(n) + # row_locations[c] = np.nonzero(relevant_fused_row_charges == c)[0] + # else: + # raise ValueError('Found an empty sequence for `row_charges`') degeneracy_vector = np.empty( np.sum([len(v) for v in row_locations.values()]), dtype=np.int64) @@ -397,8 +394,9 @@ def _find_diagonal_dense_blocks( column_charges: List[Union[BaseCharge, ChargeCollection]], row_flows: List[Union[bool, int]], column_flows: List[Union[bool, int]], - strides: Optional[np.ndarray] = None, -) -> Tuple[Union[BaseCharge, ChargeCollection], List, np.ndarray, Dict, Dict]: + row_strides: Optional[np.ndarray] = None, + column_strides: Optional[np.ndarray] = None, +) -> Tuple[Union[BaseCharge, ChargeCollection], List[np.ndarray]]: """ Given the meta data and underlying data of a symmetric matrix, compute the dense positions of all diagonal blocks and return them in a dict. @@ -424,12 +422,17 @@ def _find_diagonal_dense_blocks( with values `1` or `-1`, denoting the flow direction of the charges on each leg. `1` is inflowing, `-1` is outflowing charge. - strides: An optional np.ndarray denoting the strides of the tensors. - If None, natural strides ordering is assumed. 
+ row_strides: An optional np.ndarray denoting the strides of `row_charges`. + If `None`, natural stride ordering is assumed. + column_strides: An optional np.ndarray denoting the strides of + `column_charges`. If `None`, natural stride ordering is assumed. Returns: List[Union[BaseCharge, ChargeCollection]]: A list of unique charges, one per block. - List[np.ndarray]: A list containing the blocks. + List[List]: A list containing the blocks information. + For each element `e` in the list `e[0]` is an `np.ndarray` of ints + denoting the dense positions of the non-zero elements and `e[1]` + is a tuple corresponding to the blocks' matrix shape """ flows = row_flows.copy() @@ -439,12 +442,6 @@ def _find_diagonal_dense_blocks( raise ValueError( "`len(flows)` is different from `len(row_charges) + len(column_charges)`" ) - if strides is None: - strides = [len(c) for c in row_charges] + [len(c) for c in column_charges] - if len(strides) != len(row_charges) + len(column_charges): - raise ValueError("len(strides) = {} does not match tensor rank = {}".format( - len(strides), - len(row_charges) + len(column_charges))) #get the unique column-charges #we only care about their degeneracies, not their order; that's much faster #to compute since we don't have to fuse all charges explicitly @@ -454,58 +451,63 @@ def _find_diagonal_dense_blocks( unique_row_charges = compute_unique_fused_charges(row_charges, row_flows) #get the charges common to rows and columns (only those matter) common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) + if ((row_strides is None) and + (column_strides is not None)) or ((row_strides is not None) and + (column_strides is None)): + raise ValueError("`row_strides` and `column_strides` " + "have to be passed simultaneously." 
+ " Found `row_strides={}` and " + "`column_strides={}`".format(row_strides, column_strides)) + + if row_strides is not None: + row_stride_arrays = [ + np.arange(len(row_charges[n])) * row_strides[n] + for n in range(len(row_charges)) + ] + column_stride_arrays = [ + np.arange(len(column_charges[n])) * column_strides[n] + for n in range(len(column_charges)) + ] - if len(row_charges) > 1: - left_row_charges, right_row_charges, _ = _find_best_partition( - row_charges, row_flows) - + if row_strides is not None: row_locations = { - common_charges.get_item(n): find_dense_positions( - left_charges=[left_row_charges], - left_flow=[1], - right_charges=[right_row_charges], - right_flow=[1], - target_charge=common_charges[n]) + common_charges.get_item(n): _find_transposed_dense_positions( + charges=row_charges, + flows=row_flows, + target_charge=common_charges[n], + stride_arrays=row_stride_arrays) for n in range(len(common_charges)) } - - elif len(row_charges) == 1: - fused_row_charges = fuse_charges(row_charges, row_flows) + else: + column_dim = np.prod([len(c) for c in column_charges]) row_locations = { - c: np.nonzero(fused_row_charges == c)[0] for c in common_charges + common_charges.get_item(n): column_dim * find_dense_positions( + charges=row_charges, + flows=row_flows, + target_charge=common_charges[n]) + for n in range(len(common_charges)) } - else: - raise ValueError('Found an empty sequence for `row_charges`') - - if len(column_charges) > 1: - left_column_charges, right_column_charges, _ = _find_best_partition( - column_charges, column_flows) + if column_strides is not None: column_locations = { - common_charges.get_item(n): find_dense_positions( - left_charges=[left_column_charges], - left_flow=[1], - right_charges=[right_column_charges], - right_flow=[1], - target_charge=common_charges[n] * (-1)) + common_charges.get_item(n): _find_transposed_dense_positions( + charges=column_charges, + flows=column_flows, + target_charge=common_charges[n] * (-1), + 
stride_arrays=column_stride_arrays) for n in range(len(common_charges)) } - - elif len(column_charges) == 1: - fused_column_charges = fuse_charges(column_charges, column_flows) + else: column_locations = { - common_charges.get_item(n): - np.nonzero(fused_column_charges == common_charges[n] * (-1))[0] + common_charges.get_item(n): find_dense_positions( + charges=column_charges, + flows=column_flows, + target_charge=common_charges[n] * (-1)) for n in range(len(common_charges)) } - else: - raise ValueError('Found an empty sequence for `column_charges`') - - column_dim = np.prod(strides[len(row_charges):]) blocks = [] - for c in common_charges: #numpy broadcasting is substantially faster than kron! - rlocs = np.expand_dims(column_dim * row_locations[c], 1) + rlocs = np.expand_dims(row_locations[c], 1) clocs = np.expand_dims(column_locations[c], 0) inds = np.reshape(rlocs + clocs, rlocs.shape[0] * clocs.shape[1]) blocks.append([inds, (rlocs.shape[0], clocs.shape[1])]) @@ -515,7 +517,8 @@ def _find_diagonal_dense_blocks( def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], flows: List[Union[int, bool]], target_charge: Union[BaseCharge, ChargeCollection], - order: Optional[np.ndarray] = None) -> np.ndarray: + order: Optional[np.ndarray] = None, + return_sorted: Optional[bool] = True) -> np.ndarray: """ Find the dense locations of elements (i.e. the index-values within the DENSE tensor) in the vector of `fused_charges` resulting from fusing all elements of `charges` @@ -543,8 +546,12 @@ def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], charges: A list of BaseCharge or ChargeCollection. flows: The flow directions of the `charges`. target_charge: The target charge. + order: An optional order for the elements in `charges`. + Useful for finding dense positions in a permuted tensor + with respect to the unpermuted order. Returns: - np.ndarray: The indices of the elements fusing to `target_charge`. 
+ np.ndarray: The index-positions within the dense data array + of the elements fusing to `target_charge`. """ if order is not None: if len(order) != len(charges): @@ -552,27 +559,151 @@ def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], len(order), len(charges))) if not np.all(np.sort(order) == np.arange(len(order))): - raise ValueError( - "order = {} is not a valid permutation of {}".format(order), - np.arange(len(order))) + raise ValueError("order = {} is not a valid permutation of {}".format( + order, np.arange(len(order)))) _check_flows(flows) + if len(charges) == 1: + fused_charges = charges[0] * flows[0] + return np.nonzero(fused_charges == target_charge)[0] + if order is not None: left_charges, right_charges, partition = _find_best_partition( [charges[n] for n in order], [flows[n] for n in order]) dims = [len(c) for c in charges] - strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) + strides = _get_strides( + dims) #np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) stride_arrays = [np.arange(dims[n]) * strides[n] for n in order] permuted_row_inds = fuse_ndarrays(stride_arrays[0:partition]) permuted_column_inds = fuse_ndarrays(stride_arrays[partition:]) - #return fuse_ndarrays([permuted_row_inds, permuted_column_inds]) else: left_charges, right_charges, partition = _find_best_partition( charges, flows) - unique_left, left_degeneracies = left_charges.unique(return_counts=True) - unique_right, right_degeneracies = right_charges.unique(return_counts=True) + unique_left = left_charges.unique() + unique_right = right_charges.unique() + + tmp_left_charges = (target_charge + (unique_right * (-1))) + relevant_left_charges = unique_left.intersect(tmp_left_charges) + right_locations = {} + len_right_charges = len(right_charges) + dense_inds = [] + left_inds = [] + index_table = [] + + for n in range(len(relevant_left_charges)): + c = relevant_left_charges[n] + left_ind = np.nonzero(left_charges == c)[0] + if return_sorted: + 
index_table.append( + np.stack([ + np.arange(len(left_ind)), + np.full(len(left_ind), n, dtype=np.int64) + ], + axis=1)) + left_inds.append(left_ind) + right_charge = (target_charge + (c * (-1))) + if order is None: + dim_array = np.expand_dims(len_right_charges * left_ind, 1) + right_inds = np.nonzero(right_charges == right_charge)[0] + mat = np.tile(right_inds, (len(dim_array), 1)) + + else: + dim_array = np.expand_dims(permuted_row_inds[left_ind], 1) + right_inds = permuted_column_inds[np.nonzero( + right_charges == right_charge)[0]] + mat = np.tile(right_inds, (len(dim_array), 1)) + if return_sorted: + dense_inds.append(mat + dim_array) + else: + dense_inds.append(np.reshape(mat + dim_array, np.prod(mat.shape))) + if return_sorted: + if len(index_table) > 0: + it = np.concatenate(index_table) + ind_sort = np.argsort(np.concatenate(left_inds)) + table = it[ind_sort, :] + return np.concatenate([ + dense_inds[table[n, 1]][table[n, 0], :] for n in range(table.shape[0]) + ]) + return np.array([]) + return np.concatenate(dense_inds) + + +def _find_transposed_dense_positions( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[int, bool]], + target_charge: Union[BaseCharge, ChargeCollection], + stride_arrays: Optional[List[np.ndarray]] = None) -> np.ndarray: + """ + Find the dense locations of elements (i.e. the index-values within the DENSE tensor) + in the vector of `fused_charges` resulting from fusing all elements of `charges` + that have a value of `target_charge`. + For example, given + ``` + charges = [[-2,0,1,0,0],[-1,0,2,1]] + target_charge = 0 + fused_charges = fuse_charges(charges,[1,1]) + print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] + ``` + we want to find the index-positions of charges + that fuse to `target_charge=0`, i.e. where `fused_charges==0`, + within the dense array. 
As one additional wrinkle, `charges` + is a subset of the permuted charges of a tensor with rank R > len(charges), + and `stride_arrays` are their corresponding range of strides, i.e. + + ``` + R=5 + D = [2,3,4,5,6] + tensor_flows = np.random.randint(-1,2,R) + tensor_charges = [np.random.randing(-5,5,D[n]) for n in range(R)] + order = np.arange(R) + np.random.shuffle(order) + tensor_strides = [360, 120, 30, 6, 1] + tensor_stride_arrays = [np.arange(D[n]) * strides[n] for n in range(R)] + + charges = [tensor_charges[order[n]] for n in range(3)] + flows = [tensor_flows[order[n]] for n in range(len(3))] + stride_arrays = [tensor_stride_arrays[order[n]] for n in range(3)] + _ = _find_transposed_dense_positions(charges, flows, 0, stride_arrays) + + ``` + `_find_transposed_dense_blocks` returns an np.ndarray containing the + index-positions of these elements calculated using `stride_arrays`. + The result only makes sense in conjuction with the complementary + data computed from the complementary + elements in`tensor_charges`, + `tensor_strides` and `tensor_flows`. + This routine is mainly used in `_find_diagonal_dense_blocks`. + + Args: + charges: A list of BaseCharge or ChargeCollection. + flows: The flow directions of the `charges`. + target_charge: The target charge. + stride_arrays: The stride-arrays for the `charges` subset. + if `None`, natural stride ordering is assumed. + + Returns: + np.ndarray: The index-positions within the dense data array + of the elements fusing to `target_charge`. 
+ """ + + _check_flows(flows) + if len(charges) == 1: + fused_charges = charges[0] * flows[0] + inds = np.nonzero(fused_charges == target_charge)[0] + if stride_arrays is not None: + permuted_inds = stride_arrays[0] + return permuted_inds[inds] + return inds + + left_charges, right_charges, partition = _find_best_partition(charges, flows) + if stride_arrays is not None: + permuted_left_inds = fuse_ndarrays(stride_arrays[0:partition]) + permuted_right_inds = fuse_ndarrays(stride_arrays[partition:]) + + unique_left = left_charges.unique() + unique_right = right_charges.unique() tmp_left_charges = (target_charge + (unique_right * (-1))) relevant_left_charges = unique_left.intersect(tmp_left_charges) @@ -593,14 +724,14 @@ def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], axis=1)) left_inds.append(left_ind) right_charge = (target_charge + (c * (-1))) - if order is None: + if stride_arrays is None: dim_array = np.expand_dims(len_right_charges * left_ind, 1) right_inds = np.nonzero(right_charges == right_charge)[0] mat = np.tile(right_inds, (len(dim_array), 1)) else: - dim_array = np.expand_dims(permuted_row_inds[left_ind], 1) - right_inds = permuted_column_inds[np.nonzero( + dim_array = np.expand_dims(permuted_left_inds[left_ind], 1) + right_inds = permuted_right_inds[np.nonzero( right_charges == right_charge)[0]] mat = np.tile(right_inds, (len(dim_array), 1)) dense_inds.append(mat + dim_array) @@ -694,10 +825,10 @@ def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], # return np.concatenate(indices) -def find_sparse_positions( - left_charges: Union[BaseCharge, ChargeCollection], left_flow: int, - right_charges: Union[BaseCharge, ChargeCollection], right_flow: int, - target_charges: Union[BaseCharge, ChargeCollection]) -> Dict: +def find_sparse_positions(charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[int, bool]], + target_charges: Union[BaseCharge, ChargeCollection] + ) -> Dict: """ Find the 
sparse locations of elements (i.e. the index-values within the SPARSE tensor) in the vector `fused_charges` (resulting from @@ -734,11 +865,25 @@ def find_sparse_positions( """ #FIXME: this is probably still not optimal - _check_flows([left_flow, right_flow]) + _check_flows(flows) + if len(charges) == 1: + fused_charges = charges[0] * flows[0] + unique_charges = fused_charges.unique() + target_charges = target_charges.unique() + relevant_target_charges = unique_charges.intersect(target_charges) + relevant_fused_charges = fused_charges[fused_charges.isin( + relevant_target_charges)] + return { + c: np.nonzero(relevant_fused_charges == c)[0] + for c in relevant_target_charges + } + + left_charges, right_charges, partition = _find_best_partition(charges, flows) + target_charges = target_charges.unique() unique_left = left_charges.unique() unique_right = right_charges.unique() - fused = unique_left * left_flow + unique_right * right_flow + fused = unique_left + unique_right #compute all unique charges that can add up to #target_charges @@ -768,18 +913,17 @@ def find_sparse_positions( for n in range(len(unique_left_charges)): left_charge = unique_left_charges[n] - total_charge = left_charge * left_flow + unique_right_charges * right_flow + total_charge = left_charge + unique_right_charges total_degeneracy = np.sum(right_dims[total_charge.isin(target_charges)]) tmp_relevant_right_charges = relevant_right_charges[ - relevant_right_charges.isin( - (target_charges + left_charge * ((-1) * left_flow)) * right_flow)] + relevant_right_charges.isin((target_charges + left_charge * (-1)))] for n in range(len(target_charges)): target_charge = target_charges[n] - right_indices[( - left_charge.get_item(0), target_charge.get_item(0))] = np.nonzero( - tmp_relevant_right_charges == (target_charge + left_charge * ( - (-1) * left_flow)) * right_flow)[0] + right_indices[(left_charge.get_item(0), + target_charge.get_item(0))] = np.nonzero( + tmp_relevant_right_charges == (target_charge + + 
left_charge * (-1)))[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy @@ -1035,11 +1179,11 @@ def flows(self): def charges(self): return [i.charges for i in self.indices] - def transpose( - self, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False) -> "BlockSparseTensor": + def transpose(self, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False + ) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order`. This routine currently shuffles data. @@ -1084,7 +1228,9 @@ def transpose( flat_charges = [i.charges for i in flat_elementary_indices] flat_flows = [i.flow for i in flat_elementary_indices] flat_dims = [len(c) for c in flat_charges] - flat_strides = np.flip(np.append(1, np.cumprod(np.flip(flat_dims[1::])))) + flat_strides = _get_strides( + flat_dims + ) #np.flip(np.append(1, np.cumprod(np.flip(flat_dims[1::])))) flat_order = np.concatenate( [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) #find the best partition into left and right charges @@ -1116,8 +1262,8 @@ def transpose( t1 = time.time() print(len(linear_positions), len(dense_permutation)) permutation = np.searchsorted(linear_positions, dense_permutation) - print( - 'finding the permutation with argsort: {}s'.format(time.time() - t1)) + print('finding the permutation with argsort: {}s'.format(time.time() - + t1)) self.indices = [self.indices[n] for n in order] self.data = self.data[permutation] @@ -1406,11 +1552,11 @@ def reshape(tensor: BlockSparseTensor, return result -def transpose( - tensor: BlockSparseTensor, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False) -> "BlockSparseTensor": +def transpose(tensor: BlockSparseTensor, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + 
return_permutation: Optional[bool] = False + ) -> "BlockSparseTensor": """ Transpose `tensor` into the new order `order`. This routine currently shuffles data. @@ -1485,8 +1631,8 @@ def tensordot(tensor1: BlockSparseTensor, raise ValueError("axes1 and axes2 have incompatible elementary" " shapes {} and {}".format(elementary_1, elementary_2)) if not np.all( - np.array([i.flow for i in elementary_1]) == - (-1) * np.array([i.flow for i in elementary_2])): + np.array([i.flow for i in elementary_1]) == (-1) * + np.array([i.flow for i in elementary_2])): raise ValueError("axes1 and axes2 have incompatible elementary" " flows {} and {}".format( np.array([i.flow for i in elementary_1]), diff --git a/tensornetwork/block_tensor/block_tensor_test.py b/tensornetwork/block_tensor/block_tensor_test.py index 338eeab8a..9d889f8de 100644 --- a/tensornetwork/block_tensor/block_tensor_test.py +++ b/tensornetwork/block_tensor/block_tensor_test.py @@ -3,7 +3,7 @@ from tensornetwork.block_tensor.charge import U1Charge, ChargeCollection, fuse_charges from tensornetwork.block_tensor.index import Index -from tensornetwork.block_tensor.block_tensor import _find_diagonal_sparse_blocks, compute_num_nonzero, find_sparse_positions, find_dense_positions, BlockSparseTensor, fuse_ndarrays +from tensornetwork.block_tensor.block_tensor import _find_diagonal_dense_blocks, _find_diagonal_sparse_blocks, compute_num_nonzero, find_sparse_positions, find_dense_positions, BlockSparseTensor, fuse_ndarrays, _find_values_in_fused np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128] @@ -51,22 +51,16 @@ def test_find_sparse_positions_consistency(): ] data1 = find_sparse_positions( - left_charges=charges1[0] + charges1[1], - left_flow=1, - right_charges=charges1[2] + charges1[3], - right_flow=1, + charges=charges1, + flows=[1, 1, 1, 1], target_charges=charges1[0].zero_charge) data2 = find_sparse_positions( - left_charges=charges2[0] + charges2[1], - left_flow=1, - 
right_charges=charges2[2] + charges2[3], - right_flow=1, + charges=charges2, + flows=[1, 1, 1, 1], target_charges=charges2[0].zero_charge) data3 = find_sparse_positions( - left_charges=charges3[0] + charges3[1], - left_flow=1, - right_charges=charges3[2] + charges3[3], - right_flow=1, + charges=charges3, + flows=[1, 1, 1, 1], target_charges=charges3[0].zero_charge) nz1 = np.asarray(list(data1.values())[0]) @@ -95,23 +89,11 @@ def test_find_dense_positions_consistency(): ] flows = [1, 1, 1, -1] data1 = find_dense_positions( - left_charges=charges1[0] * flows[0] + charges1[1] * flows[0], - left_flow=1, - right_charges=charges1[2] * flows[2] + charges1[3] * flows[3], - right_flow=1, - target_charge=charges1[0].zero_charge) + charges=charges1, flows=flows, target_charge=charges1[0].zero_charge) data2 = find_dense_positions( - left_charges=charges2[0] * flows[0] + charges2[1] * flows[1], - left_flow=1, - right_charges=charges2[2] * flows[2] + charges2[3] * flows[3], - right_flow=1, - target_charge=charges2[0].zero_charge) + charges=charges2, flows=flows, target_charge=charges2[0].zero_charge) data3 = find_dense_positions( - left_charges=charges3[0] * flows[0] + charges3[1] * flows[1], - left_flow=1, - right_charges=charges3[2] * flows[2] + charges3[3] * flows[3], - right_flow=1, - target_charge=charges3[0].zero_charge) + charges=charges3, flows=flows, target_charge=charges3[0].zero_charge) nz = compute_num_nonzero(charges1, flows) assert nz == len(data1) @@ -218,7 +200,7 @@ def test_find_dense_positions(): target_charge = 0 fused_charges = fuse_ndarrays([left_charges, right_charges]) dense_positions = find_dense_positions( - U1Charge(left_charges), 1, U1Charge(right_charges), 1, + [U1Charge(left_charges), U1Charge(right_charges)], [1, 1], U1Charge(np.asarray([target_charge]))) np.testing.assert_allclose(dense_positions, np.nonzero(fused_charges == target_charge)[0]) @@ -242,15 +224,10 @@ def test_find_dense_positions_2(): ] n1 = compute_num_nonzero([i.charges for i in 
indices], [i.flow for i in indices]) - row_charges = fuse_charges([indices[n].charges for n in range(rank // 2)], - [1 for _ in range(rank // 2)]) - column_charges = fuse_charges( - [indices[n].charges for n in range(rank // 2, rank)], - [1 for _ in range(rank // 2, rank)]) i01 = indices[0] * indices[1] i23 = indices[2] * indices[3] - positions = find_dense_positions(i01.charges, 1, i23.charges, 1, + positions = find_dense_positions([i01.charges, i23.charges], [1, 1], U1Charge(np.asarray([0]))) assert len(positions) == n1 @@ -273,25 +250,38 @@ def test_find_sparse_positions(): ] n1 = compute_num_nonzero([i.charges for i in indices], [i.flow for i in indices]) - row_charges = fuse_charges([indices[n].charges for n in range(rank // 2)], - [1 for _ in range(rank // 2)]) - column_charges = fuse_charges( - [indices[n].charges for n in range(rank // 2, rank)], - [1 for _ in range(rank // 2, rank)]) - i01 = indices[0] * indices[1] i23 = indices[2] * indices[3] unique_row_charges = np.unique(i01.charges.charges) unique_column_charges = np.unique(i23.charges.charges) common_charges = np.intersect1d( unique_row_charges, -unique_column_charges, assume_unique=True) - blocks = find_sparse_positions( - i01.charges, 1, i23.charges, 1, target_charges=U1Charge(np.asarray([0]))) + blocks = find_sparse_positions([i01.charges, i23.charges], [1, 1], + target_charges=U1Charge(np.asarray([0]))) assert sum([len(v) for v in blocks.values()]) == n1 np.testing.assert_allclose(np.sort(blocks[0]), np.arange(n1)) def test_find_sparse_positions_2(): + D = 1000 #bond dimension + B = 4 #number of blocks + dtype = np.int16 #the dtype of the quantum numbers + charges = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) + index = Index(charges=U1Charge(charges), flow=1, name='index0') + targets = np.asarray([-1, 0, 1]) + blocks = find_sparse_positions([index.charges], [index.flow], + target_charges=U1Charge(targets)) + + inds = np.isin(charges, targets) + relevant_charges = charges[inds] + 
blocks_ = {t: np.nonzero(relevant_charges == t)[0] for t in targets} + assert np.all( + np.asarray(list(blocks.keys())) == np.asarray(list(blocks_.keys()))) + for k in blocks.keys(): + assert np.all(blocks[k] == blocks_[k]) + + +def test_find_sparse_positions_3(): D = 40 #bond dimension B = 4 #number of blocks dtype = np.int16 #the dtype of the quantum numbers @@ -310,10 +300,8 @@ def test_find_sparse_positions_2(): i1, i2 = indices common_charges = np.intersect1d(i1.charges.charges, i2.charges.charges) row_locations = find_sparse_positions( - left_charges=i1.charges, - left_flow=flows[0], - right_charges=i2.charges, - right_flow=flows[1], + charges=[i1.charges, i2.charges], + flows=flows, target_charges=U1Charge(common_charges)) fused = (i1 * i2).charges relevant = fused.charges[np.isin(fused.charges, common_charges)] @@ -321,22 +309,82 @@ def test_find_sparse_positions_2(): np.testing.assert_allclose(np.nonzero(relevant == k)[0], np.sort(v)) -def test_dense_transpose(): - Ds = [10, 11, 12] #bond dimension - rank = len(Ds) - flows = np.asarray([1 for _ in range(rank)]) - flows[-2::] = -1 - charges = [U1Charge(np.zeros(Ds[n], dtype=np.int16)) for n in range(rank)] - indices = [ - Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) - for n in range(rank) - ] - A = BlockSparseTensor.random(indices=indices, dtype=np.float64) - B = np.transpose(np.reshape(A.data.copy(), Ds), (1, 0, 2)) - A.transpose((1, 0, 2)) - np.testing.assert_allclose(A.data, B.flat) - - B = np.transpose(np.reshape(A.data.copy(), [11, 10, 12]), (1, 0, 2)) - A.transpose((1, 0, 2)) - - np.testing.assert_allclose(A.data, B.flat) +# def test_dense_transpose(): +# Ds = [10, 11, 12] #bond dimension +# rank = len(Ds) +# flows = np.asarray([1 for _ in range(rank)]) +# flows[-2::] = -1 +# charges = [U1Charge(np.zeros(Ds[n], dtype=np.int16)) for n in range(rank)] +# indices = [ +# Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) +# for n in range(rank) +# ] +# A = 
BlockSparseTensor.random(indices=indices, dtype=np.float64) +# B = np.transpose(np.reshape(A.data.copy(), Ds), (1, 0, 2)) +# A.transpose((1, 0, 2)) +# np.testing.assert_allclose(A.data, B.flat) + +# B = np.transpose(np.reshape(A.data.copy(), [11, 10, 12]), (1, 0, 2)) +# A.transpose((1, 0, 2)) + +# np.testing.assert_allclose(A.data, B.flat) + + +def test_find_diagonal_dense_blocks(): + R = 2 + rs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] + cs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] + charges = rs + cs + + left_fused = fuse_charges(charges[0:R], [1] * R) + right_fused = fuse_charges(charges[R:], [1] * R) + left_unique = left_fused.unique() + right_unique = right_fused.unique() + zero = left_unique.zero_charge + blocks = {} + rdim = len(right_fused) + for lu in left_unique: + linds = np.nonzero(left_fused == lu)[0] + rinds = np.nonzero(right_fused == lu * (-1))[0] + if (len(linds) > 0) and (len(rinds) > 0): + blocks[lu] = fuse_ndarrays([linds * rdim, rinds]) + comm, blocks_ = _find_diagonal_dense_blocks(rs, cs, [1] * R, [1] * R) + for n in range(len(comm)): + assert np.all(blocks[comm.charges[n]] == blocks_[n][0]) + + +def test_find_diagonal_dense_blocks_transposed(): + R = 2 + order = np.arange(2 * R) + np.random.shuffle(order) + R = 2 + rs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] + cs = [U1Charge(np.random.randint(-4, 4, 40)) for _ in range(R)] + charges = rs + cs + dims = np.asarray([len(c) for c in charges]) + strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) + stride_arrays = [np.arange(dims[n]) * strides[n] for n in range(2 * R)] + + left_fused = fuse_charges([charges[n] for n in order[0:R]], [1] * R) + right_fused = fuse_charges([charges[n] for n in order[R:]], [1] * R) + lstrides = fuse_ndarrays([stride_arrays[n] for n in order[0:R]]) + rstrides = fuse_ndarrays([stride_arrays[n] for n in order[R:]]) + + left_unique = left_fused.unique() + right_unique = right_fused.unique() + blocks = {} 
+ rdim = len(right_fused) + for lu in left_unique: + linds = np.nonzero(left_fused == lu)[0] + rinds = np.nonzero(right_fused == lu * (-1))[0] + if (len(linds) > 0) and (len(rinds) > 0): + tmp = fuse_ndarrays([linds * rdim, rinds]) + blocks[lu] = _find_values_in_fused(tmp, lstrides, rstrides) + + comm, blocks_ = _find_diagonal_dense_blocks([charges[n] for n in order[0:R]], + [charges[n] for n in order[R:]], + [1] * R, [1] * R, + row_strides=strides[order[0:R]], + column_strides=strides[order[R:]]) + for n in range(len(comm)): + assert np.all(blocks[comm.charges[n]] == blocks_[n][0]) From ff1b94a0e7b1742284a5746a1bb27cd813b34ab1 Mon Sep 17 00:00:00 2001 From: mganahl Date: Tue, 14 Jan 2020 09:21:21 -0500 Subject: [PATCH 156/212] broken commit --- tensornetwork/block_tensor/block_tensor.py | 195 ++++++++++++++++++--- 1 file changed, 166 insertions(+), 29 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 520be05c3..ac5949228 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -91,7 +91,8 @@ def _check_flows(flows: List[int]) -> None: def _find_best_partition(charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[int] + flows: List[int], + return_charges: Optional[bool] = True ) -> Tuple[Union[BaseCharge, ChargeCollection], Union[BaseCharge, ChargeCollection], int]: """ @@ -123,12 +124,14 @@ def _find_best_partition(charges: List[Union[BaseCharge, ChargeCollection]], min_ind = min_inds[np.argmax(right_dims)] else: min_ind = min_inds[0] - fused_left_charges = fuse_charges(charges[0:min_ind + 1], - flows[0:min_ind + 1]) - fused_right_charges = fuse_charges(charges[min_ind + 1::], - flows[min_ind + 1::]) + if return_charges: + fused_left_charges = fuse_charges(charges[0:min_ind + 1], + flows[0:min_ind + 1]) + fused_right_charges = fuse_charges(charges[min_ind + 1::], + flows[min_ind + 1::]) - return fused_left_charges, 
fused_right_charges, min_ind + 1 + return fused_left_charges, fused_right_charges, min_ind + 1 + return min_ind + 1 def compute_fused_charge_degeneracies( @@ -156,9 +159,8 @@ def compute_fused_charge_degeneracies( # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". - accumulated_charges, accumulated_degeneracies = (charges[0] * - flows[0]).unique( - return_counts=True) + accumulated_charges, accumulated_degeneracies = ( + charges[0] * flows[0]).unique(return_counts=True) for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor @@ -825,10 +827,10 @@ def _find_transposed_dense_positions( # return np.concatenate(indices) -def find_sparse_positions(charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[int, bool]], - target_charges: Union[BaseCharge, ChargeCollection] - ) -> Dict: +def find_sparse_positions( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[int, bool]], + target_charges: Union[BaseCharge, ChargeCollection]) -> Dict: """ Find the sparse locations of elements (i.e. 
the index-values within the SPARSE tensor) in the vector `fused_charges` (resulting from @@ -922,8 +924,8 @@ def find_sparse_positions(charges: List[Union[BaseCharge, ChargeCollection]], target_charge = target_charges[n] right_indices[(left_charge.get_item(0), target_charge.get_item(0))] = np.nonzero( - tmp_relevant_right_charges == (target_charge + - left_charge * (-1)))[0] + tmp_relevant_right_charges == ( + target_charge + left_charge * (-1)))[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy @@ -1179,11 +1181,146 @@ def flows(self): def charges(self): return [i.charges for i in self.indices] - def transpose(self, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False - ) -> "BlockSparseTensor": + def transpose( + self, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False) -> "BlockSparseTensor": + """ + Transpose the tensor into the new order `order`. This routine currently shuffles + data. + Args: + order: The new order of indices. + permutation: An np.ndarray of int for reshuffling the data, + typically the output of a prior call to `transpose`. Passing `permutation` + can greatly speed up the transposition. + return_permutation: If `True`, return the the permutation data. + Returns: + BlockSparseTensor: The transposed tensor. 
+ """ + strides = _get_strides(self.dense_shape) + dims = self.dense_shape + charges = self.charges + flows = self.flows + partition = _find_best_partition(charges, flows, return_charges=False) + tr_partition = _find_best_partition([charges[n] for n in order], + [flows[n] for n in order], + return_charges=False) + + unique_row_charges = compute_unique_fused_charges(charges[0:partition], + flows[0:partition]) + unique_column_charges, column_dims = compute_fused_charge_degeneracies( + charges[partition:], flows[partition:]) + + common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) + column_degeneracies = dict(zip(unique_column_charges * (-1), column_dims)) + sp_row_blocks = find_sparse_positions(charges[0:partition], + flows[0:partition], common_charges) + sp_column_blocks = find_sparse_positions(charges[partition:], + flows[partition:], + common_charges * (-1)) + + for k, v in sp_column_blocks.items(): + print(k, len(v), column_degeneracies[-k]) + degeneracy_vector = np.empty( + np.sum([len(v) for v in sp_row_blocks.values()]), dtype=np.int64) + for c in common_charges: + degeneracy_vector[sp_row_blocks[c]] = column_degeneracies[c] + stop_positions = np.cumsum(degeneracy_vector) + start_positions = stop_positions - degeneracy_vector + + dense_row_blocks = { + common_charges.get_item(n): find_dense_positions( + charges[0:partition], + flows[0:partition], + common_charges[n], + return_sorted=False) for n in range(len(common_charges)) + } + dense_column_blocks = { + common_charges.get_item(n): find_dense_positions( + charges[partition:], + flows[partition:], + common_charges[n] * (-1), + return_sorted=False) for n in range(len(common_charges)) + } + + dense_row_positions = np.sort( + np.concatenate(list(dense_row_blocks.values()))) + + sp_column_positions = np.sort( + np.concatenate(list(sp_column_blocks.values()))) + dense_column_positions = np.sort( + np.concatenate(list(dense_column_blocks.values()))) + + row_lookup = 
np.empty(dense_row_positions[-1] + 1, dtype=np.int64) + column_lookup = np.empty(dense_column_positions[-1] + 1, dtype=np.int64) + row_lookup[dense_row_positions] = start_positions + column_lookup[dense_column_positions] = sp_column_positions + print('dense col pos', dense_column_positions) + print('sp_col_pos', sp_column_positions) + data = np.empty(len(self.data), dtype=self.data.dtype) + # _, dense_blocks = _find_diagonal_dense_blocks( + # [charges[n] for n in order[0:tr_partition]], + # [charges[n] for n in order[tr_partition:]], + # flows[0:tr_partition], + # flows[tr_partition:], + # row_strides=strides[order[0:tr_partition]], + # column_strides=strides[order[tr_partition:]]) + + stride_arrays = [np.arange(dims[n]) * strides[n] for n in range(len(dims))] + tr_linear_positions = find_dense_positions([charges[n] for n in order], + [flows[n] for n in order], + charges[0].zero_charge) + tr_stride_arrays = [stride_arrays[n] for n in order] + + dense_permutation = _find_values_in_fused( + tr_linear_positions, fuse_ndarrays(tr_stride_arrays[0:tr_partition]), + fuse_ndarrays(tr_stride_arrays[partition:])) + + column_dim = np.prod( + [len(charges[n]) for n in range(partition, len(charges))]) + # for b in dense_blocks: + # rinds, cinds = np.divmod(b[0], column_dim) + # transposed_positions = row_lookup[rinds] + column_lookup[cinds] + # self.data[transposed_positions] + common_charges, blocks, start_positions_2, row_locations, column_degeneracies_2 = _find_diagonal_sparse_blocks( + data=[], + row_charges=charges[0:partition], + column_charges=charges[partition:], + row_flows=flows[:partition], + column_flows=flows[partition:], + return_data=False) + sp_row_positions_2 = np.sort(np.concatenate(list(row_locations.values()))) + sp_row_positions = np.sort(np.concatenate(list(sp_row_blocks.values()))) + # print(np.all(sp_row_positions == sp_row_positions_2)) + print('asdf', np.all(start_positions == start_positions_2)) + print(start_positions) + # print(column_dim) + # 
print(partition, tr_partition) + # print(dense_permutation) + # print(tr_linear_positions) + rinds, cinds = np.divmod(dense_permutation, column_dim) + #print(np.max(rinds), np.max(cinds), len(row_lookup), len(column_lookup)) + + print('row lookup', row_lookup) + print('rinds', rinds) + print('col lookup', column_lookup) + print('cinds', cinds) + print('dense col pos', dense_column_positions) + u1 = np.unique(cinds) + u2 = np.unique(dense_column_positions) + print(np.all(u1 == u2)) + print('row_lookup[rinds]', row_lookup[rinds]) + print('col_lookup[cinds]', column_lookup[cinds]) + + transposed_positions = row_lookup[rinds] + column_lookup[cinds] + self.data[transposed_positions] + + def transpose_2( + self, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order`. This routine currently shuffles data. @@ -1262,8 +1399,8 @@ def transpose(self, t1 = time.time() print(len(linear_positions), len(dense_permutation)) permutation = np.searchsorted(linear_positions, dense_permutation) - print('finding the permutation with argsort: {}s'.format(time.time() - - t1)) + print( + 'finding the permutation with argsort: {}s'.format(time.time() - t1)) self.indices = [self.indices[n] for n in order] self.data = self.data[permutation] @@ -1552,11 +1689,11 @@ def reshape(tensor: BlockSparseTensor, return result -def transpose(tensor: BlockSparseTensor, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False - ) -> "BlockSparseTensor": +def transpose( + tensor: BlockSparseTensor, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False) -> "BlockSparseTensor": """ Transpose `tensor` into the new order `order`. This routine currently shuffles data. 
@@ -1631,8 +1768,8 @@ def tensordot(tensor1: BlockSparseTensor, raise ValueError("axes1 and axes2 have incompatible elementary" " shapes {} and {}".format(elementary_1, elementary_2)) if not np.all( - np.array([i.flow for i in elementary_1]) == (-1) * - np.array([i.flow for i in elementary_2])): + np.array([i.flow for i in elementary_1]) == + (-1) * np.array([i.flow for i in elementary_2])): raise ValueError("axes1 and axes2 have incompatible elementary" " flows {} and {}".format( np.array([i.flow for i in elementary_1]), From dc2739ca03400e4423ec36320d7abe9ee0d0292b Mon Sep 17 00:00:00 2001 From: mganahl Date: Tue, 14 Jan 2020 12:51:03 -0500 Subject: [PATCH 157/212] broken commit --- tensornetwork/block_tensor/block_tensor.py | 470 +++++++++++++++------ 1 file changed, 341 insertions(+), 129 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index ac5949228..11de324fb 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -159,8 +159,9 @@ def compute_fused_charge_degeneracies( # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". 
- accumulated_charges, accumulated_degeneracies = ( - charges[0] * flows[0]).unique(return_counts=True) + accumulated_charges, accumulated_degeneracies = (charges[0] * + flows[0]).unique( + return_counts=True) for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor @@ -453,6 +454,7 @@ def _find_diagonal_dense_blocks( unique_row_charges = compute_unique_fused_charges(row_charges, row_flows) #get the charges common to rows and columns (only those matter) common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) + if ((row_strides is None) and (column_strides is not None)) or ((row_strides is not None) and (column_strides is None)): @@ -461,24 +463,13 @@ def _find_diagonal_dense_blocks( " Found `row_strides={}` and " "`column_strides={}`".format(row_strides, column_strides)) - if row_strides is not None: - row_stride_arrays = [ - np.arange(len(row_charges[n])) * row_strides[n] - for n in range(len(row_charges)) - ] - column_stride_arrays = [ - np.arange(len(column_charges[n])) * column_strides[n] - for n in range(len(column_charges)) - ] - if row_strides is not None: row_locations = { common_charges.get_item(n): _find_transposed_dense_positions( charges=row_charges, flows=row_flows, target_charge=common_charges[n], - stride_arrays=row_stride_arrays) - for n in range(len(common_charges)) + strides=row_strides) for n in range(len(common_charges)) } else: column_dim = np.prod([len(c) for c in column_charges]) @@ -495,8 +486,7 @@ def _find_diagonal_dense_blocks( charges=column_charges, flows=column_flows, target_charge=common_charges[n] * (-1), - stride_arrays=column_stride_arrays) - for n in range(len(common_charges)) + strides=column_strides) for n in range(len(common_charges)) } else: column_locations = { @@ -636,7 +626,7 @@ def _find_transposed_dense_positions( charges: List[Union[BaseCharge, ChargeCollection]], flows: List[Union[int, bool]], target_charge: Union[BaseCharge, 
ChargeCollection], - stride_arrays: Optional[List[np.ndarray]] = None) -> np.ndarray: + strides: Optional[np.ndarray] = None) -> np.ndarray: """ Find the dense locations of elements (i.e. the index-values within the DENSE tensor) in the vector of `fused_charges` resulting from fusing all elements of `charges` @@ -662,12 +652,11 @@ def _find_transposed_dense_positions( order = np.arange(R) np.random.shuffle(order) tensor_strides = [360, 120, 30, 6, 1] - tensor_stride_arrays = [np.arange(D[n]) * strides[n] for n in range(R)] - + charges = [tensor_charges[order[n]] for n in range(3)] flows = [tensor_flows[order[n]] for n in range(len(3))] - stride_arrays = [tensor_stride_arrays[order[n]] for n in range(3)] - _ = _find_transposed_dense_positions(charges, flows, 0, stride_arrays) + strides = [tensor_stride[order[n]] for n in range(3)] + _ = _find_transposed_dense_positions(charges, flows, 0, strides) ``` `_find_transposed_dense_blocks` returns an np.ndarray containing the @@ -682,7 +671,7 @@ def _find_transposed_dense_positions( charges: A list of BaseCharge or ChargeCollection. flows: The flow directions of the `charges`. target_charge: The target charge. - stride_arrays: The stride-arrays for the `charges` subset. + strides: The strides for the `charges` subset. if `None`, natural stride ordering is assumed. 
Returns: @@ -694,13 +683,16 @@ def _find_transposed_dense_positions( if len(charges) == 1: fused_charges = charges[0] * flows[0] inds = np.nonzero(fused_charges == target_charge)[0] - if stride_arrays is not None: - permuted_inds = stride_arrays[0] + if strides is not None: + permuted_inds = strides[0] * np.arange(len(charges[0])) return permuted_inds[inds] return inds left_charges, right_charges, partition = _find_best_partition(charges, flows) - if stride_arrays is not None: + if strides is not None: + stride_arrays = [ + np.arange(len(charges[n])) * strides[n] for n in range(len(charges)) + ] permuted_left_inds = fuse_ndarrays(stride_arrays[0:partition]) permuted_right_inds = fuse_ndarrays(stride_arrays[partition:]) @@ -827,10 +819,10 @@ def _find_transposed_dense_positions( # return np.concatenate(indices) -def find_sparse_positions( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[int, bool]], - target_charges: Union[BaseCharge, ChargeCollection]) -> Dict: +def find_sparse_positions(charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[int, bool]], + target_charges: Union[BaseCharge, ChargeCollection] + ) -> Dict: """ Find the sparse locations of elements (i.e. 
the index-values within the SPARSE tensor) in the vector `fused_charges` (resulting from @@ -924,8 +916,8 @@ def find_sparse_positions( target_charge = target_charges[n] right_indices[(left_charge.get_item(0), target_charge.get_item(0))] = np.nonzero( - tmp_relevant_right_charges == ( - target_charge + left_charge * (-1)))[0] + tmp_relevant_right_charges == (target_charge + + left_charge * (-1)))[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy @@ -1181,11 +1173,11 @@ def flows(self): def charges(self): return [i.charges for i in self.indices] - def transpose( - self, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False) -> "BlockSparseTensor": + def transpose(self, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False + ) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order`. This routine currently shuffles data. 
@@ -1207,21 +1199,65 @@ def transpose( [flows[n] for n in order], return_charges=False) - unique_row_charges = compute_unique_fused_charges(charges[0:partition], - flows[0:partition]) - unique_column_charges, column_dims = compute_fused_charge_degeneracies( - charges[partition:], flows[partition:]) - - common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) - column_degeneracies = dict(zip(unique_column_charges * (-1), column_dims)) + # unique_row_charges = compute_unique_fused_charges(charges[0:partition], + # flows[0:partition]) + # unique_column_charges, column_dims = compute_fused_charge_degeneracies( + # charges[partition:], flows[partition:]) + + # common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) + # column_degeneracies = dict(zip(unique_column_charges * (-1), column_dims)) + + tr_unique_row_charges = compute_unique_fused_charges( + [charges[n] for n in order[0:tr_partition]], + [flows[n] for n in order[0:tr_partition]]) + + tr_unique_column_charges = compute_unique_fused_charges( + [charges[n] for n in order[tr_partition:]], + [flows[n] for n in order[tr_partition:]]) + + tr_common_charges = tr_unique_row_charges.intersect( + tr_unique_column_charges * (-1)) + left_dense = { + tr_common_charges.get_item(m): _find_transposed_dense_positions( + charges=[charges[n] for n in order[0:tr_partition]], + flows=[flows[n] for n in order[0:tr_partition]], + target_charge=tr_common_charges[m], + strides=strides[order[0:tr_partition]]) + for m in range(len(tr_common_charges)) + } + right_dense = { + tr_common_charges.get_item(m): _find_transposed_dense_positions( + charges=[charges[n] for n in order[tr_partition:]], + flows=[flows[n] for n in order[tr_partition:]], + target_charge=tr_common_charges[m] * (-1), + strides=strides[order[tr_partition:]]) + for m in range(len(tr_common_charges)) + } + # cc, dense_blocks = _find_diagonal_dense_blocks( + # [charges[n] for n in order[0:tr_partition]], + # [charges[n] for n in 
order[tr_partition:]], + # [flows[n] for n in order[0:tr_partition]], + # [flows[n] for n in order[tr_partition:]], + # row_strides=strides[order[0:tr_partition]], + # column_strides=strides[order[tr_partition:]]) + row_dim = np.prod([len(charges[n]) for n in range(partition)]) + for n in range(len(tr_common_charges)): + c = tr_common_charges.get_item(n) + #d = dense_blocks[n] + tmp = fuse_ndarrays([left_dense[c], right_dense[c]]) + tmp2 = fuse_ndarrays( + [np.mod(left_dense[c], row_dim), + np.mod(right_dense[c], row_dim)]) + tmp3 = (tmp - tmp2) / row_dim + #print(np.all(tmp == d[0])) + + return sp_row_blocks = find_sparse_positions(charges[0:partition], flows[0:partition], common_charges) sp_column_blocks = find_sparse_positions(charges[partition:], flows[partition:], common_charges * (-1)) - for k, v in sp_column_blocks.items(): - print(k, len(v), column_degeneracies[-k]) degeneracy_vector = np.empty( np.sum([len(v) for v in sp_row_blocks.values()]), dtype=np.int64) for c in common_charges: @@ -1243,84 +1279,258 @@ def transpose( common_charges[n] * (-1), return_sorted=False) for n in range(len(common_charges)) } + dtype = charges[0].dtype + drbs = list(dense_row_blocks.values()) + block_dict = dict(zip(np.arange(len(drbs)), list(dense_row_blocks.keys()))) + + dense_row_positions = np.concatenate(drbs) + dense_block_numbers = np.concatenate([ + np.full(len(drbs[n]), fill_value=n, dtype=np.int16) + for n in range(len(drbs)) + ]) - dense_row_positions = np.sort( - np.concatenate(list(dense_row_blocks.values()))) + ind_sort = np.argsort(dense_row_positions) + dense_row_positions = dense_row_positions[ind_sort] + dense_block_number = dense_block_numbers[ind_sort] - sp_column_positions = np.sort( - np.concatenate(list(sp_column_blocks.values()))) - dense_column_positions = np.sort( - np.concatenate(list(dense_column_blocks.values()))) + #sp_column_positions = np.sort( + # np.concatenate(list(sp_column_blocks.values()))) + #dense_column_positions = np.sort( + # 
np.concatenate(list(dense_column_blocks.values()))) + #print('dense_row_positions:', dense_row_positions) + #print('dense_column_positions:', dense_column_positions) + #print('start_positions:', start_positions) row_lookup = np.empty(dense_row_positions[-1] + 1, dtype=np.int64) - column_lookup = np.empty(dense_column_positions[-1] + 1, dtype=np.int64) + block_lookup = np.empty(dense_row_positions[-1] + 1, dtype=np.int64) row_lookup[dense_row_positions] = start_positions - column_lookup[dense_column_positions] = sp_column_positions - print('dense col pos', dense_column_positions) - print('sp_col_pos', sp_column_positions) + block_lookup[dense_row_positions] = dense_block_number + #column_lookup = np.empty(dense_column_positions[-1] + 1, dtype=np.int64) + #column_lookup[dense_column_positions] = sp_column_positions + # print('row_lookup', row_lookup) + # print('col_lookup', column_lookup) + # return + # print('dense col pos', dense_column_positions) + # print('sp_col_pos', sp_column_positions) data = np.empty(len(self.data), dtype=self.data.dtype) - # _, dense_blocks = _find_diagonal_dense_blocks( - # [charges[n] for n in order[0:tr_partition]], - # [charges[n] for n in order[tr_partition:]], - # flows[0:tr_partition], - # flows[tr_partition:], - # row_strides=strides[order[0:tr_partition]], - # column_strides=strides[order[tr_partition:]]) - - stride_arrays = [np.arange(dims[n]) * strides[n] for n in range(len(dims))] - tr_linear_positions = find_dense_positions([charges[n] for n in order], - [flows[n] for n in order], - charges[0].zero_charge) - tr_stride_arrays = [stride_arrays[n] for n in order] - - dense_permutation = _find_values_in_fused( - tr_linear_positions, fuse_ndarrays(tr_stride_arrays[0:tr_partition]), - fuse_ndarrays(tr_stride_arrays[partition:])) - + _, dense_blocks = _find_diagonal_dense_blocks( + [charges[n] for n in order[0:tr_partition]], + [charges[n] for n in order[tr_partition:]], + flows[0:tr_partition], + flows[tr_partition:], + 
row_strides=strides[order[0:tr_partition]], + column_strides=strides[order[tr_partition:]]) + + # stride_arrays = [np.arange(dims[n]) * strides[n] for n in range(len(dims))] + # tr_linear_positions = find_dense_positions([charges[n] for n in order], + # [flows[n] for n in order], + # charges[0].zero_charge) + # tr_stride_arrays = [stride_arrays[n] for n in order] + + # dense_permutation = _find_values_in_fused( + # tr_linear_positions, fuse_ndarrays(tr_stride_arrays[0:tr_partition]), + # fuse_ndarrays(tr_stride_arrays[partition:])) + + # dense_permutation_2 = np.sort(np.concatenate([b[0] for b in dense_blocks])) + # print('dense_perm == dense_perm2', + # np.all(dense_permutation == dense_permutation_2)) column_dim = np.prod( [len(charges[n]) for n in range(partition, len(charges))]) - # for b in dense_blocks: - # rinds, cinds = np.divmod(b[0], column_dim) - # transposed_positions = row_lookup[rinds] + column_lookup[cinds] - # self.data[transposed_positions] - common_charges, blocks, start_positions_2, row_locations, column_degeneracies_2 = _find_diagonal_sparse_blocks( - data=[], - row_charges=charges[0:partition], - column_charges=charges[partition:], - row_flows=flows[:partition], - column_flows=flows[partition:], - return_data=False) - sp_row_positions_2 = np.sort(np.concatenate(list(row_locations.values()))) - sp_row_positions = np.sort(np.concatenate(list(sp_row_blocks.values()))) - # print(np.all(sp_row_positions == sp_row_positions_2)) - print('asdf', np.all(start_positions == start_positions_2)) - print(start_positions) - # print(column_dim) - # print(partition, tr_partition) - # print(dense_permutation) - # print(tr_linear_positions) - rinds, cinds = np.divmod(dense_permutation, column_dim) - #print(np.max(rinds), np.max(cinds), len(row_lookup), len(column_lookup)) - - print('row lookup', row_lookup) - print('rinds', rinds) - print('col lookup', column_lookup) - print('cinds', cinds) - print('dense col pos', dense_column_positions) - u1 = np.unique(cinds) 
- u2 = np.unique(dense_column_positions) - print(np.all(u1 == u2)) - print('row_lookup[rinds]', row_lookup[rinds]) - print('col_lookup[cinds]', column_lookup[cinds]) - - transposed_positions = row_lookup[rinds] + column_lookup[cinds] - self.data[transposed_positions] - - def transpose_2( - self, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False) -> "BlockSparseTensor": + for b in dense_blocks: + # print(b[1]) + # t1 = time.time() + rinds, cinds = np.divmod(b[0], column_dim) + # print('divmod', time.time() - t1) + # t1 = time.time() + start_pos = row_lookup[rinds] + # print('startpos', time.time() - t1) + # t1 = time.time() + block_vals = block_lookup[rinds] + # print('blockvals', time.time() - t1) + # t1 = time.time() + unique, cnts = np.unique(block_vals, return_counts=True) + + # def transpose(self, + # order: Union[List[int], np.ndarray], + # permutation: Optional[np.ndarray] = None, + # return_permutation: Optional[bool] = False + # ) -> "BlockSparseTensor": + # """ + # Transpose the tensor into the new order `order`. This routine currently shuffles + # data. + # Args: + # order: The new order of indices. + # permutation: An np.ndarray of int for reshuffling the data, + # typically the output of a prior call to `transpose`. Passing `permutation` + # can greatly speed up the transposition. + # return_permutation: If `True`, return the the permutation data. + # Returns: + # BlockSparseTensor: The transposed tensor. 
+ # """ + # strides = _get_strides(self.dense_shape) + # dims = self.dense_shape + # charges = self.charges + # flows = self.flows + # partition = _find_best_partition(charges, flows, return_charges=False) + # tr_partition = _find_best_partition([charges[n] for n in order], + # [flows[n] for n in order], + # return_charges=False) + + # unique_row_charges = compute_unique_fused_charges(charges[0:partition], + # flows[0:partition]) + # unique_column_charges, column_dims = compute_fused_charge_degeneracies( + # charges[partition:], flows[partition:]) + + # common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) + # # print('row_charges', charges[0].charges) + # # print('col_charges', charges[1].charges) + # # print('common_charges', common_charges.charges) + # column_degeneracies = dict(zip(unique_column_charges * (-1), column_dims)) + # sp_row_blocks = find_sparse_positions(charges[0:partition], + # flows[0:partition], common_charges) + # sp_column_blocks = find_sparse_positions(charges[partition:], + # flows[partition:], + # common_charges * (-1)) + + # degeneracy_vector = np.empty( + # np.sum([len(v) for v in sp_row_blocks.values()]), dtype=np.int64) + # for c in common_charges: + # degeneracy_vector[sp_row_blocks[c]] = column_degeneracies[c] + # stop_positions = np.cumsum(degeneracy_vector) + # start_positions = stop_positions - degeneracy_vector + + # dense_row_blocks = { + # common_charges.get_item(n): find_dense_positions( + # charges[0:partition], + # flows[0:partition], + # common_charges[n], + # return_sorted=False) for n in range(len(common_charges)) + # } + # dense_column_blocks = { + # common_charges.get_item(n): find_dense_positions( + # charges[partition:], + # flows[partition:], + # common_charges[n] * (-1), + # return_sorted=False) for n in range(len(common_charges)) + # } + # dtype = charges[0].dtype + # drbs = list(dense_row_blocks.values()) + # block_dict = dict(zip(np.arange(len(drbs)), list(dense_row_blocks.keys()))) + + # 
dense_row_positions = np.concatenate(drbs) + # dense_block_numbers = np.concatenate([ + # np.full(len(drbs[n]), fill_value=n, dtype=np.int16) + # for n in range(len(drbs)) + # ]) + + # ind_sort = np.argsort(dense_row_positions) + # dense_row_positions = dense_row_positions[ind_sort] + # dense_block_number = dense_block_numbers[ind_sort] + + # #sp_column_positions = np.sort( + # # np.concatenate(list(sp_column_blocks.values()))) + # #dense_column_positions = np.sort( + # # np.concatenate(list(dense_column_blocks.values()))) + # #print('dense_row_positions:', dense_row_positions) + # #print('dense_column_positions:', dense_column_positions) + # #print('start_positions:', start_positions) + + # row_lookup = np.empty(dense_row_positions[-1] + 1, dtype=np.int64) + # block_lookup = np.empty(dense_row_positions[-1] + 1, dtype=np.int64) + # row_lookup[dense_row_positions] = start_positions + # block_lookup[dense_row_positions] = dense_block_number + # #column_lookup = np.empty(dense_column_positions[-1] + 1, dtype=np.int64) + # #column_lookup[dense_column_positions] = sp_column_positions + # # print('row_lookup', row_lookup) + # # print('col_lookup', column_lookup) + # # return + # # print('dense col pos', dense_column_positions) + # # print('sp_col_pos', sp_column_positions) + # data = np.empty(len(self.data), dtype=self.data.dtype) + # _, dense_blocks = _find_diagonal_dense_blocks( + # [charges[n] for n in order[0:tr_partition]], + # [charges[n] for n in order[tr_partition:]], + # flows[0:tr_partition], + # flows[tr_partition:], + # row_strides=strides[order[0:tr_partition]], + # column_strides=strides[order[tr_partition:]]) + + # # stride_arrays = [np.arange(dims[n]) * strides[n] for n in range(len(dims))] + # # tr_linear_positions = find_dense_positions([charges[n] for n in order], + # # [flows[n] for n in order], + # # charges[0].zero_charge) + # # tr_stride_arrays = [stride_arrays[n] for n in order] + + # # dense_permutation = _find_values_in_fused( + # # 
tr_linear_positions, fuse_ndarrays(tr_stride_arrays[0:tr_partition]), + # # fuse_ndarrays(tr_stride_arrays[partition:])) + + # # dense_permutation_2 = np.sort(np.concatenate([b[0] for b in dense_blocks])) + # # print('dense_perm == dense_perm2', + # # np.all(dense_permutation == dense_permutation_2)) + # column_dim = np.prod( + # [len(charges[n]) for n in range(partition, len(charges))]) + # for b in dense_blocks: + # # print(b[1]) + # # t1 = time.time() + # rinds, cinds = np.divmod(b[0], column_dim) + # # print('divmod', time.time() - t1) + # # t1 = time.time() + # start_pos = row_lookup[rinds] + # # print('startpos', time.time() - t1) + # # t1 = time.time() + # block_vals = block_lookup[rinds] + # # print('blockvals', time.time() - t1) + # # t1 = time.time() + # unique, cnts = np.unique(block_vals, return_counts=True) + # # print('unique', time.time() - t1) + # # for n in range(len(unique)): + + # # degen = column_degeneracies[block_dict[unique[n]]] + # # print(degen) + # # np.expand_dims(row_lookup[rinds[block_vals == unique[n]]], + # # 1) + np.arange(degen) + + # # transposed_positions = +column_lookup[cinds] + # # self.data[transposed_positions] + # # common_charges, blocks, start_positions_2, row_locations, column_degeneracies_2 = _find_diagonal_sparse_blocks( + # # data=[], + # # row_charges=charges[0:partition], + # # column_charges=charges[partition:], + # # row_flows=flows[:partition], + # # column_flows=flows[partition:], + # # return_data=False) + # # sp_row_positions_2 = np.sort(np.concatenate(list(row_locations.values()))) + # # sp_row_positions = np.sort(np.concatenate(list(sp_row_blocks.values()))) + # # print(np.all(sp_row_positions == sp_row_positions_2)) + # # print('asdf', np.all(start_positions == start_positions_2)) + # # print(start_positions) + # # print(column_dim) + # # print(partition, tr_partition) + # # print(dense_permutation) + # # print(tr_linear_positions) + # #rinds, cinds = np.divmod(dense_permutation, column_dim) + # 
#print(np.max(rinds), np.max(cinds), len(row_lookup), len(column_lookup)) + + # # print('row lookup', row_lookup) + # # print('rinds', rinds) + # # print('col lookup', column_lookup) + # # print('cinds', cinds) + # # print('dense col pos', dense_column_positions) + # # u1 = np.unique(cinds) + # # u2 = np.unique(dense_column_positions) + # # print(np.all(u1 == u2)) + # # print('row_lookup[rinds]', row_lookup[rinds]) + # # print('col_lookup[cinds]', column_lookup[cinds]) + + # # transposed_positions = row_lookup[rinds] + column_lookup[cinds] + # # self.data[transposed_positions] + + def transpose_2(self, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False + ) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order`. This routine currently shuffles data. @@ -1373,6 +1583,8 @@ def transpose_2( #find the best partition into left and right charges left_charges, right_charges, _ = _find_best_partition( flat_charges, flat_flows) + linear_positions = find_dense_positions([left_charges, right_charges], + [1, 1], left_charges.zero_charge) flat_tr_charges = [flat_charges[n] for n in flat_order] flat_tr_flows = [flat_flows[n] for n in flat_order] @@ -1382,9 +1594,9 @@ def transpose_2( tr_left_charges, tr_right_charges, partition = _find_best_partition( flat_tr_charges, flat_tr_flows) t1 = time.time() - tr_linear_positions = find_dense_positions([tr_left_charges], [1], - [tr_right_charges], [1], - tr_left_charges.zero_charge) + tr_linear_positions = find_dense_positions( + [tr_left_charges, tr_right_charges], [1, 1], + tr_left_charges.zero_charge) print('finding dense positions in the transposed tensor: {}s'.format( time.time() - t1)) @@ -1396,11 +1608,11 @@ def transpose_2( dense_permutation = _find_values_in_fused( tr_linear_positions, fuse_ndarrays(stride_arrays[0:partition]), fuse_ndarrays(stride_arrays[partition::])) - t1 = time.time() - print(len(linear_positions), 
len(dense_permutation)) + #t1 = time.time() + #print(len(linear_positions), len(dense_permutation)) permutation = np.searchsorted(linear_positions, dense_permutation) - print( - 'finding the permutation with argsort: {}s'.format(time.time() - t1)) + #print('finding the permutation with argsort: {}s'.format(time.time() - + #t1)) self.indices = [self.indices[n] for n in order] self.data = self.data[permutation] @@ -1689,11 +1901,11 @@ def reshape(tensor: BlockSparseTensor, return result -def transpose( - tensor: BlockSparseTensor, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False) -> "BlockSparseTensor": +def transpose(tensor: BlockSparseTensor, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False + ) -> "BlockSparseTensor": """ Transpose `tensor` into the new order `order`. This routine currently shuffles data. @@ -1768,8 +1980,8 @@ def tensordot(tensor1: BlockSparseTensor, raise ValueError("axes1 and axes2 have incompatible elementary" " shapes {} and {}".format(elementary_1, elementary_2)) if not np.all( - np.array([i.flow for i in elementary_1]) == - (-1) * np.array([i.flow for i in elementary_2])): + np.array([i.flow for i in elementary_1]) == (-1) * + np.array([i.flow for i in elementary_2])): raise ValueError("axes1 and axes2 have incompatible elementary" " flows {} and {}".format( np.array([i.flow for i in elementary_1]), From 8a4f8eb4b0a4a3e57022f63877ae903ef42cfe4c Mon Sep 17 00:00:00 2001 From: Ashley Milsted Date: Tue, 14 Jan 2020 10:55:29 -0800 Subject: [PATCH 158/212] Fixes for contract_between(). (#421) * Fixes for contract_between(). * output_edge_ordering was not respected in trace or outer_product cases. * axis names are now applied *after* edge reordering * added some docstring to clarify ordering * avoid a warning when contracting all edges of one or more of the input tensors. 
* Split out ordering tests. Also improves the basic contract_between() test so that it outputs a non-symmetric matrix (now rectangular). --- tensornetwork/network_components.py | 109 +++++++++++++++------------- tensornetwork/tests/network_test.py | 72 ++++++++++++++++-- 2 files changed, 123 insertions(+), 58 deletions(-) diff --git a/tensornetwork/network_components.py b/tensornetwork/network_components.py index f8d713f58..38f51a995 100644 --- a/tensornetwork/network_components.py +++ b/tensornetwork/network_components.py @@ -1753,6 +1753,10 @@ def contract_between( ) -> BaseNode: """Contract all of the edges between the two given nodes. + If `output_edge_order` is not set, the output axes will be ordered as: + [...free axes of `node1`..., ...free axes of `node2`...]. Within the axes + of each node, the input order is preserved. + Args: node1: The first node. node2: The second node. @@ -1764,7 +1768,8 @@ def contract_between( contain all edges belonging to, but not shared by `node1` and `node2`. The axes of the new node will be permuted (if necessary) to match this ordering of Edges. - axis_names: An optional list of names for the axis of the new node + axis_names: An optional list of names for the axis of the new node in order + of the output axes. Returns: The new node created. @@ -1784,64 +1789,68 @@ def contract_between( node2.backend.name)) backend = node1.backend + shared_edges = get_shared_edges(node1, node2) # Trace edges cannot be contracted using tensordot. 
if node1 is node2: flat_edge = flatten_edges_between(node1, node2) if not flat_edge: raise ValueError("No trace edges found on contraction of edges between " "node '{}' and itself.".format(node1)) - return contract(flat_edge, name) - - shared_edges = get_shared_edges(node1, node2) - if not shared_edges: - if allow_outer_product: - return outer_product(node1, node2, name=name, axis_names=axis_names) - raise ValueError("No edges found between nodes '{}' and '{}' " - "and allow_outer_product=False.".format(node1, node2)) - - # Collect the axis of each node corresponding to each edge, in order. - # This specifies the contraction for tensordot. - # NOTE: The ordering of node references in each contraction edge is ignored. - axes1 = [] - axes2 = [] - for edge in shared_edges: - if edge.node1 is node1: - axes1.append(edge.axis1) - axes2.append(edge.axis2) - else: - axes1.append(edge.axis2) - axes2.append(edge.axis1) - - if output_edge_order: - # Determine heuristically if output transposition can be minimized by - # flipping the arguments to tensordot. - node1_output_axes = [] - node2_output_axes = [] - for (i, edge) in enumerate(output_edge_order): - if edge in shared_edges: - raise ValueError( - "Edge '{}' in output_edge_order is shared by the nodes to be " - "contracted: '{}' and '{}'.".format(edge, node1, node2)) - edge_nodes = set(edge.get_nodes()) - if node1 in edge_nodes: - node1_output_axes.append(i) - elif node2 in edge_nodes: - node2_output_axes.append(i) + new_node = contract(flat_edge, name) + elif not shared_edges: + if not allow_outer_product: + raise ValueError("No edges found between nodes '{}' and '{}' " + "and allow_outer_product=False.".format(node1, node2)) + new_node = outer_product(node1, node2, name=name) + else: + # Collect the axis of each node corresponding to each edge, in order. + # This specifies the contraction for tensordot. + # NOTE: The ordering of node references in each contraction edge is ignored. 
+ axes1 = [] + axes2 = [] + for edge in shared_edges: + if edge.node1 is node1: + axes1.append(edge.axis1) + axes2.append(edge.axis2) else: - raise ValueError( - "Edge '{}' in output_edge_order is not connected to node '{}' or " - "node '{}'".format(edge, node1, node2)) - if np.mean(node1_output_axes) > np.mean(node2_output_axes): - node1, node2 = node2, node1 - axes1, axes2 = axes2, axes1 - - new_tensor = backend.tensordot(node1.tensor, node2.tensor, [axes1, axes2]) - new_node = Node( - tensor=new_tensor, name=name, axis_names=axis_names, backend=backend) - # node1 and node2 get new edges in _remove_edges - _remove_edges(shared_edges, node1, node2, new_node) + axes1.append(edge.axis2) + axes2.append(edge.axis1) + + if output_edge_order: + # Determine heuristically if output transposition can be minimized by + # flipping the arguments to tensordot. + node1_output_axes = [] + node2_output_axes = [] + for (i, edge) in enumerate(output_edge_order): + if edge in shared_edges: + raise ValueError( + "Edge '{}' in output_edge_order is shared by the nodes to be " + "contracted: '{}' and '{}'.".format(edge, node1, node2)) + edge_nodes = set(edge.get_nodes()) + if node1 in edge_nodes: + node1_output_axes.append(i) + elif node2 in edge_nodes: + node2_output_axes.append(i) + else: + raise ValueError( + "Edge '{}' in output_edge_order is not connected to node '{}' or " + "node '{}'".format(edge, node1, node2)) + if node1_output_axes and node2_output_axes and ( + np.mean(node1_output_axes) > np.mean(node2_output_axes)): + node1, node2 = node2, node1 + axes1, axes2 = axes2, axes1 + + new_tensor = backend.tensordot(node1.tensor, node2.tensor, [axes1, axes2]) + new_node = Node( + tensor=new_tensor, name=name, backend=backend) + # node1 and node2 get new edges in _remove_edges + _remove_edges(shared_edges, node1, node2, new_node) + if output_edge_order: new_node = new_node.reorder_edges(list(output_edge_order)) + if axis_names: + new_node.add_axis_names(axis_names) + return new_node 
diff --git a/tensornetwork/tests/network_test.py b/tensornetwork/tests/network_test.py index 8efcd9d4d..f4e83a892 100644 --- a/tensornetwork/tests/network_test.py +++ b/tensornetwork/tests/network_test.py @@ -483,26 +483,45 @@ def test_flatten_all_edges(backend): def test_contract_between(backend): - a_val = np.ones((2, 3, 4, 5)) - b_val = np.ones((3, 5, 4, 2)) + a_val = np.random.rand(2, 3, 4, 5) + b_val = np.random.rand(3, 5, 6, 2) a = tn.Node(a_val, backend=backend) b = tn.Node(b_val, backend=backend) tn.connect(a[0], b[3]) tn.connect(b[1], a[3]) tn.connect(a[1], b[0]) - edge_a = a[2] - edge_b = b[2] - c = tn.contract_between(a, b, name="New Node") - c.reorder_edges([edge_a, edge_b]) + output_axis_names = ["a2", "b2"] + c = tn.contract_between(a, b, name="New Node", axis_names=output_axis_names) tn.check_correct({c}) # Check expected values. a_flat = np.reshape(np.transpose(a_val, (2, 1, 0, 3)), (4, 30)) - b_flat = np.reshape(np.transpose(b_val, (2, 0, 3, 1)), (4, 30)) + b_flat = np.reshape(np.transpose(b_val, (2, 0, 3, 1)), (6, 30)) final_val = np.matmul(a_flat, b_flat.T) assert c.name == "New Node" + assert c.axis_names == output_axis_names np.testing.assert_allclose(c.tensor, final_val) +def test_contract_between_output_edge_order(backend): + a_val = np.random.rand(2, 3, 4, 5) + b_val = np.random.rand(3, 5, 6, 2) + a = tn.Node(a_val, backend=backend) + b = tn.Node(b_val, backend=backend) + tn.connect(a[0], b[3]) + tn.connect(b[1], a[3]) + tn.connect(a[1], b[0]) + output_axis_names = ["b2", "a2"] + c = tn.contract_between(a, b, name="New Node", axis_names=output_axis_names, + output_edge_order=[b[2], a[2]]) + # Check expected values. 
+ a_flat = np.reshape(np.transpose(a_val, (2, 1, 0, 3)), (4, 30)) + b_flat = np.reshape(np.transpose(b_val, (2, 0, 3, 1)), (6, 30)) + final_val = np.matmul(a_flat, b_flat.T) + assert c.name == "New Node" + assert c.axis_names == output_axis_names + np.testing.assert_allclose(c.tensor, final_val.T) + + def test_contract_between_no_outer_product_value_error(backend): a_val = np.ones((2, 3, 4)) b_val = np.ones((5, 6, 7)) @@ -517,8 +536,45 @@ def test_contract_between_outer_product_no_value_error(backend): b_val = np.ones((5, 6, 7)) a = tn.Node(a_val, backend=backend) b = tn.Node(b_val, backend=backend) - c = tn.contract_between(a, b, allow_outer_product=True) + output_axis_names = ["a0", "a1", "a2", "b0", "b1", "b2"] + c = tn.contract_between(a, b, allow_outer_product=True, + axis_names=output_axis_names) assert c.shape == (2, 3, 4, 5, 6, 7) + assert c.axis_names == output_axis_names + + +def test_contract_between_outer_product_output_edge_order(backend): + a_val = np.ones((2, 3, 4)) + b_val = np.ones((5, 6, 7)) + a = tn.Node(a_val, backend=backend) + b = tn.Node(b_val, backend=backend) + output_axis_names = ["b0", "b1", "a0", "b2", "a1", "a2"] + c = tn.contract_between( + a, b, + allow_outer_product=True, + output_edge_order=[b[0], b[1], a[0], b[2], a[1], a[2]], + axis_names=output_axis_names) + assert c.shape == (5, 6, 2, 7, 3, 4) + assert c.axis_names == output_axis_names + + +def test_contract_between_trace(backend): + a_val = np.ones((2, 3, 2, 4)) + a = tn.Node(a_val, backend=backend) + tn.connect(a[0], a[2]) + c = tn.contract_between(a, a, axis_names=["1", "3"]) + assert c.shape == (3, 4) + assert c.axis_names == ["1", "3"] + + +def test_contract_between_trace_output_edge_order(backend): + a_val = np.ones((2, 3, 2, 4)) + a = tn.Node(a_val, backend=backend) + tn.connect(a[0], a[2]) + c = tn.contract_between(a, a, output_edge_order=[a[3], a[1]], + axis_names=["3", "1"]) + assert c.shape == (4, 3) + assert c.axis_names == ["3", "1"] def 
test_contract_parallel(backend): From 83c39decc69e712a986c4b380b0c08c143fe4408 Mon Sep 17 00:00:00 2001 From: mganahl Date: Tue, 14 Jan 2020 15:55:15 -0500 Subject: [PATCH 159/212] broken commit --- tensornetwork/block_tensor/block_tensor.py | 441 ++++++++++----------- 1 file changed, 213 insertions(+), 228 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 11de324fb..87b169659 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -1173,183 +1173,6 @@ def flows(self): def charges(self): return [i.charges for i in self.indices] - def transpose(self, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False - ) -> "BlockSparseTensor": - """ - Transpose the tensor into the new order `order`. This routine currently shuffles - data. - Args: - order: The new order of indices. - permutation: An np.ndarray of int for reshuffling the data, - typically the output of a prior call to `transpose`. Passing `permutation` - can greatly speed up the transposition. - return_permutation: If `True`, return the the permutation data. - Returns: - BlockSparseTensor: The transposed tensor. 
- """ - strides = _get_strides(self.dense_shape) - dims = self.dense_shape - charges = self.charges - flows = self.flows - partition = _find_best_partition(charges, flows, return_charges=False) - tr_partition = _find_best_partition([charges[n] for n in order], - [flows[n] for n in order], - return_charges=False) - - # unique_row_charges = compute_unique_fused_charges(charges[0:partition], - # flows[0:partition]) - # unique_column_charges, column_dims = compute_fused_charge_degeneracies( - # charges[partition:], flows[partition:]) - - # common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) - # column_degeneracies = dict(zip(unique_column_charges * (-1), column_dims)) - - tr_unique_row_charges = compute_unique_fused_charges( - [charges[n] for n in order[0:tr_partition]], - [flows[n] for n in order[0:tr_partition]]) - - tr_unique_column_charges = compute_unique_fused_charges( - [charges[n] for n in order[tr_partition:]], - [flows[n] for n in order[tr_partition:]]) - - tr_common_charges = tr_unique_row_charges.intersect( - tr_unique_column_charges * (-1)) - left_dense = { - tr_common_charges.get_item(m): _find_transposed_dense_positions( - charges=[charges[n] for n in order[0:tr_partition]], - flows=[flows[n] for n in order[0:tr_partition]], - target_charge=tr_common_charges[m], - strides=strides[order[0:tr_partition]]) - for m in range(len(tr_common_charges)) - } - right_dense = { - tr_common_charges.get_item(m): _find_transposed_dense_positions( - charges=[charges[n] for n in order[tr_partition:]], - flows=[flows[n] for n in order[tr_partition:]], - target_charge=tr_common_charges[m] * (-1), - strides=strides[order[tr_partition:]]) - for m in range(len(tr_common_charges)) - } - # cc, dense_blocks = _find_diagonal_dense_blocks( - # [charges[n] for n in order[0:tr_partition]], - # [charges[n] for n in order[tr_partition:]], - # [flows[n] for n in order[0:tr_partition]], - # [flows[n] for n in order[tr_partition:]], - # 
row_strides=strides[order[0:tr_partition]], - # column_strides=strides[order[tr_partition:]]) - row_dim = np.prod([len(charges[n]) for n in range(partition)]) - for n in range(len(tr_common_charges)): - c = tr_common_charges.get_item(n) - #d = dense_blocks[n] - tmp = fuse_ndarrays([left_dense[c], right_dense[c]]) - tmp2 = fuse_ndarrays( - [np.mod(left_dense[c], row_dim), - np.mod(right_dense[c], row_dim)]) - tmp3 = (tmp - tmp2) / row_dim - #print(np.all(tmp == d[0])) - - return - sp_row_blocks = find_sparse_positions(charges[0:partition], - flows[0:partition], common_charges) - sp_column_blocks = find_sparse_positions(charges[partition:], - flows[partition:], - common_charges * (-1)) - - degeneracy_vector = np.empty( - np.sum([len(v) for v in sp_row_blocks.values()]), dtype=np.int64) - for c in common_charges: - degeneracy_vector[sp_row_blocks[c]] = column_degeneracies[c] - stop_positions = np.cumsum(degeneracy_vector) - start_positions = stop_positions - degeneracy_vector - - dense_row_blocks = { - common_charges.get_item(n): find_dense_positions( - charges[0:partition], - flows[0:partition], - common_charges[n], - return_sorted=False) for n in range(len(common_charges)) - } - dense_column_blocks = { - common_charges.get_item(n): find_dense_positions( - charges[partition:], - flows[partition:], - common_charges[n] * (-1), - return_sorted=False) for n in range(len(common_charges)) - } - dtype = charges[0].dtype - drbs = list(dense_row_blocks.values()) - block_dict = dict(zip(np.arange(len(drbs)), list(dense_row_blocks.keys()))) - - dense_row_positions = np.concatenate(drbs) - dense_block_numbers = np.concatenate([ - np.full(len(drbs[n]), fill_value=n, dtype=np.int16) - for n in range(len(drbs)) - ]) - - ind_sort = np.argsort(dense_row_positions) - dense_row_positions = dense_row_positions[ind_sort] - dense_block_number = dense_block_numbers[ind_sort] - - #sp_column_positions = np.sort( - # np.concatenate(list(sp_column_blocks.values()))) - #dense_column_positions = 
np.sort( - # np.concatenate(list(dense_column_blocks.values()))) - #print('dense_row_positions:', dense_row_positions) - #print('dense_column_positions:', dense_column_positions) - #print('start_positions:', start_positions) - - row_lookup = np.empty(dense_row_positions[-1] + 1, dtype=np.int64) - block_lookup = np.empty(dense_row_positions[-1] + 1, dtype=np.int64) - row_lookup[dense_row_positions] = start_positions - block_lookup[dense_row_positions] = dense_block_number - #column_lookup = np.empty(dense_column_positions[-1] + 1, dtype=np.int64) - #column_lookup[dense_column_positions] = sp_column_positions - # print('row_lookup', row_lookup) - # print('col_lookup', column_lookup) - # return - # print('dense col pos', dense_column_positions) - # print('sp_col_pos', sp_column_positions) - data = np.empty(len(self.data), dtype=self.data.dtype) - _, dense_blocks = _find_diagonal_dense_blocks( - [charges[n] for n in order[0:tr_partition]], - [charges[n] for n in order[tr_partition:]], - flows[0:tr_partition], - flows[tr_partition:], - row_strides=strides[order[0:tr_partition]], - column_strides=strides[order[tr_partition:]]) - - # stride_arrays = [np.arange(dims[n]) * strides[n] for n in range(len(dims))] - # tr_linear_positions = find_dense_positions([charges[n] for n in order], - # [flows[n] for n in order], - # charges[0].zero_charge) - # tr_stride_arrays = [stride_arrays[n] for n in order] - - # dense_permutation = _find_values_in_fused( - # tr_linear_positions, fuse_ndarrays(tr_stride_arrays[0:tr_partition]), - # fuse_ndarrays(tr_stride_arrays[partition:])) - - # dense_permutation_2 = np.sort(np.concatenate([b[0] for b in dense_blocks])) - # print('dense_perm == dense_perm2', - # np.all(dense_permutation == dense_permutation_2)) - column_dim = np.prod( - [len(charges[n]) for n in range(partition, len(charges))]) - for b in dense_blocks: - # print(b[1]) - # t1 = time.time() - rinds, cinds = np.divmod(b[0], column_dim) - # print('divmod', time.time() - t1) - # t1 = 
time.time() - start_pos = row_lookup[rinds] - # print('startpos', time.time() - t1) - # t1 = time.time() - block_vals = block_lookup[rinds] - # print('blockvals', time.time() - t1) - # t1 = time.time() - unique, cnts = np.unique(block_vals, return_counts=True) - # def transpose(self, # order: Union[List[int], np.ndarray], # permutation: Optional[np.ndarray] = None, @@ -1376,16 +1199,59 @@ def transpose(self, # [flows[n] for n in order], # return_charges=False) - # unique_row_charges = compute_unique_fused_charges(charges[0:partition], - # flows[0:partition]) - # unique_column_charges, column_dims = compute_fused_charge_degeneracies( - # charges[partition:], flows[partition:]) + # # unique_row_charges = compute_unique_fused_charges(charges[0:partition], + # # flows[0:partition]) + # # unique_column_charges, column_dims = compute_fused_charge_degeneracies( + # # charges[partition:], flows[partition:]) + + # # common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) + # # column_degeneracies = dict(zip(unique_column_charges * (-1), column_dims)) - # common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) - # # print('row_charges', charges[0].charges) - # # print('col_charges', charges[1].charges) - # # print('common_charges', common_charges.charges) - # column_degeneracies = dict(zip(unique_column_charges * (-1), column_dims)) + # tr_unique_row_charges = compute_unique_fused_charges( + # [charges[n] for n in order[0:tr_partition]], + # [flows[n] for n in order[0:tr_partition]]) + + # tr_unique_column_charges = compute_unique_fused_charges( + # [charges[n] for n in order[tr_partition:]], + # [flows[n] for n in order[tr_partition:]]) + + # tr_common_charges = tr_unique_row_charges.intersect( + # tr_unique_column_charges * (-1)) + # left_dense = { + # tr_common_charges.get_item(m): _find_transposed_dense_positions( + # charges=[charges[n] for n in order[0:tr_partition]], + # flows=[flows[n] for n in order[0:tr_partition]], + # 
target_charge=tr_common_charges[m], + # strides=strides[order[0:tr_partition]]) + # for m in range(len(tr_common_charges)) + # } + # right_dense = { + # tr_common_charges.get_item(m): _find_transposed_dense_positions( + # charges=[charges[n] for n in order[tr_partition:]], + # flows=[flows[n] for n in order[tr_partition:]], + # target_charge=tr_common_charges[m] * (-1), + # strides=strides[order[tr_partition:]]) + # for m in range(len(tr_common_charges)) + # } + # # cc, dense_blocks = _find_diagonal_dense_blocks( + # # [charges[n] for n in order[0:tr_partition]], + # # [charges[n] for n in order[tr_partition:]], + # # [flows[n] for n in order[0:tr_partition]], + # # [flows[n] for n in order[tr_partition:]], + # # row_strides=strides[order[0:tr_partition]], + # # column_strides=strides[order[tr_partition:]]) + # row_dim = np.prod([len(charges[n]) for n in range(partition)]) + # for n in range(len(tr_common_charges)): + # c = tr_common_charges.get_item(n) + # #d = dense_blocks[n] + # tmp = fuse_ndarrays([left_dense[c], right_dense[c]]) + # tmp2 = fuse_ndarrays( + # [np.mod(left_dense[c], row_dim), + # np.mod(right_dense[c], row_dim)]) + # tmp3 = (tmp - tmp2) / row_dim + # #print(np.all(tmp == d[0])) + + # return # sp_row_blocks = find_sparse_positions(charges[0:partition], # flows[0:partition], common_charges) # sp_column_blocks = find_sparse_positions(charges[partition:], @@ -1483,48 +1349,167 @@ def transpose(self, # # print('blockvals', time.time() - t1) # # t1 = time.time() # unique, cnts = np.unique(block_vals, return_counts=True) - # # print('unique', time.time() - t1) - # # for n in range(len(unique)): - - # # degen = column_degeneracies[block_dict[unique[n]]] - # # print(degen) - # # np.expand_dims(row_lookup[rinds[block_vals == unique[n]]], - # # 1) + np.arange(degen) - - # # transposed_positions = +column_lookup[cinds] - # # self.data[transposed_positions] - # # common_charges, blocks, start_positions_2, row_locations, column_degeneracies_2 = 
_find_diagonal_sparse_blocks( - # # data=[], - # # row_charges=charges[0:partition], - # # column_charges=charges[partition:], - # # row_flows=flows[:partition], - # # column_flows=flows[partition:], - # # return_data=False) - # # sp_row_positions_2 = np.sort(np.concatenate(list(row_locations.values()))) - # # sp_row_positions = np.sort(np.concatenate(list(sp_row_blocks.values()))) - # # print(np.all(sp_row_positions == sp_row_positions_2)) - # # print('asdf', np.all(start_positions == start_positions_2)) - # # print(start_positions) - # # print(column_dim) - # # print(partition, tr_partition) - # # print(dense_permutation) - # # print(tr_linear_positions) - # #rinds, cinds = np.divmod(dense_permutation, column_dim) - # #print(np.max(rinds), np.max(cinds), len(row_lookup), len(column_lookup)) - - # # print('row lookup', row_lookup) - # # print('rinds', rinds) - # # print('col lookup', column_lookup) - # # print('cinds', cinds) - # # print('dense col pos', dense_column_positions) - # # u1 = np.unique(cinds) - # # u2 = np.unique(dense_column_positions) - # # print(np.all(u1 == u2)) - # # print('row_lookup[rinds]', row_lookup[rinds]) - # # print('col_lookup[cinds]', column_lookup[cinds]) - - # # transposed_positions = row_lookup[rinds] + column_lookup[cinds] - # # self.data[transposed_positions] + + def transpose(self, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False + ) -> "BlockSparseTensor": + """ + Transpose the tensor into the new order `order`. This routine currently shuffles + data. + Args: + order: The new order of indices. + permutation: An np.ndarray of int for reshuffling the data, + typically the output of a prior call to `transpose`. Passing `permutation` + can greatly speed up the transposition. + return_permutation: If `True`, return the the permutation data. + Returns: + BlockSparseTensor: The transposed tensor. 
+ """ + strides = _get_strides(self.dense_shape) + dims = self.dense_shape + charges = self.charges + flows = self.flows + partition = _find_best_partition(charges, flows, return_charges=False) + tr_partition = _find_best_partition([charges[n] for n in order], + [flows[n] for n in order], + return_charges=False) + + unique_row_charges = compute_unique_fused_charges(charges[0:partition], + flows[0:partition]) + unique_column_charges, column_dims = compute_fused_charge_degeneracies( + charges[partition:], flows[partition:]) + + common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) + column_degeneracies = dict(zip(unique_column_charges * (-1), column_dims)) + sp_row_blocks = find_sparse_positions(charges[0:partition], + flows[0:partition], common_charges) + + degeneracy_vector = np.empty( + np.sum([len(v) for v in sp_row_blocks.values()]), dtype=np.int64) + for c in common_charges: + degeneracy_vector[sp_row_blocks[c]] = column_degeneracies[c] + stop_positions = np.cumsum(degeneracy_vector) + start_positions = stop_positions - degeneracy_vector + + dense_row_blocks = { + common_charges.get_item(n): find_dense_positions( + charges[0:partition], + flows[0:partition], + common_charges[n], + return_sorted=False) for n in range(len(common_charges)) + } + dense_column_blocks = { + common_charges.get_item(n): find_dense_positions( + charges[partition:], + flows[partition:], + common_charges[n] * (-1), + return_sorted=False) for n in range(len(common_charges)) + } + dtype = charges[0].dtype + drbs = list(dense_row_blocks.values()) + block_dict = dict(zip(np.arange(len(drbs)), list(dense_row_blocks.keys()))) + + dense_row_positions = np.concatenate(drbs) + dense_block_numbers = np.concatenate([ + np.full(len(drbs[n]), fill_value=n, dtype=np.int16) + for n in range(len(drbs)) + ]) + + ind_sort = np.argsort(dense_row_positions) + dense_row_positions = dense_row_positions[ind_sort] + dense_block_number = dense_block_numbers[ind_sort] + + #sp_column_positions 
= np.sort( + # np.concatenate(list(sp_column_blocks.values()))) + #dense_column_positions = np.sort( + # np.concatenate(list(dense_column_blocks.values()))) + #print('dense_row_positions:', dense_row_positions) + #print('dense_column_positions:', dense_column_positions) + #print('start_positions:', start_positions) + + row_lookup = np.empty(dense_row_positions[-1] + 1, dtype=np.int64) + row_lookup[dense_row_positions] = start_positions + data = np.empty(len(self.data), dtype=self.data.dtype) + _, dense_blocks = _find_diagonal_dense_blocks( + [charges[n] for n in order[0:tr_partition]], + [charges[n] for n in order[tr_partition:]], + [flows[n] for n in order[0:tr_partition]], + [flows[n] for n in order[tr_partition:]], + row_strides=strides[order[0:tr_partition]], + column_strides=strides[order[tr_partition:]]) + + # stride_arrays = [np.arange(dims[n]) * strides[n] for n in range(len(dims))] + # tr_linear_positions = find_dense_positions([charges[n] for n in order], + # [flows[n] for n in order], + # charges[0].zero_charge) + # tr_stride_arrays = [stride_arrays[n] for n in order] + + # dense_permutation = _find_values_in_fused( + # tr_linear_positions, fuse_ndarrays(tr_stride_arrays[0:tr_partition]), + # fuse_ndarrays(tr_stride_arrays[partition:])) + + # dense_permutation_2 = np.sort(np.concatenate([b[0] for b in dense_blocks])) + # print('dense_perm == dense_perm2', + # np.all(dense_permutation == dense_permutation_2)) + column_dim = np.prod( + [len(charges[n]) for n in range(partition, len(charges))]) + for b in dense_blocks: + # print(b[1]) + # t1 = time.time() + rinds, cinds = np.divmod(b[0], column_dim) + # print('divmod', time.time() - t1) + # t1 = time.time() + start_pos = row_lookup[rinds] + # print('startpos', time.time() - t1) + # t1 = time.time() + block_vals = block_lookup[rinds] + # print('blockvals', time.time() - t1) + # t1 = time.time() + unique, cnts = np.unique(block_vals, return_counts=True) + # print('unique', time.time() - t1) + # for n in 
range(len(unique)): + + # degen = column_degeneracies[block_dict[unique[n]]] + # print(degen) + # np.expand_dims(row_lookup[rinds[block_vals == unique[n]]], + # 1) + np.arange(degen) + + # transposed_positions = +column_lookup[cinds] + # self.data[transposed_positions] + # common_charges, blocks, start_positions_2, row_locations, column_degeneracies_2 = _find_diagonal_sparse_blocks( + # data=[], + # row_charges=charges[0:partition], + # column_charges=charges[partition:], + # row_flows=flows[:partition], + # column_flows=flows[partition:], + # return_data=False) + # sp_row_positions_2 = np.sort(np.concatenate(list(row_locations.values()))) + # sp_row_positions = np.sort(np.concatenate(list(sp_row_blocks.values()))) + # print(np.all(sp_row_positions == sp_row_positions_2)) + # print('asdf', np.all(start_positions == start_positions_2)) + # print(start_positions) + # print(column_dim) + # print(partition, tr_partition) + # print(dense_permutation) + # print(tr_linear_positions) + #rinds, cinds = np.divmod(dense_permutation, column_dim) + #print(np.max(rinds), np.max(cinds), len(row_lookup), len(column_lookup)) + + # print('row lookup', row_lookup) + # print('rinds', rinds) + # print('col lookup', column_lookup) + # print('cinds', cinds) + # print('dense col pos', dense_column_positions) + # u1 = np.unique(cinds) + # u2 = np.unique(dense_column_positions) + # print(np.all(u1 == u2)) + # print('row_lookup[rinds]', row_lookup[rinds]) + # print('col_lookup[cinds]', column_lookup[cinds]) + + # transposed_positions = row_lookup[rinds] + column_lookup[cinds] + # self.data[transposed_positions] def transpose_2(self, order: Union[List[int], np.ndarray], From 0522f1aecdb2b6c9f15617933506c17898284d43 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 15 Jan 2020 08:21:51 -0500 Subject: [PATCH 160/212] broken --- tensornetwork/block_tensor/block_tensor.py | 310 +++++++++++++-------- tensornetwork/block_tensor/charge.py | 4 +- 2 files changed, 197 insertions(+), 117 deletions(-) 
diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 87b169659..556bd300b 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -30,6 +30,72 @@ Tensor = Any +def _compute_sparse_lookups(row_charges, row_flows, column_charges, + column_flows): + + fused_column_charges = fuse_charges(column_charges, column_flows) + fused_row_charges = fuse_charges(row_charges, row_flows) + unique_column_charges, column_inverse = fused_column_charges.unique( + return_inverse=True) + unique_row_charges, row_inverse = fused_column_charges.unique( + return_inverse=True) + common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) + + col_ind_sort = np.argsort(column_inverse, kind='stable') + row_ind_sort = np.argsort(row_inverse, kind='stable') + _, col_charge_degeneracies = compute_fused_charge_degeneracies( + column_charges, column_flows) + + tmp = dict(zip(compute_fused_charge_degeneracies(row_charges, row_flows))) + + # labelsorted_indices = column_inverse[col_ind_sort] + # tmp = np.nonzero( + # np.append(labelsorted_indices, unique_column_charges.charges.shape[0] + 1) - + # np.append(labelsorted_indices[0], labelsorted_indices))[0] + #charge_degeneracies = tmp - np.append(0, tmp[0:-1]) + col_start_positions = np.cumsum(np.append(0, col_charge_degeneracies)) + row_start_positions = np.cumsum(np.append(0, [tmp[]])) + column_lookup = np.empty(len(fused_column_charges), dtype=np.int64) + row_lookup = np.zeros(len(fused_row_charges), dtype=np.int64) + is_relevant = unique_column_charges.isin(common_charges * (-1)) + for n in range(len(unique_column_charges)): + if is_relevant[n]: + column_lookup[col_ind_sort[col_start_positions[n]:col_start_positions[ + n + 1]]] = np.arange(col_charge_degeneracies[n]) + + row_lookup[row_ind_sort[row_start_positions[n]:row_start_positions[ + n + 1]]] = col_charge_degeneracies[n] + + return np.append(0, np.cumsum(row_lookup[0:-1])), 
column_lookup + + +def _compute_sparse_lookup(charges, flows, target_charges=None): + fused = fuse_charges(charges, flows) + unique, inverse = fused.unique(return_inverse=True) + ind_sort = np.argsort(inverse, kind='stable') + _, charge_degeneracies = compute_fused_charge_degeneracies(charges, flows) + + # labelsorted_indices = inverse[ind_sort] + # tmp = np.nonzero( + # np.append(labelsorted_indices, unique.charges.shape[0] + 1) - + # np.append(labelsorted_indices[0], labelsorted_indices))[0] + # charge_degeneracies = tmp - np.append(0, tmp[0:-1]) + start_positions = np.cumsum(np.append(0, charge_degeneracies)) + lookup = np.empty(len(fused), dtype=np.int64) + if target_charges is not None: + is_relevant = unique.isin(target_charges) + for n in range(len(unique)): + if is_relevant[n]: + lookup[ind_sort[start_positions[n]:start_positions[n + 1]]] = np.arange( + charge_degeneracies[n]) + else: + for n in range(len(unique)): + lookup[ind_sort[start_positions[n]:start_positions[n + 1]]] = np.arange( + charge_degeneracies[n]) + + return lookup + + def _get_strides(dims): return np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) @@ -159,9 +225,8 @@ def compute_fused_charge_degeneracies( # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". 
- accumulated_charges, accumulated_degeneracies = (charges[0] * - flows[0]).unique( - return_counts=True) + accumulated_charges, accumulated_degeneracies = ( + charges[0] * flows[0]).unique(return_counts=True) for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor @@ -819,10 +884,10 @@ def _find_transposed_dense_positions( # return np.concatenate(indices) -def find_sparse_positions(charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[int, bool]], - target_charges: Union[BaseCharge, ChargeCollection] - ) -> Dict: +def find_sparse_positions( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[int, bool]], + target_charges: Union[BaseCharge, ChargeCollection]) -> Dict: """ Find the sparse locations of elements (i.e. the index-values within the SPARSE tensor) in the vector `fused_charges` (resulting from @@ -916,8 +981,8 @@ def find_sparse_positions(charges: List[Union[BaseCharge, ChargeCollection]], target_charge = target_charges[n] right_indices[(left_charge.get_item(0), target_charge.get_item(0))] = np.nonzero( - tmp_relevant_right_charges == (target_charge + - left_charge * (-1)))[0] + tmp_relevant_right_charges == ( + target_charge + left_charge * (-1)))[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy @@ -1350,11 +1415,11 @@ def charges(self): # # t1 = time.time() # unique, cnts = np.unique(block_vals, return_counts=True) - def transpose(self, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False - ) -> "BlockSparseTensor": + def transpose_a( + self, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order`. This routine currently shuffles data. 
@@ -1390,8 +1455,7 @@ def transpose(self, np.sum([len(v) for v in sp_row_blocks.values()]), dtype=np.int64) for c in common_charges: degeneracy_vector[sp_row_blocks[c]] = column_degeneracies[c] - stop_positions = np.cumsum(degeneracy_vector) - start_positions = stop_positions - degeneracy_vector + start_positions = np.cumsum(degeneracy_vector) - degeneracy_vector dense_row_blocks = { common_charges.get_item(n): find_dense_positions( @@ -1400,39 +1464,99 @@ def transpose(self, common_charges[n], return_sorted=False) for n in range(len(common_charges)) } - dense_column_blocks = { - common_charges.get_item(n): find_dense_positions( - charges[partition:], - flows[partition:], - common_charges[n] * (-1), - return_sorted=False) for n in range(len(common_charges)) - } - dtype = charges[0].dtype - drbs = list(dense_row_blocks.values()) - block_dict = dict(zip(np.arange(len(drbs)), list(dense_row_blocks.keys()))) - - dense_row_positions = np.concatenate(drbs) - dense_block_numbers = np.concatenate([ - np.full(len(drbs[n]), fill_value=n, dtype=np.int16) - for n in range(len(drbs)) - ]) + dense_row_positions = np.sort( + np.concatenate(list(dense_row_blocks.values()))) + row_lookup = np.zeros(dense_row_positions[-1] + 1, dtype=np.int64) + row_lookup[dense_row_positions] = start_positions + return row_lookup + column_lookup = _compute_sparse_lookup(charges[partition:], + flows[partition:], + common_charges * (-1)) + data = np.empty(len(self.data), dtype=self.data.dtype) + cs, dense_blocks = _find_diagonal_dense_blocks( + [charges[n] for n in order[0:tr_partition]], + [charges[n] for n in order[tr_partition:]], + [flows[n] for n in order[0:tr_partition]], + [flows[n] for n in order[tr_partition:]], + row_strides=strides[order[0:tr_partition]], + column_strides=strides[order[tr_partition:]]) - ind_sort = np.argsort(dense_row_positions) - dense_row_positions = dense_row_positions[ind_sort] - dense_block_number = dense_block_numbers[ind_sort] + column_dim = np.prod( + 
[len(charges[n]) for n in range(partition, len(charges))]) + transposed_positions = {} + for n in range(len(dense_blocks)): + b = dense_blocks[n] + rinds, cinds = np.divmod(b[0], column_dim) + start_pos = row_lookup[rinds] + transposed_positions[cs.get_item( + n)] = row_lookup[rinds] + column_lookup[cinds] + #self.data[transposed_positions] - #sp_column_positions = np.sort( - # np.concatenate(list(sp_column_blocks.values()))) - #dense_column_positions = np.sort( - # np.concatenate(list(dense_column_blocks.values()))) - #print('dense_row_positions:', dense_row_positions) - #print('dense_column_positions:', dense_column_positions) - #print('start_positions:', start_positions) + return transposed_positions - row_lookup = np.empty(dense_row_positions[-1] + 1, dtype=np.int64) - row_lookup[dense_row_positions] = start_positions + def transpose_b( + self, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False) -> "BlockSparseTensor": + """ + Transpose the tensor into the new order `order`. This routine currently shuffles + data. + Args: + order: The new order of indices. + permutation: An np.ndarray of int for reshuffling the data, + typically the output of a prior call to `transpose`. Passing `permutation` + can greatly speed up the transposition. + return_permutation: If `True`, return the the permutation data. + Returns: + BlockSparseTensor: The transposed tensor. 
+ """ + strides = _get_strides(self.dense_shape) + dims = self.dense_shape + charges = self.charges + flows = self.flows + partition = _find_best_partition(charges, flows, return_charges=False) + tr_partition = _find_best_partition([charges[n] for n in order], + [flows[n] for n in order], + return_charges=False) + + # unique_row_charges = compute_unique_fused_charges(charges[0:partition], + # flows[0:partition]) + # unique_column_charges, column_dims = compute_fused_charge_degeneracies( + # charges[partition:], flows[partition:]) + + # common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) + # column_degeneracies = dict(zip(unique_column_charges * (-1), column_dims)) + # sp_row_blocks = find_sparse_positions(charges[0:partition], + # flows[0:partition], common_charges) + + # degeneracy_vector = np.empty( + # np.sum([len(v) for v in sp_row_blocks.values()]), dtype=np.int64) + # for c in common_charges: + # degeneracy_vector[sp_row_blocks[c]] = column_degeneracies[c] + # start_positions = np.cumsum(degeneracy_vector) - degeneracy_vector + + # dense_row_blocks = { + # common_charges.get_item(n): find_dense_positions( + # charges[0:partition], + # flows[0:partition], + # common_charges[n], + # return_sorted=False) for n in range(len(common_charges)) + # } + # dense_column_blocks = { + # common_charges.get_item(n): find_dense_positions( + # charges[partition:], + # flows[partition:], + # common_charges[n] * (-1), + # return_sorted=False) for n in range(len(common_charges)) + # } + + row_lookup, column_lookup = _compute_sparse_lookups( + charges[0:partition], flows[0:partition], charges[partition:], + flows[partition:]) + return row_lookup data = np.empty(len(self.data), dtype=self.data.dtype) - _, dense_blocks = _find_diagonal_dense_blocks( + cs, dense_blocks = _find_diagonal_dense_blocks( [charges[n] for n in order[0:tr_partition]], [charges[n] for n in order[tr_partition:]], [flows[n] for n in order[0:tr_partition]], @@ -1440,82 +1564,36 @@ def 
transpose(self, row_strides=strides[order[0:tr_partition]], column_strides=strides[order[tr_partition:]]) + column_dim = np.prod( + [len(charges[n]) for n in range(partition, len(charges))]) + transposed_positions = {} + for n in range(len(dense_blocks)): + b = dense_blocks[n] + rinds, cinds = np.divmod(b[0], column_dim) + start_pos = row_lookup[rinds] + transposed_positions[cs.get_item( + n)] = row_lookup[rinds] + column_lookup[cinds] + #self.data[transposed_positions] + + return transposed_positions + # stride_arrays = [np.arange(dims[n]) * strides[n] for n in range(len(dims))] # tr_linear_positions = find_dense_positions([charges[n] for n in order], # [flows[n] for n in order], # charges[0].zero_charge) # tr_stride_arrays = [stride_arrays[n] for n in order] - # dense_permutation = _find_values_in_fused( # tr_linear_positions, fuse_ndarrays(tr_stride_arrays[0:tr_partition]), # fuse_ndarrays(tr_stride_arrays[partition:])) - - # dense_permutation_2 = np.sort(np.concatenate([b[0] for b in dense_blocks])) - # print('dense_perm == dense_perm2', - # np.all(dense_permutation == dense_permutation_2)) - column_dim = np.prod( - [len(charges[n]) for n in range(partition, len(charges))]) - for b in dense_blocks: - # print(b[1]) - # t1 = time.time() - rinds, cinds = np.divmod(b[0], column_dim) - # print('divmod', time.time() - t1) - # t1 = time.time() - start_pos = row_lookup[rinds] - # print('startpos', time.time() - t1) - # t1 = time.time() - block_vals = block_lookup[rinds] - # print('blockvals', time.time() - t1) - # t1 = time.time() - unique, cnts = np.unique(block_vals, return_counts=True) - # print('unique', time.time() - t1) - # for n in range(len(unique)): - - # degen = column_degeneracies[block_dict[unique[n]]] - # print(degen) - # np.expand_dims(row_lookup[rinds[block_vals == unique[n]]], - # 1) + np.arange(degen) - - # transposed_positions = +column_lookup[cinds] - # self.data[transposed_positions] - # common_charges, blocks, start_positions_2, row_locations, 
column_degeneracies_2 = _find_diagonal_sparse_blocks( - # data=[], - # row_charges=charges[0:partition], - # column_charges=charges[partition:], - # row_flows=flows[:partition], - # column_flows=flows[partition:], - # return_data=False) - # sp_row_positions_2 = np.sort(np.concatenate(list(row_locations.values()))) - # sp_row_positions = np.sort(np.concatenate(list(sp_row_blocks.values()))) - # print(np.all(sp_row_positions == sp_row_positions_2)) - # print('asdf', np.all(start_positions == start_positions_2)) - # print(start_positions) - # print(column_dim) - # print(partition, tr_partition) - # print(dense_permutation) - # print(tr_linear_positions) #rinds, cinds = np.divmod(dense_permutation, column_dim) - #print(np.max(rinds), np.max(cinds), len(row_lookup), len(column_lookup)) - - # print('row lookup', row_lookup) - # print('rinds', rinds) - # print('col lookup', column_lookup) - # print('cinds', cinds) - # print('dense col pos', dense_column_positions) - # u1 = np.unique(cinds) - # u2 = np.unique(dense_column_positions) - # print(np.all(u1 == u2)) - # print('row_lookup[rinds]', row_lookup[rinds]) - # print('col_lookup[cinds]', column_lookup[cinds]) - # transposed_positions = row_lookup[rinds] + column_lookup[cinds] # self.data[transposed_positions] - def transpose_2(self, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False - ) -> "BlockSparseTensor": + def transpose_2( + self, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False) -> "BlockSparseTensor": """ Transpose the tensor into the new order `order`. This routine currently shuffles data. 
@@ -1886,11 +1964,11 @@ def reshape(tensor: BlockSparseTensor, return result -def transpose(tensor: BlockSparseTensor, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False - ) -> "BlockSparseTensor": +def transpose( + tensor: BlockSparseTensor, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False) -> "BlockSparseTensor": """ Transpose `tensor` into the new order `order`. This routine currently shuffles data. @@ -1965,8 +2043,8 @@ def tensordot(tensor1: BlockSparseTensor, raise ValueError("axes1 and axes2 have incompatible elementary" " shapes {} and {}".format(elementary_1, elementary_2)) if not np.all( - np.array([i.flow for i in elementary_1]) == (-1) * - np.array([i.flow for i in elementary_2])): + np.array([i.flow for i in elementary_1]) == + (-1) * np.array([i.flow for i in elementary_2])): raise ValueError("axes1 and axes2 have incompatible elementary" " flows {} and {}".format( np.array([i.flow for i in elementary_1]), diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 1b542b813..8379fd561 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -377,7 +377,9 @@ def zero_charge(self): def __iter__(self): return iter(self.charges) - def intersect(self, other: "BaseCharge") -> "BaseCharge": + def intersect(self, + other: "BaseCharge", + return_indices: Optional[bool] = False) -> "BaseCharge": if not np.all(self.shifts == other.shifts): raise ValueError( "Cannot intersect charges with different shifts {} and {}".format( From 58168660cd58f3dc42883135a6993fdc84fceafb Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 15 Jan 2020 14:40:19 -0500 Subject: [PATCH 161/212] added `return_indices` to intersect --- tensornetwork/block_tensor/charge.py | 56 ++++++++++++++++++++++------ 1 file changed, 44 insertions(+), 12 deletions(-) diff 
--git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 8379fd561..385094f58 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -384,11 +384,16 @@ def intersect(self, raise ValueError( "Cannot intersect charges with different shifts {} and {}".format( self.shifts, other.shifts)) + if return_indices: + charges, comm1, comm2 = np.intersect1d( + self.charges, other.charges, return_indices=return_indices) + else: + charges = np.intersect1d(self.charges, other.charges) obj = self.__new__(type(self)) - obj.__init__( - charges=[np.intersect1d(self.charges, other.charges)], - shifts=self.shifts) + obj.__init__(charges=[charges], shifts=self.shifts) + if return_indices: + return obj, comm1, comm2 return obj @@ -952,17 +957,44 @@ def zero_charge(self): obj.__init__(charges=[c.zero_charge for c in self.charges]) return obj - def intersect(self, other: "ChargeCollection") -> "ChargeCollection": - self_unique = self.unique() - other_unique = other.unique() - concatenated = self_unique.concatenate(other_unique) - tmp_unique, counts = concatenated.unique(return_counts=True) - return tmp_unique[counts == 2] + def intersect(self, + other: "ChargeCollection", + return_indices: Optional[bool] = False) -> "ChargeCollection": + if return_indices: + ua, ia = self.unique(return_index=True) + ub, ib = other.unique(return_index=True) + conc = ua.concatenate(ub) + uab, iab, cntab = conc.unique(return_index=True, return_counts=True) + intersection = uab[cntab == 2] + comm1 = np.argmax( + np.logical_and.reduce( + np.repeat( + np.expand_dims(self._stacked_charges, 2), + intersection._stacked_charges.shape[0], + axis=2) == np.expand_dims(intersection._stacked_charges.T, 0), + axis=1), + axis=0) + comm2 = np.argmax( + np.logical_and.reduce( + np.repeat( + np.expand_dims(other._stacked_charges, 2), + intersection._stacked_charges.shape[0], + axis=2) == np.expand_dims(intersection._stacked_charges.T, 0), + axis=1), + 
axis=0) + return intersection, comm1, comm2 + + else: + self_unique = self.unique() + other_unique = other.unique() + concatenated = self_unique.concatenate(other_unique) + tmp_unique, counts = concatenated.unique(return_counts=True) + return tmp_unique[counts == 2] -def fuse_charges( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]]) -> Union[BaseCharge, ChargeCollection]: +def fuse_charges(charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[bool, int]] + ) -> Union[BaseCharge, ChargeCollection]: """ Fuse all `charges` into a new charge. Charges are fused from "right to left", From d0bb7878fa3ee9e48eb86a5aca20ff5939725d42 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 15 Jan 2020 16:39:10 -0500 Subject: [PATCH 162/212] faster transpose + tensordot implementation --- tensornetwork/block_tensor/block_tensor.py | 970 +++++---------------- 1 file changed, 199 insertions(+), 771 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 556bd300b..0d88dd444 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -33,69 +33,45 @@ def _compute_sparse_lookups(row_charges, row_flows, column_charges, column_flows): + column_flows = list(-np.asarray(column_flows)) fused_column_charges = fuse_charges(column_charges, column_flows) fused_row_charges = fuse_charges(row_charges, row_flows) unique_column_charges, column_inverse = fused_column_charges.unique( return_inverse=True) - unique_row_charges, row_inverse = fused_column_charges.unique( + unique_row_charges, row_inverse = fused_row_charges.unique( return_inverse=True) - common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) + common_charges, comm_row, comm_col = unique_row_charges.intersect( + unique_column_charges, return_indices=True) col_ind_sort = np.argsort(column_inverse, kind='stable') row_ind_sort = np.argsort(row_inverse, 
kind='stable') _, col_charge_degeneracies = compute_fused_charge_degeneracies( column_charges, column_flows) - - tmp = dict(zip(compute_fused_charge_degeneracies(row_charges, row_flows))) - + _, row_charge_degeneracies = compute_fused_charge_degeneracies( + row_charges, row_flows) # labelsorted_indices = column_inverse[col_ind_sort] # tmp = np.nonzero( # np.append(labelsorted_indices, unique_column_charges.charges.shape[0] + 1) - # np.append(labelsorted_indices[0], labelsorted_indices))[0] #charge_degeneracies = tmp - np.append(0, tmp[0:-1]) + col_start_positions = np.cumsum(np.append(0, col_charge_degeneracies)) - row_start_positions = np.cumsum(np.append(0, [tmp[]])) + row_start_positions = np.cumsum(np.append(0, row_charge_degeneracies)) column_lookup = np.empty(len(fused_column_charges), dtype=np.int64) row_lookup = np.zeros(len(fused_row_charges), dtype=np.int64) - is_relevant = unique_column_charges.isin(common_charges * (-1)) - for n in range(len(unique_column_charges)): - if is_relevant[n]: - column_lookup[col_ind_sort[col_start_positions[n]:col_start_positions[ - n + 1]]] = np.arange(col_charge_degeneracies[n]) - - row_lookup[row_ind_sort[row_start_positions[n]:row_start_positions[ - n + 1]]] = col_charge_degeneracies[n] + for n in range(len(common_charges)): + column_lookup[col_ind_sort[col_start_positions[ + comm_col[n]]:col_start_positions[comm_col[n] + 1]]] = np.arange( + col_charge_degeneracies[comm_col[n]]) + row_start_positions[comm_row[n]] + row_start_positions[comm_row[n] + 1] + row_lookup[ + row_ind_sort[row_start_positions[comm_row[n]]:row_start_positions[ + comm_row[n] + 1]]] = col_charge_degeneracies[comm_col[n]] return np.append(0, np.cumsum(row_lookup[0:-1])), column_lookup -def _compute_sparse_lookup(charges, flows, target_charges=None): - fused = fuse_charges(charges, flows) - unique, inverse = fused.unique(return_inverse=True) - ind_sort = np.argsort(inverse, kind='stable') - _, charge_degeneracies = 
compute_fused_charge_degeneracies(charges, flows) - - # labelsorted_indices = inverse[ind_sort] - # tmp = np.nonzero( - # np.append(labelsorted_indices, unique.charges.shape[0] + 1) - - # np.append(labelsorted_indices[0], labelsorted_indices))[0] - # charge_degeneracies = tmp - np.append(0, tmp[0:-1]) - start_positions = np.cumsum(np.append(0, charge_degeneracies)) - lookup = np.empty(len(fused), dtype=np.int64) - if target_charges is not None: - is_relevant = unique.isin(target_charges) - for n in range(len(unique)): - if is_relevant[n]: - lookup[ind_sort[start_positions[n]:start_positions[n + 1]]] = np.arange( - charge_degeneracies[n]) - else: - for n in range(len(unique)): - lookup[ind_sort[start_positions[n]:start_positions[n + 1]]] = np.arange( - charge_degeneracies[n]) - - return lookup - - def _get_strides(dims): return np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) @@ -225,8 +201,9 @@ def compute_fused_charge_degeneracies( # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". 
- accumulated_charges, accumulated_degeneracies = ( - charges[0] * flows[0]).unique(return_counts=True) + accumulated_charges, accumulated_degeneracies = (charges[0] * + flows[0]).unique( + return_counts=True) for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor @@ -400,28 +377,10 @@ def _find_diagonal_sparse_blocks( row_locations = find_sparse_positions( charges=row_charges, flows=row_flows, target_charges=common_charges) - # elif len(row_charges) == 1: - # fused_row_charges = fuse_charges(row_charges, row_flows) - - # #get the unique row-charges - # #unique_row_charges = fused_row_charges.unique() - # #get the charges common to rows and columns (only those matter) - # #get the charges common to rows and columns (only those matter) - # #common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) - # relevant_fused_row_charges = fused_row_charges[fused_row_charges.isin( - # common_charges)] - # row_locations = {} - # for c in common_charges: - # #c = common_charges.get_item(n) - # row_locations[c] = np.nonzero(relevant_fused_row_charges == c)[0] - # else: - # raise ValueError('Found an empty sequence for `row_charges`') - degeneracy_vector = np.empty( np.sum([len(v) for v in row_locations.values()]), dtype=np.int64) #for each charge `c` in `common_charges` we generate a boolean mask #for indexing the positions where `relevant_column_charges` has a value of `c`. 
- for c in common_charges: degeneracy_vector[row_locations[c]] = column_degeneracies[c] @@ -438,8 +397,7 @@ def _find_diagonal_sparse_blocks( # each row with charge `c=0` within the data vector are then simply obtained using # masks[0] = [True, False, True, True, False] # and `stop_positions[masks[0]] - column_degeneracies[0]` - stop_positions = np.cumsum(degeneracy_vector) - start_positions = stop_positions - degeneracy_vector + start_positions = np.cumsum(degeneracy_vector) - degeneracy_vector blocks = [] for c in common_charges: @@ -514,12 +472,13 @@ def _find_diagonal_dense_blocks( #we only care about their degeneracies, not their order; that's much faster #to compute since we don't have to fuse all charges explicitly #`compute_fused_charge_degeneracies` multiplies flows into the column_charges + t1 = time.time() unique_column_charges = compute_unique_fused_charges(column_charges, column_flows) unique_row_charges = compute_unique_fused_charges(row_charges, row_flows) #get the charges common to rows and columns (only those matter) common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) - + print('_find_diagonal_sparse_blocks: unique charges ', time.time() - t1) if ((row_strides is None) and (column_strides is not None)) or ((row_strides is not None) and (column_strides is None)): @@ -527,7 +486,7 @@ def _find_diagonal_dense_blocks( "have to be passed simultaneously." " Found `row_strides={}` and " "`column_strides={}`".format(row_strides, column_strides)) - + t1 = time.time() if row_strides is not None: row_locations = { common_charges.get_item(n): _find_transposed_dense_positions( @@ -561,6 +520,7 @@ def _find_diagonal_dense_blocks( target_charge=common_charges[n] * (-1)) for n in range(len(common_charges)) } + print('finding locations:', time.time() - t1) blocks = [] for c in common_charges: #numpy broadcasting is substantially faster than kron! 
@@ -805,89 +765,10 @@ def _find_transposed_dense_positions( return np.array([]) -# def find_dense_positions( -# left_charges: Union[BaseCharge, ChargeCollection], left_flow: int, -# right_charges: Union[BaseCharge, ChargeCollection], right_flow: int, -# target_charge: Union[BaseCharge, ChargeCollection]) -> np.ndarray: -# """ -# Find the dense locations of elements (i.e. the index-values within the DENSE tensor) -# in the vector `fused_charges` (resulting from fusing np.ndarrays -# `left_charges` and `right_charges`) that have a value of `target_charge`. -# For example, given -# ``` -# left_charges = [-2,0,1,0,0] -# right_charges = [-1,0,2,1] -# target_charge = 0 -# fused_charges = fuse_charges([left_charges, right_charges],[1,1]) -# print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] -# ``` -# we want to find the all different blocks -# that fuse to `target_charge=0`, i.e. where `fused_charges==0`, -# together with their corresponding index-values of the data in the dense array. -# `find_dense_blocks` returns a dict mapping tuples `(left_charge, right_charge)` -# to an array of integers. -# For the above example, we get: -# * for `left_charge` = -2 and `right_charge` = 2 we get an array [2]. Thus, `fused_charges[2]` -# was obtained from fusing -2 and 2. -# * for `left_charge` = 0 and `right_charge` = 0 we get an array [5, 13, 17]. Thus, -# `fused_charges[5,13,17]` were obtained from fusing 0 and 0. -# * for `left_charge` = 1 and `right_charge` = -1 we get an array [8]. Thus, `fused_charges[8]` -# was obtained from fusing 1 and -1. -# Args: -# left_charges: An np.ndarray of integer charges. -# left_flow: The flow direction of the left charges. -# right_charges: An np.ndarray of integer charges. -# right_flow: The flow direction of the right charges. -# target_charge: The target charge. -# Returns: -# np.ndarray: The indices of the elements fusing to `target_charge`. 
-# """ -# _check_flows([left_flow, right_flow]) - -# t1 = time.time() -# unique_left, left_degeneracies = left_charges.unique(return_counts=True) -# unique_right, right_degeneracies = right_charges.unique(return_counts=True) -# print('finding unique values: {}s'.format(time.time() - t1)) - -# t1 = time.time() -# tmp_charges = (target_charge + (unique_right * right_flow * (-1))) * left_flow -# concatenated = unique_left.concatenate(tmp_charges) -# tmp_unique, counts = concatenated.unique(return_counts=True) -# common_charges = tmp_unique[ -# counts == 2] #common_charges is a BaseCharge or ChargeCollection -# print('finding common charges: {}s'.format(time.time() - t1)) - -# right_locations = {} -# t1 = time.time() -# for n in range(len(common_charges)): -# c = common_charges[n] - -# right_charge = (target_charge + (c * left_flow * (-1))) * right_flow -# right_locations[right_charge.get_item(0)] = np.nonzero( -# right_charges == right_charge)[0] -# print('finding right locations: {}s'.format(time.time() - t1)) -# len_right_charges = len(right_charges) -# indices = [] -# t1 = time.time() -# print(len(left_charges)) -# for n in range(len(left_charges)): -# c = left_charges[n] -# right_charge = (target_charge + (c * left_flow * (-1))) * right_flow -# #print(' fusing charges: {}s'.format(time.time() - t1)) -# if c not in common_charges: -# continue -# #t1 = time.time() -# indices.append(n * len_right_charges + -# right_locations[right_charge.get_item(0)]) -# #print(' appending indices: {}s'.format(time.time() - t1)) -# print('finding all indices: {}s'.format(time.time() - t1)) -# return np.concatenate(indices) - - -def find_sparse_positions( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[int, bool]], - target_charges: Union[BaseCharge, ChargeCollection]) -> Dict: +def find_sparse_positions(charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[int, bool]], + target_charges: Union[BaseCharge, ChargeCollection] + ) -> Dict: """ 
Find the sparse locations of elements (i.e. the index-values within the SPARSE tensor) in the vector `fused_charges` (resulting from @@ -981,13 +862,12 @@ def find_sparse_positions( target_charge = target_charges[n] right_indices[(left_charge.get_item(0), target_charge.get_item(0))] = np.nonzero( - tmp_relevant_right_charges == ( - target_charge + left_charge * (-1)))[0] + tmp_relevant_right_charges == (target_charge + + left_charge * (-1)))[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy - stop_positions = np.cumsum(degeneracy_vector) - start_positions = stop_positions - degeneracy_vector + start_positions = np.cumsum(degeneracy_vector) - degeneracy_vector blocks = {t: [] for t in target_charges} # iterator returns tuple of `int` for ChargeCollection objects # and `int` for Ba seCharge objects (both hashable) @@ -1238,376 +1118,17 @@ def flows(self): def charges(self): return [i.charges for i in self.indices] - # def transpose(self, - # order: Union[List[int], np.ndarray], - # permutation: Optional[np.ndarray] = None, - # return_permutation: Optional[bool] = False - # ) -> "BlockSparseTensor": - # """ - # Transpose the tensor into the new order `order`. This routine currently shuffles - # data. - # Args: - # order: The new order of indices. - # permutation: An np.ndarray of int for reshuffling the data, - # typically the output of a prior call to `transpose`. Passing `permutation` - # can greatly speed up the transposition. - # return_permutation: If `True`, return the the permutation data. - # Returns: - # BlockSparseTensor: The transposed tensor. 
- # """ - # strides = _get_strides(self.dense_shape) - # dims = self.dense_shape - # charges = self.charges - # flows = self.flows - # partition = _find_best_partition(charges, flows, return_charges=False) - # tr_partition = _find_best_partition([charges[n] for n in order], - # [flows[n] for n in order], - # return_charges=False) - - # # unique_row_charges = compute_unique_fused_charges(charges[0:partition], - # # flows[0:partition]) - # # unique_column_charges, column_dims = compute_fused_charge_degeneracies( - # # charges[partition:], flows[partition:]) - - # # common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) - # # column_degeneracies = dict(zip(unique_column_charges * (-1), column_dims)) - - # tr_unique_row_charges = compute_unique_fused_charges( - # [charges[n] for n in order[0:tr_partition]], - # [flows[n] for n in order[0:tr_partition]]) - - # tr_unique_column_charges = compute_unique_fused_charges( - # [charges[n] for n in order[tr_partition:]], - # [flows[n] for n in order[tr_partition:]]) - - # tr_common_charges = tr_unique_row_charges.intersect( - # tr_unique_column_charges * (-1)) - # left_dense = { - # tr_common_charges.get_item(m): _find_transposed_dense_positions( - # charges=[charges[n] for n in order[0:tr_partition]], - # flows=[flows[n] for n in order[0:tr_partition]], - # target_charge=tr_common_charges[m], - # strides=strides[order[0:tr_partition]]) - # for m in range(len(tr_common_charges)) - # } - # right_dense = { - # tr_common_charges.get_item(m): _find_transposed_dense_positions( - # charges=[charges[n] for n in order[tr_partition:]], - # flows=[flows[n] for n in order[tr_partition:]], - # target_charge=tr_common_charges[m] * (-1), - # strides=strides[order[tr_partition:]]) - # for m in range(len(tr_common_charges)) - # } - # # cc, dense_blocks = _find_diagonal_dense_blocks( - # # [charges[n] for n in order[0:tr_partition]], - # # [charges[n] for n in order[tr_partition:]], - # # [flows[n] for n in 
order[0:tr_partition]], - # # [flows[n] for n in order[tr_partition:]], - # # row_strides=strides[order[0:tr_partition]], - # # column_strides=strides[order[tr_partition:]]) - # row_dim = np.prod([len(charges[n]) for n in range(partition)]) - # for n in range(len(tr_common_charges)): - # c = tr_common_charges.get_item(n) - # #d = dense_blocks[n] - # tmp = fuse_ndarrays([left_dense[c], right_dense[c]]) - # tmp2 = fuse_ndarrays( - # [np.mod(left_dense[c], row_dim), - # np.mod(right_dense[c], row_dim)]) - # tmp3 = (tmp - tmp2) / row_dim - # #print(np.all(tmp == d[0])) - - # return - # sp_row_blocks = find_sparse_positions(charges[0:partition], - # flows[0:partition], common_charges) - # sp_column_blocks = find_sparse_positions(charges[partition:], - # flows[partition:], - # common_charges * (-1)) - - # degeneracy_vector = np.empty( - # np.sum([len(v) for v in sp_row_blocks.values()]), dtype=np.int64) - # for c in common_charges: - # degeneracy_vector[sp_row_blocks[c]] = column_degeneracies[c] - # stop_positions = np.cumsum(degeneracy_vector) - # start_positions = stop_positions - degeneracy_vector - - # dense_row_blocks = { - # common_charges.get_item(n): find_dense_positions( - # charges[0:partition], - # flows[0:partition], - # common_charges[n], - # return_sorted=False) for n in range(len(common_charges)) - # } - # dense_column_blocks = { - # common_charges.get_item(n): find_dense_positions( - # charges[partition:], - # flows[partition:], - # common_charges[n] * (-1), - # return_sorted=False) for n in range(len(common_charges)) - # } - # dtype = charges[0].dtype - # drbs = list(dense_row_blocks.values()) - # block_dict = dict(zip(np.arange(len(drbs)), list(dense_row_blocks.keys()))) - - # dense_row_positions = np.concatenate(drbs) - # dense_block_numbers = np.concatenate([ - # np.full(len(drbs[n]), fill_value=n, dtype=np.int16) - # for n in range(len(drbs)) - # ]) - - # ind_sort = np.argsort(dense_row_positions) - # dense_row_positions = 
dense_row_positions[ind_sort] - # dense_block_number = dense_block_numbers[ind_sort] - - # #sp_column_positions = np.sort( - # # np.concatenate(list(sp_column_blocks.values()))) - # #dense_column_positions = np.sort( - # # np.concatenate(list(dense_column_blocks.values()))) - # #print('dense_row_positions:', dense_row_positions) - # #print('dense_column_positions:', dense_column_positions) - # #print('start_positions:', start_positions) - - # row_lookup = np.empty(dense_row_positions[-1] + 1, dtype=np.int64) - # block_lookup = np.empty(dense_row_positions[-1] + 1, dtype=np.int64) - # row_lookup[dense_row_positions] = start_positions - # block_lookup[dense_row_positions] = dense_block_number - # #column_lookup = np.empty(dense_column_positions[-1] + 1, dtype=np.int64) - # #column_lookup[dense_column_positions] = sp_column_positions - # # print('row_lookup', row_lookup) - # # print('col_lookup', column_lookup) - # # return - # # print('dense col pos', dense_column_positions) - # # print('sp_col_pos', sp_column_positions) - # data = np.empty(len(self.data), dtype=self.data.dtype) - # _, dense_blocks = _find_diagonal_dense_blocks( - # [charges[n] for n in order[0:tr_partition]], - # [charges[n] for n in order[tr_partition:]], - # flows[0:tr_partition], - # flows[tr_partition:], - # row_strides=strides[order[0:tr_partition]], - # column_strides=strides[order[tr_partition:]]) - - # # stride_arrays = [np.arange(dims[n]) * strides[n] for n in range(len(dims))] - # # tr_linear_positions = find_dense_positions([charges[n] for n in order], - # # [flows[n] for n in order], - # # charges[0].zero_charge) - # # tr_stride_arrays = [stride_arrays[n] for n in order] - - # # dense_permutation = _find_values_in_fused( - # # tr_linear_positions, fuse_ndarrays(tr_stride_arrays[0:tr_partition]), - # # fuse_ndarrays(tr_stride_arrays[partition:])) - - # # dense_permutation_2 = np.sort(np.concatenate([b[0] for b in dense_blocks])) - # # print('dense_perm == dense_perm2', - # # 
np.all(dense_permutation == dense_permutation_2)) - # column_dim = np.prod( - # [len(charges[n]) for n in range(partition, len(charges))]) - # for b in dense_blocks: - # # print(b[1]) - # # t1 = time.time() - # rinds, cinds = np.divmod(b[0], column_dim) - # # print('divmod', time.time() - t1) - # # t1 = time.time() - # start_pos = row_lookup[rinds] - # # print('startpos', time.time() - t1) - # # t1 = time.time() - # block_vals = block_lookup[rinds] - # # print('blockvals', time.time() - t1) - # # t1 = time.time() - # unique, cnts = np.unique(block_vals, return_counts=True) - - def transpose_a( - self, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False) -> "BlockSparseTensor": - """ - Transpose the tensor into the new order `order`. This routine currently shuffles - data. - Args: - order: The new order of indices. - permutation: An np.ndarray of int for reshuffling the data, - typically the output of a prior call to `transpose`. Passing `permutation` - can greatly speed up the transposition. - return_permutation: If `True`, return the the permutation data. - Returns: - BlockSparseTensor: The transposed tensor. 
- """ - strides = _get_strides(self.dense_shape) - dims = self.dense_shape - charges = self.charges - flows = self.flows - partition = _find_best_partition(charges, flows, return_charges=False) - tr_partition = _find_best_partition([charges[n] for n in order], - [flows[n] for n in order], - return_charges=False) - - unique_row_charges = compute_unique_fused_charges(charges[0:partition], - flows[0:partition]) - unique_column_charges, column_dims = compute_fused_charge_degeneracies( - charges[partition:], flows[partition:]) - - common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) - column_degeneracies = dict(zip(unique_column_charges * (-1), column_dims)) - sp_row_blocks = find_sparse_positions(charges[0:partition], - flows[0:partition], common_charges) - - degeneracy_vector = np.empty( - np.sum([len(v) for v in sp_row_blocks.values()]), dtype=np.int64) - for c in common_charges: - degeneracy_vector[sp_row_blocks[c]] = column_degeneracies[c] - start_positions = np.cumsum(degeneracy_vector) - degeneracy_vector - - dense_row_blocks = { - common_charges.get_item(n): find_dense_positions( - charges[0:partition], - flows[0:partition], - common_charges[n], - return_sorted=False) for n in range(len(common_charges)) - } - dense_row_positions = np.sort( - np.concatenate(list(dense_row_blocks.values()))) - row_lookup = np.zeros(dense_row_positions[-1] + 1, dtype=np.int64) - row_lookup[dense_row_positions] = start_positions - return row_lookup - column_lookup = _compute_sparse_lookup(charges[partition:], - flows[partition:], - common_charges * (-1)) - data = np.empty(len(self.data), dtype=self.data.dtype) - cs, dense_blocks = _find_diagonal_dense_blocks( - [charges[n] for n in order[0:tr_partition]], - [charges[n] for n in order[tr_partition:]], - [flows[n] for n in order[0:tr_partition]], - [flows[n] for n in order[tr_partition:]], - row_strides=strides[order[0:tr_partition]], - column_strides=strides[order[tr_partition:]]) - - column_dim = np.prod( - 
[len(charges[n]) for n in range(partition, len(charges))]) - transposed_positions = {} - for n in range(len(dense_blocks)): - b = dense_blocks[n] - rinds, cinds = np.divmod(b[0], column_dim) - start_pos = row_lookup[rinds] - transposed_positions[cs.get_item( - n)] = row_lookup[rinds] + column_lookup[cinds] - #self.data[transposed_positions] - - return transposed_positions - - def transpose_b( - self, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False) -> "BlockSparseTensor": - """ - Transpose the tensor into the new order `order`. This routine currently shuffles - data. - Args: - order: The new order of indices. - permutation: An np.ndarray of int for reshuffling the data, - typically the output of a prior call to `transpose`. Passing `permutation` - can greatly speed up the transposition. - return_permutation: If `True`, return the the permutation data. - Returns: - BlockSparseTensor: The transposed tensor. - """ - strides = _get_strides(self.dense_shape) - dims = self.dense_shape - charges = self.charges - flows = self.flows - partition = _find_best_partition(charges, flows, return_charges=False) - tr_partition = _find_best_partition([charges[n] for n in order], - [flows[n] for n in order], - return_charges=False) - - # unique_row_charges = compute_unique_fused_charges(charges[0:partition], - # flows[0:partition]) - # unique_column_charges, column_dims = compute_fused_charge_degeneracies( - # charges[partition:], flows[partition:]) - - # common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) - # column_degeneracies = dict(zip(unique_column_charges * (-1), column_dims)) - # sp_row_blocks = find_sparse_positions(charges[0:partition], - # flows[0:partition], common_charges) - - # degeneracy_vector = np.empty( - # np.sum([len(v) for v in sp_row_blocks.values()]), dtype=np.int64) - # for c in common_charges: - # degeneracy_vector[sp_row_blocks[c]] = column_degeneracies[c] 
- # start_positions = np.cumsum(degeneracy_vector) - degeneracy_vector - - # dense_row_blocks = { - # common_charges.get_item(n): find_dense_positions( - # charges[0:partition], - # flows[0:partition], - # common_charges[n], - # return_sorted=False) for n in range(len(common_charges)) - # } - # dense_column_blocks = { - # common_charges.get_item(n): find_dense_positions( - # charges[partition:], - # flows[partition:], - # common_charges[n] * (-1), - # return_sorted=False) for n in range(len(common_charges)) - # } - - row_lookup, column_lookup = _compute_sparse_lookups( - charges[0:partition], flows[0:partition], charges[partition:], - flows[partition:]) - return row_lookup - data = np.empty(len(self.data), dtype=self.data.dtype) - cs, dense_blocks = _find_diagonal_dense_blocks( - [charges[n] for n in order[0:tr_partition]], - [charges[n] for n in order[tr_partition:]], - [flows[n] for n in order[0:tr_partition]], - [flows[n] for n in order[tr_partition:]], - row_strides=strides[order[0:tr_partition]], - column_strides=strides[order[tr_partition:]]) - - column_dim = np.prod( - [len(charges[n]) for n in range(partition, len(charges))]) - transposed_positions = {} - for n in range(len(dense_blocks)): - b = dense_blocks[n] - rinds, cinds = np.divmod(b[0], column_dim) - start_pos = row_lookup[rinds] - transposed_positions[cs.get_item( - n)] = row_lookup[rinds] + column_lookup[cinds] - #self.data[transposed_positions] - - return transposed_positions - - # stride_arrays = [np.arange(dims[n]) * strides[n] for n in range(len(dims))] - # tr_linear_positions = find_dense_positions([charges[n] for n in order], - # [flows[n] for n in order], - # charges[0].zero_charge) - # tr_stride_arrays = [stride_arrays[n] for n in order] - # dense_permutation = _find_values_in_fused( - # tr_linear_positions, fuse_ndarrays(tr_stride_arrays[0:tr_partition]), - # fuse_ndarrays(tr_stride_arrays[partition:])) - #rinds, cinds = np.divmod(dense_permutation, column_dim) - # transposed_positions = 
row_lookup[rinds] + column_lookup[cinds] - # self.data[transposed_positions] - - def transpose_2( + def transpose( self, order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False) -> "BlockSparseTensor": + ) -> "BlockSparseTensor": """ - Transpose the tensor into the new order `order`. This routine currently shuffles - data. + Transpose the tensor in place into the new order `order`. Args: order: The new order of indices. - permutation: An np.ndarray of int for reshuffling the data, - typically the output of a prior call to `transpose`. Passing `permutation` - can greatly speed up the transposition. - return_permutation: If `True`, return the the permutation data. Returns: BlockSparseTensor: The transposed tensor. """ - if (permutation is not None) and (len(permutation) != len(self.data)): - raise ValueError("len(permutation) != len(tensor.data).") if len(order) != self.rank: raise ValueError( @@ -1616,170 +1137,22 @@ def transpose_2( #check for trivial permutation if np.all(order == np.arange(len(order))): - if return_permutation: - return np.arange(len(self.data)) - return - - #we use elementary indices here because it is - #more efficient to get the fused charges using - #the best partition - if permutation is None: - elementary_indices = {} - flat_elementary_indices = [] - for n in range(len(self.indices)): - elementary_indices[n] = self.indices[n].get_elementary_indices() - flat_elementary_indices.extend(elementary_indices[n]) - flat_index_list = np.arange(len(flat_elementary_indices)) - cum_num_legs = np.append( - 0, - np.cumsum( - [len(elementary_indices[n]) for n in range(len(self.indices))])) - - flat_charges = [i.charges for i in flat_elementary_indices] - flat_flows = [i.flow for i in flat_elementary_indices] - flat_dims = [len(c) for c in flat_charges] - flat_strides = _get_strides( - flat_dims - ) #np.flip(np.append(1, np.cumprod(np.flip(flat_dims[1::])))) - flat_order = 
np.concatenate( - [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) - #find the best partition into left and right charges - left_charges, right_charges, _ = _find_best_partition( - flat_charges, flat_flows) - linear_positions = find_dense_positions([left_charges, right_charges], - [1, 1], left_charges.zero_charge) - - flat_tr_charges = [flat_charges[n] for n in flat_order] - flat_tr_flows = [flat_flows[n] for n in flat_order] - flat_tr_strides = [flat_strides[n] for n in flat_order] - flat_tr_dims = [flat_dims[n] for n in flat_order] - - tr_left_charges, tr_right_charges, partition = _find_best_partition( - flat_tr_charges, flat_tr_flows) - t1 = time.time() - tr_linear_positions = find_dense_positions( - [tr_left_charges, tr_right_charges], [1, 1], - tr_left_charges.zero_charge) - print('finding dense positions in the transposed tensor: {}s'.format( - time.time() - t1)) - - stride_arrays = [ - np.arange(flat_tr_dims[n]) * flat_tr_strides[n] - for n in range(len(flat_tr_dims)) - ] - - dense_permutation = _find_values_in_fused( - tr_linear_positions, fuse_ndarrays(stride_arrays[0:partition]), - fuse_ndarrays(stride_arrays[partition::])) - #t1 = time.time() - #print(len(linear_positions), len(dense_permutation)) - permutation = np.searchsorted(linear_positions, dense_permutation) - #print('finding the permutation with argsort: {}s'.format(time.time() - - #t1)) - - self.indices = [self.indices[n] for n in order] - self.data = self.data[permutation] - if return_permutation: - return permutation - - # def transpose( - # self, - # order: Union[List[int], np.ndarray], - # permutation: Optional[np.ndarray] = None, - # return_permutation: Optional[bool] = False) -> "BlockSparseTensor": - # """ - # Transpose the tensor into the new order `order`. This routine currently shuffles - # data. - # Args: - # order: The new order of indices. - # permutation: An np.ndarray of int for reshuffling the data, - # typically the output of a prior call to `transpose`. 
Passing `permutation` - # can greatly speed up the transposition. - # return_permutation: If `True`, return the the permutation data. - # Returns: - # BlockSparseTensor: The transposed tensor. - # """ - # if (permutation is not None) and (len(permutation) != len(self.data)): - # raise ValueError("len(permutation) != len(tensor.data).") - - # if len(order) != self.rank: - # raise ValueError( - # "`len(order)={}` is different form `self.rank={}`".format( - # len(order), self.rank)) - - # #check for trivial permutation - # if np.all(order == np.arange(len(order))): - # if return_permutation: - # return np.arange(len(self.data)) - # return - - # #we use elementary indices here because it is - # #more efficient to get the fused charges using - # #the best partition - # if permutation is None: - # elementary_indices = {} - # flat_elementary_indices = [] - # for n in range(len(self.indices)): - # elementary_indices[n] = self.indices[n].get_elementary_indices() - # flat_elementary_indices.extend(elementary_indices[n]) - # flat_index_list = np.arange(len(flat_elementary_indices)) - # cum_num_legs = np.append( - # 0, - # np.cumsum( - # [len(elementary_indices[n]) for n in range(len(self.indices))])) - - # flat_charges = [i.charges for i in flat_elementary_indices] - # flat_flows = [i.flow for i in flat_elementary_indices] - # flat_dims = [len(c) for c in flat_charges] - # flat_strides = np.flip(np.append(1, np.cumprod(np.flip(flat_dims[1::])))) - # flat_order = np.concatenate( - # [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) - # #find the best partition into left and right charges - # left_charges, right_charges, _ = _find_best_partition( - # flat_charges, flat_flows) - # #find the index-positions of the elements in the fusion - # #of `left_charges` and `right_charges` that have `0` - # #total charge (those are the only non-zero elements). 
- # t1 = time.time() - # linear_positions = find_dense_positions( - # left_charges, - # 1, - # right_charges, - # 1, - # target_charge=flat_charges[0].zero_charge) - # print('finding dense positions in the original tensor: {}s'.format( - # time.time() - t1)) - # flat_tr_charges = [flat_charges[n] for n in flat_order] - # flat_tr_flows = [flat_flows[n] for n in flat_order] - # flat_tr_strides = [flat_strides[n] for n in flat_order] - # flat_tr_dims = [flat_dims[n] for n in flat_order] - - # tr_left_charges, tr_right_charges, partition = _find_best_partition( - # flat_tr_charges, flat_tr_flows) - # t1 = time.time() - # tr_linear_positions = find_dense_positions( - # tr_left_charges, 1, tr_right_charges, 1, tr_left_charges.zero_charge) - # print('finding dense positions in the transposed tensor: {}s'.format( - # time.time() - t1)) - - # stride_arrays = [ - # np.arange(flat_tr_dims[n]) * flat_tr_strides[n] - # for n in range(len(flat_tr_dims)) - # ] - - # dense_permutation = _find_values_in_fused( - # tr_linear_positions, fuse_ndarrays(stride_arrays[0:partition]), - # fuse_ndarrays(stride_arrays[partition::])) - # t1 = time.time() - # print(len(linear_positions), len(dense_permutation)) - # permutation = np.searchsorted(linear_positions, dense_permutation) - # print( - # 'finding the permutation with argsort: {}s'.format(time.time() - t1)) - - # self.indices = [self.indices[n] for n in order] - # self.data = self.data[permutation] - # if return_permutation: - # return permutation + return self + _, tr_data, tr_partition = _compute_transposition_data(self, order) + flat_charges, flat_flows, _, flat_order = flatten_meta_data( + self.indices, order) + + cs, sparse_blocks, _, _, _ = _find_diagonal_sparse_blocks( + [], [flat_charges[n] for n in flat_order[0:tr_partition]], + [flat_charges[n] for n in flat_order[tr_partition:]], + [flat_flows[n] for n in flat_order[0:tr_partition]], + [flat_flows[n] for n in flat_order[tr_partition:]], + return_data=False) + for n in 
range(len(sparse_blocks)): + sparse_block = sparse_blocks[n] + self.data[sparse_block[0]] = self.data[tr_data[cs.get_item(n)][0]] + + return self def reset_shape(self) -> None: """ @@ -1964,11 +1337,11 @@ def reshape(tensor: BlockSparseTensor, return result -def transpose( - tensor: BlockSparseTensor, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False) -> "BlockSparseTensor": +def transpose(tensor: BlockSparseTensor, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False + ) -> "BlockSparseTensor": """ Transpose `tensor` into the new order `order`. This routine currently shuffles data. @@ -1999,9 +1372,8 @@ def transpose( def tensordot(tensor1: BlockSparseTensor, tensor2: BlockSparseTensor, axes: Sequence[Sequence[int]], - permutation1: Optional[np.ndarray] = None, - permutation2: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False): + final_order: Optional[Union[List, np.ndarray]] = None + ) -> BlockSparseTensor: """ Contract two `BlockSparseTensor`s along `axes`. 
Args: @@ -2033,6 +1405,11 @@ def tensordot(tensor1: BlockSparseTensor, raise ValueError( "rank of `tensor1` is smaller than `max(axes1) = {}.`".format( max(axes1))) + + if max(axes2) >= len(tensor2.shape): + raise ValueError( + "rank of `tensor2` is smaller than `max(axes2) = {}`".format( + max(axes1))) elementary_1, elementary_2 = [], [] for a in axes1: elementary_1.extend(tensor1.indices[a].get_elementary_indices()) @@ -2043,70 +1420,49 @@ def tensordot(tensor1: BlockSparseTensor, raise ValueError("axes1 and axes2 have incompatible elementary" " shapes {} and {}".format(elementary_1, elementary_2)) if not np.all( - np.array([i.flow for i in elementary_1]) == - (-1) * np.array([i.flow for i in elementary_2])): + np.array([i.flow for i in elementary_1]) == (-1) * + np.array([i.flow for i in elementary_2])): raise ValueError("axes1 and axes2 have incompatible elementary" " flows {} and {}".format( np.array([i.flow for i in elementary_1]), np.array([i.flow for i in elementary_2]))) - if max(axes2) >= len(tensor2.shape): - raise ValueError( - "rank of `tensor2` is smaller than `max(axes2) = {}`".format( - max(axes1))) free_axes1 = sorted(set(np.arange(len(tensor1.shape))) - set(axes1)) free_axes2 = sorted(set(np.arange(len(tensor2.shape))) - set(axes2)) + if (final_order is not None) and (len(final_order) != + len(free_axes1) + len(free_axes2)): + raise ValueError("`final_order = {}` is not a valid order for " + "a final tensor of rank {}".format( + final_order, + len(free_axes1) + len(free_axes2))) + + if (final_order is not None) and not np.all( + np.sort(final_order) == np.arange(len(final_order))): + raise ValueError( + "`final_order = {}` is not a valid permutation of {} ".format( + final_order, np.arange(len(final_order)))) + new_order1 = free_axes1 + list(axes1) new_order2 = list(axes2) + free_axes2 t1 = time.time() - tr1 = transpose( - tensor=tensor1, - order=new_order1, - permutation=permutation1, - return_permutation=return_permutation) - if 
return_permutation: - permutation1 = tr1[1] - tr1 = tr1[0] - print('transposing tensor1: {}s'.format(time.time() - t1)) - - trshape1 = tr1.dense_shape - Dl1 = np.prod([trshape1[n] for n in range(len(free_axes1))]) - Dr1 = np.prod([trshape1[n] for n in range(len(free_axes1), len(trshape1))]) - - tmp1 = reshape(tr1, (Dl1, Dr1)) - t1 = time.time() - tr2 = transpose( - tensor=tensor2, - order=new_order2, - permutation=permutation2, - return_permutation=return_permutation) - if return_permutation: - permutation2 = tr2[1] - tr2 = tr2[0] - print('transposing tensor2: {}s'.format(time.time() - t1)) - trshape2 = tr2.dense_shape - Dl2 = np.prod([trshape2[n] for n in range(len(axes2))]) - Dr2 = np.prod([trshape2[n] for n in range(len(axes2), len(trshape2))]) - - tmp2 = reshape(tr2, (Dl2, Dr2)) + charges1, tr_data_1, tr_partition1 = _compute_transposition_data( + tensor1, new_order1, len(free_axes1)) + charges2, tr_data_2, tr_partition2 = _compute_transposition_data( + tensor2, new_order2, len(axes2)) - #avoid data-copying here by setting `return_data=False` - t1 = time.time() - column_charges1, data1, start_positions, row_locations, _ = tmp1._get_diagonal_blocks( - return_data=False) - row_charges2, data2, _, _, column_degeneracies = tmp2._get_diagonal_blocks( - return_data=False) + print('compute transposition data', time.time() - t1) + common_charges = charges1.intersect(charges2) - print('finding diagonal blocks: {}s'.format(time.time() - t1)) - #get common charges between rows and columns - # tmp_charges, cnts = column_charges1.concatenate(row_charges2).unique( - # return_counts=True) - # common_charges = tmp_charges[cnts == 2] - common_charges = column_charges1.intersect(row_charges2) #get the flattened indices for the output tensor - indices = [] - indices.extend(tmp1.indices[0].get_elementary_indices()) - indices.extend(tmp2.indices[1].get_elementary_indices()) + left_indices = [] + right_indices = [] + for n in free_axes1: + 
left_indices.extend(tensor1.indices[n].get_elementary_indices()) + for n in free_axes2: + right_indices.extend(tensor2.indices[n].get_elementary_indices()) + indices = left_indices + right_indices + if final_order is not None: + indices = [indices[n] for n in final_order] index_names = [i.name for i in indices] unique = np.unique(index_names) #rename indices if they are not unique @@ -2114,39 +1470,111 @@ def tensordot(tensor1: BlockSparseTensor, for n, i in enumerate(indices): i.name = 'index_{}'.format(n) - #initialize the data-vector of the output with zeros - num_nonzero_elements = compute_num_nonzero([i.charges for i in indices], - [i.flow for i in indices]) + #initialize the data-vector of the output with zeros; + #Note that empty is not a viable choice here. + ts = [] + t1 = time.time() + cs, sparse_blocks, _, _, _ = _find_diagonal_sparse_blocks( + [], [i.charges for i in left_indices], [i.charges for i in right_indices], + [i.flow for i in left_indices], [i.flow for i in right_indices], + return_data=False) + print('finding sparse positions', time.time() - t1) + num_nonzero_elements = np.sum([len(v[0]) for v in sparse_blocks]) data = np.zeros( num_nonzero_elements, dtype=np.result_type(tensor1.dtype, tensor2.dtype)) - ts = [] - for c in common_charges: - rlocs = row_locations[c] - cdegs = column_degeneracies[c] - a = np.expand_dims(start_positions[rlocs], 1) - b = np.expand_dims(np.arange(cdegs), 0) - new_locations = np.reshape(a + b, len(rlocs) * cdegs) - i1 = np.nonzero(column_charges1 == c)[0][0] - i2 = np.nonzero(row_charges2 == c)[0][0] - - try: - #place the result of the block-matrix multiplication - #into the new data-vector - t1 = time.time() - data[new_locations] = np.matmul( - np.reshape(tensor1.data[data1[i1][0]], data1[i1][1]), - np.reshape(tensor2.data[data2[i2][0]], data2[i2][1])).flat - ts.append(time.time() - t1) - except ValueError: - raise ValueError("for quantum number {}, shapes {} and {} " - "of left and right blocks have " - 
"incompatible shapes".format(c, data1[i1].shape, - data2[i2].shape)) - print('totalnumpy', np.sum(ts)) - out = BlockSparseTensor(data=data, indices=indices) - resulting_shape = [trshape1[n] for n in range(len(free_axes1)) - ] + [trshape2[n] for n in range(len(axes2), len(trshape2))] - out.reshape(resulting_shape) - if return_permutation: - return out, permutation1, permutation2 - return out + + for n in range(len(common_charges)): + c = common_charges.get_item(n) + permutation1 = tr_data_1[c] + permutation2 = tr_data_2[c] + sparse_block = sparse_blocks[n] + b1 = np.reshape(tensor1.data[permutation1[0]], permutation1[1]) + b2 = np.reshape(tensor2.data[permutation2[0]], permutation2[1]) + res = np.matmul(b1, b2) + data[sparse_block[0]] = res.flat + print('tensordot', time.time() - t1) + return BlockSparseTensor(data=data, indices=indices) + + +def flatten_meta_data(indices, order): + elementary_indices = {} + flat_elementary_indices = [] + for n in range(len(indices)): + elementary_indices[n] = indices[n].get_elementary_indices() + flat_elementary_indices.extend(elementary_indices[n]) + flat_index_list = np.arange(len(flat_elementary_indices)) + cum_num_legs = np.append( + 0, np.cumsum([len(elementary_indices[n]) for n in range(len(indices))])) + + flat_charges = [i.charges for i in flat_elementary_indices] + flat_flows = [i.flow for i in flat_elementary_indices] + flat_dims = [len(c) for c in flat_charges] + flat_strides = _get_strides(flat_dims) + flat_order = np.concatenate( + [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) + return flat_charges, flat_flows, flat_strides, flat_order + + +def _compute_transposition_data( + tensor: BlockSparseTensor, + order: Union[List[int], np.ndarray], + transposed_partition: Optional[int] = None +) -> Tuple[Union[BaseCharge, ChargeCollection], Dict, int]: + """ + Args: + tensor: A symmetric tensor. + order: The new order of indices. 
+ permutation: An np.ndarray of int for reshuffling the data, + typically the output of a prior call to `transpose`. Passing `permutation` + can greatly speed up the transposition. + return_permutation: If `True`, return the the permutation data. + Returns: + BlockSparseTensor: The transposed tensor. + """ + if len(order) != tensor.rank: + raise ValueError( + "`len(order)={}` is different form `tensor.rank={}`".format( + len(order), tensor.rank)) + + #check for trivial permutation + if np.all(order == np.arange(len(order))): + return + + #we use flat meta data because it is + #more efficient to get the fused charges using + #the best partition + flat_charges, flat_flows, flat_strides, flat_order = flatten_meta_data( + tensor.indices, order) + partition = _find_best_partition( + flat_charges, flat_flows, return_charges=False) + if transposed_partition is None: + transposed_partition = _find_best_partition( + [flat_charges[n] for n in flat_order], + [flat_flows[n] for n in flat_order], + return_charges=False) + t1 = time.time() + row_lookup, column_lookup = _compute_sparse_lookups(flat_charges[0:partition], + flat_flows[0:partition], + flat_charges[partition:], + flat_flows[partition:]) + print('lookup', time.time() - t1) + t1 = time.time() + cs, dense_blocks = _find_diagonal_dense_blocks( + [flat_charges[n] for n in flat_order[0:transposed_partition]], + [flat_charges[n] for n in flat_order[transposed_partition:]], + [flat_flows[n] for n in flat_order[0:transposed_partition]], + [flat_flows[n] for n in flat_order[transposed_partition:]], + row_strides=flat_strides[flat_order[0:transposed_partition]], + column_strides=flat_strides[flat_order[transposed_partition:]]) + print('diagonal dense blocks', time.time() - t1) + column_dim = np.prod( + [len(flat_charges[n]) for n in range(partition, len(flat_charges))]) + transposed_positions = {} + for n in range(len(dense_blocks)): + b = dense_blocks[n] + rinds, cinds = np.divmod(b[0], column_dim) + start_pos = 
row_lookup[rinds] + transposed_positions[cs.get_item(n)] = [ + row_lookup[rinds] + column_lookup[cinds], b[1] + ] + return cs, transposed_positions, transposed_partition From 6478f03bc69e76487b04e69c11f29bbde7ded2e6 Mon Sep 17 00:00:00 2001 From: Chase Roberts Date: Fri, 17 Jan 2020 15:22:17 -0800 Subject: [PATCH 163/212] Update requirements_travis.txt (#426) --- requirements_travis.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements_travis.txt b/requirements_travis.txt index 2b281ebb7..7d226c1a3 100644 --- a/requirements_travis.txt +++ b/requirements_travis.txt @@ -1,6 +1,6 @@ tensorflow>=2.0.0 pytype==2019.06.21 pytest -torch>=1.1 +torch==1.3.1 jax>=0.1.0 jaxlib>=0.1.27 From 707f38dec3977824a06f5d39c0bdcf3c6a892f10 Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 20 Jan 2020 09:29:50 -0500 Subject: [PATCH 164/212] rewrote find_dense_positions to take multipe charges avoids a for loop in _find_diagonal_dense_blocks and speeds up the code --- tensornetwork/block_tensor/block_tensor.py | 874 +++++++++++++++------ 1 file changed, 633 insertions(+), 241 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 0d88dd444..2b3a071c6 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -201,9 +201,8 @@ def compute_fused_charge_degeneracies( # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". 
- accumulated_charges, accumulated_degeneracies = (charges[0] * - flows[0]).unique( - return_counts=True) + accumulated_charges, accumulated_degeneracies = ( + charges[0] * flows[0]).unique(return_counts=True) for n in range(1, len(charges)): #list of unique charges and list of their degeneracies #on the next unfused leg of the tensor @@ -461,7 +460,7 @@ def _find_diagonal_dense_blocks( is a tuple corresponding to the blocks' matrix shape """ - flows = row_flows.copy() + flows = list(row_flows).copy() flows.extend(column_flows) _check_flows(flows) if len(flows) != (len(row_charges) + len(column_charges)): @@ -472,13 +471,21 @@ def _find_diagonal_dense_blocks( #we only care about their degeneracies, not their order; that's much faster #to compute since we don't have to fuse all charges explicitly #`compute_fused_charge_degeneracies` multiplies flows into the column_charges - t1 = time.time() unique_column_charges = compute_unique_fused_charges(column_charges, column_flows) + unique_row_charges = compute_unique_fused_charges(row_charges, row_flows) #get the charges common to rows and columns (only those matter) + fused = unique_row_charges + unique_column_charges + li, ri = np.divmod( + np.nonzero(fused == unique_column_charges.zero_charge)[0], + len(unique_row_charges)) common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) - print('_find_diagonal_sparse_blocks: unique charges ', time.time() - t1) + # print(unique_row_charges.charges[li]) + # print(unique_column_charges.charges[ri]) + # print(common_charges.charges) + # return + #print('_find_diagonal_sparse_blocks: unique charges ', time.time() - t1) if ((row_strides is None) and (column_strides is not None)) or ((row_strides is not None) and (column_strides is None)): @@ -486,172 +493,505 @@ def _find_diagonal_dense_blocks( "have to be passed simultaneously." 
" Found `row_strides={}` and " "`column_strides={}`".format(row_strides, column_strides)) - t1 = time.time() if row_strides is not None: - row_locations = { - common_charges.get_item(n): _find_transposed_dense_positions( - charges=row_charges, - flows=row_flows, - target_charge=common_charges[n], - strides=row_strides) for n in range(len(common_charges)) - } + row_locations = find_dense_positions( + charges=row_charges, + flows=row_flows, + target_charges=unique_row_charges[li], + strides=row_strides) + else: column_dim = np.prod([len(c) for c in column_charges]) - row_locations = { - common_charges.get_item(n): column_dim * find_dense_positions( - charges=row_charges, - flows=row_flows, - target_charge=common_charges[n]) - for n in range(len(common_charges)) - } + row_locations = find_dense_positions( + charges=row_charges, + flows=row_flows, + target_charges=unique_row_charges[li]) + for v in row_locations.values(): + v *= column_dim + # row_locations = { + # common_charges.get_item(n): column_dim * find_dense_positions( + # charges=row_charges, + # flows=row_flows, + # target_charge=common_charges[n]) + # for n in range(len(common_charges)) + # } if column_strides is not None: - column_locations = { - common_charges.get_item(n): _find_transposed_dense_positions( - charges=column_charges, - flows=column_flows, - target_charge=common_charges[n] * (-1), - strides=column_strides) for n in range(len(common_charges)) - } + column_locations = find_dense_positions( + charges=column_charges, + flows=column_flows, + target_charges=unique_column_charges[ri], + strides=column_strides, + store_dual=False) + else: - column_locations = { - common_charges.get_item(n): find_dense_positions( - charges=column_charges, - flows=column_flows, - target_charge=common_charges[n] * (-1)) - for n in range(len(common_charges)) - } - print('finding locations:', time.time() - t1) + column_locations = find_dense_positions( + charges=column_charges, + flows=column_flows, + 
target_charges=unique_column_charges[ri], + store_dual=False) + + # column_locations = { + # common_charges.get_item(n): find_dense_positions( + # charges=column_charges, + # flows=column_flows, + # target_charge=common_charges[n] * (-1)) + # for n in range(len(common_charges)) + # } blocks = [] - for c in common_charges: + for c in unique_row_charges[li]: #numpy broadcasting is substantially faster than kron! rlocs = np.expand_dims(row_locations[c], 1) clocs = np.expand_dims(column_locations[c], 0) inds = np.reshape(rlocs + clocs, rlocs.shape[0] * clocs.shape[1]) blocks.append([inds, (rlocs.shape[0], clocs.shape[1])]) - return common_charges, blocks + return unique_row_charges[li], blocks + + +# def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], +# flows: List[Union[int, bool]], +# target_charge: Union[BaseCharge, ChargeCollection], +# order: Optional[np.ndarray] = None, +# return_sorted: Optional[bool] = True) -> np.ndarray: +# """ +# Find the dense locations of elements (i.e. the index-values within the DENSE tensor) +# in the vector of `fused_charges` resulting from fusing all elements of `charges` +# that have a value of `target_charge`. +# For example, given +# ``` +# charges = [[-2,0,1,0,0],[-1,0,2,1]] +# target_charge = 0 +# fused_charges = fuse_charges(charges,[1,1]) +# print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] +# ``` +# we want to find the all different blocks +# that fuse to `target_charge=0`, i.e. where `fused_charges==0`, +# together with their corresponding index-values of the data in the dense array. +# `find_dense_blocks` returns an np.ndarray containing the indices-positions of +# these elements. +# For the above example, we get: +# * for `charge[0]` = -2 and `charge[1]` = 2 we get an array [2]. Thus, `fused_charges[2]` +# was obtained from fusing -2 and 2. +# * for `charge[0]` = 0 and `charge[1]` = 0 we get an array [5, 13, 17]. 
Thus, +# `fused_charges[5,13,17]` were obtained from fusing 0 and 0. +# * for `charge[0]` = 1 and `charge[1]` = -1 we get an array [8]. Thus, `fused_charges[8]` +# was obtained from fusing 1 and -1. +# Args: +# charges: A list of BaseCharge or ChargeCollection. +# flows: The flow directions of the `charges`. +# target_charge: The target charge. +# order: An optional order for the elements in `charges`. +# Useful for finding dense positions in a permuted tensor +# with respect to the unpermuted order. +# Returns: +# np.ndarray: The index-positions within the dense data array +# of the elements fusing to `target_charge`. +# """ +# if order is not None: +# if len(order) != len(charges): +# raise ValueError("len(order) ={} != len(charges) = {}".format( +# len(order), len(charges))) + +# if not np.all(np.sort(order) == np.arange(len(order))): +# raise ValueError("order = {} is not a valid permutation of {}".format( +# order, np.arange(len(order)))) + +# _check_flows(flows) +# if len(charges) == 1: +# fused_charges = charges[0] * flows[0] +# return np.nonzero(fused_charges == target_charge)[0] + +# if order is not None: +# left_charges, right_charges, partition = _find_best_partition( +# [charges[n] for n in order], [flows[n] for n in order]) + +# dims = [len(c) for c in charges] +# strides = _get_strides( +# dims) #np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) +# stride_arrays = [np.arange(dims[n]) * strides[n] for n in order] +# permuted_row_inds = fuse_ndarrays(stride_arrays[0:partition]) +# permuted_column_inds = fuse_ndarrays(stride_arrays[partition:]) +# else: +# left_charges, right_charges, partition = _find_best_partition( +# charges, flows) +# t1 = time.time() +# unique_left = left_charges.unique() +# unique_right = right_charges.unique() + +# tmp_left_charges = (target_charge + (unique_right * (-1))) +# relevant_left_charges = unique_left.intersect(tmp_left_charges) +# right_locations = {} +# len_right_charges = len(right_charges) +# dense_inds = [] +# 
left_inds = [] +# index_table = [] + +# for n in range(len(relevant_left_charges)): +# c = relevant_left_charges[n] +# left_ind = np.nonzero(left_charges == c)[0] +# if return_sorted: +# index_table.append( +# np.stack([ +# np.arange(len(left_ind)), +# np.full(len(left_ind), n, dtype=np.int64) +# ], +# axis=1)) +# left_inds.append(left_ind) +# right_charge = (target_charge + (c * (-1))) +# if order is None: +# dim_array = np.expand_dims(len_right_charges * left_ind, 1) +# right_inds = np.nonzero(right_charges == right_charge)[0] +# mat = np.tile(right_inds, (len(dim_array), 1)) + +# else: +# dim_array = np.expand_dims(permuted_row_inds[left_ind], 1) +# right_inds = permuted_column_inds[np.nonzero( +# right_charges == right_charge)[0]] +# mat = np.tile(right_inds, (len(dim_array), 1)) +# if return_sorted: +# dense_inds.append(mat + dim_array) +# else: +# dense_inds.append(np.reshape(mat + dim_array, np.prod(mat.shape))) +# if return_sorted: +# if len(index_table) > 0: +# it = np.concatenate(index_table) +# ind_sort = np.argsort(np.concatenate(left_inds)) +# table = it[ind_sort, :] +# return np.concatenate([ +# dense_inds[table[n, 1]][table[n, 0], :] for n in range(table.shape[0]) +# ]) +# return np.array([]) +# return np.concatenate(dense_inds) + +# def find_dense_positions_new( +# charges: List[Union[BaseCharge, ChargeCollection]], +# flows: List[Union[int, bool]], +# target_charges: Union[BaseCharge, ChargeCollection], +# order: Optional[np.ndarray] = None, +# return_sorted: Optional[bool] = True) -> np.ndarray: +# """ +# Find the dense locations of elements (i.e. the index-values within the DENSE tensor) +# in the vector of `fused_charges` resulting from fusing all elements of `charges` +# that have a value of `target_charge`. 
+# For example, given +# ``` +# charges = [[-2,0,1,0,0],[-1,0,2,1]] +# target_charge = 0 +# fused_charges = fuse_charges(charges,[1,1]) +# print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] +# ``` +# we want to find the all different blocks +# that fuse to `target_charge=0`, i.e. where `fused_charges==0`, +# together with their corresponding index-values of the data in the dense array. +# `find_dense_blocks` returns an np.ndarray containing the indices-positions of +# these elements. +# For the above example, we get: +# * for `charge[0]` = -2 and `charge[1]` = 2 we get an array [2]. Thus, `fused_charges[2]` +# was obtained from fusing -2 and 2. +# * for `charge[0]` = 0 and `charge[1]` = 0 we get an array [5, 13, 17]. Thus, +# `fused_charges[5,13,17]` were obtained from fusing 0 and 0. +# * for `charge[0]` = 1 and `charge[1]` = -1 we get an array [8]. Thus, `fused_charges[8]` +# was obtained from fusing 1 and -1. +# Args: +# charges: A list of BaseCharge or ChargeCollection. +# flows: The flow directions of the `charges`. +# target_charge: The target charge. +# order: An optional order for the elements in `charges`. +# Useful for finding dense positions in a permuted tensor +# with respect to the unpermuted order. +# Returns: +# np.ndarray: The index-positions within the dense data array +# of the elements fusing to `target_charge`. 
+# """ +# if order is not None: +# if len(order) != len(charges): +# raise ValueError("len(order) ={} != len(charges) = {}".format( +# len(order), len(charges))) + +# if not np.all(np.sort(order) == np.arange(len(order))): +# raise ValueError("order = {} is not a valid permutation of {}".format( +# order, np.arange(len(order)))) + +# _check_flows(flows) +# if len(charges) == 1: +# fused_charges = charges[0] * flows[0] +# return np.nonzero(fused_charges == target_charge)[0] + +# if order is not None: +# raise NotImplementedError() +# ts = [] +# t00 = time.time() +# left_charges, right_charges, partition = _find_best_partition(charges, flows) + +# target_charges = target_charges.unique() +# unique_left, left_inverse = left_charges.unique(return_inverse=True) +# unique_right, right_inverse = right_charges.unique(return_inverse=True) + +# fused_unique = unique_left + unique_right +# relevant_positions = np.nonzero(fused_unique.isin(target_charges))[0] + +# relevant_unique_left_inds, relevant_unique_right_inds = np.divmod( +# relevant_positions, len(unique_right)) +# left_charge_labels = np.nonzero( +# np.expand_dims(left_inverse, 1) == np.expand_dims( +# relevant_unique_left_inds, 0)) + +# right_charge_labels = np.nonzero( +# np.expand_dims(right_inverse, 1) == np.expand_dims( +# relevant_unique_right_inds, 0)) +# # t01 = time.time() +# # ts.append(t01 - t00) +# #print(ts[-1]) + +# dense_left_pos = {} + +# len_right = len(right_charges) +# for n, li in enumerate(relevant_unique_left_inds): +# dense_left_pos[unique_left.get_item(li)] = left_charge_labels[0][ +# left_charge_labels[1] == n] + +# dense_right_pos = {} +# #for m in range(len(relevant_unique_right_inds)): +# for m, ri in enumerate(relevant_unique_right_inds): +# #right_charge = unique_right.get_item(relevant_unique_right_inds[m]) +# dense_right_pos[unique_right.get_item(ri)] = right_charge_labels[0][ +# right_charge_labels[1] == m] +# # t02 = time.time() +# # ts.append(t02 - t01) +# #print(ts[-1]) + +# # 
print(dense_right_pos[m]) +# # print(np.nonzero(right_charges == unique_right[m] * (-1))[0]) +# # print(unique_right[ri].charges) +# # print( +# # np.all(dense_right_pos[unique_right.get_item(ri)] == np.nonzero( +# # right_charges == unique_right[ri])[0])) + +# # for n in range(len(relevant_unique_right_inds)): +# # print((unique_left[n] + unique_right[n] * (-1)).charges) + +# #fused_semi_unique = unique_left + right_charges +# #relevant_positions = np.nonzero(fused_semi_unique.isin(target_charges))[0] +# #left_inds, right_inds = np.divmod(relevant_positions, len(right_charges)) +# blocks = {} + +# #t1 = time.time() +# for n, li in enumerate(relevant_unique_left_inds): +# ri = relevant_unique_right_inds[n] +# target_charge = unique_left[li] + unique_right[ri] +# blocks[target_charge.get_item(0)] = [] +# # t2 = time.time() +# # ts.append(t2 - t1) +# #print(ts[-1]) +# for n, li in enumerate(relevant_unique_left_inds): +# ri = relevant_unique_right_inds[n] +# target_charge = unique_left[li] + unique_right[ri] +# inds = np.asarray( +# (np.expand_dims(dense_left_pos[unique_left.get_item(li)], 1) + +# np.expand_dims(dense_right_pos[unique_right.get_item(ri)], 0))) +# blocks[target_charge.get_item(0)].append( +# np.reshape(inds, np.prod(inds.shape))) +# # t3 = time.time() +# # ts.append(t3 - t2) +# #print(ts[-1]) +# sorted_blocks = {} +# for k in blocks.keys(): +# sorted_blocks[k] = np.sort(np.concatenate(blocks[k])) +# #ts.append(time.time() - t3) +# #print(ts[-1]) +# #print('total', np.sum(ts)) +# return sorted_blocks + +# ##################################################### +# ##################################################### +# ##################################################### +# unique_left = left_charges.unique() +# unique_right = right_charges.unique() + +# tmp_left_charges = (target_charge + (unique_right * (-1))) +# relevant_left_charges = unique_left.intersect(tmp_left_charges) +# right_locations = {} +# len_right_charges = len(right_charges) +# 
dense_inds = [] +# left_inds = [] +# index_table = [] + +# for n in range(len(relevant_left_charges)): +# c = relevant_left_charges[n] +# left_ind = np.nonzero(left_charges == c)[0] +# if return_sorted: +# index_table.append( +# np.stack([ +# np.arange(len(left_ind)), +# np.full(len(left_ind), n, dtype=np.int64) +# ], +# axis=1)) +# left_inds.append(left_ind) +# right_charge = (target_charge + (c * (-1))) +# if order is None: +# dim_array = np.expand_dims(len_right_charges * left_ind, 1) +# right_inds = np.nonzero(right_charges == right_charge)[0] +# mat = np.tile(right_inds, (len(dim_array), 1)) + +# else: +# dim_array = np.expand_dims(permuted_row_inds[left_ind], 1) +# right_inds = permuted_column_inds[np.nonzero( +# right_charges == right_charge)[0]] +# mat = np.tile(right_inds, (len(dim_array), 1)) +# if return_sorted: +# dense_inds.append(mat + dim_array) +# else: +# dense_inds.append(np.reshape(mat + dim_array, np.prod(mat.shape))) +# if return_sorted: +# if len(index_table) > 0: +# it = np.concatenate(index_table) +# ind_sort = np.argsort(np.concatenate(left_inds)) +# table = it[ind_sort, :] +# return np.concatenate([ +# dense_inds[table[n, 1]][table[n, 0], :] for n in range(table.shape[0]) +# ]) +# return np.array([]) +# return np.concatenate(dense_inds) + +# def _find_transposed_dense_positions( +# charges: List[Union[BaseCharge, ChargeCollection]], +# flows: List[Union[int, bool]], +# target_charge: Union[BaseCharge, ChargeCollection], +# strides: Optional[np.ndarray] = None) -> np.ndarray: + +# #print('###########################################') +# """ +# Find the dense locations of elements (i.e. the index-values within the DENSE tensor) +# in the vector of `fused_charges` resulting from fusing all elements of `charges` +# that have a value of `target_charge`. 
+# For example, given +# ``` +# charges = [[-2,0,1,0,0],[-1,0,2,1]] +# target_charge = 0 +# fused_charges = fuse_charges(charges,[1,1]) +# print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] +# ``` +# we want to find the index-positions of charges +# that fuse to `target_charge=0`, i.e. where `fused_charges==0`, +# within the dense array. As one additional wrinkle, `charges` +# is a subset of the permuted charges of a tensor with rank R > len(charges), +# and `stride_arrays` are their corresponding range of strides, i.e. + +# ``` +# R=5 +# D = [2,3,4,5,6] +# tensor_flows = np.random.randint(-1,2,R) +# tensor_charges = [np.random.randing(-5,5,D[n]) for n in range(R)] +# order = np.arange(R) +# np.random.shuffle(order) +# tensor_strides = [360, 120, 30, 6, 1] + +# charges = [tensor_charges[order[n]] for n in range(3)] +# flows = [tensor_flows[order[n]] for n in range(len(3))] +# strides = [tensor_stride[order[n]] for n in range(3)] +# _ = _find_transposed_dense_positions(charges, flows, 0, strides) + +# ``` +# `_find_transposed_dense_blocks` returns an np.ndarray containing the +# index-positions of these elements calculated using `stride_arrays`. +# The result only makes sense in conjuction with the complementary +# data computed from the complementary +# elements in`tensor_charges`, +# `tensor_strides` and `tensor_flows`. +# This routine is mainly used in `_find_diagonal_dense_blocks`. + +# Args: +# charges: A list of BaseCharge or ChargeCollection. +# flows: The flow directions of the `charges`. +# target_charge: The target charge. +# strides: The strides for the `charges` subset. +# if `None`, natural stride ordering is assumed. + +# Returns: +# np.ndarray: The index-positions within the dense data array +# of the elements fusing to `target_charge`. 
+# """ + +# _check_flows(flows) +# if len(charges) == 1: +# fused_charges = charges[0] * flows[0] +# inds = np.nonzero(fused_charges == target_charge)[0] +# if strides is not None: +# permuted_inds = strides[0] * np.arange(len(charges[0])) +# return permuted_inds[inds] +# return inds +# #t1 = time.time() +# left_charges, right_charges, partition = _find_best_partition(charges, flows) +# #print('in _find_transposed_dense_blocks: find_best_partition:', +# #time.time() - t1) +# #t1 = time.time() +# if strides is not None: +# stride_arrays = [ +# np.arange(len(charges[n])) * strides[n] for n in range(len(charges)) +# ] +# permuted_left_inds = fuse_ndarrays(stride_arrays[0:partition]) +# permuted_right_inds = fuse_ndarrays(stride_arrays[partition:]) + +# #print('in _find_transposed_dense_blocks: fusing ndarrays:', time.time() - t1) +# #t1 = time.time() +# unique_left = left_charges.unique() +# unique_right = right_charges.unique() +# #print('in _find_transposed_dense_blocks: finding unique:', time.time() - t1) +# #t1 = time.time() +# tmp_left_charges = (target_charge + (unique_right * (-1))) +# relevant_left_charges = unique_left.intersect(tmp_left_charges) +# right_locations = {} +# len_right_charges = len(right_charges) +# dense_inds = [] +# left_inds = [] +# index_table = [] +# # print('in _find_transposed_dense_blocks: finding relevant charges:', +# # time.time() - t1) +# #t1 = time.time() +# for n in range(len(relevant_left_charges)): +# c = relevant_left_charges[n] +# left_ind = np.nonzero(left_charges == c)[0] +# index_table.append( +# np.stack([ +# np.arange(len(left_ind)), +# np.full(len(left_ind), n, dtype=np.int64) +# ], +# axis=1)) +# left_inds.append(left_ind) +# right_charge = (target_charge + (c * (-1))) +# if strides is None: +# dim_array = np.expand_dims(len_right_charges * left_ind, 1) +# right_inds = np.expand_dims( +# np.nonzero(right_charges == right_charge)[0], 0) + +# else: +# dim_array = np.expand_dims(permuted_left_inds[left_ind], 1) +# 
right_inds = np.expand_dims( +# permuted_right_inds[np.nonzero(right_charges == right_charge)[0]], 0) +# mat = np.tile(right_inds, (len(dim_array), 1)) +# dense_inds.append(mat + dim_array) +# # print('in _find_transposed_dense_blocks: running nested for loop:', +# # time.time() - t1) +# # t1 = time.time() + +# if len(index_table) > 0: +# it = np.concatenate(index_table) +# ind_sort = np.argsort(np.concatenate(left_inds)) +# table = it[ind_sort, :] +# return np.concatenate([ +# dense_inds[table[n, 1]][table[n, 0], :] for n in range(table.shape[0]) +# ]) +# # print('in _find_transposed_dense_blocks: finding the sorted indices:', +# # time.time() - t1) +# # t1 = time.time() + +# return np.array([]) def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], flows: List[Union[int, bool]], - target_charge: Union[BaseCharge, ChargeCollection], - order: Optional[np.ndarray] = None, - return_sorted: Optional[bool] = True) -> np.ndarray: - """ - Find the dense locations of elements (i.e. the index-values within the DENSE tensor) - in the vector of `fused_charges` resulting from fusing all elements of `charges` - that have a value of `target_charge`. - For example, given - ``` - charges = [[-2,0,1,0,0],[-1,0,2,1]] - target_charge = 0 - fused_charges = fuse_charges(charges,[1,1]) - print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] - ``` - we want to find the all different blocks - that fuse to `target_charge=0`, i.e. where `fused_charges==0`, - together with their corresponding index-values of the data in the dense array. - `find_dense_blocks` returns an np.ndarray containing the indices-positions of - these elements. - For the above example, we get: - * for `charge[0]` = -2 and `charge[1]` = 2 we get an array [2]. Thus, `fused_charges[2]` - was obtained from fusing -2 and 2. - * for `charge[0]` = 0 and `charge[1]` = 0 we get an array [5, 13, 17]. Thus, - `fused_charges[5,13,17]` were obtained from fusing 0 and 0. 
- * for `charge[0]` = 1 and `charge[1]` = -1 we get an array [8]. Thus, `fused_charges[8]` - was obtained from fusing 1 and -1. - Args: - charges: A list of BaseCharge or ChargeCollection. - flows: The flow directions of the `charges`. - target_charge: The target charge. - order: An optional order for the elements in `charges`. - Useful for finding dense positions in a permuted tensor - with respect to the unpermuted order. - Returns: - np.ndarray: The index-positions within the dense data array - of the elements fusing to `target_charge`. - """ - if order is not None: - if len(order) != len(charges): - raise ValueError("len(order) ={} != len(charges) = {}".format( - len(order), len(charges))) - - if not np.all(np.sort(order) == np.arange(len(order))): - raise ValueError("order = {} is not a valid permutation of {}".format( - order, np.arange(len(order)))) - - _check_flows(flows) - if len(charges) == 1: - fused_charges = charges[0] * flows[0] - return np.nonzero(fused_charges == target_charge)[0] - - if order is not None: - left_charges, right_charges, partition = _find_best_partition( - [charges[n] for n in order], [flows[n] for n in order]) - - dims = [len(c) for c in charges] - strides = _get_strides( - dims) #np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) - stride_arrays = [np.arange(dims[n]) * strides[n] for n in order] - permuted_row_inds = fuse_ndarrays(stride_arrays[0:partition]) - permuted_column_inds = fuse_ndarrays(stride_arrays[partition:]) - else: - left_charges, right_charges, partition = _find_best_partition( - charges, flows) - - unique_left = left_charges.unique() - unique_right = right_charges.unique() - - tmp_left_charges = (target_charge + (unique_right * (-1))) - relevant_left_charges = unique_left.intersect(tmp_left_charges) - right_locations = {} - len_right_charges = len(right_charges) - dense_inds = [] - left_inds = [] - index_table = [] - - for n in range(len(relevant_left_charges)): - c = relevant_left_charges[n] - left_ind = 
np.nonzero(left_charges == c)[0] - if return_sorted: - index_table.append( - np.stack([ - np.arange(len(left_ind)), - np.full(len(left_ind), n, dtype=np.int64) - ], - axis=1)) - left_inds.append(left_ind) - right_charge = (target_charge + (c * (-1))) - if order is None: - dim_array = np.expand_dims(len_right_charges * left_ind, 1) - right_inds = np.nonzero(right_charges == right_charge)[0] - mat = np.tile(right_inds, (len(dim_array), 1)) - - else: - dim_array = np.expand_dims(permuted_row_inds[left_ind], 1) - right_inds = permuted_column_inds[np.nonzero( - right_charges == right_charge)[0]] - mat = np.tile(right_inds, (len(dim_array), 1)) - if return_sorted: - dense_inds.append(mat + dim_array) - else: - dense_inds.append(np.reshape(mat + dim_array, np.prod(mat.shape))) - if return_sorted: - if len(index_table) > 0: - it = np.concatenate(index_table) - ind_sort = np.argsort(np.concatenate(left_inds)) - table = it[ind_sort, :] - return np.concatenate([ - dense_inds[table[n, 1]][table[n, 0], :] for n in range(table.shape[0]) - ]) - return np.array([]) - return np.concatenate(dense_inds) - - -def _find_transposed_dense_positions( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[int, bool]], - target_charge: Union[BaseCharge, ChargeCollection], - strides: Optional[np.ndarray] = None) -> np.ndarray: + target_charges: Union[BaseCharge, ChargeCollection], + strides: Optional[np.ndarray] = None, + store_dual: Optional[bool] = False) -> np.ndarray: """ Find the dense locations of elements (i.e. 
the index-values within the DENSE tensor) in the vector of `fused_charges` resulting from fusing all elements of `charges` @@ -705,13 +1045,25 @@ def _find_transposed_dense_positions( """ _check_flows(flows) + out = {} + if store_dual: + store_charges = target_charges * (-1) + else: + store_charges = target_charges + if len(charges) == 1: fused_charges = charges[0] * flows[0] - inds = np.nonzero(fused_charges == target_charge)[0] - if strides is not None: - permuted_inds = strides[0] * np.arange(len(charges[0])) - return permuted_inds[inds] - return inds + inds = np.nonzero(fused_charges == target_charges) + for n in range(len(target_charges)): + i = inds[0][inds[1] == n] + if len(i) == 0: + continue + if strides is not None: + permuted_inds = strides[0] * np.arange(len(charges[0])) + out[store_charges.get_item(n)] = permuted_inds[i] + else: + out[store_charges.get_item(n)] = i + return out left_charges, right_charges, partition = _find_best_partition(charges, flows) if strides is not None: @@ -721,54 +1073,87 @@ def _find_transposed_dense_positions( permuted_left_inds = fuse_ndarrays(stride_arrays[0:partition]) permuted_right_inds = fuse_ndarrays(stride_arrays[partition:]) - unique_left = left_charges.unique() - unique_right = right_charges.unique() + target_charges = target_charges.unique() + unique_left, left_inverse = left_charges.unique(return_inverse=True) + unique_right, right_inverse = right_charges.unique(return_inverse=True) + + fused_unique = unique_left + unique_right + unique_inds = np.nonzero(fused_unique == target_charges) + + relevant_positions = unique_inds[0] + tmp_inds_left, tmp_inds_right = np.divmod(relevant_positions, + len(unique_right)) + + relevant_unique_left_inds = np.unique(tmp_inds_left) + left_lookup = np.empty(np.max(relevant_unique_left_inds) + 1, dtype=np.int64) + left_lookup[relevant_unique_left_inds] = np.arange( + len(relevant_unique_left_inds)) + relevant_unique_right_inds = np.unique(tmp_inds_right) + right_lookup = np.empty( + 
np.max(relevant_unique_right_inds) + 1, dtype=np.int64) + right_lookup[relevant_unique_right_inds] = np.arange( + len(relevant_unique_right_inds)) + + left_charge_labels = np.nonzero( + np.expand_dims(left_inverse, 1) == np.expand_dims( + relevant_unique_left_inds, 0)) + right_charge_labels = np.nonzero( + np.expand_dims(right_inverse, 1) == np.expand_dims( + relevant_unique_right_inds, 0)) + + len_right = len(right_charges) + + for n in range(len(target_charges)): + if len(unique_inds) > 1: + lis, ris = np.divmod(unique_inds[0][unique_inds[1] == n], + len(unique_right)) + else: + lis, ris = np.divmod(unique_inds[0], len(unique_right)) + dense_positions = [] + left_positions = [] + lookup = [] + for m in range(len(lis)): + li = lis[m] + ri = ris[m] + dense_left_positions = left_charge_labels[0][left_charge_labels[1] == + left_lookup[li]] + dense_right_positions = right_charge_labels[0][right_charge_labels[1] == + right_lookup[ri]] + if strides is None: + positions = np.expand_dims(dense_left_positions * len_right, + 1) + np.expand_dims(dense_right_positions, 0) + else: + positions = np.expand_dims( + permuted_left_inds[dense_left_positions], 1) + np.expand_dims( + permuted_right_inds[dense_right_positions], 0) - tmp_left_charges = (target_charge + (unique_right * (-1))) - relevant_left_charges = unique_left.intersect(tmp_left_charges) - right_locations = {} - len_right_charges = len(right_charges) - dense_inds = [] - left_inds = [] - index_table = [] - - for n in range(len(relevant_left_charges)): - c = relevant_left_charges[n] - left_ind = np.nonzero(left_charges == c)[0] - index_table.append( - np.stack([ - np.arange(len(left_ind)), - np.full(len(left_ind), n, dtype=np.int64) - ], - axis=1)) - left_inds.append(left_ind) - right_charge = (target_charge + (c * (-1))) - if stride_arrays is None: - dim_array = np.expand_dims(len_right_charges * left_ind, 1) - right_inds = np.nonzero(right_charges == right_charge)[0] - mat = np.tile(right_inds, (len(dim_array), 1)) + 
dense_positions.append(positions) + left_positions.append(dense_left_positions) + lookup.append( + np.stack([ + np.arange(len(dense_left_positions)), + np.full(len(dense_left_positions), fill_value=m, dtype=np.int64) + ], + axis=1)) + if len(lookup) > 0: + ind_sort = np.argsort(np.concatenate(left_positions)) + it = np.concatenate(lookup, axis=0) + table = it[ind_sort, :] + out[store_charges.get_item(n)] = np.concatenate([ + dense_positions[table[n, 1]][table[n, 0], :] + for n in range(table.shape[0]) + ]) else: - dim_array = np.expand_dims(permuted_left_inds[left_ind], 1) - right_inds = permuted_right_inds[np.nonzero( - right_charges == right_charge)[0]] - mat = np.tile(right_inds, (len(dim_array), 1)) - dense_inds.append(mat + dim_array) - - if len(index_table) > 0: - it = np.concatenate(index_table) - ind_sort = np.argsort(np.concatenate(left_inds)) - table = it[ind_sort, :] - return np.concatenate([ - dense_inds[table[n, 1]][table[n, 0], :] for n in range(table.shape[0]) - ]) - return np.array([]) - - -def find_sparse_positions(charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[int, bool]], - target_charges: Union[BaseCharge, ChargeCollection] - ) -> Dict: + out[store_charges.get_item(n)] = np.array([]) + + return out + + +def find_sparse_positions( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[int, bool]], + target_charges: Union[BaseCharge, ChargeCollection]) -> Dict: """ Find the sparse locations of elements (i.e. 
the index-values within the SPARSE tensor) in the vector `fused_charges` (resulting from @@ -862,8 +1247,8 @@ def find_sparse_positions(charges: List[Union[BaseCharge, ChargeCollection]], target_charge = target_charges[n] right_indices[(left_charge.get_item(0), target_charge.get_item(0))] = np.nonzero( - tmp_relevant_right_charges == (target_charge + - left_charge * (-1)))[0] + tmp_relevant_right_charges == ( + target_charge + left_charge * (-1)))[0] degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy @@ -931,7 +1316,7 @@ def compute_dense_to_sparse_mapping( #note: left_charges and right_charges have been fused from RIGHT to LEFT left_charges, right_charges, partition = _find_best_partition(charges, flows) nz_indices = find_dense_positions([left_charges], [1], [right_charges], [1], - target_charge=target_charge) + target_charges=target_charge) if len(nz_indices) == 0: raise ValueError( @@ -1337,11 +1722,11 @@ def reshape(tensor: BlockSparseTensor, return result -def transpose(tensor: BlockSparseTensor, - order: Union[List[int], np.ndarray], - permutation: Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False - ) -> "BlockSparseTensor": +def transpose( + tensor: BlockSparseTensor, + order: Union[List[int], np.ndarray], + permutation: Optional[np.ndarray] = None, + return_permutation: Optional[bool] = False) -> "BlockSparseTensor": """ Transpose `tensor` into the new order `order`. This routine currently shuffles data. @@ -1369,11 +1754,11 @@ def transpose(tensor: BlockSparseTensor, return result -def tensordot(tensor1: BlockSparseTensor, - tensor2: BlockSparseTensor, - axes: Sequence[Sequence[int]], - final_order: Optional[Union[List, np.ndarray]] = None - ) -> BlockSparseTensor: +def tensordot( + tensor1: BlockSparseTensor, + tensor2: BlockSparseTensor, + axes: Sequence[Sequence[int]], + final_order: Optional[Union[List, np.ndarray]] = None) -> BlockSparseTensor: """ Contract two `BlockSparseTensor`s along `axes`. 
Args: @@ -1420,8 +1805,8 @@ def tensordot(tensor1: BlockSparseTensor, raise ValueError("axes1 and axes2 have incompatible elementary" " shapes {} and {}".format(elementary_1, elementary_2)) if not np.all( - np.array([i.flow for i in elementary_1]) == (-1) * - np.array([i.flow for i in elementary_2])): + np.array([i.flow for i in elementary_1]) == + (-1) * np.array([i.flow for i in elementary_2])): raise ValueError("axes1 and axes2 have incompatible elementary" " flows {} and {}".format( np.array([i.flow for i in elementary_1]), @@ -1444,13 +1829,12 @@ def tensordot(tensor1: BlockSparseTensor, new_order1 = free_axes1 + list(axes1) new_order2 = list(axes2) + free_axes2 - t1 = time.time() + #t1 = time.time() charges1, tr_data_1, tr_partition1 = _compute_transposition_data( tensor1, new_order1, len(free_axes1)) charges2, tr_data_2, tr_partition2 = _compute_transposition_data( tensor2, new_order2, len(axes2)) - - print('compute transposition data', time.time() - t1) + #print('compute transposition data', time.time() - t1) common_charges = charges1.intersect(charges2) #get the flattened indices for the output tensor @@ -1472,13 +1856,13 @@ def tensordot(tensor1: BlockSparseTensor, #initialize the data-vector of the output with zeros; #Note that empty is not a viable choice here. 
- ts = [] - t1 = time.time() + #ts = [] + #t1 = time.time() cs, sparse_blocks, _, _, _ = _find_diagonal_sparse_blocks( [], [i.charges for i in left_indices], [i.charges for i in right_indices], [i.flow for i in left_indices], [i.flow for i in right_indices], return_data=False) - print('finding sparse positions', time.time() - t1) + #print('finding sparse positions', time.time() - t1) num_nonzero_elements = np.sum([len(v[0]) for v in sparse_blocks]) data = np.zeros( num_nonzero_elements, dtype=np.result_type(tensor1.dtype, tensor2.dtype)) @@ -1492,7 +1876,7 @@ def tensordot(tensor1: BlockSparseTensor, b2 = np.reshape(tensor2.data[permutation2[0]], permutation2[1]) res = np.matmul(b1, b2) data[sparse_block[0]] = res.flat - print('tensordot', time.time() - t1) + #print('tensordot', time.time() - t1) return BlockSparseTensor(data=data, indices=indices) @@ -1536,29 +1920,30 @@ def _compute_transposition_data( "`len(order)={}` is different form `tensor.rank={}`".format( len(order), tensor.rank)) - #check for trivial permutation - if np.all(order == np.arange(len(order))): - return - #we use flat meta data because it is #more efficient to get the fused charges using #the best partition flat_charges, flat_flows, flat_strides, flat_order = flatten_meta_data( tensor.indices, order) + #t0 = time.time() partition = _find_best_partition( flat_charges, flat_flows, return_charges=False) + # ts = [] + # t1 = time.time() + + # ts.append(t1 - t0) + # print('in _compute_transposition_data: finding best partition', ts[-1]) if transposed_partition is None: transposed_partition = _find_best_partition( [flat_charges[n] for n in flat_order], [flat_flows[n] for n in flat_order], return_charges=False) - t1 = time.time() - row_lookup, column_lookup = _compute_sparse_lookups(flat_charges[0:partition], - flat_flows[0:partition], - flat_charges[partition:], - flat_flows[partition:]) - print('lookup', time.time() - t1) - t1 = time.time() + row_lookup, column_lookup = _compute_sparse_lookups( + 
flat_charges[0:partition], flat_flows[0:partition], + flat_charges[partition:], flat_flows[partition:]) + # t2 = time.time() + # ts.append(t2 - t1) + # print('in _compute_transposition_data: computing lookup tables', ts[-1]) cs, dense_blocks = _find_diagonal_dense_blocks( [flat_charges[n] for n in flat_order[0:transposed_partition]], [flat_charges[n] for n in flat_order[transposed_partition:]], @@ -1566,10 +1951,13 @@ def _compute_transposition_data( [flat_flows[n] for n in flat_order[transposed_partition:]], row_strides=flat_strides[flat_order[0:transposed_partition]], column_strides=flat_strides[flat_order[transposed_partition:]]) - print('diagonal dense blocks', time.time() - t1) + # t3 = time.time() + # ts.append(t3 - t2) + # print('in _compute_transposition_data: finding dense blocks', ts[-1]) column_dim = np.prod( [len(flat_charges[n]) for n in range(partition, len(flat_charges))]) transposed_positions = {} + for n in range(len(dense_blocks)): b = dense_blocks[n] rinds, cinds = np.divmod(b[0], column_dim) @@ -1577,4 +1965,8 @@ def _compute_transposition_data( transposed_positions[cs.get_item(n)] = [ row_lookup[rinds] + column_lookup[cinds], b[1] ] + # t4 = time.time() + # ts.append(t4 - t3) + + # print('in _compute_transposition_data: computing the new positions', ts[-1]) return cs, transposed_positions, transposed_partition From 16105ed8170ec9531139c096d5c86e3fa3bee7ac Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 20 Jan 2020 13:57:58 -0500 Subject: [PATCH 165/212] find_sparse_positions update to be a tiny bit faster --- tensornetwork/block_tensor/block_tensor.py | 729 ++++++--------------- 1 file changed, 202 insertions(+), 527 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 2b3a071c6..85b035844 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -63,8 +63,8 @@ def _compute_sparse_lookups(row_charges, row_flows, column_charges, 
column_lookup[col_ind_sort[col_start_positions[ comm_col[n]]:col_start_positions[comm_col[n] + 1]]] = np.arange( col_charge_degeneracies[comm_col[n]]) - row_start_positions[comm_row[n]] - row_start_positions[comm_row[n] + 1] + # row_start_positions[comm_row[n]] + # row_start_positions[comm_row[n] + 1] row_lookup[ row_ind_sort[row_start_positions[comm_row[n]]:row_start_positions[ comm_row[n] + 1]]] = col_charge_degeneracies[comm_col[n]] @@ -481,10 +481,6 @@ def _find_diagonal_dense_blocks( np.nonzero(fused == unique_column_charges.zero_charge)[0], len(unique_row_charges)) common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) - # print(unique_row_charges.charges[li]) - # print(unique_column_charges.charges[ri]) - # print(common_charges.charges) - # return #print('_find_diagonal_sparse_blocks: unique charges ', time.time() - t1) if ((row_strides is None) and (column_strides is not None)) or ((row_strides is not None) and @@ -508,35 +504,20 @@ def _find_diagonal_dense_blocks( target_charges=unique_row_charges[li]) for v in row_locations.values(): v *= column_dim - # row_locations = { - # common_charges.get_item(n): column_dim * find_dense_positions( - # charges=row_charges, - # flows=row_flows, - # target_charge=common_charges[n]) - # for n in range(len(common_charges)) - # } if column_strides is not None: column_locations = find_dense_positions( charges=column_charges, flows=column_flows, target_charges=unique_column_charges[ri], strides=column_strides, - store_dual=False) + store_dual=True) else: column_locations = find_dense_positions( charges=column_charges, flows=column_flows, target_charges=unique_column_charges[ri], - store_dual=False) - - # column_locations = { - # common_charges.get_item(n): find_dense_positions( - # charges=column_charges, - # flows=column_flows, - # target_charge=common_charges[n] * (-1)) - # for n in range(len(common_charges)) - # } + store_dual=True) blocks = [] for c in unique_row_charges[li]: #numpy broadcasting is 
substantially faster than kron! @@ -547,446 +528,6 @@ def _find_diagonal_dense_blocks( return unique_row_charges[li], blocks -# def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], -# flows: List[Union[int, bool]], -# target_charge: Union[BaseCharge, ChargeCollection], -# order: Optional[np.ndarray] = None, -# return_sorted: Optional[bool] = True) -> np.ndarray: -# """ -# Find the dense locations of elements (i.e. the index-values within the DENSE tensor) -# in the vector of `fused_charges` resulting from fusing all elements of `charges` -# that have a value of `target_charge`. -# For example, given -# ``` -# charges = [[-2,0,1,0,0],[-1,0,2,1]] -# target_charge = 0 -# fused_charges = fuse_charges(charges,[1,1]) -# print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] -# ``` -# we want to find the all different blocks -# that fuse to `target_charge=0`, i.e. where `fused_charges==0`, -# together with their corresponding index-values of the data in the dense array. -# `find_dense_blocks` returns an np.ndarray containing the indices-positions of -# these elements. -# For the above example, we get: -# * for `charge[0]` = -2 and `charge[1]` = 2 we get an array [2]. Thus, `fused_charges[2]` -# was obtained from fusing -2 and 2. -# * for `charge[0]` = 0 and `charge[1]` = 0 we get an array [5, 13, 17]. Thus, -# `fused_charges[5,13,17]` were obtained from fusing 0 and 0. -# * for `charge[0]` = 1 and `charge[1]` = -1 we get an array [8]. Thus, `fused_charges[8]` -# was obtained from fusing 1 and -1. -# Args: -# charges: A list of BaseCharge or ChargeCollection. -# flows: The flow directions of the `charges`. -# target_charge: The target charge. -# order: An optional order for the elements in `charges`. -# Useful for finding dense positions in a permuted tensor -# with respect to the unpermuted order. -# Returns: -# np.ndarray: The index-positions within the dense data array -# of the elements fusing to `target_charge`. 
-# """ -# if order is not None: -# if len(order) != len(charges): -# raise ValueError("len(order) ={} != len(charges) = {}".format( -# len(order), len(charges))) - -# if not np.all(np.sort(order) == np.arange(len(order))): -# raise ValueError("order = {} is not a valid permutation of {}".format( -# order, np.arange(len(order)))) - -# _check_flows(flows) -# if len(charges) == 1: -# fused_charges = charges[0] * flows[0] -# return np.nonzero(fused_charges == target_charge)[0] - -# if order is not None: -# left_charges, right_charges, partition = _find_best_partition( -# [charges[n] for n in order], [flows[n] for n in order]) - -# dims = [len(c) for c in charges] -# strides = _get_strides( -# dims) #np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) -# stride_arrays = [np.arange(dims[n]) * strides[n] for n in order] -# permuted_row_inds = fuse_ndarrays(stride_arrays[0:partition]) -# permuted_column_inds = fuse_ndarrays(stride_arrays[partition:]) -# else: -# left_charges, right_charges, partition = _find_best_partition( -# charges, flows) -# t1 = time.time() -# unique_left = left_charges.unique() -# unique_right = right_charges.unique() - -# tmp_left_charges = (target_charge + (unique_right * (-1))) -# relevant_left_charges = unique_left.intersect(tmp_left_charges) -# right_locations = {} -# len_right_charges = len(right_charges) -# dense_inds = [] -# left_inds = [] -# index_table = [] - -# for n in range(len(relevant_left_charges)): -# c = relevant_left_charges[n] -# left_ind = np.nonzero(left_charges == c)[0] -# if return_sorted: -# index_table.append( -# np.stack([ -# np.arange(len(left_ind)), -# np.full(len(left_ind), n, dtype=np.int64) -# ], -# axis=1)) -# left_inds.append(left_ind) -# right_charge = (target_charge + (c * (-1))) -# if order is None: -# dim_array = np.expand_dims(len_right_charges * left_ind, 1) -# right_inds = np.nonzero(right_charges == right_charge)[0] -# mat = np.tile(right_inds, (len(dim_array), 1)) - -# else: -# dim_array = 
np.expand_dims(permuted_row_inds[left_ind], 1) -# right_inds = permuted_column_inds[np.nonzero( -# right_charges == right_charge)[0]] -# mat = np.tile(right_inds, (len(dim_array), 1)) -# if return_sorted: -# dense_inds.append(mat + dim_array) -# else: -# dense_inds.append(np.reshape(mat + dim_array, np.prod(mat.shape))) -# if return_sorted: -# if len(index_table) > 0: -# it = np.concatenate(index_table) -# ind_sort = np.argsort(np.concatenate(left_inds)) -# table = it[ind_sort, :] -# return np.concatenate([ -# dense_inds[table[n, 1]][table[n, 0], :] for n in range(table.shape[0]) -# ]) -# return np.array([]) -# return np.concatenate(dense_inds) - -# def find_dense_positions_new( -# charges: List[Union[BaseCharge, ChargeCollection]], -# flows: List[Union[int, bool]], -# target_charges: Union[BaseCharge, ChargeCollection], -# order: Optional[np.ndarray] = None, -# return_sorted: Optional[bool] = True) -> np.ndarray: -# """ -# Find the dense locations of elements (i.e. the index-values within the DENSE tensor) -# in the vector of `fused_charges` resulting from fusing all elements of `charges` -# that have a value of `target_charge`. -# For example, given -# ``` -# charges = [[-2,0,1,0,0],[-1,0,2,1]] -# target_charge = 0 -# fused_charges = fuse_charges(charges,[1,1]) -# print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] -# ``` -# we want to find the all different blocks -# that fuse to `target_charge=0`, i.e. where `fused_charges==0`, -# together with their corresponding index-values of the data in the dense array. -# `find_dense_blocks` returns an np.ndarray containing the indices-positions of -# these elements. -# For the above example, we get: -# * for `charge[0]` = -2 and `charge[1]` = 2 we get an array [2]. Thus, `fused_charges[2]` -# was obtained from fusing -2 and 2. -# * for `charge[0]` = 0 and `charge[1]` = 0 we get an array [5, 13, 17]. Thus, -# `fused_charges[5,13,17]` were obtained from fusing 0 and 0. 
-# * for `charge[0]` = 1 and `charge[1]` = -1 we get an array [8]. Thus, `fused_charges[8]` -# was obtained from fusing 1 and -1. -# Args: -# charges: A list of BaseCharge or ChargeCollection. -# flows: The flow directions of the `charges`. -# target_charge: The target charge. -# order: An optional order for the elements in `charges`. -# Useful for finding dense positions in a permuted tensor -# with respect to the unpermuted order. -# Returns: -# np.ndarray: The index-positions within the dense data array -# of the elements fusing to `target_charge`. -# """ -# if order is not None: -# if len(order) != len(charges): -# raise ValueError("len(order) ={} != len(charges) = {}".format( -# len(order), len(charges))) - -# if not np.all(np.sort(order) == np.arange(len(order))): -# raise ValueError("order = {} is not a valid permutation of {}".format( -# order, np.arange(len(order)))) - -# _check_flows(flows) -# if len(charges) == 1: -# fused_charges = charges[0] * flows[0] -# return np.nonzero(fused_charges == target_charge)[0] - -# if order is not None: -# raise NotImplementedError() -# ts = [] -# t00 = time.time() -# left_charges, right_charges, partition = _find_best_partition(charges, flows) - -# target_charges = target_charges.unique() -# unique_left, left_inverse = left_charges.unique(return_inverse=True) -# unique_right, right_inverse = right_charges.unique(return_inverse=True) - -# fused_unique = unique_left + unique_right -# relevant_positions = np.nonzero(fused_unique.isin(target_charges))[0] - -# relevant_unique_left_inds, relevant_unique_right_inds = np.divmod( -# relevant_positions, len(unique_right)) -# left_charge_labels = np.nonzero( -# np.expand_dims(left_inverse, 1) == np.expand_dims( -# relevant_unique_left_inds, 0)) - -# right_charge_labels = np.nonzero( -# np.expand_dims(right_inverse, 1) == np.expand_dims( -# relevant_unique_right_inds, 0)) -# # t01 = time.time() -# # ts.append(t01 - t00) -# #print(ts[-1]) - -# dense_left_pos = {} - -# len_right = 
len(right_charges) -# for n, li in enumerate(relevant_unique_left_inds): -# dense_left_pos[unique_left.get_item(li)] = left_charge_labels[0][ -# left_charge_labels[1] == n] - -# dense_right_pos = {} -# #for m in range(len(relevant_unique_right_inds)): -# for m, ri in enumerate(relevant_unique_right_inds): -# #right_charge = unique_right.get_item(relevant_unique_right_inds[m]) -# dense_right_pos[unique_right.get_item(ri)] = right_charge_labels[0][ -# right_charge_labels[1] == m] -# # t02 = time.time() -# # ts.append(t02 - t01) -# #print(ts[-1]) - -# # print(dense_right_pos[m]) -# # print(np.nonzero(right_charges == unique_right[m] * (-1))[0]) -# # print(unique_right[ri].charges) -# # print( -# # np.all(dense_right_pos[unique_right.get_item(ri)] == np.nonzero( -# # right_charges == unique_right[ri])[0])) - -# # for n in range(len(relevant_unique_right_inds)): -# # print((unique_left[n] + unique_right[n] * (-1)).charges) - -# #fused_semi_unique = unique_left + right_charges -# #relevant_positions = np.nonzero(fused_semi_unique.isin(target_charges))[0] -# #left_inds, right_inds = np.divmod(relevant_positions, len(right_charges)) -# blocks = {} - -# #t1 = time.time() -# for n, li in enumerate(relevant_unique_left_inds): -# ri = relevant_unique_right_inds[n] -# target_charge = unique_left[li] + unique_right[ri] -# blocks[target_charge.get_item(0)] = [] -# # t2 = time.time() -# # ts.append(t2 - t1) -# #print(ts[-1]) -# for n, li in enumerate(relevant_unique_left_inds): -# ri = relevant_unique_right_inds[n] -# target_charge = unique_left[li] + unique_right[ri] -# inds = np.asarray( -# (np.expand_dims(dense_left_pos[unique_left.get_item(li)], 1) + -# np.expand_dims(dense_right_pos[unique_right.get_item(ri)], 0))) -# blocks[target_charge.get_item(0)].append( -# np.reshape(inds, np.prod(inds.shape))) -# # t3 = time.time() -# # ts.append(t3 - t2) -# #print(ts[-1]) -# sorted_blocks = {} -# for k in blocks.keys(): -# sorted_blocks[k] = np.sort(np.concatenate(blocks[k])) -# 
#ts.append(time.time() - t3) -# #print(ts[-1]) -# #print('total', np.sum(ts)) -# return sorted_blocks - -# ##################################################### -# ##################################################### -# ##################################################### -# unique_left = left_charges.unique() -# unique_right = right_charges.unique() - -# tmp_left_charges = (target_charge + (unique_right * (-1))) -# relevant_left_charges = unique_left.intersect(tmp_left_charges) -# right_locations = {} -# len_right_charges = len(right_charges) -# dense_inds = [] -# left_inds = [] -# index_table = [] - -# for n in range(len(relevant_left_charges)): -# c = relevant_left_charges[n] -# left_ind = np.nonzero(left_charges == c)[0] -# if return_sorted: -# index_table.append( -# np.stack([ -# np.arange(len(left_ind)), -# np.full(len(left_ind), n, dtype=np.int64) -# ], -# axis=1)) -# left_inds.append(left_ind) -# right_charge = (target_charge + (c * (-1))) -# if order is None: -# dim_array = np.expand_dims(len_right_charges * left_ind, 1) -# right_inds = np.nonzero(right_charges == right_charge)[0] -# mat = np.tile(right_inds, (len(dim_array), 1)) - -# else: -# dim_array = np.expand_dims(permuted_row_inds[left_ind], 1) -# right_inds = permuted_column_inds[np.nonzero( -# right_charges == right_charge)[0]] -# mat = np.tile(right_inds, (len(dim_array), 1)) -# if return_sorted: -# dense_inds.append(mat + dim_array) -# else: -# dense_inds.append(np.reshape(mat + dim_array, np.prod(mat.shape))) -# if return_sorted: -# if len(index_table) > 0: -# it = np.concatenate(index_table) -# ind_sort = np.argsort(np.concatenate(left_inds)) -# table = it[ind_sort, :] -# return np.concatenate([ -# dense_inds[table[n, 1]][table[n, 0], :] for n in range(table.shape[0]) -# ]) -# return np.array([]) -# return np.concatenate(dense_inds) - -# def _find_transposed_dense_positions( -# charges: List[Union[BaseCharge, ChargeCollection]], -# flows: List[Union[int, bool]], -# target_charge: 
Union[BaseCharge, ChargeCollection], -# strides: Optional[np.ndarray] = None) -> np.ndarray: - -# #print('###########################################') -# """ -# Find the dense locations of elements (i.e. the index-values within the DENSE tensor) -# in the vector of `fused_charges` resulting from fusing all elements of `charges` -# that have a value of `target_charge`. -# For example, given -# ``` -# charges = [[-2,0,1,0,0],[-1,0,2,1]] -# target_charge = 0 -# fused_charges = fuse_charges(charges,[1,1]) -# print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] -# ``` -# we want to find the index-positions of charges -# that fuse to `target_charge=0`, i.e. where `fused_charges==0`, -# within the dense array. As one additional wrinkle, `charges` -# is a subset of the permuted charges of a tensor with rank R > len(charges), -# and `stride_arrays` are their corresponding range of strides, i.e. - -# ``` -# R=5 -# D = [2,3,4,5,6] -# tensor_flows = np.random.randint(-1,2,R) -# tensor_charges = [np.random.randing(-5,5,D[n]) for n in range(R)] -# order = np.arange(R) -# np.random.shuffle(order) -# tensor_strides = [360, 120, 30, 6, 1] - -# charges = [tensor_charges[order[n]] for n in range(3)] -# flows = [tensor_flows[order[n]] for n in range(len(3))] -# strides = [tensor_stride[order[n]] for n in range(3)] -# _ = _find_transposed_dense_positions(charges, flows, 0, strides) - -# ``` -# `_find_transposed_dense_blocks` returns an np.ndarray containing the -# index-positions of these elements calculated using `stride_arrays`. -# The result only makes sense in conjuction with the complementary -# data computed from the complementary -# elements in`tensor_charges`, -# `tensor_strides` and `tensor_flows`. -# This routine is mainly used in `_find_diagonal_dense_blocks`. - -# Args: -# charges: A list of BaseCharge or ChargeCollection. -# flows: The flow directions of the `charges`. -# target_charge: The target charge. -# strides: The strides for the `charges` subset. 
-# if `None`, natural stride ordering is assumed. - -# Returns: -# np.ndarray: The index-positions within the dense data array -# of the elements fusing to `target_charge`. -# """ - -# _check_flows(flows) -# if len(charges) == 1: -# fused_charges = charges[0] * flows[0] -# inds = np.nonzero(fused_charges == target_charge)[0] -# if strides is not None: -# permuted_inds = strides[0] * np.arange(len(charges[0])) -# return permuted_inds[inds] -# return inds -# #t1 = time.time() -# left_charges, right_charges, partition = _find_best_partition(charges, flows) -# #print('in _find_transposed_dense_blocks: find_best_partition:', -# #time.time() - t1) -# #t1 = time.time() -# if strides is not None: -# stride_arrays = [ -# np.arange(len(charges[n])) * strides[n] for n in range(len(charges)) -# ] -# permuted_left_inds = fuse_ndarrays(stride_arrays[0:partition]) -# permuted_right_inds = fuse_ndarrays(stride_arrays[partition:]) - -# #print('in _find_transposed_dense_blocks: fusing ndarrays:', time.time() - t1) -# #t1 = time.time() -# unique_left = left_charges.unique() -# unique_right = right_charges.unique() -# #print('in _find_transposed_dense_blocks: finding unique:', time.time() - t1) -# #t1 = time.time() -# tmp_left_charges = (target_charge + (unique_right * (-1))) -# relevant_left_charges = unique_left.intersect(tmp_left_charges) -# right_locations = {} -# len_right_charges = len(right_charges) -# dense_inds = [] -# left_inds = [] -# index_table = [] -# # print('in _find_transposed_dense_blocks: finding relevant charges:', -# # time.time() - t1) -# #t1 = time.time() -# for n in range(len(relevant_left_charges)): -# c = relevant_left_charges[n] -# left_ind = np.nonzero(left_charges == c)[0] -# index_table.append( -# np.stack([ -# np.arange(len(left_ind)), -# np.full(len(left_ind), n, dtype=np.int64) -# ], -# axis=1)) -# left_inds.append(left_ind) -# right_charge = (target_charge + (c * (-1))) -# if strides is None: -# dim_array = np.expand_dims(len_right_charges * left_ind, 
1) -# right_inds = np.expand_dims( -# np.nonzero(right_charges == right_charge)[0], 0) - -# else: -# dim_array = np.expand_dims(permuted_left_inds[left_ind], 1) -# right_inds = np.expand_dims( -# permuted_right_inds[np.nonzero(right_charges == right_charge)[0]], 0) -# mat = np.tile(right_inds, (len(dim_array), 1)) -# dense_inds.append(mat + dim_array) -# # print('in _find_transposed_dense_blocks: running nested for loop:', -# # time.time() - t1) -# # t1 = time.time() - -# if len(index_table) > 0: -# it = np.concatenate(index_table) -# ind_sort = np.argsort(np.concatenate(left_inds)) -# table = it[ind_sort, :] -# return np.concatenate([ -# dense_inds[table[n, 1]][table[n, 0], :] for n in range(table.shape[0]) -# ]) -# # print('in _find_transposed_dense_blocks: finding the sorted indices:', -# # time.time() - t1) -# # t1 = time.time() - -# return np.array([]) - - def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], flows: List[Union[int, bool]], target_charges: Union[BaseCharge, ChargeCollection], @@ -1054,16 +595,24 @@ def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], if len(charges) == 1: fused_charges = charges[0] * flows[0] inds = np.nonzero(fused_charges == target_charges) - for n in range(len(target_charges)): - i = inds[0][inds[1] == n] - if len(i) == 0: - continue + if len(target_charges) > 1: + for n in range(len(target_charges)): + i = inds[0][inds[1] == n] + if len(i) == 0: + continue + if strides is not None: + permuted_inds = strides[0] * np.arange(len(charges[0])) + out[store_charges.get_item(n)] = permuted_inds[i] + else: + out[store_charges.get_item(n)] = i + return out + else: if strides is not None: permuted_inds = strides[0] * np.arange(len(charges[0])) - out[store_charges.get_item(n)] = permuted_inds[i] + out[store_charges.get_item(n)] = permuted_inds[inds[0]] else: - out[store_charges.get_item(n)] = i - return out + out[store_charges.get_item(n)] = inds[0] + return out left_charges, 
right_charges, partition = _find_best_partition(charges, flows) if strides is not None: @@ -1073,7 +622,8 @@ def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], permuted_left_inds = fuse_ndarrays(stride_arrays[0:partition]) permuted_right_inds = fuse_ndarrays(stride_arrays[partition:]) - target_charges = target_charges.unique() + # unique_target_charges, inds = target_charges.unique(return_index=True) + # target_charges = target_charges[np.sort(inds)] unique_left, left_inverse = left_charges.unique(return_inverse=True) unique_right, right_inverse = right_charges.unique(return_inverse=True) @@ -1205,71 +755,196 @@ def find_sparse_positions( left_charges, right_charges, partition = _find_best_partition(charges, flows) - target_charges = target_charges.unique() - unique_left = left_charges.unique() - unique_right = right_charges.unique() - fused = unique_left + unique_right - - #compute all unique charges that can add up to - #target_charges - left_inds, right_inds = [], [] - for target_charge in target_charges: - li, ri = np.divmod(np.nonzero(fused == target_charge)[0], len(unique_right)) - left_inds.append(li) - right_inds.append(ri) - - #now compute the relevant unique left and right charges - unique_left_charges = unique_left[np.unique(np.concatenate(left_inds))] - unique_right_charges = unique_right[np.unique(np.concatenate(right_inds))] - - #only keep those charges that are relevant - relevant_left_charges = left_charges[left_charges.isin(unique_left_charges)] - relevant_right_charges = right_charges[right_charges.isin( - unique_right_charges)] - - unique_right_charges, right_dims = relevant_right_charges.unique( - return_counts=True) - right_degeneracies = dict(zip(unique_right_charges, right_dims)) - #generate a degeneracy vector which for each value r in relevant_right_charges - #holds the corresponding number of non-zero elements `relevant_right_charges` - #that can add up to `target_charges`. 
- degeneracy_vector = np.empty(len(relevant_left_charges), dtype=np.int64) - right_indices = {} + # unique_target_charges, inds = target_charges.unique(return_index=True) + # target_charges = target_charges[np.sort(inds)] + unique_left, left_inverse = left_charges.unique(return_inverse=True) + unique_right, right_inverse = right_charges.unique( + return_inverse=True, return_counts=True) - for n in range(len(unique_left_charges)): - left_charge = unique_left_charges[n] - total_charge = left_charge + unique_right_charges - total_degeneracy = np.sum(right_dims[total_charge.isin(target_charges)]) - tmp_relevant_right_charges = relevant_right_charges[ - relevant_right_charges.isin((target_charges + left_charge * (-1)))] + fused_unique = unique_left + unique_right + unique_inds = np.nonzero(fused_unique == target_charges) + relevant_positions = unique_inds[0] + tmp_inds_left, tmp_inds_right = np.divmod(relevant_positions, + len(unique_right)) - for n in range(len(target_charges)): - target_charge = target_charges[n] - right_indices[(left_charge.get_item(0), - target_charge.get_item(0))] = np.nonzero( - tmp_relevant_right_charges == ( - target_charge + left_charge * (-1)))[0] + relevant_unique_left_inds = np.unique(tmp_inds_left) + left_lookup = np.empty(np.max(relevant_unique_left_inds) + 1, dtype=np.int64) + left_lookup[relevant_unique_left_inds] = np.arange( + len(relevant_unique_left_inds)) + relevant_unique_right_inds = np.unique(tmp_inds_right) + right_lookup = np.empty( + np.max(relevant_unique_right_inds) + 1, dtype=np.int64) + right_lookup[relevant_unique_right_inds] = np.arange( + len(relevant_unique_right_inds)) + + left_charge_labels = np.nonzero( + np.expand_dims(left_inverse, 1) == np.expand_dims( + relevant_unique_left_inds, 0)) + relevant_left_inverse = np.arange(len(left_charge_labels[0])) + + right_charge_labels = np.expand_dims(right_inverse, 1) == np.expand_dims( + relevant_unique_right_inds, 0) + right_block_information = {} + for n in 
relevant_unique_left_inds: + ri = np.nonzero((unique_left[n] + unique_right).isin(target_charges))[0] + tmp_inds = np.nonzero(right_charge_labels[:, right_lookup[ri]]) + right_block_information[n] = [ri, np.arange(len(tmp_inds[0])), tmp_inds[1]] - degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy + relevant_right_inverse = np.arange(len(right_charge_labels[0])) + #generate a degeneracy vector which for each value r in relevant_right_charges + #holds the corresponding number of non-zero elements `relevant_right_charges` + #that can add up to `target_charges`. + degeneracy_vector = np.empty(len(left_charge_labels[0]), dtype=np.int64) + for n in range(len(relevant_unique_left_inds)): + degeneracy_vector[relevant_left_inverse[ + left_charge_labels[1] == n]] = np.sum(right_dims[tmp_inds_right[ + tmp_inds_left == relevant_unique_left_inds[n]]]) start_positions = np.cumsum(degeneracy_vector) - degeneracy_vector - blocks = {t: [] for t in target_charges} - # iterator returns tuple of `int` for ChargeCollection objects - # and `int` for Ba seCharge objects (both hashable) - for left_charge in unique_left_charges: - a = np.expand_dims(start_positions[relevant_left_charges == left_charge], 0) - for target_charge in target_charges: - ri = right_indices[(left_charge, target_charge)] - if len(ri) != 0: - b = np.expand_dims(ri, 1) - tmp = a + b - blocks[target_charge].append(np.reshape(tmp, np.prod(tmp.shape))) out = {} - for target_charge in target_charges: - out[target_charge] = np.concatenate(blocks[target_charge]) + for n in range(len(target_charges)): + block = [] + if len(unique_inds) > 1: + lis, ris = np.divmod(unique_inds[0][unique_inds[1] == n], + len(unique_right)) + else: + lis, ris = np.divmod(unique_inds[0], len(unique_right)) + + for m in range(len(lis)): + a = np.expand_dims( + start_positions[relevant_left_inverse[left_charge_labels[1] == + left_lookup[lis[m]]]], 0) + + ri_tmp, arange, tmp_inds = right_block_information[lis[m]] + b = 
np.expand_dims(arange[tmp_inds == np.nonzero(ri_tmp == ris[m])[0]], 1) + inds = a + b + block.append(np.reshape(inds, np.prod(inds.shape))) + out[target_charges.get_item(n)] = np.concatenate(block) return out +# def find_sparse_positions_2( +# charges: List[Union[BaseCharge, ChargeCollection]], +# flows: List[Union[int, bool]], +# target_charges: Union[BaseCharge, ChargeCollection]) -> Dict: +# """ +# Find the sparse locations of elements (i.e. the index-values within +# the SPARSE tensor) in the vector `fused_charges` (resulting from +# fusing `left_charges` and `right_charges`) +# that have a value of `target_charges`, assuming that all elements +# different from `target_charges` are `0`. +# For example, given +# ``` +# left_charges = [-2,0,1,0,0] +# right_charges = [-1,0,2,1] +# target_charges = [0,1] +# fused_charges = fuse_charges([left_charges, right_charges],[1,1]) +# print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] +# ``` 0 1 2 3 4 5 6 7 8 +# we want to find the all different blocks +# that fuse to `target_charges=[0,1]`, i.e. where `fused_charges==0` or `1`, +# together with their corresponding sparse index-values of the data in the sparse array, +# assuming that all elements in `fused_charges` different from `target_charges` are 0. + +# `find_sparse_blocks` returns a dict mapping integers `target_charge` +# to an array of integers denoting the sparse locations of elements within +# `fused_charges`. +# For the above example, we get: +# * `target_charge=0`: [0,1,3,5,7] +# * `target_charge=1`: [2,4,6,8] +# Args: +# left_charges: An np.ndarray of integer charges. +# left_flow: The flow direction of the left charges. +# right_charges: An np.ndarray of integer charges. +# right_flow: The flow direction of the right charges. +# target_charge: The target charge. +# Returns: +# dict: Mapping integers to np.ndarray of integers. 
+# """ +# #FIXME: this is probably still not optimal + +# _check_flows(flows) +# if len(charges) == 1: +# fused_charges = charges[0] * flows[0] +# unique_charges = fused_charges.unique() +# target_charges = target_charges.unique() +# relevant_target_charges = unique_charges.intersect(target_charges) +# relevant_fused_charges = fused_charges[fused_charges.isin( +# relevant_target_charges)] +# return { +# c: np.nonzero(relevant_fused_charges == c)[0] +# for c in relevant_target_charges +# } + +# left_charges, right_charges, partition = _find_best_partition(charges, flows) + +# unique_target_charges, inds = target_charges.unique(return_index=True) +# target_charges = target_charges[np.sort(inds)] + +# unique_left = left_charges.unique() +# unique_right = right_charges.unique() +# fused = unique_left + unique_right + +# #compute all unique charges that can add up to +# #target_charges +# left_inds, right_inds = [], [] +# for target_charge in target_charges: +# li, ri = np.divmod(np.nonzero(fused == target_charge)[0], len(unique_right)) +# left_inds.append(li) +# right_inds.append(ri) + +# #now compute the relevant unique left and right charges +# unique_left_charges = unique_left[np.unique(np.concatenate(left_inds))] +# unique_right_charges = unique_right[np.unique(np.concatenate(right_inds))] + +# #only keep those charges that are relevant +# relevant_left_charges = left_charges[left_charges.isin(unique_left_charges)] +# relevant_right_charges = right_charges[right_charges.isin( +# unique_right_charges)] + +# unique_right_charges, right_dims = relevant_right_charges.unique( +# return_counts=True) +# right_degeneracies = dict(zip(unique_right_charges, right_dims)) +# #generate a degeneracy vector which for each value r in relevant_right_charges +# #holds the corresponding number of non-zero elements `relevant_right_charges` +# #that can add up to `target_charges`. 
+# degeneracy_vector = np.empty(len(relevant_left_charges), dtype=np.int64) +# right_indices = {} + +# for n in range(len(unique_left_charges)): +# left_charge = unique_left_charges[n] +# total_charge = left_charge + unique_right_charges +# total_degeneracy = np.sum(right_dims[total_charge.isin(target_charges)]) +# tmp_relevant_right_charges = relevant_right_charges[ +# relevant_right_charges.isin((target_charges + left_charge * (-1)))] + +# for n in range(len(target_charges)): +# target_charge = target_charges[n] +# right_indices[(left_charge.get_item(0), +# target_charge.get_item(0))] = np.nonzero( +# tmp_relevant_right_charges == ( +# target_charge + left_charge * (-1)))[0] + +# degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy + +# start_positions = np.cumsum(degeneracy_vector) - degeneracy_vector +# blocks = {t: [] for t in target_charges} +# # iterator returns tuple of `int` for ChargeCollection objects +# # and `int` for Ba seCharge objects (both hashable) +# for left_charge in unique_left_charges: +# a = np.expand_dims(start_positions[relevant_left_charges == left_charge], 0) +# for target_charge in target_charges: +# ri = right_indices[(left_charge, target_charge)] +# if len(ri) != 0: +# b = np.expand_dims(ri, 1) +# tmp = a + b +# blocks[target_charge].append(np.reshape(tmp, np.prod(tmp.shape))) +# out = {} +# for target_charge in target_charges: +# out[target_charge] = np.concatenate(blocks[target_charge]) +# return out + + def compute_dense_to_sparse_mapping( charges: List[Union[BaseCharge, ChargeCollection]], flows: List[Union[bool, int]], From 3be9a817e4ff9c24bf1bcda08d57431a99c00fd2 Mon Sep 17 00:00:00 2001 From: Kshithij Iyer Date: Tue, 21 Jan 2020 00:34:45 +0530 Subject: [PATCH 166/212] Remove duplicate Dockerfile from root directory (#431) --- Dockerfile | 21 --------------------- 1 file changed, 21 deletions(-) delete mode 100644 Dockerfile diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 
78d3f8839..000000000 --- a/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM ubuntu - -# Install basic tools. -RUN DEBIAN_FRONTEND=noninteractive apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ - python3-pip python3-tk git emacs vim locales - -# Configure UTF-8 encoding. -RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && locale-gen -ENV LANG en_US.UTF-8 -ENV LANGUAGE en_US:en -ENV LC_ALL en_US.UTF-8 - -# Make python3 default -RUN rm -f /usr/bin/python && ln -s /usr/bin/python3 /usr/bin/python - -# Install Tensor Network with the needed Python libraries. -RUN pip3 install tensornetwork - -WORKDIR /TensorNetwork/examples - -EXPOSE 8888 From 83ff4af11dcd03d6c0db70e139c7137154dc015d Mon Sep 17 00:00:00 2001 From: "Hyunbyung, Park" Date: Mon, 20 Jan 2020 11:34:03 -0800 Subject: [PATCH 167/212] BaseNode / Edge class name type check protection add (#424) * BaseNode / Edge class text input protection added (#423) BaseNode class - Add protection to name, axis_names *Protected in 3 place *Initialize stage - __init__ *Function use setting - set_name / add_axis_names *Property - Add @property to name to protect direct adding node.name = 123 Edge class - Add protection to name *Protected in 3 place *Initialize stage - __init__ *Function use setting - set_name *Property * BaseNode / Edge class text input protection code revise (#423) *if type(name) != str *if not isinstance(name, str) *change using type to isinstance to follow pylint Co-authored-by: Chase Roberts --- tensornetwork/network_components.py | 36 ++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/tensornetwork/network_components.py b/tensornetwork/network_components.py index 38f51a995..b6569872f 100644 --- a/tensornetwork/network_components.py +++ b/tensornetwork/network_components.py @@ -69,10 +69,18 @@ def __init__(self, """ self.is_disabled = False - self.name = name if name is not None else '__unnamed_node__' + if not name: + 
name = '__unnamed_node__' + else: + if not isinstance(name, str): + raise TypeError("Node name should be str type") + self.name = name self.backend = backend self._shape = shape if axis_names is not None: + for axis_name in axis_names: + if not isinstance(axis_name, str): + raise TypeError("axis_names should be str type") self._edges = [ Edge(node1=self, axis1=i, name=edge_name) for i, edge_name in enumerate(axis_names) @@ -125,6 +133,9 @@ def add_axis_names(self, axis_names: List[Text]) -> None: raise ValueError("axis_names is not the same length as the tensor shape." "axis_names length: {}, tensor.shape length: {}".format( len(axis_names), len(self.shape))) + for axis_name in axis_names: + if not isinstance(axis_name, str): + raise TypeError("axis_names should be str type") self.axis_names = axis_names[:] def add_edge(self, @@ -312,6 +323,8 @@ def get_all_dangling(self) -> Set["Edge"]: return {edge for edge in self.edges if edge.is_dangling()} def set_name(self, name) -> None: + if not isinstance(name, str): + raise TypeError("Node name should be str type") self.name = name def has_nondangling_edge(self) -> bool: @@ -373,6 +386,16 @@ def edges(self, edges: List) -> None: self.name)) self._edges = edges + @property + def name(self) -> Text: + return self._name + + @name.setter + def name(self, name) -> None: + if not isinstance(name, str): + raise TypeError("Node name should be str type") + self._name = name + @property def axis_names(self) -> List[Text]: return self._axis_names @@ -382,8 +405,12 @@ def axis_names(self, axis_names: List[Text]) -> None: if len(axis_names) != len(self.shape): raise ValueError("Expected {} names, only got {}.".format( len(self.shape), len(axis_names))) + for axis_name in axis_names: + if not isinstance(axis_name, str): + raise TypeError("axis_names should be str type") self._axis_names = axis_names + @property def signature(self) -> Optional[int]: if self.is_disabled: @@ -810,6 +837,9 @@ def __init__(self, self.is_disabled = False if 
not name: name = '__unnamed_edge__' + else: + if not isinstance(name, str): + raise TypeError("Edge name should be str type") self._name = name self.node1 = node1 self._axis1 = axis1 @@ -844,6 +874,8 @@ def name(self, name) -> None: if self.is_disabled: raise ValueError( 'Edge has been disabled, setting its name is no longer possible') + if not isinstance(name, str): + raise TypeError("Edge name should be str type") self._name = name @property @@ -988,6 +1020,8 @@ def is_being_used(self) -> bool: return result def set_name(self, name: Text) -> None: + if not isinstance(name, str): + raise TypeError("Edge name should be str type") self.name = name def _save_edge(self, edge_group: h5py.Group) -> None: From b7c7e7cfbc026fcde5c956e9458da7e9c58d7810 Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 20 Jan 2020 14:44:37 -0500 Subject: [PATCH 168/212] fix bug in _get_diagonal_dense_blocks --- tensornetwork/block_tensor/block_tensor.py | 29 +++++++++++++++++++--- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 85b035844..46d49984b 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -30,9 +30,15 @@ Tensor = Any -def _compute_sparse_lookups(row_charges, row_flows, column_charges, - column_flows): +def _compute_sparse_lookups(row_charges: Union[BaseCharge, ChargeCollection], + row_flows, column_charges, column_flows): + """ + Compute lookup tables for looking up how dense index positions map + to sparse index positions for the diagonal blocks a symmetric matrix. 
+ Args: + row_charges: + """ column_flows = list(-np.asarray(column_flows)) fused_column_charges = fuse_charges(column_charges, column_flows) fused_row_charges = fuse_charges(row_charges, row_flows) @@ -479,7 +485,7 @@ def _find_diagonal_dense_blocks( fused = unique_row_charges + unique_column_charges li, ri = np.divmod( np.nonzero(fused == unique_column_charges.zero_charge)[0], - len(unique_row_charges)) + len(unique_column_charges)) common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) #print('_find_diagonal_sparse_blocks: unique charges ', time.time() - t1) if ((row_strides is None) and @@ -758,7 +764,7 @@ def find_sparse_positions( # unique_target_charges, inds = target_charges.unique(return_index=True) # target_charges = target_charges[np.sort(inds)] unique_left, left_inverse = left_charges.unique(return_inverse=True) - unique_right, right_inverse = right_charges.unique( + unique_right, right_inverse, right_dims = right_charges.unique( return_inverse=True, return_counts=True) fused_unique = unique_left + unique_right @@ -1059,6 +1065,18 @@ def __init__(self, data: np.ndarray, indices: List[Index]) -> None: self.data = np.asarray(data.flat) #do not copy data + def todense(self) -> np.ndarray: + """ + Map the sparse tensor to dense storage. 
+ + """ + out = np.asarray(np.zeros(self.dense_shape, dtype=self.dtype).flat) + + charges = self.charges + out[np.nonzero(fuse_charges(charges, self.flows) == charges[0].zero_charge) + [0]] = self.data + return np.reshape(out, self.dense_shape) + @classmethod def randn(cls, indices: List[Index], dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": @@ -1522,6 +1540,9 @@ def tensordot( indices = left_indices + right_indices if final_order is not None: indices = [indices[n] for n in final_order] + + for n, i in enumerate(indices): + i.name = 'index_{}'.format(n) if i.name is None else i.name index_names = [i.name for i in indices] unique = np.unique(index_names) #rename indices if they are not unique From 2df212e1ee86df452b4f3754c651d24775633d2e Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 20 Jan 2020 15:18:12 -0500 Subject: [PATCH 169/212] fix bug --- tensornetwork/block_tensor/block_tensor.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 46d49984b..35f71ee08 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -1562,12 +1562,11 @@ def tensordot( num_nonzero_elements = np.sum([len(v[0]) for v in sparse_blocks]) data = np.zeros( num_nonzero_elements, dtype=np.result_type(tensor1.dtype, tensor2.dtype)) - for n in range(len(common_charges)): c = common_charges.get_item(n) permutation1 = tr_data_1[c] permutation2 = tr_data_2[c] - sparse_block = sparse_blocks[n] + sparse_block = sparse_blocks[np.nonzero(cs == c)[0][0]] b1 = np.reshape(tensor1.data[permutation1[0]], permutation1[1]) b2 = np.reshape(tensor2.data[permutation2[0]], permutation2[1]) res = np.matmul(b1, b2) From bbfe1f0ca3baa38bab497d8793b26459939187fe Mon Sep 17 00:00:00 2001 From: mganahl Date: Mon, 20 Jan 2020 15:40:15 -0500 Subject: [PATCH 170/212] fixed bug in transpose --- tensornetwork/block_tensor/block_tensor.py | 17 
+++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 35f71ee08..4b8847d59 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -303,7 +303,7 @@ def compute_num_nonzero(charges: List[np.ndarray], raise ValueError( "given leg-charges `charges` and flows `flows` are incompatible " "with a symmetric tensor") - return accumulated_degeneracies[res][0] + return np.squeeze(accumulated_degeneracies[res][0]) def _find_diagonal_sparse_blocks( @@ -417,7 +417,7 @@ def _find_diagonal_sparse_blocks( blocks.append([inds, (len(rlocs), cdegs)]) else: blocks.append(np.reshape(data[inds], (len(rlocs), cdegs))) - return common_charges, blocks, start_positions, row_locations, column_degeneracies + return common_charges, blocks #, start_positions, row_locations, column_degeneracies def _find_diagonal_dense_blocks( @@ -1146,6 +1146,7 @@ def random(cls, indices: List[Index], flows = [i.flow for i in indices] num_non_zero_elements = compute_num_nonzero(charges, flows) + dtype = dtype if dtype is not None else np.float64 def init_random(): @@ -1220,16 +1221,19 @@ def transpose( flat_charges, flat_flows, _, flat_order = flatten_meta_data( self.indices, order) - cs, sparse_blocks, _, _, _ = _find_diagonal_sparse_blocks( + cs, sparse_blocks = _find_diagonal_sparse_blocks( [], [flat_charges[n] for n in flat_order[0:tr_partition]], [flat_charges[n] for n in flat_order[tr_partition:]], [flat_flows[n] for n in flat_order[0:tr_partition]], [flat_flows[n] for n in flat_order[tr_partition:]], return_data=False) + + data = np.empty(len(self.data), dtype=self.dtype) for n in range(len(sparse_blocks)): sparse_block = sparse_blocks[n] - self.data[sparse_block[0]] = self.data[tr_data[cs.get_item(n)][0]] - + data[sparse_block[0]] = self.data[tr_data[cs.get_item(n)][0]] + self.indices = [self.indices[o] for o in order] + self.data = 
data return self def reset_shape(self) -> None: @@ -1554,7 +1558,8 @@ def tensordot( #Note that empty is not a viable choice here. #ts = [] #t1 = time.time() - cs, sparse_blocks, _, _, _ = _find_diagonal_sparse_blocks( + #Note: `cs` may contain charges that are not present in `common_charges` + cs, sparse_blocks = _find_diagonal_sparse_blocks( [], [i.charges for i in left_indices], [i.charges for i in right_indices], [i.flow for i in left_indices], [i.flow for i in right_indices], return_data=False) From a814de89a6f665531c9fcf27c8b0bfdd197a1bbb Mon Sep 17 00:00:00 2001 From: MichaelMarien Date: Tue, 21 Jan 2020 18:37:44 +0100 Subject: [PATCH 171/212] Test network operations (#441) * added test for mps switch backend * added switch backend method to MPS * added test for network operations switch backend * make sure switch_backend not only fixes tensor but also node property * added switch_backend to init * added missing tests for network operations * some linting issues --- .../tests/network_operations_test.py | 87 +++++++++++++++++++ 1 file changed, 87 insertions(+) diff --git a/tensornetwork/tests/network_operations_test.py b/tensornetwork/tests/network_operations_test.py index 189c08594..4b13d5656 100644 --- a/tensornetwork/tests/network_operations_test.py +++ b/tensornetwork/tests/network_operations_test.py @@ -15,6 +15,7 @@ import tensornetwork as tn import pytest import numpy as np +from tensornetwork.backends.base_backend import BaseBackend def test_split_node_full_svd_names(backend): @@ -334,3 +335,89 @@ def test_switch_backend(backend): nodes = [a, b, c] tn.switch_backend(nodes, backend) assert nodes[0].backend.name == backend + + +def test_norm_of_node_without_backend_raises_error(): + node = np.random.rand(3, 3, 3) + with pytest.raises(AttributeError): + tn.norm(node) + + +def test_conj_of_node_without_backend_raises_error(): + node = np.random.rand(3, 3, 3) + with pytest.raises(AttributeError): + tn.conj(node) + + +def 
test_transpose_of_node_without_backend_raises_error(): + node = np.random.rand(3, 3, 3) + with pytest.raises(AttributeError): + tn.transpose(node, permutation=[]) + + +def test_split_node_of_node_without_backend_raises_error(): + node = np.random.rand(3, 3, 3) + with pytest.raises(AttributeError): + tn.split_node(node, left_edges=[], right_edges=[]) + + +def test_split_node_qr_of_node_without_backend_raises_error(): + node = np.random.rand(3, 3, 3) + with pytest.raises(AttributeError): + tn.split_node_qr(node, left_edges=[], right_edges=[]) + + +def test_split_node_rq_of_node_without_backend_raises_error(): + node = np.random.rand(3, 3, 3) + with pytest.raises(AttributeError): + tn.split_node_rq(node, left_edges=[], right_edges=[]) + + +def test_split_node_full_svd_of_node_without_backend_raises_error(): + node = np.random.rand(3, 3, 3) + with pytest.raises(AttributeError): + tn.split_node_full_svd(node, left_edges=[], right_edges=[]) + + +def test_reachable_raises_value_error(): + with pytest.raises(ValueError): + tn.reachable({}) + + +def test_check_correct_raises_value_error_1(backend): + a = tn.Node(np.random.rand(3, 3, 3), backend=backend) + b = tn.Node(np.random.rand(3, 3, 3), backend=backend) + edge = a.edges[0] + edge.node1 = b + edge.node2 = b + with pytest.raises(ValueError): + tn.check_correct({a, b}) + + +def test_check_correct_raises_value_error_2(backend): + a = tn.Node(np.random.rand(3, 3, 3), backend=backend) + b = tn.Node(np.random.rand(3, 3, 3), backend=backend) + edge = a.edges[0] + edge.axis1 = -1 + with pytest.raises(ValueError): + tn.check_correct({a, b}) + + +def test_get_all_nodes(backend): + a = tn.Node(np.random.rand(3, 3, 3), backend=backend) + b = tn.Node(np.random.rand(3, 3, 3), backend=backend) + edge = tn.connect(a[0], b[0]) + assert tn.get_all_nodes({edge}) == {a, b} + + +def test_contract_trace_edges(backend): + a = tn.Node(np.random.rand(3, 3, 3), backend=backend) + with pytest.raises(ValueError): + tn.contract_trace_edges(a) + + 
+def test_switch_backend_raises_error(backend): + a = tn.Node(np.random.rand(3, 3, 3)) + a.backend = BaseBackend() + with pytest.raises(NotImplementedError): + tn.switch_backend({a}, backend) From 1d5c53cbdfbdfb8d2d5797615381ae72cc1b6e91 Mon Sep 17 00:00:00 2001 From: kosehy Date: Tue, 21 Jan 2020 09:43:45 -0800 Subject: [PATCH 172/212] Rename backend shape methods (google#355) (#436) concat function * rename from cocate to shape_concat shape function * rename from shape to shape_tensor prod function * rename from prod to shape_prod * function name is duplicated in shell_backend.py * rename existing shape_prod function to shape_product * need to change the name later Co-authored-by: Chase Roberts --- tensornetwork/backends/base_backend.py | 6 ++--- tensornetwork/backends/jax/jax_backend.py | 2 +- .../backends/jax/jax_backend_test.py | 14 ++++++------ tensornetwork/backends/numpy/numpy_backend.py | 6 ++--- .../backends/numpy/numpy_backend_test.py | 14 ++++++------ .../backends/pytorch/pytorch_backend.py | 6 ++--- .../backends/pytorch/pytorch_backend_test.py | 14 ++++++------ tensornetwork/backends/shell/shell_backend.py | 10 ++++----- .../backends/shell/shell_backend_test.py | 16 +++++++------- .../backends/tensorflow/tensorflow_backend.py | 6 ++--- .../tensorflow/tensorflow_backend_test.py | 14 ++++++------ tensornetwork/network_components.py | 22 ++++++++++--------- tensornetwork/network_operations.py | 4 ++-- 13 files changed, 68 insertions(+), 66 deletions(-) diff --git a/tensornetwork/backends/base_backend.py b/tensornetwork/backends/base_backend.py index 1d9d249b7..0368e6d6b 100644 --- a/tensornetwork/backends/base_backend.py +++ b/tensornetwork/backends/base_backend.py @@ -135,12 +135,12 @@ def rq_decomposition( raise NotImplementedError( "Backend '{}' has not implemented rq_decomposition.".format(self.name)) - def concat(self, values: Sequence[Tensor], axis) -> Tensor: + def shape_concat(self, values: Sequence[Tensor], axis) -> Tensor: """Concatenate a 
sequence of tensors together about the given axis.""" raise NotImplementedError("Backend '{}' has not implemented concat.".format( self.name)) - def shape(self, tensor: Tensor) -> Tensor: + def shape_tensor(self, tensor: Tensor) -> Tensor: """Get the shape of a tensor. Args: @@ -163,7 +163,7 @@ def shape_tuple(self, tensor: Tensor) -> Tuple[Optional[int], ...]: raise NotImplementedError( "Backend '{}' has not implemented shape_tuple.".format(self.name)) - def prod(self, values: Tensor) -> Tensor: + def shape_prod(self, values: Tensor) -> Tensor: """Take the product of all of the elements in values""" raise NotImplementedError("Backend '{}' has not implemented prod.".format( self.name)) diff --git a/tensornetwork/backends/jax/jax_backend.py b/tensornetwork/backends/jax/jax_backend.py index 9773f6026..a3064912f 100644 --- a/tensornetwork/backends/jax/jax_backend.py +++ b/tensornetwork/backends/jax/jax_backend.py @@ -39,7 +39,7 @@ def convert_to_tensor(self, tensor: Tensor) -> Tensor: result = self.jax.jit(lambda x: x)(tensor) return result - def concat(self, values: Tensor, axis: int) -> Tensor: + def shape_concat(self, values: Tensor, axis: int) -> Tensor: return np.concatenate(values, axis) def randn(self, diff --git a/tensornetwork/backends/jax/jax_backend_test.py b/tensornetwork/backends/jax/jax_backend_test.py index 08b21059d..330dbe224 100644 --- a/tensornetwork/backends/jax/jax_backend_test.py +++ b/tensornetwork/backends/jax/jax_backend_test.py @@ -35,20 +35,20 @@ def test_transpose(): np.testing.assert_allclose(expected, actual) -def test_concat(): +def test_shape_concat(): backend = jax_backend.JaxBackend() a = backend.convert_to_tensor(2 * np.ones((1, 3, 1))) b = backend.convert_to_tensor(np.ones((1, 2, 1))) - expected = backend.concat((a, b), axis=1) + expected = backend.shape_concat((a, b), axis=1) actual = np.array([[[2.0], [2.0], [2.0], [1.0], [1.0]]]) np.testing.assert_allclose(expected, actual) -def test_shape(): +def test_shape_tensor(): backend = 
jax_backend.JaxBackend() a = backend.convert_to_tensor(np.ones([2, 3, 4])) - assert isinstance(backend.shape(a), tuple) - actual = backend.shape(a) + assert isinstance(backend.shape_tensor(a), tuple) + actual = backend.shape_tensor(a) expected = np.array([2, 3, 4]) np.testing.assert_allclose(expected, actual) @@ -60,10 +60,10 @@ def test_shape_tuple(): assert actual == (2, 3, 4) -def test_prod(): +def test_shape_prod(): backend = jax_backend.JaxBackend() a = backend.convert_to_tensor(2 * np.ones([1, 2, 3, 4])) - actual = np.array(backend.prod(a)) + actual = np.array(backend.shape_prod(a)) assert actual == 2**24 diff --git a/tensornetwork/backends/numpy/numpy_backend.py b/tensornetwork/backends/numpy/numpy_backend.py index e7c4e8ecd..41a0061c7 100644 --- a/tensornetwork/backends/numpy/numpy_backend.py +++ b/tensornetwork/backends/numpy/numpy_backend.py @@ -61,16 +61,16 @@ def rq_decomposition( ) -> Tuple[Tensor, Tensor]: return decompositions.rq_decomposition(self.np, tensor, split_axis) - def concat(self, values: Tensor, axis: int) -> Tensor: + def shape_concat(self, values: Tensor, axis: int) -> Tensor: return self.np.concatenate(values, axis) - def shape(self, tensor: Tensor) -> Tensor: + def shape_tensor(self, tensor: Tensor) -> Tensor: return tensor.shape def shape_tuple(self, tensor: Tensor) -> Tuple[Optional[int], ...]: return tensor.shape - def prod(self, values: Tensor) -> Tensor: + def shape_prod(self, values: Tensor) -> Tensor: return self.np.prod(values) def sqrt(self, tensor: Tensor) -> Tensor: diff --git a/tensornetwork/backends/numpy/numpy_backend_test.py b/tensornetwork/backends/numpy/numpy_backend_test.py index 49645d876..e8688ce62 100644 --- a/tensornetwork/backends/numpy/numpy_backend_test.py +++ b/tensornetwork/backends/numpy/numpy_backend_test.py @@ -33,20 +33,20 @@ def test_transpose(): np.testing.assert_allclose(expected, actual) -def test_concat(): +def test_shape_concat(): backend = numpy_backend.NumPyBackend() a = 
backend.convert_to_tensor(2 * np.ones((1, 3, 1))) b = backend.convert_to_tensor(np.ones((1, 2, 1))) - expected = backend.concat((a, b), axis=1) + expected = backend.shape_concat((a, b), axis=1) actual = np.array([[[2.0], [2.0], [2.0], [1.0], [1.0]]]) np.testing.assert_allclose(expected, actual) -def test_shape(): +def test_shape_tensor(): backend = numpy_backend.NumPyBackend() a = backend.convert_to_tensor(np.ones([2, 3, 4])) - assert isinstance(backend.shape(a), tuple) - actual = backend.shape(a) + assert isinstance(backend.shape_tensor(a), tuple) + actual = backend.shape_tensor(a) expected = np.array([2, 3, 4]) np.testing.assert_allclose(expected, actual) @@ -58,10 +58,10 @@ def test_shape_tuple(): assert actual == (2, 3, 4) -def test_prod(): +def test_shape_prod(): backend = numpy_backend.NumPyBackend() a = backend.convert_to_tensor(2 * np.ones([1, 2, 3, 4])) - actual = np.array(backend.prod(a)) + actual = np.array(backend.shape_prod(a)) assert actual == 2**24 diff --git a/tensornetwork/backends/pytorch/pytorch_backend.py b/tensornetwork/backends/pytorch/pytorch_backend.py index 0caba598a..b8e9a1ed7 100644 --- a/tensornetwork/backends/pytorch/pytorch_backend.py +++ b/tensornetwork/backends/pytorch/pytorch_backend.py @@ -69,16 +69,16 @@ def rq_decomposition( ) -> Tuple[Tensor, Tensor]: return decompositions.rq_decomposition(self.torch, tensor, split_axis) - def concat(self, values: Tensor, axis: int) -> Tensor: + def shape_concat(self, values: Tensor, axis: int) -> Tensor: return np.concatenate(values, axis) - def shape(self, tensor: Tensor) -> Tensor: + def shape_tensor(self, tensor: Tensor) -> Tensor: return self.torch.tensor(list(tensor.shape)) def shape_tuple(self, tensor: Tensor) -> Tuple[Optional[int], ...]: return tuple(tensor.shape) - def prod(self, values: Tensor) -> int: + def shape_prod(self, values: Tensor) -> int: return np.prod(np.array(values)) def sqrt(self, tensor: Tensor) -> Tensor: diff --git 
a/tensornetwork/backends/pytorch/pytorch_backend_test.py b/tensornetwork/backends/pytorch/pytorch_backend_test.py index ca0cd92f3..e55d71b4b 100644 --- a/tensornetwork/backends/pytorch/pytorch_backend_test.py +++ b/tensornetwork/backends/pytorch/pytorch_backend_test.py @@ -34,20 +34,20 @@ def test_transpose(): np.testing.assert_allclose(expected, actual) -def test_concat(): +def test_shape_concat(): backend = pytorch_backend.PyTorchBackend() a = backend.convert_to_tensor(2 * np.ones((1, 3, 1))) b = backend.convert_to_tensor(np.ones((1, 2, 1))) - expected = backend.concat((a, b), axis=1) + expected = backend.shape_concat((a, b), axis=1) actual = np.array([[[2.0], [2.0], [2.0], [1.0], [1.0]]]) np.testing.assert_allclose(expected, actual) -def test_shape(): +def test_shape_tensor(): backend = pytorch_backend.PyTorchBackend() a = backend.convert_to_tensor(np.ones([2, 3, 4])) - assert isinstance(backend.shape(a), torch.Tensor) - actual = backend.shape(a) + assert isinstance(backend.shape_tensor(a), torch.Tensor) + actual = backend.shape_tensor(a) expected = np.array([2, 3, 4]) np.testing.assert_allclose(expected, actual) @@ -59,10 +59,10 @@ def test_shape_tuple(): assert actual == (2, 3, 4) -def test_prod(): +def test_shape_prod(): backend = pytorch_backend.PyTorchBackend() a = backend.convert_to_tensor(2 * np.ones([1, 2, 3, 4])) - actual = np.array(backend.prod(a)) + actual = np.array(backend.shape_prod(a)) assert actual == 2**24 diff --git a/tensornetwork/backends/shell/shell_backend.py b/tensornetwork/backends/shell/shell_backend.py index 3365fae5e..33b30a99c 100644 --- a/tensornetwork/backends/shell/shell_backend.py +++ b/tensornetwork/backends/shell/shell_backend.py @@ -107,7 +107,7 @@ def rq_decomposition(self, tensor: Tensor, r = ShellTensor((center_dim,) + right_dims) return q, r - def concat(self, values: Sequence[Tensor], axis: int) -> Tensor: + def shape_concat(self, values: Sequence[Tensor], axis: int) -> Tensor: shape = values[0].shape if axis < 0: axis += 
len(shape) @@ -119,20 +119,20 @@ def concat_shape(self, values) -> Sequence: tuple_values = (tuple(v) for v in values) return functools.reduce(operator.concat, tuple_values) - def shape(self, tensor: Tensor) -> Tuple: + def shape_tensor(self, tensor: Tensor) -> Tuple: return tensor.shape def shape_tuple(self, tensor: Tensor) -> Tuple[Optional[int], ...]: return tensor.shape - def prod(self, values: Tensor) -> int: + def shape_prod(self, values: Tensor) -> int: # This is different from the BaseBackend prod! # prod calculates the product of tensor elements and cannot implemented # for shell tensors # This returns the product of sizes instead - return self.shape_prod(values.shape) + return self.shape_product(values.shape) - def shape_prod(self, shape: Sequence[int]) -> int: + def shape_product(self, shape: Sequence[int]) -> int: return functools.reduce(operator.mul, shape) def sqrt(self, tensor: Tensor) -> Tensor: diff --git a/tensornetwork/backends/shell/shell_backend_test.py b/tensornetwork/backends/shell/shell_backend_test.py index 3974dc1f7..af17c3354 100644 --- a/tensornetwork/backends/shell/shell_backend_test.py +++ b/tensornetwork/backends/shell/shell_backend_test.py @@ -62,16 +62,16 @@ def test_svd_decomposition_with_max_values(): assert x.shape == y.shape -def test_concat(): +def test_shape_concat(): args = { "values": [np.ones([3, 2, 5]), np.zeros([3, 2, 5]), np.ones([3, 3, 5])] } args["axis"] = 1 - assertBackendsAgree("concat", args) + assertBackendsAgree("shape_concat", args) args["axis"] = -2 - assertBackendsAgree("concat", args) + assertBackendsAgree("shape_concat", args) def test_concat_shape(): @@ -80,10 +80,10 @@ def test_concat_shape(): assert result == (5, 2, 3, 4, 6) -def test_shape(): +def test_shape_tensor(): tensor = np.ones([3, 5, 2]) - np_result = numpy_backend.NumPyBackend().shape(tensor) - sh_result = shell_backend.ShellBackend().shape(tensor) + np_result = numpy_backend.NumPyBackend().shape_tensor(tensor) + sh_result = 
shell_backend.ShellBackend().shape_tensor(tensor) assert np_result == sh_result @@ -94,8 +94,8 @@ def test_shape_tuple(): assert np_result == sh_result -def test_prod(): - result = shell_backend.ShellBackend().prod(np.ones([3, 5, 2])) +def test_shape_prod(): + result = shell_backend.ShellBackend().shape_prod(np.ones([3, 5, 2])) assert result == 30 diff --git a/tensornetwork/backends/tensorflow/tensorflow_backend.py b/tensornetwork/backends/tensorflow/tensorflow_backend.py index 5f7cd1201..c87d464c5 100644 --- a/tensornetwork/backends/tensorflow/tensorflow_backend.py +++ b/tensornetwork/backends/tensorflow/tensorflow_backend.py @@ -64,16 +64,16 @@ def rq_decomposition(self, tensor: Tensor, split_axis: int) -> Tuple[Tensor, Tensor]: return decompositions.rq_decomposition(self.tf, tensor, split_axis) - def concat(self, values: Tensor, axis: int) -> Tensor: + def shape_concat(self, values: Tensor, axis: int) -> Tensor: return self.tf.concat(values, axis) - def shape(self, tensor: Tensor) -> Tensor: + def shape_tensor(self, tensor: Tensor) -> Tensor: return self.tf.shape(tensor) def shape_tuple(self, tensor: Tensor) -> Tuple[Optional[int], ...]: return tuple(tensor.shape.as_list()) - def prod(self, values: Tensor) -> Tensor: + def shape_prod(self, values: Tensor) -> Tensor: return self.tf.reduce_prod(values) def sqrt(self, tensor: Tensor) -> Tensor: diff --git a/tensornetwork/backends/tensorflow/tensorflow_backend_test.py b/tensornetwork/backends/tensorflow/tensorflow_backend_test.py index 25110d66c..838771598 100644 --- a/tensornetwork/backends/tensorflow/tensorflow_backend_test.py +++ b/tensornetwork/backends/tensorflow/tensorflow_backend_test.py @@ -34,20 +34,20 @@ def test_transpose(): np.testing.assert_allclose(expected, actual) -def test_concat(): +def test_shape_concat(): backend = tensorflow_backend.TensorFlowBackend() a = backend.convert_to_tensor(2 * np.ones((1, 3, 1))) b = backend.convert_to_tensor(np.ones((1, 2, 1))) - expected = backend.concat((a, b), 
axis=1) + expected = backend.shape_concat((a, b), axis=1) actual = np.array([[[2.0], [2.0], [2.0], [1.0], [1.0]]]) np.testing.assert_allclose(expected, actual) -def test_shape(): +def test_shape_tensor(): backend = tensorflow_backend.TensorFlowBackend() a = backend.convert_to_tensor(np.ones([2, 3, 4])) - assert isinstance(backend.shape(a), type(a)) - actual = backend.shape(a) + assert isinstance(backend.shape_tensor(a), type(a)) + actual = backend.shape_tensor(a) expected = np.array([2, 3, 4]) np.testing.assert_allclose(expected, actual) @@ -59,10 +59,10 @@ def test_shape_tuple(): assert actual == (2, 3, 4) -def test_prod(): +def test_shape_prod(): backend = tensorflow_backend.TensorFlowBackend() a = backend.convert_to_tensor(2 * np.ones([1, 2, 3, 4])) - actual = np.array(backend.prod(a)) + actual = np.array(backend.shape_prod(a)) assert actual == 2**24 diff --git a/tensornetwork/network_components.py b/tensornetwork/network_components.py index b6569872f..1f6de8917 100644 --- a/tensornetwork/network_components.py +++ b/tensornetwork/network_components.py @@ -1196,10 +1196,12 @@ def _flatten_trace_edges(edges: List[Edge], perm_front = set(range(len(node.edges))) - set(perm_back) perm_front = sorted(perm_front) perm = perm_front + perm_back - new_dim = backend.prod([backend.shape(node.tensor)[e.axis1] for e in edges]) + new_dim = backend.shape_prod( + [backend.shape_tensor(node.tensor)[e.axis1] for e in edges]) node.reorder_axes(perm) - unaffected_shape = backend.shape(node.tensor)[:len(perm_front)] - new_shape = backend.concat([unaffected_shape, [new_dim, new_dim]], axis=-1) + unaffected_shape = backend.shape_tensor(node.tensor)[:len(perm_front)] + new_shape = backend.shape_concat( + [unaffected_shape, [new_dim, new_dim]], axis=-1) node.tensor = backend.reshape(node.tensor, new_shape) edge1 = Edge(node1=node, axis1=len(perm_front), name="TraceFront") edge2 = Edge(node1=node, axis1=len(perm_front) + 1, name="TraceBack") @@ -1271,11 +1273,11 @@ def 
flatten_edges(edges: List[Edge], perm_back.append(node.edges.index(edge)) perm_front = sorted(set(range(len(node.edges))) - set(perm_back)) node.reorder_axes(perm_front + perm_back) - old_tensor_shape = backend.shape(node.tensor) + old_tensor_shape = backend.shape_tensor(node.tensor) # Calculate the new axis dimension as a product of the other # axes dimensions. - flattened_axis_dim = backend.prod(old_tensor_shape[len(perm_front):]) - new_tensor_shape = backend.concat( + flattened_axis_dim = backend.shape_prod(old_tensor_shape[len(perm_front):]) + new_tensor_shape = backend.shape_concat( [old_tensor_shape[:len(perm_front)], [flattened_axis_dim]], axis=-1) new_tensor = backend.reshape(node.tensor, new_tensor_shape) # Modify the node in place. Currently, this is they only method that @@ -1363,8 +1365,8 @@ def _split_trace_edge( perm_front = set(range(len(node.edges))) - set(perm_back) perm_front = sorted(perm_front) node.reorder_axes(perm_front + perm_back) - unaffected_shape = backend.shape(node.tensor)[:len(perm_front)] - new_shape = backend.concat([unaffected_shape, shape, shape], axis=-1) + unaffected_shape = backend.shape_tensor(node.tensor)[:len(perm_front)] + new_shape = backend.shape_concat([unaffected_shape, shape, shape], axis=-1) node.tensor = backend.reshape(node.tensor, new_shape) # Trim edges and add placeholder edges for new axes. node.edges = node.edges[:len(perm_front)] + 2 * len(shape) * [None] @@ -1438,8 +1440,8 @@ def split_edge(edge: Edge, perm_front = set(range(len(node.edges))) - set(perm_back) perm_front = sorted(perm_front) node.reorder_axes(perm_front + perm_back) - unaffected_shape = backend.shape(node.tensor)[:len(perm_front)] - new_shape = backend.concat([unaffected_shape, shape], axis=-1) + unaffected_shape = backend.shape_tensor(node.tensor)[:len(perm_front)] + new_shape = backend.shape_concat([unaffected_shape, shape], axis=-1) node.tensor = backend.reshape(node.tensor, new_shape) # in-place update # Trim edges. 
node.edges = node.edges[:len(perm_front)] diff --git a/tensornetwork/network_operations.py b/tensornetwork/network_operations.py index fa718a430..5aea214db 100644 --- a/tensornetwork/network_operations.py +++ b/tensornetwork/network_operations.py @@ -295,8 +295,8 @@ def split_node( # the first axis of vh. If we don't, it's possible one of the other axes of # vh will be the same size as sqrt_s and would multiply across that axis # instead, which is bad. - sqrt_s_broadcast_shape = backend.concat( - [backend.shape(sqrt_s), [1] * (len(vh.shape) - 1)], axis=-1) + sqrt_s_broadcast_shape = backend.shape_concat( + [backend.shape_tensor(sqrt_s), [1] * (len(vh.shape) - 1)], axis=-1) vh_s = vh * backend.reshape(sqrt_s, sqrt_s_broadcast_shape) left_node = Node( u_s, name=left_name, axis_names=left_axis_names, backend=backend) From d8772bb8833b761265f1b56430d33f161b8bd315 Mon Sep 17 00:00:00 2001 From: mganahl Date: Tue, 21 Jan 2020 15:04:23 -0500 Subject: [PATCH 173/212] fixed final_order passing for tensordot --- tensornetwork/block_tensor/block_tensor.py | 124 ++++++++++++--------- 1 file changed, 69 insertions(+), 55 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 4b8847d59..36de10f83 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -1217,7 +1217,9 @@ def transpose( #check for trivial permutation if np.all(order == np.arange(len(order))): return self - _, tr_data, tr_partition = _compute_transposition_data(self, order) + #TODO: flatten_meta_data is called within _compute_transposition_data + #as well. reuse it. 
+ _, tr_data, tr_partition = _compute_transposition_data(self.indices, order) flat_charges, flat_flows, _, flat_order = flatten_meta_data( self.indices, order) @@ -1527,12 +1529,6 @@ def tensordot( new_order1 = free_axes1 + list(axes1) new_order2 = list(axes2) + free_axes2 #t1 = time.time() - charges1, tr_data_1, tr_partition1 = _compute_transposition_data( - tensor1, new_order1, len(free_axes1)) - charges2, tr_data_2, tr_partition2 = _compute_transposition_data( - tensor2, new_order2, len(axes2)) - #print('compute transposition data', time.time() - t1) - common_charges = charges1.intersect(charges2) #get the flattened indices for the output tensor left_indices = [] @@ -1542,8 +1538,6 @@ def tensordot( for n in free_axes2: right_indices.extend(tensor2.indices[n].get_elementary_indices()) indices = left_indices + right_indices - if final_order is not None: - indices = [indices[n] for n in final_order] for n, i in enumerate(indices): i.name = 'index_{}'.format(n) if i.name is None else i.name @@ -1554,33 +1548,66 @@ def tensordot( for n, i in enumerate(indices): i.name = 'index_{}'.format(n) + charges1, tr_data_1, tr_partition1 = _compute_transposition_data( + tensor1.indices, new_order1, len(free_axes1)) + charges2, tr_data_2, tr_partition2 = _compute_transposition_data( + tensor2.indices, new_order2, len(axes2)) + + common_charges = charges1.intersect(charges2) + #initialize the data-vector of the output with zeros; - #Note that empty is not a viable choice here. 
- #ts = [] - #t1 = time.time() - #Note: `cs` may contain charges that are not present in `common_charges` - cs, sparse_blocks = _find_diagonal_sparse_blocks( - [], [i.charges for i in left_indices], [i.charges for i in right_indices], - [i.flow for i in left_indices], [i.flow for i in right_indices], - return_data=False) - #print('finding sparse positions', time.time() - t1) - num_nonzero_elements = np.sum([len(v[0]) for v in sparse_blocks]) - data = np.zeros( - num_nonzero_elements, dtype=np.result_type(tensor1.dtype, tensor2.dtype)) - for n in range(len(common_charges)): - c = common_charges.get_item(n) - permutation1 = tr_data_1[c] - permutation2 = tr_data_2[c] - sparse_block = sparse_blocks[np.nonzero(cs == c)[0][0]] - b1 = np.reshape(tensor1.data[permutation1[0]], permutation1[1]) - b2 = np.reshape(tensor2.data[permutation2[0]], permutation2[1]) - res = np.matmul(b1, b2) - data[sparse_block[0]] = res.flat - #print('tensordot', time.time() - t1) - return BlockSparseTensor(data=data, indices=indices) + if final_order is not None: + #in this case we view the result of the diagonal multiplication + #as a transposition of the final tensor + final_indices = [indices[n] for n in final_order] + _, reverse_order = np.unique(final_order, return_index=True) + + charges_final, tr_data_final, tr_partition_final = _compute_transposition_data( + final_indices, reverse_order, len(free_axes1)) + num_nonzero_elements = np.sum([len(t[0]) for t in tr_data_final.values()]) + data = np.zeros( + num_nonzero_elements, + dtype=np.result_type(tensor1.dtype, tensor2.dtype)) + + for n in range(len(common_charges)): + c = common_charges.get_item(n) + permutation1 = tr_data_1[c] + permutation2 = tr_data_2[c] + permutationfinal = tr_data_final[c] + b1 = np.reshape(tensor1.data[permutation1[0]], permutation1[1]) + b2 = np.reshape(tensor2.data[permutation2[0]], permutation2[1]) + res = np.matmul(b1, b2) + data[permutationfinal[0]] = res.flat + return BlockSparseTensor(data=data, 
indices=final_indices) + else: + #Note: `cs` may contain charges that are not present in `common_charges` + cs, sparse_blocks = _find_diagonal_sparse_blocks( + [], [i.charges for i in left_indices], + [i.charges for i in right_indices], [i.flow for i in left_indices], + [i.flow for i in right_indices], + return_data=False) + #print('finding sparse positions', time.time() - t1) + num_nonzero_elements = np.sum([len(v[0]) for v in sparse_blocks]) + #Note that empty is not a viable choice here. + data = np.zeros( + num_nonzero_elements, + dtype=np.result_type(tensor1.dtype, tensor2.dtype)) + for n in range(len(common_charges)): + c = common_charges.get_item(n) + permutation1 = tr_data_1[c] + permutation2 = tr_data_2[c] + sparse_block = sparse_blocks[np.nonzero(cs == c)[0][0]] + b1 = np.reshape(tensor1.data[permutation1[0]], permutation1[1]) + b2 = np.reshape(tensor2.data[permutation2[0]], permutation2[1]) + res = np.matmul(b1, b2) + data[sparse_block[0]] = res.flat + #print('tensordot', time.time() - t1) + return BlockSparseTensor(data=data, indices=indices) def flatten_meta_data(indices, order): + for n, i in enumerate(indices): + i.name = 'index_{}'.format(n) elementary_indices = {} flat_elementary_indices = [] for n in range(len(indices)): @@ -1596,43 +1623,39 @@ def flatten_meta_data(indices, order): flat_strides = _get_strides(flat_dims) flat_order = np.concatenate( [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) + return flat_charges, flat_flows, flat_strides, flat_order def _compute_transposition_data( - tensor: BlockSparseTensor, + indices: BlockSparseTensor, order: Union[List[int], np.ndarray], transposed_partition: Optional[int] = None ) -> Tuple[Union[BaseCharge, ChargeCollection], Dict, int]: """ Args: - tensor: A symmetric tensor. + indices: A symmetric tensor. order: The new order of indices. permutation: An np.ndarray of int for reshuffling the data, typically the output of a prior call to `transpose`. 
Passing `permutation` can greatly speed up the transposition. return_permutation: If `True`, return the the permutation data. Returns: - BlockSparseTensor: The transposed tensor. + """ - if len(order) != tensor.rank: + if len(order) != len(indices): raise ValueError( - "`len(order)={}` is different form `tensor.rank={}`".format( - len(order), tensor.rank)) + "`len(order)={}` is different form `len(indices)={}`".format( + len(order), len(indices))) #we use flat meta data because it is #more efficient to get the fused charges using #the best partition flat_charges, flat_flows, flat_strides, flat_order = flatten_meta_data( - tensor.indices, order) - #t0 = time.time() + indices, order) partition = _find_best_partition( flat_charges, flat_flows, return_charges=False) - # ts = [] - # t1 = time.time() - # ts.append(t1 - t0) - # print('in _compute_transposition_data: finding best partition', ts[-1]) if transposed_partition is None: transposed_partition = _find_best_partition( [flat_charges[n] for n in flat_order], @@ -1641,9 +1664,6 @@ def _compute_transposition_data( row_lookup, column_lookup = _compute_sparse_lookups( flat_charges[0:partition], flat_flows[0:partition], flat_charges[partition:], flat_flows[partition:]) - # t2 = time.time() - # ts.append(t2 - t1) - # print('in _compute_transposition_data: computing lookup tables', ts[-1]) cs, dense_blocks = _find_diagonal_dense_blocks( [flat_charges[n] for n in flat_order[0:transposed_partition]], [flat_charges[n] for n in flat_order[transposed_partition:]], @@ -1651,9 +1671,7 @@ def _compute_transposition_data( [flat_flows[n] for n in flat_order[transposed_partition:]], row_strides=flat_strides[flat_order[0:transposed_partition]], column_strides=flat_strides[flat_order[transposed_partition:]]) - # t3 = time.time() - # ts.append(t3 - t2) - # print('in _compute_transposition_data: finding dense blocks', ts[-1]) + column_dim = np.prod( [len(flat_charges[n]) for n in range(partition, len(flat_charges))]) transposed_positions 
= {} @@ -1661,12 +1679,8 @@ def _compute_transposition_data( for n in range(len(dense_blocks)): b = dense_blocks[n] rinds, cinds = np.divmod(b[0], column_dim) - start_pos = row_lookup[rinds] transposed_positions[cs.get_item(n)] = [ row_lookup[rinds] + column_lookup[cinds], b[1] ] - # t4 = time.time() - # ts.append(t4 - t3) - - # print('in _compute_transposition_data: computing the new positions', ts[-1]) + #return row_lookup, column_lookup, cs, dense_blocks return cs, transposed_positions, transposed_partition From e213de9ceae4a4003469cb65f78879bcb9e02514 Mon Sep 17 00:00:00 2001 From: Olga Okrut <46659064+olgOk@users.noreply.github.com> Date: Tue, 21 Jan 2020 14:41:17 -0800 Subject: [PATCH 174/212] Added SAT Tutorial (#438) * Add files via upload Added SAT Tutorials * Update SATTutorial.ipynb * Update SATTutorial.ipynb * Update SATTutorial.ipynb * Update SATTutorial.ipynb * License changed * Created using Colaboratory Co-authored-by: Chase Roberts --- examples/sat/SATTutorial.ipynb | 465 +++++++++++++++++++++++++++++++++ 1 file changed, 465 insertions(+) create mode 100644 examples/sat/SATTutorial.ipynb diff --git a/examples/sat/SATTutorial.ipynb b/examples/sat/SATTutorial.ipynb new file mode 100644 index 000000000..2b6dc9bd9 --- /dev/null +++ b/examples/sat/SATTutorial.ipynb @@ -0,0 +1,465 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "SATTutorial.ipynb", + "provenance": [], + "collapsed_sections": [], + "include_colab_link": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github", + "colab_type": "text" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "iJCTaVM1JRVw", + "colab_type": "text" + }, + "source": [ + "# SAT Problem with TensorNetwork\n", + "by Volha Okrut\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9tNIRZfiKn7Z", + 
"colab_type": "text" + }, + "source": [ + "## Boolean Logic\n", + "\n", + "Suppose we have a simple [CFN expression](https://en.wikipedia.org/wiki/Conjunctive_normal_form), a logical expression based on logical AND (called conjunction) and logical OR (disjunctions). Strictly defined, CFN expression is a conjunction (AND) of several disjunctions (OR) of logical literals (*Xi*).\n", + "\n", + "Let me come up with the following example of CFN expression:\n", + "\n", + "(True AND False) OR (NOT True AND True)\n", + "\n", + "Now let's simplify it:\n", + "\n", + "False OR False\n", + "\n", + "And at the end we get:\n", + "\n", + "False\n", + "\n", + "That's simple!\n", + "\n", + "Now, instead of logical AND I will use ∨ notation, instead of logical OR - ∧. Additionally, if I want to say NOT True I use ¬True. This is just a formality, and yet it allows us to write these expressions in a more clearer and readable form.\n", + "\n", + "Of course, the concept of boolean expressions would be pretty useless if always you had to start with the same positions of True and False. So let me introduce variables (known as literals) into the formula - this allows me to have the same expression evaluating to different end-results.\n", + "Now instead of our initial formula we have:\n", + "\n", + "(X1 ∨ X2) ∧ (¬X1 ∨ X3)\n", + "\n", + "You can notice that if we place *X1* to be True, *X2* to be False, and *X3* to be True, we would have the same expression as in the begining of the article. If we assign the variables different values, then we of course will get different result." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Q8CjHN1bWo05", + "colab_type": "text" + }, + "source": [ + "## SAT Problem and real-life example\n", + "\n", + "So what is a SAT Problem? SAT Problem - short from *SATISFABILITY* problem - concerns with the number of ways in which you can arrange the given literals in order for the whole expression to be evaluated to True. 
\n", + "\n", + "Let's start by jumping in with an example of a SAT problem. Suppose that you need to go grocery shopping, and need to visit three stores: Costco, Home Depot, and Walmart. Costco is open in the morning and evening, Home Depot is open in the evening only, and Walmart is open in the morning only. You can only be in one place at a time, and shopping at a given store takes up the entire morning or evening. Can you go to all three stores in a day?\n", + "To a human, it is intuitively obvious that the answer is no. Since Home Depot and Walmart offer us only one time option (evening and morning, respectively), then we have to go there at those times. However, this leaves no time for a Costco trip, so it's evident that this \"puzzle\" has no solution.\n", + "Now suppose instead of three stores, you were given three thousand (each with its own schedule), and instead of two times, you were given all the hours of a day? At this point, the problem becomes intractable for a human. Luckily, though, cruching numbers and analyzing thousands of different options are what computers excel at.\n", + "\n", + "So, how could we encode the above problem in a way that a computer could understand?\n", + "\n", + "One solution would be to re-write the problem involving boolean variables, which can either be true or false. For example, using the example of three stores and two times, let's make six variables:\n", + "\n", + "* *Ce*: Whether we go to Costco in the evening.\n", + "* *Cm*: Whether we go to Costco in the morning.\n", + "* *He*: Whether we go to Home Depot in the evening.\n", + "* *Hm*: Whether we go to Home Depot in the morning.\n", + "* *We*: Whether we go to Walmart in the evening.\n", + "* *Wm*: Whether we go to Walmart in the morning.\n", + "\n", + "Each of these variables if true (or 1) if we visit the store at the corresponding time, and false (or 0) otherwise. 
Next, we form some constraints on these variables, and express them in a unified form we could feed to a computer.\n", + "\n", + "First, we know that we can only be in one place at a given time. For example, if we are at Costco in the morning (that is, Cm=1\n", + "), then we cannot be at Home Depot or Walmart in the morning (and thus Hm=Wm=0). Using notation introduced above we can express that constrains as:\n", + "\n", + "*Cm ∨ ¬Hm ∨ ¬Wm*\n", + "\n", + "Of course, we know that at a given time, we could go to Costco, Home Depot, or Walmart, so Cm\n", + "doesn't have to be true. Thus, the constraint that we only go to one place in the evening can be represented as:\n", + "\n", + "*( Ce ∧ ¬He ∧ ¬We ) ∨ ( ¬Ce ∧ He ∧ ¬We ) ∨ ( ¬Ce ∧ ¬He ∧ We )*\n", + "\n", + "Similarly, the constraint that we only go to one place in the morning is:\n", + "\n", + "*( Cm ∧ ¬Hm ∧ ¬Wm ) ∨ ( ¬Cm ∧ Hm ∧ ¬Wm ) ∨ ( ¬Cm ∧ ¬Hm ∧ Wm )*\n", + "\n", + "Next, we need a constraint that we go to Costco in either the morning or evening, which we can represent as *Cm ∨ Ce*: either we go to Costco in the morning, or in the evening. We have similar constraints for Walmart and Home Depot, yielding the following constraint to represent that we must go to each store:\n", + "\n", + "*( Cm ∨ Ce) ∧ ( Hm ∨ He ) ∧ ( Wm ∨ We )*\n", + "\n", + "Thus, the full set of constraints for our problem is\n", + "\n", + "( Cm ∨ Ce) ∧ ( Hm ∨ He ) ∧ ( Wm ∨ We ) ∧ ( Cm ∧ ¬Hm ∧ ¬Wm ) ∨ ( ¬Cm ∧ Hm ∧ ¬Wm ) ∨ ( ¬Cm ∧ ¬Hm ∧ Wm ) ∧ ( Ce ∧ ¬He ∧ ¬We ) ∨ ( ¬Ce ∧ He ∧ ¬We ) ∨ ( ¬Ce ∧ ¬He ∧ We )\n", + "\n", + "\n", + "To find out whether we can complete our shopping trip, we must find a set of true or false values for all our boolean variables such that the constraints are satisfied. This type of problem is known as the boolean satisfiability problem, often abbreviated to just \"SAT\". 
A program that finds solutions to these problems is known as a SAT solver.\n", + "\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4Itv5A30Stxv", + "colab_type": "text" + }, + "source": [ + "## SAT Problem\n", + "\n", + "SAT problem has been viewed from many different ways, in this tutorial we will learn how to solve this problem using tensors and TensorNetwork library. To be comfartable with tensors you have to know some basics about *Penrose’s Graphical Notation*. Check this nice article on [Medium](https://medium.com/analytics-vidhya/penroses-graphical-notation-fe4c2f24cf3b) that covers this topic extensively.\n", + "\n", + "Suppose we are given four variables: X1, X2, X3, X4. We want to find truth values to all four Xi literals so that the CNF expression is true:\n", + "\n", + "( ¬X1 ∨ ¬X3 ∨ ¬X4 ) ∧ ( X2 ∨ X3 ∨ ¬X4 ) ∧ ( X1 ∨ ¬X2 ∨ X4 ) ∧ ( X1 ∨ X3 ∨ X4 ) ∧ ( ¬X1 ∨ X2 ∨ ¬X3 )\n", + "\n", + "First, we need to define how we encode our input CNF expressions that we want to satisfy:\n", + "\n", + "* Each logical literal is represented as either a positive or negative integer, where i and -i correpond to the logical literals xi and ¬xi, respectively.\n", + "* Each clause in the expression, i.e., disjunction of literals, is represented as a tuple of such encoding of literals, e.g., (-1, 2, -3) represents the disjunction ( ¬x1 ∨ x2 ∨ ¬x3 ).\n", + "* The entire conjunctive expression is a list of such tuples, e.g., the expression above would have encoding:\n", + "[(-1, -3, -4), (2, 3, -4), (1, -2, 4), (1, 3, 4), (-1, 2, -3)]\n", + "\n", + "It is worth to say that we can solve two problems here:\n", + "\n", + "\n", + "1. Find the exact number of all possible solutions to the given SAT problem if these solutions exist.\n", + "2. 
Find all possible solutions to a given SAT problem if these solutions exist.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "choohffKZatl", + "colab_type": "text" + }, + "source": [ + "## SAT solver using tensors and TensorNetwork\n", + "\n", + "### Finding all possible solutions to the given SAT \n", + "\n", + "First, we create a function\n", + "```\n", + "# sat_tn(clauses)\n", + "```\n", + "which solves the given 3SAT problem. \n", + "\n", + "We find the maximum indexed logical variable we have, and use that as our count of the number of logical variables. We iterate through each disjunction and calculate absolute value for each variable in the conjunction. The number of logical variables is the maximum element in *var_set* which is then stored in *num_vars*:\n", + "```\n", + "var_set = set()\n", + "for clause in clauses:\n", + " var_set |= {abs(x) for x in clause}\n", + "num_vars = max(var_set)\n", + "```\n", + "After iterating expression [(-1, -3, -4), (2, 3, -4), (1, -2, 4), (1, 3, 4), (-1, 2, -3)], \n", + "I should get the following result:\n", + "```\n", + "var_set final = {1, 3, 4, 2}\n", + "num_vars = 4\n", + "```\n", + "\n", + "Now, we will build the tensor network. Variable nodes (literals) will be represented as *num_vars* tensors with the shape(1,2) filled with ones:\n", + "```\n", + "node [1 1]\n", + "```\n", + "\n", + "This particular shape of nodes is needed for matrix multiplication. I will explain why we need it in just a moment. Since each of the variable nodes is a vector, each of them will have only one edge, which I will store as unconnected edges (dangling edges) in *var_edges*:\n", + "```\n", + "var_edges.append(new_node[0])\n", + "```\n", + "The second step is to create nodes for all clauses. For each clause we will create a tensor of third rank (a 3D matrix) with two fields in each dimenshion as we want as many fields as there are possible solutions (variation of initial literals) to this clause. 
Each logical variable *Xi* has two posible literals: itself (*Xi*), and its negation (*¬Xi*). Thus for each clause we have *2^3 = 8* solutions and each soltution can be accessed using the coditions of the variables (solution to clause (X1, X2, X3) with X1 = 1, X2 = 0 and X3 = 1 will be found under clause_tensor[1, 0, 1] field and will be 1 (True)).\n", + "The formula (-np.sign(x) + 1) // 2 gives us 0 or 1 depending on the sign of the variable (its negation).\n", + "\n", + "```\n", + "for clause in clauses:\n", + " a, b, c, = clause\n", + " clause_tensor = np.ones((2, 2, 2), dtype=np.int32)\n", + " clause_tensor[(-np.sign(a) + 1) // 2, (-np.sign(b) + 1) // 2,\n", + " (-np.sign(c) + 1) // 2] = 0\n", + " clause_node = tn.Node(clause_tensor)\n", + "```\n", + "\n", + "Now, with everything prepare, I can explain you why tensors are such an elegant solution to this problen. As initially we have several expressions that contain only OR operators unified under AND operator, it might be useful to view those operators as logical summation and multiplication respectively. In other words, for logical operator OR, it doesn't matter how many Falses you have - it takes only one True to bring the expression to True (same as summation). On contrary, while you might have all but one Trues in your expression, operator AND will evaluate it to False if there was at least one constituent set to False (same as multiplication).\n", + "\n", + "The same idea will be applied to tensores in the problem. We have constructed 3D matrices to clauses in such a way that they are filled in with 1 for all possible entries except one (think about it: when the clause consists only of logical OR (summation) it is false only with all of its constituents being evaluated to False). 
Now, if we are able to correctly multiply all the matricies with each other, all of the configuration that have at least one 0 in it will end up being 0 and the only ones left have all of the clauses being True - exactly what we need! The initial vectoes for literals had to be set in such a way in order for matrices to be reduced after being multiplied with them.\n", + "\n", + "Bear with me, the last step is to connect variable to the clause. Operator (^) is used as a shortcut for tn.connect(clause_node, tensor_node) - function for dot product between matrices introduced in Tensor Network library. The result is stored into the first variable.\n", + "\n", + "For now we just have our clause nodes and literal vectores in place. In order to connect them, for every edge of each clause (they all have three edges - by the numbers of literals in the clause) we will create a copy vectore with the same dimension (3D matrix, extending to two fields to each side). The zero numbered edge of the *copy_tensor_node* we will connect to the one of the edges of the clause matrix. The one numbered edge will be connected to one of the *var_edges* - so called dangling edges - edges that are not yet connected to any other edge. Finally, the last edge (numbered with two) will take the place of the edge from *var_edges* that was just paired up with the one numbered edge.\n", + "\n", + "```\n", + "for i, var in enumerate(clause):\n", + " copy_tensor_node = tn.CopyNode(3, 2)\n", + " clause_node[i] ^ copy_tensor_node[0]\n", + " var_edges[abs(var) - 1] ^ copy_tensor_node[1]\n", + " var_edges[abs(var) - 1] = copy_tensor_node[2]\n", + "```\n", + "\n", + "This process will be repeated until all the edges coming from clause tensores have been paired up with a copy tensor node. By the end of this, in *var_edges* we will have stored all the unconnected edges of this system of multiplied tensors. 
And that is exactly what will be returened from the function:\n", + "\n", + "```\n", + "return var_edges\n", + "```\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0ZWRMj1EkT5k", + "colab_type": "text" + }, + "source": [ + "Let gather all the said above in to one program and run with the given set of variables." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "OdbqdgkB3wXZ", + "colab_type": "code", + "outputId": "813405c4-21da-4781-e7e9-888a0b8fa6c5", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 119 + } + }, + "source": [ + "!pip3 install tensornetwork" + ], + "execution_count": 0, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Requirement already satisfied: tensornetwork in /usr/local/lib/python3.6/dist-packages (0.2.0)\n", + "Requirement already satisfied: numpy>=1.16 in /usr/local/lib/python3.6/dist-packages (from tensornetwork) (1.17.5)\n", + "Requirement already satisfied: graphviz>=0.11.1 in /usr/local/lib/python3.6/dist-packages (from tensornetwork) (0.13.2)\n", + "Requirement already satisfied: opt-einsum>=2.3.0 in /usr/local/lib/python3.6/dist-packages (from tensornetwork) (3.1.0)\n", + "Requirement already satisfied: h5py>=2.9.0 in /usr/local/lib/python3.6/dist-packages (from tensornetwork) (2.10.0)\n", + "Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from h5py>=2.9.0->tensornetwork) (1.12.0)\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "qbUls8WGQM6x", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import numpy as np\n", + "from typing import List, Tuple, Set\n", + "import tensornetwork as tn" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "L8IARMaFXwH0", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def sat_tn(clauses: List[Tuple[int, int, int]]\n", + " ) -> List[tn.Edge]:\n", + " \"\"\"Create 
a 3SAT TensorNetwork of the given 3SAT clauses.\n", + " After full contraction, this network will be a tensor of size (2, 2, ..., 2)\n", + " with the rank being the same as the number of variables. Each element of the\n", + " final tensor represents whether the given assignment satisfies all of the\n", + " clauses. For example, if final_node.get_tensor()[0][1][1] == 1, then the\n", + " assiment (False, True, True) satisfies all clauses.\n", + " Args:\n", + " clauses: A list of 3 int tuples. Each element in the tuple corresponds to a\n", + " variable in the clause. If that int is negative, that variable is negated\n", + " in the clause.\n", + " Returns:\n", + " net: The 3SAT TensorNetwork.\n", + " var_edges: The edges for the given variables.\n", + " Raises:\n", + " ValueError: If any of the clauses have a 0 in them.\n", + " \"\"\"\n", + " for clause in clauses:\n", + " if 0 in clause:\n", + " raise ValueError(\"0's are not allowed in the clauses.\")\n", + " var_set = set()\n", + " for clause in clauses:\n", + " var_set |= {abs(x) for x in clause}\n", + " num_vars = max(var_set)\n", + " var_nodes = []\n", + " var_edges = []\n", + "\n", + " # Prepare the variable nodes.\n", + " for _ in range(num_vars):\n", + " new_node = tn.Node(np.ones(2, dtype=np.int32))\n", + " var_nodes.append(new_node)\n", + " var_edges.append(new_node[0])\n", + "\n", + " # Create the nodes for each clause\n", + " for clause in clauses:\n", + " a, b, c, = clause\n", + " clause_tensor = np.ones((2, 2, 2), dtype=np.int32)\n", + " clause_tensor[(-np.sign(a) + 1) // 2, (-np.sign(b) + 1) // 2,\n", + " (-np.sign(c) + 1) // 2] = 0\n", + " clause_node = tn.Node(clause_tensor)\n", + "\n", + " # Connect the variable to the clause through a copy tensor.\n", + " for i, var in enumerate(clause):\n", + " copy_tensor_node = tn.CopyNode(3, 2)\n", + " clause_node[i] ^ copy_tensor_node[0]\n", + " var_edges[abs(var) - 1] ^ copy_tensor_node[1]\n", + " var_edges[abs(var) - 1] = copy_tensor_node[2]\n", + "\n", + " 
return var_edges" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "AXpBEmktMfw6", + "colab_type": "text" + }, + "source": [ + "### Find the exact number of all possible solutions to the given SAT\n", + "\n", + "In order to find exact number of all possible solutions to the given SAT problem, we can do full contractions of the adges of the clauses. In other words, we have to calculate a trace of the tensor network we have build in the first part of the tutorial. \n", + "This is done by essentially creating the same tensor net and then connecting all the dangling edges of the first net to the dangling edges of the second." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "v6NvHpWd1AdI", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def sat_count_tn(clauses: List[Tuple[int, int, int]]):\n", + " \"\"\"Create a 3SAT Count TensorNetwork.\n", + " After full contraction, the final node will be the count of all possible\n", + " solutions to the given 3SAT problem.\n", + " Args:\n", + " clauses: A list of 3 int tuples. Each element in the tuple corresponds to a\n", + " variable in the clause. If that int is negative, that variable is negated\n", + " in the clause.\n", + " Returns:\n", + " nodes: The set of nodes\n", + " \"\"\"\n", + " var_edges1 = sat_tn(clauses)\n", + " var_edges2 = sat_tn(clauses)\n", + " for edge1, edge2 in zip(var_edges1, var_edges2):\n", + " edge1 ^ edge2\n", + " return tn.reachable(var_edges1[0].node1)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "W434kXjqTW1j", + "colab_type": "text" + }, + "source": [ + "Congratulations! You have now learned how to write SAT Solver program with TensorNetwork! Down below you can play with choosing different clauses as your starting points and then seeing with how many ways it can be solved." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "kjxLkM_uOkfV", + "colab_type": "code", + "outputId": "477cf629-d4d6-4831-81a8-f73fea95f03c", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + } + }, + "source": [ + "import numpy as np\n", + "from typing import List, Tuple, Set\n", + "import tensornetwork as tn\n", + "\n", + "my_clause = [(-1, -3, -4), (2, 3, -4), (1, -2, 4), (1, 3, 4), (-1, 2, -3)]\n", + "nodes = sat_count_tn(my_clause)\n", + "count = tn.contractors.greedy(nodes).tensor\n", + "print(\"Number of solutions = \", count)" + ], + "execution_count": 0, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Number of solutions = 7.0\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "d0OaWmpdJDze", + "colab_type": "text" + }, + "source": [ + "# References:\n", + "\n", + "\n", + "1. [SAT solver example ](http://www.tfinley.net/software/pyglpk/ex_sat.html)\n", + "2. [An exact tensor network for the 3SAT problem](https://arxiv.org/abs/1105.3201) \n", + "1. [Penrose’s Graphical Notation](https://medium.com/analytics-vidhya/penroses-graphical-notation-fe4c2f24cf3b)\n", + "1. https://github.com/google/TensorNetwork\n", + "1. [Writing a SAT Solver](http://andrew.gibiansky.com/blog/verification/writing-a-sat-solver/)\n", + "2. \n", + "\n", + "\n", + "\n", + "\n" + ] + } + ] +} \ No newline at end of file From c56c1fa98d0f2a6eeade7b022e7b8bb4a0e50f52 Mon Sep 17 00:00:00 2001 From: MichaelMarien Date: Wed, 22 Jan 2020 23:54:24 +0100 Subject: [PATCH 175/212] More Test! 
(#444) * added test for mps switch backend * added switch backend method to MPS * added test for network operations switch backend * make sure switch_backend not only fixes tensor but also node property * added switch_backend to init * added a lot of tests for network components * a lot more tests * some more tests * some linter things * added test base class instead of hack * disabled some pytype warnings * disabled some pylint warnings --- .../tests/network_components_free_test.py | 355 +++++++++++++++++- tensornetwork/tests/tensornetwork_test.py | 15 + 2 files changed, 366 insertions(+), 4 deletions(-) diff --git a/tensornetwork/tests/network_components_free_test.py b/tensornetwork/tests/network_components_free_test.py index 47632debe..a25993c4b 100644 --- a/tensornetwork/tests/network_components_free_test.py +++ b/tensornetwork/tests/network_components_free_test.py @@ -1,12 +1,14 @@ import numpy as np import tensorflow as tf import pytest +from unittest.mock import patch from collections import namedtuple import h5py import re #pylint: disable=line-too-long -from tensornetwork.network_components import Node, CopyNode, Edge, NodeCollection +from tensornetwork.network_components import Node, CopyNode, Edge, NodeCollection, BaseNode, _remove_trace_edge, _remove_edges import tensornetwork as tn +from tensornetwork.backends.base_backend import BaseBackend string_type = h5py.special_dtype(vlen=str) @@ -15,6 +17,34 @@ 'node1 node2 edge1 edge12 tensor') +class TestNode(BaseNode): + + def get_tensor(self): #pylint: disable=useless-super-delegation + return super().get_tensor() + + def set_tensor(self, tensor): #pylint: disable=useless-super-delegation + return super().set_tensor(tensor) + + @property + def shape(self): + return super().shape + + @property + def tensor(self): + return super().tensor + + @tensor.setter + def tensor(self, tensor): + return super(TestNode, type(self)).tensor.fset(self, tensor) + + def _load_node(self, node_data):# pylint: 
disable=useless-super-delegation + return super()._load_node(node_data) + + def _save_node(self, node_group): #pylint: disable=useless-super-delegation + return super()._save_node(node_group) + + + @pytest.fixture(name='single_node_edge') def fixture_single_node_edge(backend): tensor = np.ones((1, 2, 2)) @@ -249,6 +279,16 @@ def test_node_reorder_edges_raise_error_trace_edge(single_node_edge): assert "Edge reordering does not support trace edges." in str(e.value) +def test_node_reorder_edges_raise_error_no_tensor(single_node_edge): + node = single_node_edge.node + e2 = tn.connect(node[1], node[2]) + e3 = node[0] + del node._tensor + with pytest.raises(AttributeError) as e: + node.reorder_edges([e2, e3]) + assert "Please provide a valid tensor for this Node." in str(e.value) + + def test_node_magic_getitem(single_node_edge): node = single_node_edge.node edge = single_node_edge.edge @@ -279,13 +319,40 @@ def test_node_magic_lt(double_node_edge): def test_node_magic_lt_raises_error_not_node(single_node_edge): node = single_node_edge.node with pytest.raises(ValueError): - assert node < 0 + node < 0 def test_node_magic_matmul_raises_error_not_node(single_node_edge): node = single_node_edge.node with pytest.raises(TypeError): - assert node @ 0 + node @ 0 + + +def test_node_magic_matmul_raises_error_no_tensor(single_node_edge): + node = single_node_edge.node + del node._tensor + with pytest.raises(AttributeError): + node @ node + + +def test_node_magic_matmul_raises_error_disabled_node(single_node_edge): + node = single_node_edge.node + node.is_disabled = True + with pytest.raises(ValueError): + node @ node + + +def test_node_edges_getter_raises_error_disabled_node(single_node_edge): + node = single_node_edge.node + node.is_disabled = True + with pytest.raises(ValueError): + node.edges + +def test_node_edges_setter_raises_error_disabled_node(single_node_edge): + node = single_node_edge.node + node.is_disabled = True + with pytest.raises(ValueError): + node.edges = [] def 
test_node_magic_matmul_raises_error_different_network(single_node_edge): @@ -918,4 +985,284 @@ def test_repr_for_Nodes_and_Edges(double_node_edge): assert "[[[1.,1.],[1.,1.]]]" in str(node1) and str(node2) assert "Edge(DanglingEdge)[0]" in str(node1) and str(node2) assert "Edge('test_node1'[1]->'test_node2'[1])" in str(node1) and str(node2) - assert "Edge(DanglingEdge)[2]" in str(node1) and str(node2) \ No newline at end of file + assert "Edge(DanglingEdge)[2]" in str(node1) and str(node2) + + +def test_base_node_name_list_throws_error(): + with pytest.raises(TypeError,): + TestNode(name=["A"], axis_names=['a', 'b']) # pytype: disable=wrong-arg-types + + +def test_base_node_name_int_throws_error(): + with pytest.raises(TypeError): + TestNode(name=1, axis_names=['a', 'b']) # pytype: disable=wrong-arg-types + + +def test_base_node_axis_names_int_throws_error(): + with pytest.raises(TypeError): + TestNode(axis_names=[0, 1]) # pytype: disable=wrong-arg-types + + +def test_base_node_no_axis_names_no_shapes_throws_error(): + with pytest.raises(ValueError): + TestNode(name='a') + + +def test_node_add_axis_names_int_throws_error(): + n1 = Node(np.eye(2), axis_names=['a', 'b']) + with pytest.raises(TypeError): + n1.add_axis_names([0, 1]) # pytype: disable=wrong-arg-types + + +def test_node_axis_names_setter_throws_shape_large_mismatch_error(): + n1 = Node(np.eye(2), axis_names=['a', 'b']) + with pytest.raises(ValueError): + n1.axis_names = ['a', 'b', 'c'] + + +def test_node_axis_names_setter_throws_shape_small_mismatch_error(): + n1 = Node(np.eye(2), axis_names=['a', 'b']) + with pytest.raises(ValueError): + n1.axis_names = ['a'] + + +def test_node_axis_names_setter_throws_value_error(): + n1 = Node(np.eye(2), axis_names=['a', 'b']) + with pytest.raises(TypeError): + n1.axis_names = [0, 1] + + +def test_node_dtype(backend): + n1 = Node(np.random.rand(2), backend=backend) + assert n1.dtype == n1.tensor.dtype + + +@pytest.mark.parametrize("name", [1, ['1']]) +def 
test_node_set_name_raises_type_error(backend, name): + n1 = Node(np.random.rand(2), backend=backend) + with pytest.raises(TypeError): + n1.set_name(name) + + +@pytest.mark.parametrize("name", [1, ['1']]) +def test_node_name_setter_raises_type_error(backend, name): + n1 = Node(np.random.rand(2), backend=backend) + with pytest.raises(TypeError): + n1.name = name + + +def test_base_node_get_tensor(): + n1 = TestNode(name="n1", axis_names=['a'], shape=(1,)) + assert n1.get_tensor() is None + + +def test_base_node_set_tensor(): + n1 = TestNode(name="n1", axis_names=['a'], shape=(1,)) + assert n1.set_tensor(np.random.rand(2)) is None + assert n1.tensor is None + + +def test_base_node_shape(): + n1 = TestNode(name="n1", axis_names=['a'], shape=(1,)) + n1._shape = None + with pytest.raises(ValueError): + n1.shape + + +def test_base_node_tensor_getter(): + n1 = TestNode(name="n1", axis_names=['a'], shape=(1,)) + assert n1.tensor is None + + +def test_base_node_tensor_setter(): + n1 = TestNode(name="n1", axis_names=['a'], shape=(1,)) + n1.tensor = np.random.rand(2) + assert n1.tensor is None + + +def test_node_has_dangling_edge_false(double_node_edge): + node1 = double_node_edge.node1 + node2 = double_node_edge.node2 + tn.connect(node1["a"], node2["a"]) + tn.connect(node1["c"], node2["c"]) + assert not node1.has_dangling_edge() + + +def test_node_has_dangling_edge_true(single_node_edge): + assert single_node_edge.node.has_dangling_edge() + + +def test_node_get_item(single_node_edge): + node = single_node_edge.node + edge = single_node_edge.edge + node.add_edge(edge, axis=0) + assert node[0] == edge + assert edge in node[0:2] + + +def test_node_signature_getter_disabled_throws_error(single_node_edge): + node = single_node_edge.node + node.is_disabled = True + with pytest.raises(ValueError): + node.signature + + +def test_node_signature_setter_disabled_throws_error(single_node_edge): + node = single_node_edge.node + node.is_disabled = True + with pytest.raises(ValueError): + 
node.signature = "signature" + + +def test_node_disabled_disabled_throws_error(single_node_edge): + node = single_node_edge.node + node.is_disabled = True + with pytest.raises(ValueError): + node.disable() + + +def test_node_disabled_shape_throws_error(single_node_edge): + node = single_node_edge.node + node.is_disabled = True + with pytest.raises(ValueError): + node.shape + + +def test_copy_node_get_partners_with_trace(backend): + node1 = CopyNode(4, 2, backend=backend) + node2 = Node(np.random.rand(2, 2), backend=backend, name="node2") + tn.connect(node1[0], node1[1]) + tn.connect(node1[2], node2[0]) + tn.connect(node1[3], node2[1]) + assert node1.get_partners() == {node2: {0, 1}} + + +@pytest.mark.parametrize("name", [1, ['1']]) +def test_edge_name_throws_type_error(single_node_edge, name): + with pytest.raises(TypeError): + Edge(node1=single_node_edge.node, axis1=0, name=name) + + +def test_edge_name_setter_disabled_throws_error(single_node_edge): + edge = Edge(node1=single_node_edge.node, axis1=0) + edge.is_disabled = True + with pytest.raises(ValueError): + edge.name = 'edge' + + +def test_edge_name_getter_disabled_throws_error(single_node_edge): + edge = Edge(node1=single_node_edge.node, axis1=0) + edge.is_disabled = True + with pytest.raises(ValueError): + edge.name + + +@pytest.mark.parametrize("name", [1, ['1']]) +def test_edge_name_setter_throws_type_error(single_node_edge, name): + edge = Edge(node1=single_node_edge.node, axis1=0) + with pytest.raises(TypeError): + edge.name = name + + +def test_edge_signature_getter_disabled_throws_error(single_node_edge): + edge = Edge(node1=single_node_edge.node, axis1=0) + edge.is_disabled = True + with pytest.raises(ValueError): + edge.signature + + +def test_edge_signature_setter_disabled_throws_error(single_node_edge): + edge = Edge(node1=single_node_edge.node, axis1=0) + edge.is_disabled = True + with pytest.raises(ValueError): + edge.signature = "signature" + + +def 
test_edge_node1_throws_value_error(single_node_edge): + edge = Edge(node1=single_node_edge.node, axis1=0, name="edge") + edge._node1 = None + err_msg = "node1 for edge 'edge' no longer exists." + with pytest.raises(ValueError, match=err_msg): + edge.node1 + + + +def test_edge_node2_throws_value_error(single_node_edge): + edge = tn.connect(single_node_edge.node[1], single_node_edge.node[2]) + edge.name = 'edge' + edge._node2 = None + err_msg = "node2 for edge 'edge' no longer exists." + with pytest.raises(ValueError, match=err_msg): + edge.node2 + + +@pytest.mark.parametrize("name", [1, ['1']]) +def test_edge_set_name_throws_type_error(single_node_edge, name): + edge = Edge(node1=single_node_edge.node, axis1=0) + with pytest.raises(TypeError): + edge.set_name(name) + + +@patch.object(Edge, "name", None) +def test_edge_str(single_node_edge): + single_node_edge.edge.name = None + assert str(single_node_edge.edge) == "__unnamed_edge__" + + +def test_get_all_dangling_single_node(single_node_edge): + node = single_node_edge.node + assert set(tn.get_all_dangling({node})) == set(node.edges) + + +def test_get_all_dangling_double_node(double_node_edge): + node1 = double_node_edge.node1 + node2 = double_node_edge.node2 + assert set(tn.get_all_dangling({node1, node2})) == {node1[0], node1[2], + node2[0], node2[2]} + + +def test_flatten_edges_different_backend_raises_value_error(single_node_edge): + node1 = single_node_edge.node + node2 = tn.Node(np.random.rand(2, 2, 2)) + node2.backend = BaseBackend() + with pytest.raises(ValueError): + tn.flatten_edges(node1.get_all_edges()+node2.get_all_edges()) + + +def test_split_edge_trivial(single_node_edge): + edge = single_node_edge.edge + assert tn.split_edge(edge, (1,)) == [edge] + + +def test_split_edge_different_backend_raises_value_error(single_node_edge): + if single_node_edge.node.backend.name == "numpy": + pytest.skip("numpy comparing to all the others") + node1 = single_node_edge.node + node2 = tn.Node(np.random.rand(2, 2, 2), 
backend="numpy") + edge = tn.connect(node1[1], node2[1]) + with pytest.raises(ValueError, match="Not all backends are the same."): + tn.split_edge(edge, (2, 1)) + + +def test_remove_trace_edge_dangling_edge_raises_value_error(single_node_edge): + node = single_node_edge.node + edge = node[0] + edge.name = "e" + with pytest.raises(ValueError, match="Attempted to remove dangling edge 'e"): + _remove_trace_edge(edge, node) + + +def test_remove_trace_edge_non_trace_raises_value_error(double_node_edge): + node1 = double_node_edge.node1 + node2 = double_node_edge.node2 + edge = tn.connect(node1[0], node2[0]) + edge.name = "e" + with pytest.raises(ValueError, match="Edge 'e' is not a trace edge."): + _remove_trace_edge(edge, node1) + + +def test_remove_edges_trace_raises_value_error(single_node_edge): + node = single_node_edge.node + edge = tn.connect(node[1], node[2]) + with pytest.raises(ValueError): + _remove_edges(edge, node, node, node) # pytype: disable=wrong-arg-types \ No newline at end of file diff --git a/tensornetwork/tests/tensornetwork_test.py b/tensornetwork/tests/tensornetwork_test.py index 318d2053c..259aff3ac 100644 --- a/tensornetwork/tests/tensornetwork_test.py +++ b/tensornetwork/tests/tensornetwork_test.py @@ -345,6 +345,21 @@ def test_reorder_axes(backend): assert a.shape == (4, 2, 3) +def test_reorder_axes_raises_error_no_tensor(backend): + a = tn.Node(np.zeros((2, 3, 4)), backend=backend) + del a._tensor + with pytest.raises(AttributeError) as e: + a.reorder_axes([2, 0, 1]) + assert "Please provide a valid tensor for this Node." in str(e.value) + + +def test_reorder_axes_raises_error_bad_permutation(backend): + a = tn.Node(np.zeros((2, 3, 4)), backend=backend) + with pytest.raises(ValueError) as e: + a.reorder_axes([2, 0]) + assert "A full permutation was not passed." 
in str(e.value) + + def test_flatten_consistent_result(backend): a_val = np.ones((3, 5, 5, 6)) b_val = np.ones((5, 6, 4, 5)) From aa3caec319cacc62d8fb0806c816b4dc21ececb9 Mon Sep 17 00:00:00 2001 From: Aidan Dang Date: Thu, 23 Jan 2020 03:58:44 +0000 Subject: [PATCH 176/212] Return empty dict for empty sequence input to MPS left_envs and right_envs (#440) * Return empty dict for empty input to MPS envs * Add tests for empty sequence input to MPS envs * Use explicit sequences for MPS envs tests Co-authored-by: Chase Roberts --- .../matrixproductstates/finite_mps.py | 29 ++++++++++-------- .../matrixproductstates/finite_mps_test.py | 30 +++++++++++++++++++ 2 files changed, 47 insertions(+), 12 deletions(-) diff --git a/tensornetwork/matrixproductstates/finite_mps.py b/tensornetwork/matrixproductstates/finite_mps.py index 0fc308ee0..0e32d3e2e 100644 --- a/tensornetwork/matrixproductstates/finite_mps.py +++ b/tensornetwork/matrixproductstates/finite_mps.py @@ -29,25 +29,25 @@ class FiniteMPS(BaseMPS): """ - An MPS class for finite systems. + An MPS class for finite systems. MPS tensors are stored as a list of `Node` objects in the `FiniteMPS.nodes` attribute. - `FiniteMPS` has a central site, also called orthogonality center. - The position of this central site is stored in `FiniteMPS.center_position`, - and it can be be shifted using the `FiniteMPS.position` method. + `FiniteMPS` has a central site, also called orthogonality center. + The position of this central site is stored in `FiniteMPS.center_position`, + and it can be be shifted using the `FiniteMPS.position` method. `FiniteMPS.position` uses QR and RQ methods to shift `center_position`. - + `FiniteMPS` can be initialized either from a `list` of tensors, or by calling the classmethod `FiniteMPS.random`. - + By default, `FiniteMPS` is initialized in *canonical* form, i.e. 
- the state is normalized, and all tensors to the left of - `center_position` are left orthogonal, and all tensors + the state is normalized, and all tensors to the left of + `center_position` are left orthogonal, and all tensors to the right of `center_position` are right orthogonal. The tensor at `FiniteMPS.center_position` is neither left nor right orthogonal. - Note that canonicalization can be computationally relatively + Note that canonicalization can be computationally relatively costly and scales :math:`\\propto ND^3`. """ @@ -62,7 +62,7 @@ def __init__(self, tensors: A list of `Tensor` or `BaseNode` objects. center_position: The initial position of the center site. canonicalize: If `True` the mps is canonicalized at initialization. - backend: The name of the backend that should be used to perform + backend: The name of the backend that should be used to perform contractions. Available backends are currently 'numpy', 'tensorflow', 'pytorch', 'jax' """ @@ -160,10 +160,13 @@ def left_envs(self, sites: Sequence[int]) -> Dict: Args: sites (list of int): A list of sites of the MPS. Returns: - `dict` mapping `int` to `Tensor`: The left-reduced density matrices + `dict` mapping `int` to `Tensor`: The left-reduced density matrices at each site in `sites`. """ + if not sites: + return {} + n2 = max(sites) sites = np.array(sites) #enable logical indexing @@ -227,9 +230,11 @@ def right_envs(self, sites: Sequence[int]) -> Dict: Args: sites (list of int): A list of sites of the MPS. Returns: - `dict` mapping `int` to `Tensor`: The right-reduced density matrices + `dict` mapping `int` to `Tensor`: The right-reduced density matrices at each site in `sites`. 
""" + if not sites: + return {} n1 = min(sites) sites = np.array(sites) diff --git a/tensornetwork/matrixproductstates/finite_mps_test.py b/tensornetwork/matrixproductstates/finite_mps_test.py index 0afe0f4f6..9b779ad8c 100644 --- a/tensornetwork/matrixproductstates/finite_mps_test.py +++ b/tensornetwork/matrixproductstates/finite_mps_test.py @@ -121,3 +121,33 @@ def test_correlation_measurement_finite_mps(backend_dtype_values): actual[N // 2] = 0.25 np.testing.assert_almost_equal(result_1, actual) np.testing.assert_allclose(result_2, np.ones(N) * 0.25) + + +def test_left_envs_empty_seq(backend_dtype_values): + backend = backend_dtype_values[0] + dtype = backend_dtype_values[1] + + D, d, N = 1, 2, 10 + tensors = [np.ones((1, d, D), dtype=dtype)] + [ + np.ones((D, d, D), dtype=dtype) for _ in range(N - 2) + ] + [np.ones((D, d, 1), dtype=dtype)] + mps = FiniteMPS(tensors, center_position=0, backend=backend) + + assert mps.left_envs(()) == {} + assert mps.left_envs([]) == {} + assert mps.left_envs(range(0)) == {} + + +def test_right_envs_empty_seq(backend_dtype_values): + backend = backend_dtype_values[0] + dtype = backend_dtype_values[1] + + D, d, N = 1, 2, 10 + tensors = [np.ones((1, d, D), dtype=dtype)] + [ + np.ones((D, d, D), dtype=dtype) for _ in range(N - 2) + ] + [np.ones((D, d, 1), dtype=dtype)] + mps = FiniteMPS(tensors, center_position=0, backend=backend) + + assert mps.right_envs(()) == {} + assert mps.right_envs([]) == {} + assert mps.right_envs(range(0)) == {} From a1593931fd627e32f319bea4bf32d9fdae6b73db Mon Sep 17 00:00:00 2001 From: Tigran Katolikyan <43802339+katolikyan@users.noreply.github.com> Date: Thu, 23 Jan 2020 11:55:59 -0800 Subject: [PATCH 177/212] Issue #339. with tn.DefaultBackend(backend): support (#434) * A context manager support implementation for setting up a backend for Nodes. (Issue #339) * Stack-based backend context manager implementation * code styele fix * Added get_default_backend() function which returns top stack backend. 
Stack returns config.default_backend if there is nothing in stack. A little clean-up in test file. * - Moved `set_default_backend` to the `backend_contextmanager` - `default_backend` now is a property of `_DefaultBackendStack` - removed `config` imports as an unused file. - fixed some tests in `backend_contextmanager_test.py` * little code-style fix Co-authored-by: Chase Roberts --- tensornetwork/__init__.py | 6 +-- tensornetwork/backend_contextmanager.py | 41 +++++++++++++++++ tensornetwork/backends/backend_factory.py | 1 - tensornetwork/config.py | 2 - tensornetwork/ncon_interface.py | 8 ++-- tensornetwork/network_components.py | 28 +++++------ tensornetwork/network_operations.py | 1 - .../tests/backend_contextmanager_test.py | 46 +++++++++++++++++++ tensornetwork/tests/tensornetwork_test.py | 3 +- 9 files changed, 108 insertions(+), 28 deletions(-) create mode 100644 tensornetwork/backend_contextmanager.py create mode 100644 tensornetwork/tests/backend_contextmanager_test.py diff --git a/tensornetwork/__init__.py b/tensornetwork/__init__.py index fd6269031..96a490fe4 100644 --- a/tensornetwork/__init__.py +++ b/tensornetwork/__init__.py @@ -10,12 +10,8 @@ from tensornetwork.version import __version__ from tensornetwork.visualization.graphviz import to_graphviz from tensornetwork import contractors -from tensornetwork import config from typing import Text, Optional, Type, Union from tensornetwork.utils import load_nodes, save_nodes from tensornetwork.matrixproductstates.finite_mps import FiniteMPS from tensornetwork.matrixproductstates.infinite_mps import InfiniteMPS - - -def set_default_backend(backend: Union[Text, BaseBackend]) -> None: - config.default_backend = backend +from tensornetwork.backend_contextmanager import DefaultBackend, set_default_backend diff --git a/tensornetwork/backend_contextmanager.py b/tensornetwork/backend_contextmanager.py new file mode 100644 index 000000000..814d6d7bf --- /dev/null +++ b/tensornetwork/backend_contextmanager.py @@ 
-0,0 +1,41 @@ +from typing import Text, Union +from tensornetwork.backends.base_backend import BaseBackend + +class DefaultBackend(): + """Context manager for setting up backend for nodes""" + + def __init__(self, backend: Union[Text, BaseBackend]) -> None: + if not isinstance(backend, (Text, BaseBackend)): + raise ValueError("Item passed to DefaultBackend " + "must be Text or BaseBackend") + self.backend = backend + + def __enter__(self): + _default_backend_stack.stack.append(self) + + def __exit__(self, exc_type, exc_val, exc_tb): + _default_backend_stack.stack.pop() + +class _DefaultBackendStack(): + """A stack to keep track default backends context manager""" + + def __init__(self): + self.stack = [] + self.default_backend = "numpy" + + def get_current_backend(self): + return self.stack[-1].backend if self.stack else self.default_backend + +_default_backend_stack = _DefaultBackendStack() + +def get_default_backend(): + return _default_backend_stack.get_current_backend() + +def set_default_backend(backend: Union[Text, BaseBackend]) -> None: + if _default_backend_stack.stack: + raise AssertionError("The default backend should not be changed " + "inside the backend context manager") + if not isinstance(backend, (Text, BaseBackend)): + raise ValueError("Item passed to set_default_backend " + "must be Text or BaseBackend") + _default_backend_stack.default_backend = backend diff --git a/tensornetwork/backends/backend_factory.py b/tensornetwork/backends/backend_factory.py index 52d0bfb2e..859d829a4 100644 --- a/tensornetwork/backends/backend_factory.py +++ b/tensornetwork/backends/backend_factory.py @@ -19,7 +19,6 @@ from tensornetwork.backends.shell import shell_backend from tensornetwork.backends.pytorch import pytorch_backend from tensornetwork.backends import base_backend -import tensornetwork.config as config_file _BACKENDS = { "tensorflow": tensorflow_backend.TensorFlowBackend, diff --git a/tensornetwork/config.py b/tensornetwork/config.py index 
8c347b4fa..0a54b0cb6 100644 --- a/tensornetwork/config.py +++ b/tensornetwork/config.py @@ -11,5 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -default_backend = "numpy" diff --git a/tensornetwork/ncon_interface.py b/tensornetwork/ncon_interface.py index f4dde1593..330bdc044 100644 --- a/tensornetwork/ncon_interface.py +++ b/tensornetwork/ncon_interface.py @@ -16,7 +16,7 @@ import warnings from typing import Any, Sequence, List, Optional, Union, Text, Tuple, Dict from tensornetwork import network_components -from tensornetwork import config +from tensornetwork.backend_contextmanager import get_default_backend from tensornetwork.backends import backend_factory Tensor = Any @@ -67,8 +67,8 @@ def ncon(tensors: Sequence[Union[network_components.BaseNode, Tensor]], structure. con_order: List of edge labels specifying the contraction order. out_order: List of edge labels specifying the output order. - backend: String specifying the backend to use. Defaults to - `tensornetwork.config.default_backend`. + backend: String specifying the backend to use. Defaults to + `tensornetwork.backend_contextmanager.get_default_backend`. Returns: The result of the contraction. 
The result is returned as a `Node` @@ -78,7 +78,7 @@ def ncon(tensors: Sequence[Union[network_components.BaseNode, Tensor]], if backend and (backend not in backend_factory._BACKENDS): raise ValueError("Backend '{}' does not exist".format(backend)) if backend is None: - backend = config.default_backend + backend = get_default_backend() are_nodes = [isinstance(t, network_components.BaseNode) for t in tensors] nodes = {t for t in tensors if isinstance(t, network_components.BaseNode)} diff --git a/tensornetwork/network_components.py b/tensornetwork/network_components.py index 1f6de8917..8d564df5c 100644 --- a/tensornetwork/network_components.py +++ b/tensornetwork/network_components.py @@ -21,10 +21,10 @@ import h5py #pylint: disable=useless-import-alias -import tensornetwork.config as config from tensornetwork import ops from tensornetwork.backends import backend_factory from tensornetwork.backends.base_backend import BaseBackend +from tensornetwork.backend_contextmanager import get_default_backend string_type = h5py.special_dtype(vlen=str) Tensor = Any @@ -525,8 +525,8 @@ def __init__(self, """Create a node. Args: - tensor: The concrete that is represented by this node, or a `BaseNode` - object. If a tensor is passed, it can be + tensor: The concrete that is represented by this node, or a `BaseNode` + object. If a tensor is passed, it can be be either a numpy array or the tensor-type of the used backend. If a `BaseNode` is passed, the passed node has to have the same \ backend as given by `backend`. @@ -543,7 +543,7 @@ def __init__(self, backend = tensor.backend tensor = tensor.tensor if not backend: - backend = config.default_backend + backend = get_default_backend() if isinstance(backend, BaseBackend): backend_obj = backend else: @@ -633,13 +633,13 @@ def __init__(self, backend: An optional backend for the node. If `None`, a default backend is used dtype: The dtype used to initialize a numpy-copy node. 
- Note that this dtype has to be a numpy dtype, and it has to be + Note that this dtype has to be a numpy dtype, and it has to be compatible with the dtype of the backend, e.g. for a tensorflow backend with a tf.Dtype=tf.floa32, `dtype` has to be `np.float32`. """ if not backend: - backend = config.default_backend + backend = get_default_backend() backend_obj = backend_factory.get_backend(backend) self.rank = rank @@ -1092,14 +1092,14 @@ def disconnect(self, edge2_name: Optional[Text] = None) -> Tuple["Edge", "Edge"]: """ Break an existing non-dangling edge. - This updates both Edge.node1 and Edge.node2 by removing the + This updates both Edge.node1 and Edge.node2 by removing the connecting edge from `Edge.node1.edges` and `Edge.node2.edges` and adding new dangling edges instead Args: edge1_name: A name for the new dangling edge at `self.node1` edge2_name: A name for the new dangling edge at `self.node2` Returns: - (new_edge1, new_edge2): The new `Edge` objects of + (new_edge1, new_edge2): The new `Edge` objects of `self.node1` and `self.node2` """ if self.is_dangling(): @@ -1155,7 +1155,7 @@ def get_parallel_edges(edge: Edge) -> Set[Edge]: edge: The given edge. Returns: - A `set` of all of the edges parallel to the given edge + A `set` of all of the edges parallel to the given edge (including the given edge). """ return get_shared_edges(edge.node1, edge.node2) @@ -1389,8 +1389,8 @@ def split_edge(edge: Edge, shape: Tuple[int, ...], new_edge_names: Optional[List[Text]] = None) -> List[Edge]: """Split an `Edge` into multiple edges according to `shape`. Reshapes - the underlying tensors connected to the edge accordingly. - + the underlying tensors connected to the edge accordingly. 
+ This method acts as the inverse operation of flattening edges and distinguishes between the following edge cases when adding new edges: 1) standard edge connecting two different nodes: reshape node dimensions @@ -1772,7 +1772,7 @@ def disconnect(edge, edge2_name: Optional[Text] = None) -> Tuple[Edge, Edge]: """ Break an existing non-dangling edge. - This updates both Edge.node1 and Edge.node2 by removing the + This updates both Edge.node1 and Edge.node2 by removing the connecting edge from `Edge.node1.edges` and `Edge.node2.edges` and adding new dangling edges instead """ @@ -1894,9 +1894,9 @@ def outer_product_final_nodes(nodes: Iterable[BaseNode], edge_order: List[Edge]) -> BaseNode: """Get the outer product of `nodes` - For example, if there are 3 nodes remaining in `nodes` with + For example, if there are 3 nodes remaining in `nodes` with shapes :math:`(2, 3)`, :math:`(4, 5, 6)`, and :math:`(7)` - respectively, the newly returned node will have shape + respectively, the newly returned node will have shape :math:`(2, 3, 4, 5, 6, 7)`. 
Args: diff --git a/tensornetwork/network_operations.py b/tensornetwork/network_operations.py index 5aea214db..c2e228792 100644 --- a/tensornetwork/network_operations.py +++ b/tensornetwork/network_operations.py @@ -19,7 +19,6 @@ import numpy as np #pylint: disable=useless-import-alias -import tensornetwork.config as config #pylint: disable=line-too-long from tensornetwork.network_components import BaseNode, Node, CopyNode, Edge, disconnect from tensornetwork.backends import backend_factory diff --git a/tensornetwork/tests/backend_contextmanager_test.py b/tensornetwork/tests/backend_contextmanager_test.py new file mode 100644 index 000000000..60f6e833b --- /dev/null +++ b/tensornetwork/tests/backend_contextmanager_test.py @@ -0,0 +1,46 @@ +import tensornetwork as tn +from tensornetwork.backend_contextmanager import _default_backend_stack +import pytest +import numpy as np + +def test_contextmanager_simple(): + with tn.DefaultBackend("tensorflow"): + a = tn.Node(np.ones((10,))) + b = tn.Node(np.ones((10,))) + assert a.backend.name == b.backend.name + +def test_contextmanager_default_backend(): + tn.set_default_backend("pytorch") + with tn.DefaultBackend("numpy"): + assert _default_backend_stack.default_backend == "pytorch" + +def test_contextmanager_interruption(): + tn.set_default_backend("pytorch") + with pytest.raises(AssertionError): + with tn.DefaultBackend("numpy"): + tn.set_default_backend("tensorflow") + +def test_contextmanager_nested(): + with tn.DefaultBackend("tensorflow"): + a = tn.Node(np.ones((10,))) + assert a.backend.name == "tensorflow" + with tn.DefaultBackend("numpy"): + b = tn.Node(np.ones((10,))) + assert b.backend.name == "numpy" + c = tn.Node(np.ones((10,))) + assert c.backend.name == "tensorflow" + d = tn.Node(np.ones((10,))) + assert d.backend.name == "numpy" + +def test_contextmanager_wrong_item(): + a = tn.Node(np.ones((10,))) + with pytest.raises(ValueError): + with tn.DefaultBackend(a): # pytype: disable=wrong-arg-types + pass + +def 
test_contextmanager_BaseBackend(): + tn.set_default_backend("pytorch") + a = tn.Node(np.ones((10,))) + with tn.DefaultBackend(a.backend): + b = tn.Node(np.ones((10,))) + assert b.backend.name == "pytorch" diff --git a/tensornetwork/tests/tensornetwork_test.py b/tensornetwork/tests/tensornetwork_test.py index 259aff3ac..10cb4ce38 100644 --- a/tensornetwork/tests/tensornetwork_test.py +++ b/tensornetwork/tests/tensornetwork_test.py @@ -13,6 +13,7 @@ # limitations under the License. import tensornetwork as tn +from tensornetwork.backend_contextmanager import _default_backend_stack import pytest import numpy as np import tensorflow as tf @@ -522,7 +523,7 @@ def test_set_node2(backend): def test_set_default(backend): tn.set_default_backend(backend) - assert tn.config.default_backend == backend + assert _default_backend_stack.default_backend == backend a = tn.Node(np.eye(2)) assert a.backend.name == backend From 5d6ffc9bd61bbbad13a29cec47623cc92942d546 Mon Sep 17 00:00:00 2001 From: "Hyunbyung, Park" Date: Thu, 23 Jan 2020 11:59:59 -0800 Subject: [PATCH 178/212] Algebraic operation add( + ), sub( - ), mul( * ), div( / ) for BaseNode class (#439) * BaseNode / Edge class text input protection added (#423) BaseNode class - Add protection to name, axis_names *Protected in 3 place *Initialize stage - __init__ *Function use setting - set_name / add_axis_names *Property - Add @property to name to protect direct adding node.name = 123 Edge class - Add protection to name *Protected in 3 place *Initialize stage - __init__ *Function use setting - set_name *Property * BaseNode / Edge class text input protection code revise (#423) *if type(name) != str *if not isinstance(name, str) *change using type to isinstance to follow pylint * Algebraic operation add( + ), sub( - ), mul( * ), div( / ) for BaseNode class (#292) *[BaseNode class] - add / sub / mul / truediv NotImplemented function Added *[Node class] - add / sub / mul / truediv function added *[CopyNode class] - overload the 
BaseNode mul / truediv as NotImplemented *[basebackend] - add / sub / mul / div NotImplemented function added *[numpy / tensorflow / pytorch] - add / sub / mul / div function added *[shell] - add / sub / div NotImplemented function added *Testing files [network_components_free_test] * Exception - Tensorflow is not tested when the operand is scalar * 1. Check add / sub / mul / div with int / float / Node * 2. Check implicit conversion * 2. Check the Type Error when type is not int / float / Node * 3. Check is the operand backend same * 4. Check is BaseNode has attribute _tensor [backend_test - numpy / tensorflow / pytorch] *check add / sub / mul / divide work for int / float / Node * Add test cases for Tensorflow Algebraic operation and fix add, sub name (#292) [Change name] *add -> addition *subtract -> substraction [Add test case for Tensorflow] * Specify the datatype to resolve the conflict between different dtype operation [Test case for pytorch / jax] * pytorch - [int / int -> int] give different answer for torch when it is dividing two integer * jax - Different from other backend jax backend return 64bits dtype even operate between 32bits so put exceptional dtype test case for jax backend * Add test cases for Tensorflow Algebraic operation and fix add, sub name (#292) [Change name] *add -> addition *subtract -> substraction [Add test case for Tensorflow] * Specify the datatype to resolve the conflict between different dtype operation [Test case for pytorch / jax] * pytorch - [int / int -> int] give different answer for torch when it is dividing two integer * jax - Different from other backend jax backend return 64bits dtype even operate between 32bits so put exceptional dtype test case for jax backend * Add __add__, __sub__, __mul__, __truediv__ to TestNode Class Co-authored-by: Chase Roberts --- tensornetwork/backends/base_backend.py | 39 +++ tensornetwork/backends/numpy/numpy_backend.py | 9 + .../backends/numpy/numpy_backend_test.py | 56 ++- 
.../backends/pytorch/pytorch_backend.py | 9 + .../backends/pytorch/pytorch_backend_test.py | 48 ++- tensornetwork/backends/shell/shell_backend.py | 9 + .../backends/tensorflow/tensorflow_backend.py | 9 + .../tensorflow/tensorflow_backend_test.py | 48 ++- tensornetwork/network_components.py | 89 +++++ .../tests/network_components_free_test.py | 329 +++++++++++++++++- 10 files changed, 638 insertions(+), 7 deletions(-) diff --git a/tensornetwork/backends/base_backend.py b/tensornetwork/backends/base_backend.py index 0368e6d6b..9e718998f 100644 --- a/tensornetwork/backends/base_backend.py +++ b/tensornetwork/backends/base_backend.py @@ -391,6 +391,32 @@ def eigsh_lanczos(self, raise NotImplementedError( "Backend '{}' has not implemented eighs_lanczos.".format(self.name)) + def addition(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + """ + Return the default addition of `tensor`. + A backend can override such implementation. + Args: + tensor1: A tensor. + tensor2: A tensor. + Returns: + Tensor + """ + raise NotImplementedError( + "Backend '{}' has not implemented addition.".format(self.name)) + + def subtraction(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + """ + Return the default subtraction of `tensor`. + A backend can override such implementation. + Args: + tensor1: A tensor. + tensor2: A tensor. + Returns: + Tensor + """ + raise NotImplementedError( + "Backend '{}' has not implemented subtraction.".format(self.name)) + def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: """ Return the default multiplication of `tensor`. @@ -404,6 +430,19 @@ def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: raise NotImplementedError( "Backend '{}' has not implemented multiply.".format(self.name)) + def divide(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + """ + Return the default divide of `tensor`. + A backend can override such implementation. + Args: + tensor1: A tensor. + tensor2: A tensor. 
+ Returns: + Tensor + """ + raise NotImplementedError( + "Backend '{}' has not implemented divide.".format(self.name)) + def index_update(self, tensor: Tensor, mask: Tensor, assignee: Tensor) -> Tensor: """ diff --git a/tensornetwork/backends/numpy/numpy_backend.py b/tensornetwork/backends/numpy/numpy_backend.py index 41a0061c7..7a1606f9d 100644 --- a/tensornetwork/backends/numpy/numpy_backend.py +++ b/tensornetwork/backends/numpy/numpy_backend.py @@ -371,9 +371,18 @@ def eigsh_lanczos(self, eigenvectors.append(state / self.np.linalg.norm(state)) return eigvals[0:numeig], eigenvectors + def addition(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + return tensor1 + tensor2 + + def subtraction(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + return tensor1 - tensor2 + def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: return tensor1 * tensor2 + def divide(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + return tensor1 / tensor2 + def index_update(self, tensor: Tensor, mask: Tensor, assignee: Tensor) -> Tensor: t = self.np.copy(tensor) diff --git a/tensornetwork/backends/numpy/numpy_backend_test.py b/tensornetwork/backends/numpy/numpy_backend_test.py index e8688ce62..fe73271e8 100644 --- a/tensornetwork/backends/numpy/numpy_backend_test.py +++ b/tensornetwork/backends/numpy/numpy_backend_test.py @@ -321,15 +321,67 @@ def test_eigsh_lanczos_raises(): @pytest.mark.parametrize("a, b, expected", [ + pytest.param(1, 1, 2), + pytest.param(1., np.ones((1, 2, 3)), 2*np.ones((1, 2, 3))), + pytest.param(2.*np.ones(()), 1., 3.*np.ones((1, 2, 3))), + pytest.param(2.*np.ones(()), 1.*np.ones((1, 2, 3)), 3.*np.ones((1, 2, 3))), +]) +def test_addition(a, b, expected): + backend = numpy_backend.NumPyBackend() + tensor1 = backend.convert_to_tensor(a) + tensor2 = backend.convert_to_tensor(b) + result = backend.addition(tensor1, tensor2) + + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype + + 
+@pytest.mark.parametrize("a, b, expected", [ + pytest.param(1, 1, 0), + pytest.param(2., 1.*np.ones((1, 2, 3)), 1.*np.ones((1, 2, 3))), + pytest.param(np.ones((1, 2, 3)), 1., np.zeros((1, 2, 3))), + pytest.param(np.ones((1, 2, 3)), np.ones((1, 2, 3)), np.zeros((1, 2, 3))), +]) +def test_subtraction(a, b, expected): + backend = numpy_backend.NumPyBackend() + tensor1 = backend.convert_to_tensor(a) + tensor2 = backend.convert_to_tensor(b) + result = backend.subtraction(tensor1, tensor2) + + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype + + +@pytest.mark.parametrize("a, b, expected", [ + pytest.param(1, 1, 1), + pytest.param(2., 1.*np.ones((1, 2, 3)), 2.*np.ones((1, 2, 3))), + pytest.param(np.ones((1, 2, 3)), 1., np.ones((1, 2, 3))), pytest.param(np.ones((1, 2, 3)), np.ones((1, 2, 3)), np.ones((1, 2, 3))), - pytest.param(2. * np.ones(()), np.ones((1, 2, 3)), 2. * np.ones((1, 2, 3))), ]) def test_multiply(a, b, expected): backend = numpy_backend.NumPyBackend() tensor1 = backend.convert_to_tensor(a) tensor2 = backend.convert_to_tensor(b) + result = backend.multiply(tensor1, tensor2) + + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype + + +@pytest.mark.parametrize("a, b, expected", [ + pytest.param(2., 2., 1.), + pytest.param(2., 0.5*np.ones((1, 2, 3)), 4.*np.ones((1, 2, 3))), + pytest.param(np.ones(()), 2., 0.5*np.ones((1, 2, 3))), + pytest.param(np.ones(()), 2.*np.ones((1, 2, 3)), 0.5*np.ones((1, 2, 3))), +]) +def test_divide(a, b, expected): + backend = numpy_backend.NumPyBackend() + tensor1 = backend.convert_to_tensor(a) + tensor2 = backend.convert_to_tensor(b) + result = backend.divide(tensor1, tensor2) - np.testing.assert_allclose(backend.multiply(tensor1, tensor2), expected) + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype def find(which, vector): diff --git 
a/tensornetwork/backends/pytorch/pytorch_backend.py b/tensornetwork/backends/pytorch/pytorch_backend.py index b8e9a1ed7..788d4258b 100644 --- a/tensornetwork/backends/pytorch/pytorch_backend.py +++ b/tensornetwork/backends/pytorch/pytorch_backend.py @@ -275,9 +275,18 @@ def eigsh_lanczos(self, eigenvectors.append(state / self.torch.norm(state)) return eigvals[0:numeig], eigenvectors + def addition(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + return tensor1 + tensor2 + + def subtraction(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + return tensor1 - tensor2 + def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: return tensor1 * tensor2 + def divide(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + return tensor1 / tensor2 + def index_update(self, tensor: Tensor, mask: Tensor, assignee: Tensor) -> Tensor: #make a copy diff --git a/tensornetwork/backends/pytorch/pytorch_backend_test.py b/tensornetwork/backends/pytorch/pytorch_backend_test.py index e55d71b4b..e25542ac8 100644 --- a/tensornetwork/backends/pytorch/pytorch_backend_test.py +++ b/tensornetwork/backends/pytorch/pytorch_backend_test.py @@ -293,15 +293,59 @@ def test_eigsh_lanczos_raises(): @pytest.mark.parametrize("a, b, expected", [ + pytest.param(1, 1, 2), + pytest.param(np.ones((1, 2, 3)), np.ones((1, 2, 3)), 2.*np.ones((1, 2, 3))), +]) +def test_addition(a, b, expected): + backend = pytorch_backend.PyTorchBackend() + tensor1 = backend.convert_to_tensor(a) + tensor2 = backend.convert_to_tensor(b) + result = backend.addition(tensor1, tensor2) + + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype + + +@pytest.mark.parametrize("a, b, expected", [ + pytest.param(1, 1, 0), + pytest.param(np.ones((1, 2, 3)), np.ones((1, 2, 3)), np.zeros((1, 2, 3))), +]) +def test_subtraction(a, b, expected): + backend = pytorch_backend.PyTorchBackend() + tensor1 = backend.convert_to_tensor(a) + tensor2 = backend.convert_to_tensor(b) + result = 
backend.subtraction(tensor1, tensor2) + + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype + + +@pytest.mark.parametrize("a, b, expected", [ + pytest.param(1, 1, 1), pytest.param(np.ones((1, 2, 3)), np.ones((1, 2, 3)), np.ones((1, 2, 3))), - pytest.param(2. * np.ones(()), np.ones((1, 2, 3)), 2. * np.ones((1, 2, 3))), ]) def test_multiply(a, b, expected): backend = pytorch_backend.PyTorchBackend() tensor1 = backend.convert_to_tensor(a) tensor2 = backend.convert_to_tensor(b) + result = backend.multiply(tensor1, tensor2) + + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype + + +@pytest.mark.parametrize("a, b, expected", [ + pytest.param(2., 2., 1.), + pytest.param(np.ones(()), 2.*np.ones((1, 2, 3)), 0.5*np.ones((1, 2, 3))), +]) +def test_divide(a, b, expected): + backend = pytorch_backend.PyTorchBackend() + tensor1 = backend.convert_to_tensor(a) + tensor2 = backend.convert_to_tensor(b) + result = backend.divide(tensor1, tensor2) - np.testing.assert_allclose(backend.multiply(tensor1, tensor2), expected) + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype def test_eigh(): diff --git a/tensornetwork/backends/shell/shell_backend.py b/tensornetwork/backends/shell/shell_backend.py index 33b30a99c..6cc6d9ae2 100644 --- a/tensornetwork/backends/shell/shell_backend.py +++ b/tensornetwork/backends/shell/shell_backend.py @@ -290,11 +290,20 @@ def eigsh_lanczos(self, raise ValueError( '`A` has no attribut shape adn no `initial_state` is given.') + def addition(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + raise NotImplementedError("Shell tensor has not implemented addition( + )") + + def subtraction(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + raise NotImplementedError("Shell tensor has not implemented subtraction( - )") + def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: a = np.ones(tensor1.shape) 
b = np.ones(tensor2.shape) return ShellTensor((a * b).shape) + def divide(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + raise NotImplementedError("Shell tensor has not implemented divide( / )") + def index_update(self, tensor: Tensor, mask: Tensor, assignee: Tensor) -> Tensor: return ShellTensor(tensor.shape) diff --git a/tensornetwork/backends/tensorflow/tensorflow_backend.py b/tensornetwork/backends/tensorflow/tensorflow_backend.py index c87d464c5..2d9c0ed4d 100644 --- a/tensornetwork/backends/tensorflow/tensorflow_backend.py +++ b/tensornetwork/backends/tensorflow/tensorflow_backend.py @@ -182,9 +182,18 @@ def eigsh_lanczos(self, raise NotImplementedError( "Backend '{}' has not implemented eighs_lanczos.".format(self.name)) + def addition(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + return tensor1 + tensor2 + + def subtraction(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + return tensor1 - tensor2 + def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: return tensor1 * tensor2 + def divide(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + return tensor1 / tensor2 + def index_update(self, tensor: Tensor, mask: Tensor, assignee: Tensor) -> Tensor: #returns a copy (unfortunately) diff --git a/tensornetwork/backends/tensorflow/tensorflow_backend_test.py b/tensornetwork/backends/tensorflow/tensorflow_backend_test.py index 838771598..8073a61e2 100644 --- a/tensornetwork/backends/tensorflow/tensorflow_backend_test.py +++ b/tensornetwork/backends/tensorflow/tensorflow_backend_test.py @@ -247,15 +247,59 @@ def test_conj(): @pytest.mark.parametrize("a, b, expected", [ + pytest.param(1, 1, 2), + pytest.param(2.*np.ones(()), 1.*np.ones((1, 2, 3)), 3.*np.ones((1, 2, 3))), +]) +def test_addition(a, b, expected): + backend = tensorflow_backend.TensorFlowBackend() + tensor1 = backend.convert_to_tensor(a) + tensor2 = backend.convert_to_tensor(b) + result = backend.addition(tensor1, tensor2) + + np.testing.assert_allclose(result, expected) + assert 
tensor1.dtype == tensor2.dtype == result.dtype + + +@pytest.mark.parametrize("a, b, expected", [ + pytest.param(1, 1, 0), + pytest.param(np.ones((1, 2, 3)), np.ones((1, 2, 3)), np.zeros((1, 2, 3))), +]) +def test_subtraction(a, b, expected): + backend = tensorflow_backend.TensorFlowBackend() + tensor1 = backend.convert_to_tensor(a) + tensor2 = backend.convert_to_tensor(b) + result = backend.subtraction(tensor1, tensor2) + + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype + + +@pytest.mark.parametrize("a, b, expected", [ + pytest.param(1, 1, 1), pytest.param(np.ones((1, 2, 3)), np.ones((1, 2, 3)), np.ones((1, 2, 3))), - pytest.param(2. * np.ones(()), np.ones((1, 2, 3)), 2. * np.ones((1, 2, 3))), ]) def test_multiply(a, b, expected): backend = tensorflow_backend.TensorFlowBackend() tensor1 = backend.convert_to_tensor(a) tensor2 = backend.convert_to_tensor(b) + result = backend.multiply(tensor1, tensor2) + + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype + + +@pytest.mark.parametrize("a, b, expected", [ + pytest.param(2., 2., 1.), + pytest.param(np.ones(()), 2.*np.ones((1, 2, 3)), 0.5*np.ones((1, 2, 3))), +]) +def test_divide(a, b, expected): + backend = tensorflow_backend.TensorFlowBackend() + tensor1 = backend.convert_to_tensor(a) + tensor2 = backend.convert_to_tensor(b) + result = backend.divide(tensor1, tensor2) - np.testing.assert_allclose(backend.multiply(tensor1, tensor2), expected) + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype @pytest.mark.parametrize("dtype", [tf.float64, tf.complex128]) diff --git a/tensornetwork/network_components.py b/tensornetwork/network_components.py index 8d564df5c..0da188416 100644 --- a/tensornetwork/network_components.py +++ b/tensornetwork/network_components.py @@ -105,6 +105,18 @@ def __init__(self, super().__init__() + def __add__(self, other: Union[int, float, 
"BaseNode"]) -> "BaseNode": + raise NotImplementedError("BaseNode has not implemented addition ( + )") + + def __sub__(self, other: Union[int, float, "BaseNode"]) -> "BaseNode": + raise NotImplementedError("BaseNode has not implemented subtraction ( - )") + + def __mul__(self, other: Union[int, float, "BaseNode"]) -> "BaseNode": + raise NotImplementedError("BaseNode has not implemented multiply ( * )") + + def __truediv__(self, other: Union[int, float, "BaseNode"]) -> "BaseNode": + raise NotImplementedError("BaseNode has not implemented divide ( / )") + @property def dtype(self): #any derived instance of BaseNode always has to have a tensor @@ -555,6 +567,71 @@ def __init__(self, backend=backend_obj, shape=backend_obj.shape_tuple(self._tensor)) + def op_protection(self, other: Union[int, float, "Node"]) -> "Node": + if not isinstance(other, (int, float, Node)): + raise TypeError("Operand should be one of int, float, Node type") + if not hasattr(self, '_tensor'): + raise AttributeError("Please provide a valid tensor for this Node.") + if isinstance(other, Node): + if not self.backend.name == other.backend.name: + raise TypeError("Operands backend must match.\noperand 1 backend: {}\ + \noperand 2 backend: {}".format(self.backend.name, + other.backend.name)) + if not hasattr(other, '_tensor'): + raise AttributeError("Please provide a valid tensor for this Node.") + else: + other_tensor = self.backend.convert_to_tensor(other) + other = Node(tensor=other_tensor, backend=self.backend.name) + return other + + def __add__(self, other: Union[int, float, "Node"]) -> "Node": + other = self.op_protection(other) + new_tensor = self.backend.addition(self.tensor, other.tensor) + if len(self.axis_names) > len(other.axis_names): + axis_names = self.axis_names + else: + axis_names = other.axis_names + return Node(tensor=new_tensor, + name=self.name, + axis_names=axis_names, + backend=self.backend.name) + + def __sub__(self, other: Union[int, float, "Node"]) -> "Node": + other = 
self.op_protection(other) + new_tensor = self.backend.subtraction(self.tensor, other.tensor) + if len(self.axis_names) > len(other.axis_names): + axis_names = self.axis_names + else: + axis_names = other.axis_names + return Node(tensor=new_tensor, + name=self.name, + axis_names=axis_names, + backend=self.backend.name) + + def __mul__(self, other: Union[int, float, "Node"]) -> "Node": + other = self.op_protection(other) + new_tensor = self.backend.multiply(self.tensor, other.tensor) + if len(self.axis_names) > len(other.axis_names): + axis_names = self.axis_names + else: + axis_names = other.axis_names + return Node(tensor=new_tensor, + name=self.name, + axis_names=axis_names, + backend=self.backend.name) + + def __truediv__(self, other: Union[int, float, "Node"]) -> "Node": + other = self.op_protection(other) + new_tensor = self.backend.divide(self.tensor, other.tensor) + if len(self.axis_names) > len(other.axis_names): + axis_names = self.axis_names + else: + axis_names = other.axis_names + return Node(tensor=new_tensor, + name=self.name, + axis_names=axis_names, + backend=self.backend.name) + def get_tensor(self) -> Tensor: return self.tensor @@ -653,6 +730,18 @@ def __init__(self, backend=backend_obj, shape=(dimension,) * rank) + def __add__(self, other: Union[int, float, "BaseNode"]) -> "BaseNode": + raise NotImplementedError("BaseNode has not implemented addition ( + )") + + def __sub__(self, other: Union[int, float, "BaseNode"]) -> "BaseNode": + raise NotImplementedError("BaseNode has not implemented subtraction ( - )") + + def __mul__(self, other: Union[int, float, "BaseNode"]) -> "BaseNode": + raise NotImplementedError("BaseNode has not implemented multiply ( * )") + + def __truediv__(self, other: Union[int, float, "BaseNode"]) -> "BaseNode": + raise NotImplementedError("BaseNode has not implemented divide ( / )") + @property def dtype(self): # Override so we don't construct the dense tensor when asked for the dtype! 
diff --git a/tensornetwork/tests/network_components_free_test.py b/tensornetwork/tests/network_components_free_test.py index a25993c4b..5a1e27ba0 100644 --- a/tensornetwork/tests/network_components_free_test.py +++ b/tensornetwork/tests/network_components_free_test.py @@ -1,5 +1,6 @@ import numpy as np import tensorflow as tf +import torch import pytest from unittest.mock import patch from collections import namedtuple @@ -25,6 +26,18 @@ def get_tensor(self): #pylint: disable=useless-super-delegation def set_tensor(self, tensor): #pylint: disable=useless-super-delegation return super().set_tensor(tensor) + def __add__(self, other): #pylint: disable=useless-super-delegation + return super().__add__(other) + + def __sub__(self, other): #pylint: disable=useless-super-delegation + return super().__sub__(other) + + def __mul__(self, other): #pylint: disable=useless-super-delegation + return super().__mul__(other) + + def __truediv__(self, other): #pylint: disable=useless-super-delegation + return super().__truediv__(other) + @property def shape(self): return super().shape @@ -383,6 +396,320 @@ def test_node_magic_matmul(backend): np.testing.assert_allclose(actual.tensor, expected) +def test_between_node_add_op(backend): + node1 = Node(tensor=np.array([[1, 2], [3, 4]]), backend=backend) + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend=backend) + node3 = Node(tensor=np.array([[1., 2.], [3., 4.]]), backend=backend) + int_node = Node(tensor=np.array(2, dtype=np.int64), backend=backend) + float_node = Node(tensor=np.array(2.5, dtype=np.float64), backend=backend) + + expected = np.array([[11, 12], [13, 14]]) + result = (node1 + node2).tensor + np.testing.assert_almost_equal(result, expected) + assert node1.tensor.dtype == node2.tensor.dtype == result.dtype + + expected = np.array([[3, 4], [5, 6]]) + result = (node1 + int_node).tensor + np.testing.assert_almost_equal(result, expected) + assert node1.tensor.dtype == int_node.tensor.dtype == result.dtype + expected 
= np.array([[3, 4], [5, 6]]) + result = (int_node + node1).tensor + np.testing.assert_almost_equal(result, expected) + assert node1.tensor.dtype == int_node.tensor.dtype == result.dtype + + expected = np.array([[3.5, 4.5], [5.5, 6.5]]) + result = (node3 + float_node).tensor + np.testing.assert_almost_equal(result, expected) + assert node3.dtype == float_node.dtype == result.dtype + expected = np.array([[3.5, 4.5], [5.5, 6.5]]) + result = (float_node + node3).tensor + np.testing.assert_almost_equal(result, expected) + assert node3.dtype == float_node.dtype == result.dtype + + +def test_node_and_scalar_add_op(backend): + node = Node(tensor=np.array([[1, 2], [3, 4]], dtype=np.int32), backend=backend) + expected = np.array([[3, 4], [5, 6]]) + result = (node + 2).tensor + np.testing.assert_almost_equal(result, expected) + if backend == 'jax': + assert result.dtype == 'int64' + else: + assert node.tensor.dtype == result.dtype + + node = Node(tensor=np.array([[1, 2], [3, 4]], dtype=np.float32), backend=backend) + expected = np.array([[3.5, 4.5], [5.5, 6.5]]) + result = (node + 2.5).tensor + np.testing.assert_almost_equal(result, expected) + if backend == 'jax': + assert result.dtype == 'float64' + else: + assert node.tensor.dtype == result.dtype + + +def test_between_node_sub_op(backend): + node1 = Node(tensor=np.array([[1, 2], [3, 4]]), backend=backend) + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend=backend) + node3 = Node(tensor=np.array([[1., 2.], [3., 4.]]), backend=backend) + int_node = Node(tensor=np.array(2, dtype=np.int64), backend=backend) + float_node = Node(tensor=np.array(2.5, dtype=np.float64), backend=backend) + + expected = np.array([[-9, -8], [-7, -6]]) + result = (node1 - node2).tensor + np.testing.assert_almost_equal(result, expected) + assert node1.tensor.dtype == node2.tensor.dtype == result.dtype + + expected = np.array([[-1, 0], [1, 2]]) + result = (node1 - int_node).tensor + np.testing.assert_almost_equal(result, expected) + assert 
node1.tensor.dtype == int_node.tensor.dtype == result.dtype + expected = np.array([[1, 0], [-1, -2]]) + result = (int_node - node1).tensor + np.testing.assert_almost_equal(result, expected) + assert node1.tensor.dtype == int_node.tensor.dtype == result.dtype + + expected = np.array([[-1.5, -0.5], [0.5, 1.5]]) + result = (node3 - float_node).tensor + np.testing.assert_almost_equal(result, expected) + assert node3.dtype == float_node.dtype == result.dtype + expected = np.array([[1.5, 0.5], [-0.5, -1.5]]) + result = (float_node - node3).tensor + np.testing.assert_almost_equal(result, expected) + assert node3.dtype == float_node.dtype == result.dtype + + +def test_node_and_scalar_sub_op(backend): + node = Node(tensor=np.array([[1, 2], [3, 4]], dtype=np.int32), backend=backend) + expected = np.array([[-1, 0], [1, 2]]) + result = (node - 2).tensor + np.testing.assert_almost_equal(result, expected) + if backend == 'jax': + assert result.dtype == 'int64' + else: + assert node.tensor.dtype == result.dtype + + node = Node(tensor=np.array([[1, 2], [3, 4]], dtype=np.float32), backend=backend) + expected = np.array([[-1.5, -0.5], [0.5, 1.5]]) + result = (node - 2.5).tensor + np.testing.assert_almost_equal(result, expected) + if backend == 'jax': + assert result.dtype == 'float64' + else: + assert node.tensor.dtype == result.dtype + + +def test_between_node_mul_op(backend): + node1 = Node(tensor=np.array([[1, 2], [3, 4]]), backend=backend) + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend=backend) + node3 = Node(tensor=np.array([[1., 2.], [3., 4.]]), backend=backend) + int_node = Node(tensor=np.array(2, dtype=np.int64), backend=backend) + float_node = Node(tensor=np.array(2.5, dtype=np.float64), backend=backend) + + expected = np.array([[10, 20], [30, 40]]) + result = (node1 * node2).tensor + np.testing.assert_almost_equal(result, expected) + assert node1.tensor.dtype == node2.tensor.dtype == result.dtype + + expected = np.array([[2, 4], [6, 8]]) + result = (node1 * 
int_node).tensor + np.testing.assert_almost_equal(result, expected) + assert node1.tensor.dtype == int_node.tensor.dtype == result.dtype + result = (int_node * node1).tensor + np.testing.assert_almost_equal(result, expected) + + expected = np.array([[2.5, 5], [7.5, 10]]) + result = (node3 * float_node).tensor + np.testing.assert_almost_equal(result, expected) + assert node3.dtype == float_node.dtype == result.dtype + result = (float_node * node3).tensor + np.testing.assert_almost_equal(result, expected) + assert node3.dtype == float_node.dtype == result.dtype + + +def test_node_and_scalar_mul_op(backend): + node = Node(tensor=np.array([[1, 2], [3, 4]], dtype=np.int32), backend=backend) + expected = np.array([[2, 4], [6, 8]]) + result = (node * 2).tensor + np.testing.assert_almost_equal(result, expected) + if backend == 'jax': + assert result.dtype == 'int64' + else: + assert node.tensor.dtype == result.dtype + + node = Node(tensor=np.array([[1, 2], [3, 4]], dtype=np.float32), backend=backend) + expected = np.array([[2.5, 5], [7.5, 10]]) + result = (node * 2.5).tensor + np.testing.assert_almost_equal(result, expected) + if backend == 'jax': + assert result.dtype == 'float64' + else: + assert node.tensor.dtype == result.dtype + + +def test_between_node_div_op(backend): + node1 = Node(tensor=np.array([[1., 2.], [3., 4.]]), backend=backend) + node2 = Node(tensor=np.array([[10., 10.], [10., 10.]]), backend=backend) + node3 = Node(tensor=np.array([[1, 2], [3, 4]]), backend=backend) + int_node = Node(tensor=np.array(2, dtype=np.int64), backend=backend) + float_node = Node(tensor=np.array(2.5, dtype=np.float64), backend=backend) + + expected = np.array([[0.1, 0.2], [0.3, 0.4]]) + result = (node1 / node2).tensor + np.testing.assert_almost_equal(result, expected) + assert node1.tensor.dtype == node2.tensor.dtype == result.dtype + + expected = np.array([[0.5, 1.], [1.5, 2.]]) + expected_pytorch = np.array([[0, 1], [1, 2]]) + result = (node3 / int_node).tensor + if backend == 
'pytorch': + np.testing.assert_almost_equal(result, expected_pytorch) + assert node3.tensor.dtype == result.dtype == torch.int64 + else: + np.testing.assert_almost_equal(result, expected) + assert node3.tensor.dtype == 'int64' + assert result.dtype == 'float64' + + expected = np.array([[2., 1.], [2/3, 0.5]]) + expected_pytorch = np.array([[2, 1], [0, 0]]) + result = (int_node / node3).tensor + if backend == 'pytorch': + np.testing.assert_almost_equal(result, expected_pytorch) + assert node3.tensor.dtype == result.dtype == torch.int64 + else: + np.testing.assert_almost_equal(result, expected) + assert node3.tensor.dtype == 'int64' + assert result.dtype == 'float64' + + expected = np.array([[4., 4.], [4., 4.]]) + result = (node2 / float_node).tensor + np.testing.assert_almost_equal(result, expected) + assert node2.dtype == float_node.dtype == result.dtype + expected = np.array([[0.25, 0.25], [0.25, 0.25]]) + result = (float_node / node2).tensor + np.testing.assert_almost_equal(result, expected) + assert node2.dtype == float_node.dtype == result.dtype + + +def test_node_and_scalar_div_op(backend): + node = Node(tensor=np.array([[5, 10], [15, 20]], dtype=np.int32), backend=backend) + expected = np.array([[0.5, 1.], [1.5, 2.]]) + expected_pytorch = np.array([[0, 1], [1, 2]]) + result = (node / 10).tensor + if backend == 'pytorch': + np.testing.assert_almost_equal(result, expected_pytorch) + assert node.tensor.dtype == result.dtype == torch.int32 + else: + np.testing.assert_almost_equal(result, expected) + assert result.dtype == 'float64' + assert node.tensor.dtype == 'int32' + + node = Node(tensor=np.array([[5., 10.], [15., 20.]], dtype=np.float32), backend=backend) + expected = np.array([[2., 4.], [6., 8.]]) + result = (node / 2.5).tensor + np.testing.assert_almost_equal(result, expected) + if backend == 'jax': + assert result.dtype == 'float64' + else: + assert node.tensor.dtype == result.dtype + + +def test_node_add_input_error(): + #pylint: disable=unused-variable + 
#pytype: disable=unsupported-operands + node1 = Node(tensor=2, backend='numpy') + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend='numpy') + + del node1._tensor + with pytest.raises(AttributeError): + result = node1 + node2 + result = node2 + node1 + + node1.tensor = 1 + node2 = 'str' + copynode = tn.CopyNode(rank=4, dimension=3) + with pytest.raises(TypeError): + result = node1 + node2 + result = node1 + copynode + + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend='pytorch') + with pytest.raises(TypeError): + result = node1 + node2 + #pytype: enable=unsupported-operands + + +def test_node_sub_input_error(): + #pylint: disable=unused-variable + #pytype: disable=unsupported-operands + node1 = Node(tensor=2, backend='numpy') + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend='numpy') + + del node1._tensor + with pytest.raises(AttributeError): + result = node1 - node2 + result = node2 - node1 + + node1.tensor = 1 + node2 = 'str' + copynode = tn.CopyNode(rank=4, dimension=3) + with pytest.raises(TypeError): + result = node1 - node2 + result = node1 - copynode + + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend='pytorch') + with pytest.raises(TypeError): + result = node1 - node2 + #pytype: enable=unsupported-operands + + +def test_node_mul_input_error(): + #pylint: disable=unused-variable + #pytype: disable=unsupported-operands + node1 = Node(tensor=2, backend='numpy') + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend='numpy') + + del node1._tensor + with pytest.raises(AttributeError): + result = node1 * node2 + result = node2 * node1 + + node1.tensor = 1 + node2 = 'str' + copynode = tn.CopyNode(rank=4, dimension=3) + with pytest.raises(TypeError): + result = node1 * node2 + result = node1 * copynode + + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend='pytorch') + with pytest.raises(TypeError): + result = node1 * node2 + #pytype: enable=unsupported-operands + + +def test_node_div_input_error(): + 
#pylint: disable=unused-variable + #pytype: disable=unsupported-operands + node1 = Node(tensor=2, backend='numpy') + node1 = Node(tensor=2, backend='numpy') + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend='numpy') + + del node1._tensor + with pytest.raises(AttributeError): + result = node1 / node2 + result = node2 / node1 + + node1.tensor = 1 + node2 = 'str' + copynode = tn.CopyNode(rank=4, dimension=3) + with pytest.raises(TypeError): + result = node1 / node2 + result = node1 / copynode + + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend='pytorch') + with pytest.raises(TypeError): + result = node1 / node2 + #pytype: enable=unsupported-operands + + def test_node_save_structure(tmp_path, single_node_edge): node = single_node_edge.node with h5py.File(tmp_path / 'nodes', 'w') as node_file: @@ -1265,4 +1592,4 @@ def test_remove_edges_trace_raises_value_error(single_node_edge): node = single_node_edge.node edge = tn.connect(node[1], node[2]) with pytest.raises(ValueError): - _remove_edges(edge, node, node, node) # pytype: disable=wrong-arg-types \ No newline at end of file + _remove_edges(edge, node, node, node) # pytype: disable=wrong-arg-types From 38e57ffa21383e230c3c93b116c682d99d47adce Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 25 Jan 2020 21:00:50 -0500 Subject: [PATCH 179/212] improved performance, but only u1 currently supported in this commit --- tensornetwork/block_tensor/block_tensor.py | 1699 ++++++++++------- .../block_tensor/block_tensor_test.py | 300 +-- tensornetwork/block_tensor/charge.py | 490 ++--- 3 files changed, 1294 insertions(+), 1195 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 36de10f83..7f5a50f18 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -30,73 +30,38 @@ Tensor = Any -def _compute_sparse_lookups(row_charges: Union[BaseCharge, ChargeCollection], - row_flows, column_charges, 
column_flows): +def compute_sparse_lookup( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: Iterable[Union[bool, int]], + target_charges: Union[BaseCharge, ChargeCollection]) -> np.ndarray: """ - Compute lookup tables for looking up how dense index positions map + Compute lookup table for looking up how dense index positions map to sparse index positions for the diagonal blocks a symmetric matrix. Args: - row_charges: + charges: + flows: + target_charges: """ - column_flows = list(-np.asarray(column_flows)) - fused_column_charges = fuse_charges(column_charges, column_flows) - fused_row_charges = fuse_charges(row_charges, row_flows) - unique_column_charges, column_inverse = fused_column_charges.unique( - return_inverse=True) - unique_row_charges, row_inverse = fused_row_charges.unique( - return_inverse=True) - common_charges, comm_row, comm_col = unique_row_charges.intersect( - unique_column_charges, return_indices=True) - - col_ind_sort = np.argsort(column_inverse, kind='stable') - row_ind_sort = np.argsort(row_inverse, kind='stable') - _, col_charge_degeneracies = compute_fused_charge_degeneracies( - column_charges, column_flows) - _, row_charge_degeneracies = compute_fused_charge_degeneracies( - row_charges, row_flows) - # labelsorted_indices = column_inverse[col_ind_sort] - # tmp = np.nonzero( - # np.append(labelsorted_indices, unique_column_charges.charges.shape[0] + 1) - - # np.append(labelsorted_indices[0], labelsorted_indices))[0] - #charge_degeneracies = tmp - np.append(0, tmp[0:-1]) - - col_start_positions = np.cumsum(np.append(0, col_charge_degeneracies)) - row_start_positions = np.cumsum(np.append(0, row_charge_degeneracies)) - column_lookup = np.empty(len(fused_column_charges), dtype=np.int64) - row_lookup = np.zeros(len(fused_row_charges), dtype=np.int64) - for n in range(len(common_charges)): - column_lookup[col_ind_sort[col_start_positions[ - comm_col[n]]:col_start_positions[comm_col[n] + 1]]] = np.arange( - 
col_charge_degeneracies[comm_col[n]]) - # row_start_positions[comm_row[n]] - # row_start_positions[comm_row[n] + 1] - row_lookup[ - row_ind_sort[row_start_positions[comm_row[n]]:row_start_positions[ - comm_row[n] + 1]]] = col_charge_degeneracies[comm_col[n]] + fused_charges = fuse_charges(charges, flows) + unique_charges, inverse, degens = fused_charges.unique( + return_inverse=True, return_counts=True) + common_charges, label_to_unique, label_to_target = unique_charges.intersect( + target_charges, return_indices=True) - return np.append(0, np.cumsum(row_lookup[0:-1])), column_lookup + tmp = np.full(len(unique_charges), fill_value=-1, dtype=np.int16) + tmp[label_to_unique] = label_to_unique + lookup = tmp[inverse] + vec = np.empty(len(fused_charges), dtype=np.uint32) + for n in label_to_unique: + vec[lookup == n] = np.arange(degens[n]) + return vec def _get_strides(dims): return np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) -def _get_stride_arrays(dims): - strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) - return [np.arange(dims[n]) * strides[n] for n in range(len(dims))] - - -def _find_values_in_fused(indices: np.ndarray, left: np.ndarray, - right: np.ndarray) -> np.ndarray: - """ - Returns fuse(left,right)[indices], i.e. the elements - in the fusion of `left` and `right` at positions `indices'. 
- """ - left_inds, right_inds = np.divmod(indices, len(right)) - return left[left_inds] + right[right_inds] - - def fuse_ndarray_pair(array1: Union[List, np.ndarray], array2: Union[List, np.ndarray]) -> np.ndarray: """ @@ -138,47 +103,27 @@ def _check_flows(flows: List[int]) -> None: "flows = {} contains values different from 1 and -1".format(flows)) -def _find_best_partition(charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[int], - return_charges: Optional[bool] = True - ) -> Tuple[Union[BaseCharge, ChargeCollection], - Union[BaseCharge, ChargeCollection], int]: +def _find_best_partition(dims: Iterable[int]) -> int: """ - compute the best partition for fusing `charges`, i.e. the integer `p` - such that fusing `len(fuse_charges(charges[0:p],flows[0:p]))` is - and `len(fuse_charges(charges[p::],flows[p::]))` are as close as possible. - Returns: - fused_left_charges, fused_right_charges, p - + """ - #FIXME: fusing charges with dims (N,M) with M>~N is faster than fusing charges - # with dims (M,N). Thus, it is not always best to fuse at the minimum cut. - #for example, for dims (1000, 4, 1002), its better to fuse at the cut - #(1000, 4008) than at (4000, 1002), even though the difference between the - #dimensions is minimal for the latter case. We should implement some heuristic - #to find these cuts. 
- if len(charges) == 1: + if len(dims) == 1: raise ValueError( - '_expecting `charges` with a length of at least 2, got `len(charges)={}`' - .format(len(charges))) - dims = np.asarray([len(c) for c in charges]) + 'expecting `dims` with a length of at least 2, got `len(dims ) =1`') diffs = [ np.abs(np.prod(dims[0:n]) - np.prod(dims[n::])) - for n in range(1, len(charges)) + for n in range(1, len(dims)) ] + + # diffs = [ + # np.abs(np.prod(dims[:n]) - np.prod(dims[n:])) for n in range(1, dims) + # ] min_inds = np.nonzero(diffs == np.min(diffs))[0] if len(min_inds) > 1: - right_dims = [np.prod(len(charges[min_ind + 1::])) for min_ind in min_inds] + right_dims = [np.prod(dims[min_ind + 1:]) for min_ind in min_inds] min_ind = min_inds[np.argmax(right_dims)] else: min_ind = min_inds[0] - if return_charges: - fused_left_charges = fuse_charges(charges[0:min_ind + 1], - flows[0:min_ind + 1]) - fused_right_charges = fuse_charges(charges[min_ind + 1::], - flows[min_ind + 1::]) - - return fused_left_charges, fused_right_charges, min_ind + 1 return min_ind + 1 @@ -210,23 +155,13 @@ def compute_fused_charge_degeneracies( accumulated_charges, accumulated_degeneracies = ( charges[0] * flows[0]).unique(return_counts=True) for n in range(1, len(charges)): - #list of unique charges and list of their degeneracies - #on the next unfused leg of the tensor leg_charges, leg_degeneracies = charges[n].unique(return_counts=True) - #fuse the unique charges - #Note: entries in `fused_charges` are not unique anymore. - #flow1 = 1 because the flow of leg 0 has already been - #mulitplied above fused_charges = accumulated_charges + leg_charges * flows[n] - #compute the degeneracies of `fused_charges` charges - #`fused_degeneracies` is a list of degeneracies such that - # `fused_degeneracies[n]` is the degeneracy of of - # charge `c = fused_charges[n]`. 
fused_degeneracies = fuse_degeneracies(accumulated_degeneracies, leg_degeneracies) accumulated_charges = fused_charges.unique() accumulated_degeneracies = np.empty( - len(accumulated_charges), dtype=np.int64) + len(accumulated_charges), dtype=np.uint32) for n in range(len(accumulated_charges)): accumulated_degeneracies[n] = np.sum( @@ -258,22 +193,10 @@ def compute_unique_fused_charges( if len(charges) == 1: return (charges[0] * flows[0]).unique() - # get unique charges and their degeneracies on the first leg. - # We are fusing from "left" to "right". accumulated_charges = (charges[0] * flows[0]).unique() for n in range(1, len(charges)): - #list of unique charges and list of their degeneracies - #on the next unfused leg of the tensor leg_charges = charges[n].unique() - #fuse the unique charges - #Note: entries in `fused_charges` are not unique anymore. - #flow1 = 1 because the flow of leg 0 has already been - #mulitplied above fused_charges = accumulated_charges + leg_charges * flows[n] - #compute the degeneracies of `fused_charges` charges - #`fused_degeneracies` is a list of degeneracies such that - # `fused_degeneracies[n]` is the degeneracy of of - # charge `c = fused_charges[n]`. accumulated_charges = fused_charges.unique() return accumulated_charges @@ -307,13 +230,9 @@ def compute_num_nonzero(charges: List[np.ndarray], def _find_diagonal_sparse_blocks( - data: np.ndarray, - row_charges: List[Union[BaseCharge, ChargeCollection]], - column_charges: List[Union[BaseCharge, ChargeCollection]], - row_flows: List[Union[bool, int]], - column_flows: List[Union[bool, int]], - return_data: Optional[bool] = False -) -> Tuple[Union[BaseCharge, ChargeCollection], List, np.ndarray, Dict, Dict]: + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[bool, int]], + partition: int) -> Tuple[Union[BaseCharge, ChargeCollection], List]: """ Given the meta data and underlying data of a symmetric matrix, compute all diagonal blocks and return them in a dict. 
@@ -322,48 +241,21 @@ def _find_diagonal_sparse_blocks( columns given by fusing `column_charges`. Args: - data: An np.ndarray of the data. The number of elements in `data` - has to match the number of non-zero elements defined by `charges` - and `flows` - row_charges: List of np.ndarray, one for each leg of the row-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - column_charges: List of np.ndarray, one for each leg of the column-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - row_flows: A list of integers, one for each entry in `row_charges`. - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - column_flows: A list of integers, one for each entry in `column_charges`. - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the sparse locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. - + charges: A list of charges. + flows: A list of flows. + partition: The location of the partition of `charges` into rows and colums. Returns: return common_charges, blocks, start_positions, row_locations, column_degeneracies List[Union[BaseCharge, ChargeCollection]]: A list of unique charges, one per block. List[np.ndarray]: A list containing the blocks. 
- np.ndarray: The start position within the sparse data array of each row with non-zero - elements. - Dict: Dict mapping row-charges of each block to an np.ndarray of sparse positions - along the rows - Dict: Dict mapping row-charges of each block to its column-degeneracy """ - flows = row_flows.copy() - flows.extend(column_flows) _check_flows(flows) - if len(flows) != (len(row_charges) + len(column_charges)): - raise ValueError( - "`len(flows)` is different from `len(row_charges) + len(column_charges)`" - ) + if len(flows) != len(charges): + raise ValueError("`len(flows)` is different from `len(charges)`") + row_charges = charges[:partition] + row_flows = flows[:partition] + column_charges = charges[partition:] + column_flows = flows[partition:] #get the unique column-charges #we only care about their degeneracies, not their order; that's much faster @@ -373,36 +265,26 @@ def _find_diagonal_sparse_blocks( column_charges, column_flows) unique_row_charges = compute_unique_fused_charges(row_charges, row_flows) #get the charges common to rows and columns (only those matter) - common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) + common_charges, label_to_row, label_to_column = unique_row_charges.intersect( + unique_column_charges * (-1), return_indices=True) #convenience container for storing the degeneracies of each #column charge #column_degeneracies = dict(zip(unique_column_charges, column_dims)) column_degeneracies = dict(zip(unique_column_charges * (-1), column_dims)) + row_locations = find_sparse_positions( charges=row_charges, flows=row_flows, target_charges=common_charges) degeneracy_vector = np.empty( - np.sum([len(v) for v in row_locations.values()]), dtype=np.int64) + np.sum([len(v) for v in row_locations.values()]), dtype=np.uint32) #for each charge `c` in `common_charges` we generate a boolean mask #for indexing the positions where `relevant_column_charges` has a value of `c`. 
for c in common_charges: degeneracy_vector[row_locations[c]] = column_degeneracies[c] - # the result of the cumulative sum is a vector containing - # the stop positions of the non-zero values of each row - # within the data vector. - # E.g. for `relevant_row_charges` = [0,1,0,0,3], and - # column_degeneracies[0] = 10 - # column_degeneracies[1] = 20 - # column_degeneracies[3] = 30 - # we have - # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] - # The starting positions of consecutive elements (in row-major order) in - # each row with charge `c=0` within the data vector are then simply obtained using - # masks[0] = [True, False, True, True, False] - # and `stop_positions[masks[0]] - column_degeneracies[0]` - start_positions = np.cumsum(degeneracy_vector) - degeneracy_vector + start_positions = (np.cumsum(degeneracy_vector) - degeneracy_vector).astype( + np.uint32) blocks = [] for c in common_charges: @@ -410,135 +292,16 @@ def _find_diagonal_sparse_blocks( rlocs = row_locations[c] rlocs.sort() #sort in place (we need it again later) cdegs = column_degeneracies[c] - a = np.expand_dims(start_positions[rlocs], 1) - b = np.expand_dims(np.arange(cdegs), 0) - inds = np.reshape(a + b, len(rlocs) * cdegs) - if not return_data: - blocks.append([inds, (len(rlocs), cdegs)]) - else: - blocks.append(np.reshape(data[inds], (len(rlocs), cdegs))) - return common_charges, blocks #, start_positions, row_locations, column_degeneracies - - -def _find_diagonal_dense_blocks( - row_charges: List[Union[BaseCharge, ChargeCollection]], - column_charges: List[Union[BaseCharge, ChargeCollection]], - row_flows: List[Union[bool, int]], - column_flows: List[Union[bool, int]], - row_strides: Optional[np.ndarray] = None, - column_strides: Optional[np.ndarray] = None, -) -> Tuple[Union[BaseCharge, ChargeCollection], List[np.ndarray]]: - """ - Given the meta data and underlying data of a symmetric matrix, compute the - dense positions of all diagonal blocks and return them in a 
dict. - `row_charges` and `column_charges` are lists of np.ndarray. The tensor - is viewed as a matrix with rows given by fusing `row_charges` and - columns given by fusing `column_charges`. - - Args: - data: An np.ndarray of the data. The number of elements in `data` - has to match the number of non-zero elements defined by `charges` - and `flows` - row_charges: List of np.ndarray, one for each leg of the row-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - column_charges: List of np.ndarray, one for each leg of the column-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - row_flows: A list of integers, one for each entry in `row_charges`. - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - column_flows: A list of integers, one for each entry in `column_charges`. - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - row_strides: An optional np.ndarray denoting the strides of `row_charges`. - If `None`, natural stride ordering is assumed. - column_strides: An optional np.ndarray denoting the strides of - `column_charges`. If `None`, natural stride ordering is assumed. - - Returns: - List[Union[BaseCharge, ChargeCollection]]: A list of unique charges, one per block. - List[List]: A list containing the blocks information. 
- For each element `e` in the list `e[0]` is an `np.ndarray` of ints - denoting the dense positions of the non-zero elements and `e[1]` - is a tuple corresponding to the blocks' matrix shape - - """ - flows = list(row_flows).copy() - flows.extend(column_flows) - _check_flows(flows) - if len(flows) != (len(row_charges) + len(column_charges)): - raise ValueError( - "`len(flows)` is different from `len(row_charges) + len(column_charges)`" - ) - #get the unique column-charges - #we only care about their degeneracies, not their order; that's much faster - #to compute since we don't have to fuse all charges explicitly - #`compute_fused_charge_degeneracies` multiplies flows into the column_charges - unique_column_charges = compute_unique_fused_charges(column_charges, - column_flows) - - unique_row_charges = compute_unique_fused_charges(row_charges, row_flows) - #get the charges common to rows and columns (only those matter) - fused = unique_row_charges + unique_column_charges - li, ri = np.divmod( - np.nonzero(fused == unique_column_charges.zero_charge)[0], - len(unique_column_charges)) - common_charges = unique_row_charges.intersect(unique_column_charges * (-1)) - #print('_find_diagonal_sparse_blocks: unique charges ', time.time() - t1) - if ((row_strides is None) and - (column_strides is not None)) or ((row_strides is not None) and - (column_strides is None)): - raise ValueError("`row_strides` and `column_strides` " - "have to be passed simultaneously." 
- " Found `row_strides={}` and " - "`column_strides={}`".format(row_strides, column_strides)) - if row_strides is not None: - row_locations = find_dense_positions( - charges=row_charges, - flows=row_flows, - target_charges=unique_row_charges[li], - strides=row_strides) - - else: - column_dim = np.prod([len(c) for c in column_charges]) - row_locations = find_dense_positions( - charges=row_charges, - flows=row_flows, - target_charges=unique_row_charges[li]) - for v in row_locations.values(): - v *= column_dim - if column_strides is not None: - column_locations = find_dense_positions( - charges=column_charges, - flows=column_flows, - target_charges=unique_column_charges[ri], - strides=column_strides, - store_dual=True) - - else: - column_locations = find_dense_positions( - charges=column_charges, - flows=column_flows, - target_charges=unique_column_charges[ri], - store_dual=True) - blocks = [] - for c in unique_row_charges[li]: - #numpy broadcasting is substantially faster than kron! - rlocs = np.expand_dims(row_locations[c], 1) - clocs = np.expand_dims(column_locations[c], 0) - inds = np.reshape(rlocs + clocs, rlocs.shape[0] * clocs.shape[1]) - blocks.append([inds, (rlocs.shape[0], clocs.shape[1])]) - return unique_row_charges[li], blocks + inds = (np.add.outer(start_positions[rlocs], np.arange(cdegs))).ravel() + blocks.append([inds, (len(rlocs), cdegs)]) + return common_charges, blocks def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], flows: List[Union[int, bool]], target_charges: Union[BaseCharge, ChargeCollection], strides: Optional[np.ndarray] = None, - store_dual: Optional[bool] = False) -> np.ndarray: + store_dual: Optional[bool] = False) -> Dict: """ Find the dense locations of elements (i.e. 
the index-values within the DENSE tensor) in the vector of `fused_charges` resulting from fusing all elements of `charges` @@ -587,8 +350,7 @@ def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], if `None`, natural stride ordering is assumed. Returns: - np.ndarray: The index-positions within the dense data array - of the elements fusing to `target_charge`. + dict """ _check_flows(flows) @@ -620,7 +382,9 @@ def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], out[store_charges.get_item(n)] = inds[0] return out - left_charges, right_charges, partition = _find_best_partition(charges, flows) + partition = _find_best_partition([len(c) for c in charges]) + left_charges = fuse_charges(charges[:partition], flows[:partition]) + right_charges = fuse_charges(charges[partition:], flows[partition:]) if strides is not None: stride_arrays = [ np.arange(len(charges[n])) * strides[n] for n in range(len(charges)) @@ -641,12 +405,12 @@ def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], len(unique_right)) relevant_unique_left_inds = np.unique(tmp_inds_left) - left_lookup = np.empty(np.max(relevant_unique_left_inds) + 1, dtype=np.int64) + left_lookup = np.empty(np.max(relevant_unique_left_inds) + 1, dtype=np.uint32) left_lookup[relevant_unique_left_inds] = np.arange( len(relevant_unique_left_inds)) relevant_unique_right_inds = np.unique(tmp_inds_right) right_lookup = np.empty( - np.max(relevant_unique_right_inds) + 1, dtype=np.int64) + np.max(relevant_unique_right_inds) + 1, dtype=np.uint32) right_lookup[relevant_unique_right_inds] = np.arange( len(relevant_unique_right_inds)) @@ -671,10 +435,10 @@ def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], for m in range(len(lis)): li = lis[m] ri = ris[m] - dense_left_positions = left_charge_labels[0][left_charge_labels[1] == - left_lookup[li]] - dense_right_positions = right_charge_labels[0][right_charge_labels[1] == - right_lookup[ri]] + 
dense_left_positions = (left_charge_labels[0][ + left_charge_labels[1] == left_lookup[li]]).astype(np.uint32) + dense_right_positions = (right_charge_labels[0][ + right_charge_labels[1] == right_lookup[ri]]).astype(np.uint32) if strides is None: positions = np.expand_dims(dense_left_positions * len_right, 1) + np.expand_dims(dense_right_positions, 0) @@ -687,8 +451,8 @@ def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], left_positions.append(dense_left_positions) lookup.append( np.stack([ - np.arange(len(dense_left_positions)), - np.full(len(dense_left_positions), fill_value=m, dtype=np.int64) + np.arange(len(dense_left_positions), dtype=np.uint32), + np.full(len(dense_left_positions), fill_value=m, dtype=np.uint32) ], axis=1)) @@ -697,7 +461,7 @@ def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], it = np.concatenate(lookup, axis=0) table = it[ind_sort, :] out[store_charges.get_item(n)] = np.concatenate([ - dense_positions[table[n, 1]][table[n, 0], :] + dense_positions[table[n, 1]][table[n, 0], :].astype(np.uint32) for n in range(table.shape[0]) ]) else: @@ -736,16 +500,12 @@ def find_sparse_positions( * `target_charge=0`: [0,1,3,5,7] * `target_charge=1`: [2,4,6,8] Args: - left_charges: An np.ndarray of integer charges. - left_flow: The flow direction of the left charges. - right_charges: An np.ndarray of integer charges. - right_flow: The flow direction of the right charges. - target_charge: The target charge. + charges: An np.ndarray of integer charges. + flows: The flow direction of the left charges. + target_charges: The target charges. Returns: dict: Mapping integers to np.ndarray of integers. 
""" - #FIXME: this is probably still not optimal - _check_flows(flows) if len(charges) == 1: fused_charges = charges[0] * flows[0] @@ -758,8 +518,9 @@ def find_sparse_positions( c: np.nonzero(relevant_fused_charges == c)[0] for c in relevant_target_charges } - - left_charges, right_charges, partition = _find_best_partition(charges, flows) + partition = _find_best_partition([len(c) for c in charges]) + left_charges = fuse_charges(charges[:partition], flows[:partition]) + right_charges = fuse_charges(charges[partition:], flows[partition:]) # unique_target_charges, inds = target_charges.unique(return_index=True) # target_charges = target_charges[np.sort(inds)] @@ -769,17 +530,17 @@ def find_sparse_positions( fused_unique = unique_left + unique_right unique_inds = np.nonzero(fused_unique == target_charges) - relevant_positions = unique_inds[0] + relevant_positions = unique_inds[0].astype(np.uint32) tmp_inds_left, tmp_inds_right = np.divmod(relevant_positions, len(unique_right)) relevant_unique_left_inds = np.unique(tmp_inds_left) - left_lookup = np.empty(np.max(relevant_unique_left_inds) + 1, dtype=np.int64) + left_lookup = np.empty(np.max(relevant_unique_left_inds) + 1, dtype=np.uint32) left_lookup[relevant_unique_left_inds] = np.arange( len(relevant_unique_left_inds)) relevant_unique_right_inds = np.unique(tmp_inds_right) right_lookup = np.empty( - np.max(relevant_unique_right_inds) + 1, dtype=np.int64) + np.max(relevant_unique_right_inds) + 1, dtype=np.uint32) right_lookup[relevant_unique_right_inds] = np.arange( len(relevant_unique_right_inds)) @@ -801,12 +562,14 @@ def find_sparse_positions( #generate a degeneracy vector which for each value r in relevant_right_charges #holds the corresponding number of non-zero elements `relevant_right_charges` #that can add up to `target_charges`. 
- degeneracy_vector = np.empty(len(left_charge_labels[0]), dtype=np.int64) + degeneracy_vector = np.empty(len(left_charge_labels[0]), dtype=np.uint32) for n in range(len(relevant_unique_left_inds)): degeneracy_vector[relevant_left_inverse[ left_charge_labels[1] == n]] = np.sum(right_dims[tmp_inds_right[ tmp_inds_left == relevant_unique_left_inds[n]]]) - start_positions = np.cumsum(degeneracy_vector) - degeneracy_vector + + start_positions = (np.cumsum(degeneracy_vector) - degeneracy_vector).astype( + np.uint32) out = {} for n in range(len(target_charges)): block = [] @@ -817,200 +580,23 @@ def find_sparse_positions( lis, ris = np.divmod(unique_inds[0], len(unique_right)) for m in range(len(lis)): - a = np.expand_dims( - start_positions[relevant_left_inverse[left_charge_labels[1] == - left_lookup[lis[m]]]], 0) - ri_tmp, arange, tmp_inds = right_block_information[lis[m]] - b = np.expand_dims(arange[tmp_inds == np.nonzero(ri_tmp == ris[m])[0]], 1) - inds = a + b - block.append(np.reshape(inds, np.prod(inds.shape))) + block.append( + np.add.outer( + start_positions[relevant_left_inverse[left_charge_labels[1] == + left_lookup[lis[m]]]], + arange[tmp_inds == np.nonzero( + ri_tmp == ris[m])[0]]).ravel().astype(np.uint32)) out[target_charges.get_item(n)] = np.concatenate(block) return out -# def find_sparse_positions_2( -# charges: List[Union[BaseCharge, ChargeCollection]], -# flows: List[Union[int, bool]], -# target_charges: Union[BaseCharge, ChargeCollection]) -> Dict: -# """ -# Find the sparse locations of elements (i.e. the index-values within -# the SPARSE tensor) in the vector `fused_charges` (resulting from -# fusing `left_charges` and `right_charges`) -# that have a value of `target_charges`, assuming that all elements -# different from `target_charges` are `0`. 
-# For example, given -# ``` -# left_charges = [-2,0,1,0,0] -# right_charges = [-1,0,2,1] -# target_charges = [0,1] -# fused_charges = fuse_charges([left_charges, right_charges],[1,1]) -# print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] -# ``` 0 1 2 3 4 5 6 7 8 -# we want to find the all different blocks -# that fuse to `target_charges=[0,1]`, i.e. where `fused_charges==0` or `1`, -# together with their corresponding sparse index-values of the data in the sparse array, -# assuming that all elements in `fused_charges` different from `target_charges` are 0. - -# `find_sparse_blocks` returns a dict mapping integers `target_charge` -# to an array of integers denoting the sparse locations of elements within -# `fused_charges`. -# For the above example, we get: -# * `target_charge=0`: [0,1,3,5,7] -# * `target_charge=1`: [2,4,6,8] -# Args: -# left_charges: An np.ndarray of integer charges. -# left_flow: The flow direction of the left charges. -# right_charges: An np.ndarray of integer charges. -# right_flow: The flow direction of the right charges. -# target_charge: The target charge. -# Returns: -# dict: Mapping integers to np.ndarray of integers. 
-# """ -# #FIXME: this is probably still not optimal - -# _check_flows(flows) -# if len(charges) == 1: -# fused_charges = charges[0] * flows[0] -# unique_charges = fused_charges.unique() -# target_charges = target_charges.unique() -# relevant_target_charges = unique_charges.intersect(target_charges) -# relevant_fused_charges = fused_charges[fused_charges.isin( -# relevant_target_charges)] -# return { -# c: np.nonzero(relevant_fused_charges == c)[0] -# for c in relevant_target_charges -# } - -# left_charges, right_charges, partition = _find_best_partition(charges, flows) - -# unique_target_charges, inds = target_charges.unique(return_index=True) -# target_charges = target_charges[np.sort(inds)] - -# unique_left = left_charges.unique() -# unique_right = right_charges.unique() -# fused = unique_left + unique_right - -# #compute all unique charges that can add up to -# #target_charges -# left_inds, right_inds = [], [] -# for target_charge in target_charges: -# li, ri = np.divmod(np.nonzero(fused == target_charge)[0], len(unique_right)) -# left_inds.append(li) -# right_inds.append(ri) - -# #now compute the relevant unique left and right charges -# unique_left_charges = unique_left[np.unique(np.concatenate(left_inds))] -# unique_right_charges = unique_right[np.unique(np.concatenate(right_inds))] - -# #only keep those charges that are relevant -# relevant_left_charges = left_charges[left_charges.isin(unique_left_charges)] -# relevant_right_charges = right_charges[right_charges.isin( -# unique_right_charges)] - -# unique_right_charges, right_dims = relevant_right_charges.unique( -# return_counts=True) -# right_degeneracies = dict(zip(unique_right_charges, right_dims)) -# #generate a degeneracy vector which for each value r in relevant_right_charges -# #holds the corresponding number of non-zero elements `relevant_right_charges` -# #that can add up to `target_charges`. 
-# degeneracy_vector = np.empty(len(relevant_left_charges), dtype=np.int64) -# right_indices = {} - -# for n in range(len(unique_left_charges)): -# left_charge = unique_left_charges[n] -# total_charge = left_charge + unique_right_charges -# total_degeneracy = np.sum(right_dims[total_charge.isin(target_charges)]) -# tmp_relevant_right_charges = relevant_right_charges[ -# relevant_right_charges.isin((target_charges + left_charge * (-1)))] - -# for n in range(len(target_charges)): -# target_charge = target_charges[n] -# right_indices[(left_charge.get_item(0), -# target_charge.get_item(0))] = np.nonzero( -# tmp_relevant_right_charges == ( -# target_charge + left_charge * (-1)))[0] - -# degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy - -# start_positions = np.cumsum(degeneracy_vector) - degeneracy_vector -# blocks = {t: [] for t in target_charges} -# # iterator returns tuple of `int` for ChargeCollection objects -# # and `int` for Ba seCharge objects (both hashable) -# for left_charge in unique_left_charges: -# a = np.expand_dims(start_positions[relevant_left_charges == left_charge], 0) -# for target_charge in target_charges: -# ri = right_indices[(left_charge, target_charge)] -# if len(ri) != 0: -# b = np.expand_dims(ri, 1) -# tmp = a + b -# blocks[target_charge].append(np.reshape(tmp, np.prod(tmp.shape))) -# out = {} -# for target_charge in target_charges: -# out[target_charge] = np.concatenate(blocks[target_charge]) -# return out - - -def compute_dense_to_sparse_mapping( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]], - target_charge: Union[BaseCharge, ChargeCollection]) -> List[np.ndarray]: - """ - Compute the mapping from multi-index positions to the linear positions - within the sparse data container, given the meta-data of a symmetric tensor. - This function returns a list of np.ndarray `index_positions`, with - `len(index_positions)=len(charges)` (equal to the rank of the tensor). 
- When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. - ` - multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) - ` - with `r` the rank of the tensor and `N` the number of non-zero elements of - the symmetric tensor, then the element at position `n` within the linear - data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], - i.e. `data[n]` has the multi-index `multi_indices[n,:]`, and the total charges - can for example be obtained using - ``` - index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) - total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) - for n in range(len(charges)): - total_charges += flows[n]*charges[n][index_positions[n]] - np.testing.assert_allclose(total_charges, 0) - ``` - Args: - charges: List of np.ndarray of int, one for each leg of the - underlying tensor. Each np.ndarray `charges[leg]` - is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - target_charge: The total target charge of the blocks to be calculated. - Returns: - list of np.ndarray: A list of length `r`, with `r` the rank of the tensor. - Each element in the list is an N-dimensional np.ndarray of int, - with `N` the number of non-zero elements. 
- """ - #find the best partition (the one where left and right dimensions are - #closest - dims = np.asarray([len(c) for c in charges]) - #note: left_charges and right_charges have been fused from RIGHT to LEFT - left_charges, right_charges, partition = _find_best_partition(charges, flows) - nz_indices = find_dense_positions([left_charges], [1], [right_charges], [1], - target_charges=target_charge) - - if len(nz_indices) == 0: - raise ValueError( - "`charges` do not add up to a total charge {}".format(target_charge)) - return np.unravel_index(nz_indices, dims) - - -class BlockSparseTensor: - """ - Minimal class implementation of block sparsity. - The class design follows Glen's proposal (Design 0). - The class currently only supports a single U(1) symmetry - and only numpy.ndarray. +class BlockSparseTensor: + """ + Minimal class implementation of block sparsity. + The class design follows Glen's proposal (Design 0). + The class currently only supports a single U(1) symmetry + and only numpy.ndarray. Attributes: * self.data: A 1d np.ndarray storing the underlying @@ -1071,7 +657,6 @@ def todense(self) -> np.ndarray: """ out = np.asarray(np.zeros(self.dense_shape, dtype=self.dtype).flat) - charges = self.charges out[np.nonzero(fuse_charges(charges, self.flows) == charges[0].zero_charge) [0]] = self.data @@ -1208,7 +793,6 @@ def transpose( Returns: BlockSparseTensor: The transposed tensor. """ - if len(order) != self.rank: raise ValueError( "`len(order)={}` is different form `self.rank={}`".format( @@ -1217,24 +801,26 @@ def transpose( #check for trivial permutation if np.all(order == np.arange(len(order))): return self - #TODO: flatten_meta_data is called within _compute_transposition_data - #as well. reuse it. 
- _, tr_data, tr_partition = _compute_transposition_data(self.indices, order) - flat_charges, flat_flows, _, flat_order = flatten_meta_data( - self.indices, order) - - cs, sparse_blocks = _find_diagonal_sparse_blocks( - [], [flat_charges[n] for n in flat_order[0:tr_partition]], - [flat_charges[n] for n in flat_order[tr_partition:]], - [flat_flows[n] for n in flat_order[0:tr_partition]], - [flat_flows[n] for n in flat_order[tr_partition:]], - return_data=False) + flat_indices, flat_charges, flat_flows, _, flat_order, _ = flatten_meta_data( + self.indices, order, 0) + tr_partition = _find_best_partition( + [len(flat_charges[n]) for n in flat_order]) + + tr_charges, tr_sparse_blocks = _find_transposed_diagonal_sparse_blocks( + flat_charges, flat_flows, flat_order, tr_partition) + + charges, sparse_blocks = _find_diagonal_sparse_blocks( + [flat_charges[n] for n in flat_order], + [flat_flows[n] for n in flat_order], tr_partition) data = np.empty(len(self.data), dtype=self.dtype) for n in range(len(sparse_blocks)): - sparse_block = sparse_blocks[n] - data[sparse_block[0]] = self.data[tr_data[cs.get_item(n)][0]] - self.indices = [self.indices[o] for o in order] + c = charges.get_item(n) + sparse_block = sparse_blocks[n][0] + ind = np.nonzero(tr_charges == c)[0][0] + permutation = tr_sparse_blocks[ind][0] + data[sparse_block] = self.data[permutation] + self.indices = [self.indices[o] for o in order] self.data = data return self @@ -1256,7 +842,7 @@ def get_elementary_indices(self) -> List: def reshape(self, shape: Union[Iterable[Index], Iterable[int]]) -> None: """ - Reshape `tensor` into `shape` in place. + Reshape `tensor` into `shape. `BlockSparseTensor.reshape` works essentially the same as the dense version, with the notable exception that the tensor can only be reshaped into a form compatible with its elementary indices. 
@@ -1290,93 +876,56 @@ def reshape(self, shape: Union[Iterable[Index], Iterable[int]]) -> None: Returns: BlockSparseTensor: A new tensor reshaped into `shape` """ - dense_shape = [] + new_shape = [] for s in shape: if isinstance(s, Index): - dense_shape.append(s.dimension) + new_shape.append(s.dimension) else: - dense_shape.append(s) + new_shape.append(s) # a few simple checks - if np.prod(dense_shape) != np.prod(self.dense_shape): + if np.prod(new_shape) != np.prod(self.dense_shape): raise ValueError("A tensor with {} elements cannot be " "reshaped into a tensor with {} elements".format( - np.prod(self.shape), np.prod(dense_shape))) + np.prod(self.shape), np.prod(self.dense_shape))) #keep a copy of the old indices for the case where reshaping fails #FIXME: this is pretty hacky! - index_copy = [i.copy() for i in self.indices] + indices = [i.copy() for i in self.indices] + flat_indices = [] + for i in indices: + flat_indices.extend(i.get_elementary_indices()) def raise_error(): #if this error is raised then `shape` is incompatible #with the elementary indices. We then reset the shape #to what is was before the call to `reshape`. 
- self.indices = index_copy - elementary_indices = [] - for i in self.indices: - elementary_indices.extend(i.get_elementary_indices()) + # self.indices = index_copy raise ValueError("The shape {} is incompatible with the " "elementary shape {} of the tensor.".format( - dense_shape, - tuple([e.dimension for e in elementary_indices]))) - - self.reset_shape() #bring tensor back into its elementary shape - for n in range(len(dense_shape)): - if dense_shape[n] > self.dense_shape[n]: - while dense_shape[n] > self.dense_shape[n]: - #fuse indices - i1, i2 = self.indices.pop(n), self.indices.pop(n) + new_shape, + tuple([e.dimension for e in flat_indices]))) + + for n in range(len(new_shape)): + if new_shape[n] > flat_indices[n].dimension: + while new_shape[n] > flat_indices[n].dimension: + #fuse flat_indices + i1, i2 = flat_indices.pop(n), flat_indices.pop(n) #note: the resulting flow is set to one since the flow #is multiplied into the charges. As a result the tensor #will then be invariant in any case. - self.indices.insert(n, fuse_index_pair(i1, i2)) - if self.dense_shape[n] > dense_shape[n]: + flat_indices.insert(n, fuse_index_pair(i1, i2)) + if flat_indices[n].dimension > new_shape[n]: raise_error() - elif dense_shape[n] < self.dense_shape[n]: + elif new_shape[n] < flat_indices[n].dimension: raise_error() - #at this point the first len(dense_shape) indices of the tensor - #match the `dense_shape`. - while len(dense_shape) < len(self.indices): - i2, i1 = self.indices.pop(), self.indices.pop() - self.indices.append(fuse_index_pair(i1, i2)) - - def _get_diagonal_blocks(self, return_data: Optional[bool] = False) -> Dict: - """ - Obtain the diagonal blocks of a symmetric matrix. - BlockSparseTensor has to be a matrix. - This routine avoids explicit fusion of row or column charges. - - Args: - return_data: If `True`, the returned dictionary maps quantum numbers `q` to - an actual `np.ndarray` containing the data of block `q`. 
- If `False`, the returned dict maps quantum numbers `q` to a list - `[locations, shape]`, where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within `self.data`, i.e. - `self.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. - Returns: - dict: If `return_data=True`: Dictionary mapping charge `q` to an - np.ndarray of rank 2 (a matrix). - If `return_data=False`: Dictionary mapping charge `q` to a - list `[locations, shape]`, where `locations` is an np.ndarray of type - np.int64 containing the locations of the tensor elements within `self.data` - - - """ - if self.rank != 2: - raise ValueError( - "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" - .format(self.rank)) + #at this point the first len(new_shape) flat_indices of the tensor + #match the `new_shape`. + while len(new_shape) < len(flat_indices): + i2, i1 = flat_indices.pop(), flat_indices.pop() + flat_indices.append(fuse_index_pair(i1, i2)) - row_indices = self.indices[0].get_elementary_indices() - column_indices = self.indices[1].get_elementary_indices() - - return _find_diagonal_sparse_blocks( - data=self.data, - row_charges=[i.charges for i in row_indices], - column_charges=[i.charges for i in column_indices], - row_flows=[i.flow for i in row_indices], - column_flows=[i.flow for i in column_indices], - return_data=return_data) + result = BlockSparseTensor(data=self.data, indices=flat_indices) + return result def reshape(tensor: BlockSparseTensor, @@ -1415,41 +964,23 @@ def reshape(tensor: BlockSparseTensor, Returns: BlockSparseTensor: A new tensor reshaped into `shape` """ - result = BlockSparseTensor( - data=tensor.data.copy(), indices=[i.copy() for i in tensor.indices]) - result.reshape(shape) - return result + return self.reshape(shape) -def transpose( - tensor: BlockSparseTensor, - order: Union[List[int], np.ndarray], - permutation: 
Optional[np.ndarray] = None, - return_permutation: Optional[bool] = False) -> "BlockSparseTensor": + +def transpose(tensor: BlockSparseTensor, + order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": """ Transpose `tensor` into the new order `order`. This routine currently shuffles data. Args: tensor: The tensor to be transposed. order: The new order of indices. - permutation: An np.ndarray of int for reshuffling the data, - typically the output of a prior call to `transpose`. Passing `permutation` - can greatly speed up the transposition. - return_permutation: If `True`, return the the permutation data. Returns: - if `return_permutation == False`: - BlockSparseTensor: The transposed tensor. - if `return_permutation == True`: - BlockSparseTensor, permutation: The transposed tensor - and the permutation data - + BlockSparseTensor: The transposed tensor. """ - if (permutation is not None) and (len(permutation) != len(tensor.data)): - raise ValueError("len(permutation) != len(tensor.data).") result = tensor.copy() - inds = result.transpose(order, permutation, return_permutation) - if return_permutation: - return result, inds + result.transpose(order) return result @@ -1464,16 +995,9 @@ def tensordot( tensor1: First tensor. tensor2: Second tensor. axes: The axes to contract. - permutation1: Permutation data for `tensor1`. - permutation2: Permutation data for `tensor2`. - return_permutation: If `True`, return the the permutation data. + final_order: An optional final order for the result Returns: - if `return_permutation == False`: - BlockSparseTensor: The result of contracting `tensor1` and `tensor2`. - if `return_permutation == True`: - BlockSparseTensor, np.ndarrays, np.ndarray: The result of - contracting `tensor1` and `tensor2`, together with their respective - permutation data. + BlockSparseTensor: The result of the tensor contraction. 
""" axes1 = axes[0] @@ -1548,11 +1072,19 @@ def tensordot( for n, i in enumerate(indices): i.name = 'index_{}'.format(n) - charges1, tr_data_1, tr_partition1 = _compute_transposition_data( + t1 = time.time() + _, flat_charges1, flat_flows1, flat_strides1, flat_order1, tr_partition1 = flatten_meta_data( tensor1.indices, new_order1, len(free_axes1)) - charges2, tr_data_2, tr_partition2 = _compute_transposition_data( + charges1, tr_sparse_blocks_1 = _find_transposed_diagonal_sparse_blocks( + flat_charges1, flat_flows1, flat_order1, tr_partition1) + + _, flat_charges2, flat_flows2, flat_strides2, flat_order2, tr_partition2 = flatten_meta_data( tensor2.indices, new_order2, len(axes2)) + charges2, tr_sparse_blocks_2 = _find_transposed_diagonal_sparse_blocks( + flat_charges2, flat_flows2, flat_order2, tr_partition2) + dt1 = time.time() - t1 + print('time spent in _compute_transposition_data: {}'.format(dt1)) common_charges = charges1.intersect(charges2) #initialize the data-vector of the output with zeros; @@ -1561,57 +1093,71 @@ def tensordot( #as a transposition of the final tensor final_indices = [indices[n] for n in final_order] _, reverse_order = np.unique(final_order, return_index=True) - - charges_final, tr_data_final, tr_partition_final = _compute_transposition_data( + t1 = time.time() + charges_final, sparse_blocks_final = _compute_transposed_sparse_blocks( final_indices, reverse_order, len(free_axes1)) - num_nonzero_elements = np.sum([len(t[0]) for t in tr_data_final.values()]) + dt2 = time.time() - t1 + print('time spent in _compute_transposition_data: {}'.format(dt2)) + + num_nonzero_elements = np.sum([len(t[0]) for t in sparse_blocks_final]) data = np.zeros( num_nonzero_elements, dtype=np.result_type(tensor1.dtype, tensor2.dtype)) + t1 = time.time() for n in range(len(common_charges)): c = common_charges.get_item(n) - permutation1 = tr_data_1[c] - permutation2 = tr_data_2[c] - permutationfinal = tr_data_final[c] - b1 = 
np.reshape(tensor1.data[permutation1[0]], permutation1[1]) - b2 = np.reshape(tensor2.data[permutation2[0]], permutation2[1]) - res = np.matmul(b1, b2) + permutation1 = tr_sparse_blocks_1[np.nonzero(charges1 == c)[0][0]] + permutation2 = tr_sparse_blocks_2[np.nonzero(charges1 == c)[0][0]] + permutationfinal = sparse_blocks_final[np.nonzero( + charges_final == c)[0][0]] + res = np.matmul( + np.reshape(tensor1.data[permutation1[0]], permutation1[1]), + np.reshape(tensor2.data[permutation2[0]], permutation2[1])) data[permutationfinal[0]] = res.flat + + dt3 = time.time() - t1 + print('time spent doing matmul: {}'.format(dt3)) + + print('total: {}'.format(dt1 + dt2 + dt3)) return BlockSparseTensor(data=data, indices=final_indices) else: #Note: `cs` may contain charges that are not present in `common_charges` - cs, sparse_blocks = _find_diagonal_sparse_blocks( - [], [i.charges for i in left_indices], - [i.charges for i in right_indices], [i.flow for i in left_indices], - [i.flow for i in right_indices], - return_data=False) + t1 = time.time() + charges = [i.charges for i in indices] + flows = [i.flow for i in indices] + cs, sparse_blocks = _find_diagonal_sparse_blocks(charges, flows, + len(left_indices)) + print('time spent finding sparse blocks: {}'.format(time.time() - t1)) #print('finding sparse positions', time.time() - t1) num_nonzero_elements = np.sum([len(v[0]) for v in sparse_blocks]) #Note that empty is not a viable choice here. 
data = np.zeros( num_nonzero_elements, dtype=np.result_type(tensor1.dtype, tensor2.dtype)) + t1 = time.time() for n in range(len(common_charges)): c = common_charges.get_item(n) - permutation1 = tr_data_1[c] - permutation2 = tr_data_2[c] + permutation1 = tr_sparse_blocks_1[np.nonzero(charges1 == c)[0][0]] + permutation2 = tr_sparse_blocks_2[np.nonzero(charges2 == c)[0][0]] sparse_block = sparse_blocks[np.nonzero(cs == c)[0][0]] b1 = np.reshape(tensor1.data[permutation1[0]], permutation1[1]) b2 = np.reshape(tensor2.data[permutation2[0]], permutation2[1]) res = np.matmul(b1, b2) data[sparse_block[0]] = res.flat #print('tensordot', time.time() - t1) + print('time spent doing matmul: {}'.format(time.time() - t1)) return BlockSparseTensor(data=data, indices=indices) -def flatten_meta_data(indices, order): - for n, i in enumerate(indices): - i.name = 'index_{}'.format(n) +def flatten_meta_data(indices, order, partition): elementary_indices = {} flat_elementary_indices = [] + new_partition = 0 for n in range(len(indices)): elementary_indices[n] = indices[n].get_elementary_indices() + if n < partition: + new_partition += len(elementary_indices[n]) flat_elementary_indices.extend(elementary_indices[n]) flat_index_list = np.arange(len(flat_elementary_indices)) cum_num_legs = np.append( @@ -1624,10 +1170,10 @@ def flatten_meta_data(indices, order): flat_order = np.concatenate( [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) - return flat_charges, flat_flows, flat_strides, flat_order + return flat_elementary_indices, flat_charges, flat_flows, flat_strides, flat_order, new_partition -def _compute_transposition_data( +def _compute_transposed_sparse_blocks( indices: BlockSparseTensor, order: Union[List[int], np.ndarray], transposed_partition: Optional[int] = None @@ -1647,40 +1193,835 @@ def _compute_transposition_data( raise ValueError( "`len(order)={}` is different form `len(indices)={}`".format( len(order), len(indices))) - - #we use flat meta data 
because it is - #more efficient to get the fused charges using - #the best partition - flat_charges, flat_flows, flat_strides, flat_order = flatten_meta_data( - indices, order) - partition = _find_best_partition( - flat_charges, flat_flows, return_charges=False) - + flat_indices, flat_charges, flat_flows, flat_strides, flat_order, transposed_partition = flatten_meta_data( + indices, order, transposed_partition) if transposed_partition is None: transposed_partition = _find_best_partition( - [flat_charges[n] for n in flat_order], - [flat_flows[n] for n in flat_order], - return_charges=False) - row_lookup, column_lookup = _compute_sparse_lookups( - flat_charges[0:partition], flat_flows[0:partition], - flat_charges[partition:], flat_flows[partition:]) - cs, dense_blocks = _find_diagonal_dense_blocks( - [flat_charges[n] for n in flat_order[0:transposed_partition]], - [flat_charges[n] for n in flat_order[transposed_partition:]], - [flat_flows[n] for n in flat_order[0:transposed_partition]], - [flat_flows[n] for n in flat_order[transposed_partition:]], - row_strides=flat_strides[flat_order[0:transposed_partition]], - column_strides=flat_strides[flat_order[transposed_partition:]]) - - column_dim = np.prod( - [len(flat_charges[n]) for n in range(partition, len(flat_charges))]) - transposed_positions = {} - - for n in range(len(dense_blocks)): - b = dense_blocks[n] - rinds, cinds = np.divmod(b[0], column_dim) - transposed_positions[cs.get_item(n)] = [ - row_lookup[rinds] + column_lookup[cinds], b[1] - ] - #return row_lookup, column_lookup, cs, dense_blocks - return cs, transposed_positions, transposed_partition + [len(flat_charges[n]) for n in flat_order]) + + cs, blocks = _find_transposed_diagonal_sparse_blocks( + flat_charges, flat_flows, flat_order, transposed_partition) + return cs, blocks + + +def _find_transposed_diagonal_sparse_blocks( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[bool, int]], order: np.ndarray, tr_partition: int +) -> 
Tuple[Union[BaseCharge, ChargeCollection], List[np.ndarray]]: + """ + Given the meta data and underlying data of a symmetric matrix, compute the + dense positions of all diagonal blocks and return them in a dict. + `row_charges` and `column_charges` are lists of np.ndarray. The tensor + is viewed as a matrix with rows given by fusing `row_charges` and + columns given by fusing `column_charges`. + + Args: + data: An np.ndarray of the data. The number of elements in `data` + has to match the number of non-zero elements defined by `charges` + and `flows` + row_charges: List of np.ndarray, one for each leg of the row-indices. + Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + column_charges: List of np.ndarray, one for each leg of the column-indices. + Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. + The bond dimension `D[leg]` can vary on each leg. + row_flows: A list of integers, one for each entry in `row_charges`. + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + column_flows: A list of integers, one for each entry in `column_charges`. + with values `1` or `-1`, denoting the flow direction + of the charges on each leg. `1` is inflowing, `-1` is outflowing + charge. + row_strides: An optional np.ndarray denoting the strides of `row_charges`. + If `None`, natural stride ordering is assumed. + column_strides: An optional np.ndarray denoting the strides of + `column_charges`. If `None`, natural stride ordering is assumed. + + Returns: + List[Union[BaseCharge, ChargeCollection]]: A list of unique charges, one per block. + List[List]: A list containing the blocks information. 
+ For each element `e` in the list `e[0]` is an `np.ndarray` of ints + denoting the dense positions of the non-zero elements and `e[1]` + is a tuple corresponding to the blocks' matrix shape + """ + t11 = time.time() + _check_flows(flows) + if len(flows) != len(charges): + raise ValueError("`len(flows)` is different from `len(charges) ") + if np.all(order == np.arange(len(order))): + return _find_diagonal_sparse_blocks(charges, flows, tr_partition) + + strides = _get_strides([len(c) for c in charges]) + + tr_row_charges = [charges[n] for n in order[:tr_partition]] + tr_row_flows = [flows[n] for n in order[:tr_partition]] + tr_row_strides = [strides[n] for n in order[:tr_partition]] + + tr_column_charges = [charges[n] for n in order[tr_partition:]] + tr_column_flows = [flows[n] for n in order[tr_partition:]] + tr_column_strides = [strides[n] for n in order[tr_partition:]] + + unique_tr_column_charges, tr_column_dims = compute_fused_charge_degeneracies( + tr_column_charges, tr_column_flows) + unique_tr_row_charges = compute_unique_fused_charges(tr_row_charges, + tr_row_flows) + + fused = unique_tr_row_charges + unique_tr_column_charges + tr_li, tr_ri = np.divmod( + np.nonzero(fused == unique_tr_column_charges.zero_charge)[0], + len(unique_tr_column_charges)) + t1 = time.time() + row_locations = find_dense_positions( + charges=tr_row_charges, + flows=tr_row_flows, + target_charges=unique_tr_row_charges[tr_li], + strides=tr_row_strides) + + column_locations = find_dense_positions( + charges=tr_column_charges, + flows=tr_column_flows, + target_charges=unique_tr_column_charges[tr_ri], + strides=tr_column_strides, + store_dual=True) + print('find_dense_positions: ', time.time() - t1) + partition = _find_best_partition([len(c) for c in charges]) + fused_row_charges = fuse_charges(charges[:partition], flows[:partition]) + fused_column_charges = fuse_charges(charges[partition:], flows[partition:]) + + unique_fused_row, row_inverse = 
fused_row_charges.unique(return_inverse=True) + unique_fused_column, column_inverse = fused_column_charges.unique( + return_inverse=True) + + unique_column_charges, column_dims = compute_fused_charge_degeneracies( + charges[partition:], flows[partition:]) + unique_row_charges = compute_unique_fused_charges(charges[:partition], + flows[:partition]) + fused = unique_row_charges + unique_column_charges + li, ri = np.divmod( + np.nonzero(fused == unique_column_charges.zero_charge)[0], + len(unique_column_charges)) + + common_charges, label_to_row, label_to_column = unique_row_charges.intersect( + unique_column_charges * (-1), return_indices=True) + + tmp = -np.ones(len(unique_column_charges), dtype=np.int16) + for n in range(len(label_to_row)): + tmp[label_to_row[n]] = n + + degeneracy_vector = np.append(column_dims[label_to_column], + 0)[tmp[row_inverse]] + start_positions = np.cumsum(np.insert(degeneracy_vector[:-1], 0, + 0)).astype(np.uint32) + + column_dimension = np.prod([len(c) for c in charges[partition:]]) + + column_lookup = compute_sparse_lookup(charges[partition:], flows[partition:], + common_charges) + + blocks = [] + t1 = time.time() + for c in unique_tr_row_charges[tr_li]: + rlocs = row_locations[c] + clocs = column_locations[c] + orig_row_posL, orig_col_posL = np.divmod(rlocs, np.uint32(column_dimension)) + orig_row_posR, orig_col_posR = np.divmod(clocs, np.uint32(column_dimension)) + inds = (start_positions[np.add.outer(orig_row_posL, orig_row_posR)] + + column_lookup[np.add.outer(orig_col_posL, orig_col_posR)]).ravel() + + blocks.append([inds, (len(rlocs), len(clocs))]) + print('doing divmods and other: ', time.time() - t1) + t1 = time.time() + charges_out = unique_tr_row_charges[tr_li] + print('computing charges: ', time.time() - t1) + print('total in _find_transposed_sparse_blocks: ', time.time() - t11) + return charges_out, blocks + + +##################################################### DEPRECATED ROUTINES ############################ + + +def 
_find_diagonal_dense_blocks(
    row_charges: List[Union[BaseCharge, ChargeCollection]],
    column_charges: List[Union[BaseCharge, ChargeCollection]],
    row_flows: List[Union[bool, int]],
    column_flows: List[Union[bool, int]],
    row_strides: Optional[np.ndarray] = None,
    column_strides: Optional[np.ndarray] = None,
) -> Tuple[Union[BaseCharge, ChargeCollection], List[np.ndarray]]:
  """
  Deprecated.

  Compute the dense (row-major) positions of all diagonal blocks of a
  symmetric matrix. The tensor is viewed as a matrix with rows given by
  fusing `row_charges` and columns given by fusing `column_charges`;
  a block is "diagonal" when its fused row charge and fused column
  charge add up to the zero charge.

  Args:
    row_charges: List of charge objects, one for each leg of the
      row-indices. `row_charges[leg]` holds the charges of leg `leg`
      and has length `D[leg]`; the bond dimension `D[leg]` can vary
      on each leg.
    column_charges: List of charge objects, one for each leg of the
      column-indices, in the same format as `row_charges`.
    row_flows: A list of integers, one for each entry in `row_charges`,
      with values `1` or `-1`, denoting the flow direction
      of the charges on each leg. `1` is inflowing, `-1` is outflowing
      charge.
    column_flows: A list of integers, one for each entry in
      `column_charges`, with values `1` or `-1`, denoting the flow
      direction of the charges on each leg. `1` is inflowing, `-1` is
      outflowing charge.
    row_strides: An optional np.ndarray denoting the strides of `row_charges`.
      If `None`, natural stride ordering is assumed.
    column_strides: An optional np.ndarray denoting the strides of
      `column_charges`. If `None`, natural stride ordering is assumed.
      `row_strides` and `column_strides` must be passed together or
      not at all.

  Returns:
    Union[BaseCharge, ChargeCollection]: The unique charges, one per block.
    List[List]: The blocks information.
      For each element `e` in the list `e[0]` is an `np.ndarray` of ints
      denoting the dense positions of the non-zero elements and `e[1]`
      is a tuple corresponding to the blocks' matrix shape.

  Raises:
    ValueError: If `len(flows)` does not match the total number of legs,
      or if only one of `row_strides`/`column_strides` is given.
  """
  flows = list(row_flows).copy()
  flows.extend(column_flows)
  _check_flows(flows)
  if len(flows) != (len(row_charges) + len(column_charges)):
    raise ValueError(
        "`len(flows)` is different from `len(row_charges) + len(column_charges)`"
    )
  #get the unique column-charges
  #we only care about their degeneracies, not their order; that's much faster
  #to compute since we don't have to fuse all charges explicitly
  #`compute_fused_charge_degeneracies` multiplies flows into the column_charges
  unique_column_charges = compute_unique_fused_charges(column_charges,
                                                       column_flows)

  unique_row_charges = compute_unique_fused_charges(row_charges, row_flows)
  #get the charges common to rows and columns (only those matter)
  # `fused` is the flattened outer sum of unique row and column charges;
  # `np.divmod` by the number of unique column charges recovers the
  # (row-label, column-label) pairs whose charges fuse to the zero charge.
  fused = unique_row_charges + unique_column_charges
  li, ri = np.divmod(
      np.nonzero(fused == unique_column_charges.zero_charge)[0],
      len(unique_column_charges))
  #print('_find_diagonal_sparse_blocks: unique charges ', time.time() - t1)
  if ((row_strides is None) and
      (column_strides is not None)) or ((row_strides is not None) and
                                        (column_strides is None)):
    raise ValueError("`row_strides` and `column_strides` "
                     "have to be passed simultaneously."
                     " Found `row_strides={}` and "
                     "`column_strides={}`".format(row_strides, column_strides))
  if row_strides is not None:
    row_locations = find_dense_positions(
        charges=row_charges,
        flows=row_flows,
        target_charges=unique_row_charges[li],
        strides=row_strides)

  else:
    column_dim = np.prod([len(c) for c in column_charges])
    row_locations = find_dense_positions(
        charges=row_charges,
        flows=row_flows,
        target_charges=unique_row_charges[li])
    # natural strides: scale row positions by the total column dimension
    # to turn them into row-major offsets into the dense matrix
    for v in row_locations.values():
      v *= column_dim
  if column_strides is not None:
    column_locations = find_dense_positions(
        charges=column_charges,
        flows=column_flows,
        target_charges=unique_column_charges[ri],
        strides=column_strides,
        store_dual=True)

  else:
    column_locations = find_dense_positions(
        charges=column_charges,
        flows=column_flows,
        target_charges=unique_column_charges[ri],
        store_dual=True)
  blocks = []
  for c in unique_row_charges[li]:
    #numpy broadcasting is substantially faster than kron!
    # broadcasted outer sum of row offsets and column offsets yields the
    # dense position of every element of the block with charge `c`
    rlocs = np.expand_dims(row_locations[c], 1)
    clocs = np.expand_dims(column_locations[c], 0)
    inds = np.reshape(rlocs + clocs, rlocs.shape[0] * clocs.shape[1])
    blocks.append([inds, (rlocs.shape[0], clocs.shape[1])])
  return unique_row_charges[li], blocks


# def find_sparse_positions_2(
#     charges: List[Union[BaseCharge, ChargeCollection]],
#     flows: List[Union[int, bool]],
#     target_charges: Union[BaseCharge, ChargeCollection]) -> Dict:
#   """
#   Find the sparse locations of elements (i.e. the index-values within
#   the SPARSE tensor) in the vector `fused_charges` (resulting from
#   fusing `left_charges` and `right_charges`)
#   that have a value of `target_charges`, assuming that all elements
#   different from `target_charges` are `0`.
+# For example, given +# ``` +# left_charges = [-2,0,1,0,0] +# right_charges = [-1,0,2,1] +# target_charges = [0,1] +# fused_charges = fuse_charges([left_charges, right_charges],[1,1]) +# print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] +# ``` 0 1 2 3 4 5 6 7 8 +# we want to find the all different blocks +# that fuse to `target_charges=[0,1]`, i.e. where `fused_charges==0` or `1`, +# together with their corresponding sparse index-values of the data in the sparse array, +# assuming that all elements in `fused_charges` different from `target_charges` are 0. + +# `find_sparse_blocks` returns a dict mapping integers `target_charge` +# to an array of integers denoting the sparse locations of elements within +# `fused_charges`. +# For the above example, we get: +# * `target_charge=0`: [0,1,3,5,7] +# * `target_charge=1`: [2,4,6,8] +# Args: +# left_charges: An np.ndarray of integer charges. +# left_flow: The flow direction of the left charges. +# right_charges: An np.ndarray of integer charges. +# right_flow: The flow direction of the right charges. +# target_charge: The target charge. +# Returns: +# dict: Mapping integers to np.ndarray of integers. 
+# """ +# #FIXME: this is probably still not optimal + +# _check_flows(flows) +# if len(charges) == 1: +# fused_charges = charges[0] * flows[0] +# unique_charges = fused_charges.unique() +# target_charges = target_charges.unique() +# relevant_target_charges = unique_charges.intersect(target_charges) +# relevant_fused_charges = fused_charges[fused_charges.isin( +# relevant_target_charges)] +# return { +# c: np.nonzero(relevant_fused_charges == c)[0] +# for c in relevant_target_charges +# } + +# left_charges, right_charges, partition = _find_best_partition(charges, flows) + +# unique_target_charges, inds = target_charges.unique(return_index=True) +# target_charges = target_charges[np.sort(inds)] + +# unique_left = left_charges.unique() +# unique_right = right_charges.unique() +# fused = unique_left + unique_right + +# #compute all unique charges that can add up to +# #target_charges +# left_inds, right_inds = [], [] +# for target_charge in target_charges: +# li, ri = np.divmod(np.nonzero(fused == target_charge)[0], len(unique_right)) +# left_inds.append(li) +# right_inds.append(ri) + +# #now compute the relevant unique left and right charges +# unique_left_charges = unique_left[np.unique(np.concatenate(left_inds))] +# unique_right_charges = unique_right[np.unique(np.concatenate(right_inds))] + +# #only keep those charges that are relevant +# relevant_left_charges = left_charges[left_charges.isin(unique_left_charges)] +# relevant_right_charges = right_charges[right_charges.isin( +# unique_right_charges)] + +# unique_right_charges, right_dims = relevant_right_charges.unique( +# return_counts=True) +# right_degeneracies = dict(zip(unique_right_charges, right_dims)) +# #generate a degeneracy vector which for each value r in relevant_right_charges +# #holds the corresponding number of non-zero elements `relevant_right_charges` +# #that can add up to `target_charges`. 
+# degeneracy_vector = np.empty(len(relevant_left_charges), dtype=np.int64) +# right_indices = {} + +# for n in range(len(unique_left_charges)): +# left_charge = unique_left_charges[n] +# total_charge = left_charge + unique_right_charges +# total_degeneracy = np.sum(right_dims[total_charge.isin(target_charges)]) +# tmp_relevant_right_charges = relevant_right_charges[ +# relevant_right_charges.isin((target_charges + left_charge * (-1)))] + +# for n in range(len(target_charges)): +# target_charge = target_charges[n] +# right_indices[(left_charge.get_item(0), +# target_charge.get_item(0))] = np.nonzero( +# tmp_relevant_right_charges == ( +# target_charge + left_charge * (-1)))[0] + +# degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy + +# start_positions = np.cumsum(degeneracy_vector) - degeneracy_vector +# blocks = {t: [] for t in target_charges} +# # iterator returns tuple of `int` for ChargeCollection objects +# # and `int` for Ba seCharge objects (both hashable) +# for left_charge in unique_left_charges: +# a = np.expand_dims(start_positions[relevant_left_charges == left_charge], 0) +# for target_charge in target_charges: +# ri = right_indices[(left_charge, target_charge)] +# if len(ri) != 0: +# b = np.expand_dims(ri, 1) +# tmp = a + b +# blocks[target_charge].append(np.reshape(tmp, np.prod(tmp.shape))) +# out = {} +# for target_charge in target_charges: +# out[target_charge] = np.concatenate(blocks[target_charge]) +# return out + + +def _compute_sparse_lookups(row_charges: Union[BaseCharge, ChargeCollection], + row_flows, column_charges, column_flows): + """ + Compute lookup tables for looking up how dense index positions map + to sparse index positions for the diagonal blocks a symmetric matrix. 
  Args:
    row_charges: Charges of the row legs of the matrix.
    row_flows: Flow directions, one per entry of `row_charges`.
    column_charges: Charges of the column legs of the matrix.
    column_flows: Flow directions, one per entry of `column_charges`.

  Returns:
    np.ndarray: Row lookup table (cumulative start offsets per dense
      row position).
    np.ndarray: Column lookup table (position of each dense column
      index within its charge block).
  """
  # dualize the column flows so rows and columns can be intersected directly
  column_flows = list(-np.asarray(column_flows))
  fused_column_charges = fuse_charges(column_charges, column_flows)
  fused_row_charges = fuse_charges(row_charges, row_flows)
  unique_column_charges, column_inverse = fused_column_charges.unique(
      return_inverse=True)
  unique_row_charges, row_inverse = fused_row_charges.unique(
      return_inverse=True)
  common_charges, comm_row, comm_col = unique_row_charges.intersect(
      unique_column_charges, return_indices=True)

  # stable argsort groups equal charge labels while preserving the original
  # dense ordering inside each group
  col_ind_sort = np.argsort(column_inverse, kind='stable')
  row_ind_sort = np.argsort(row_inverse, kind='stable')
  _, col_charge_degeneracies = compute_fused_charge_degeneracies(
      column_charges, column_flows)
  _, row_charge_degeneracies = compute_fused_charge_degeneracies(
      row_charges, row_flows)
  # labelsorted_indices = column_inverse[col_ind_sort]
  # tmp = np.nonzero(
  #     np.append(labelsorted_indices, unique_column_charges.charges.shape[0] + 1) -
  #     np.append(labelsorted_indices[0], labelsorted_indices))[0]
  #charge_degeneracies = tmp - np.append(0, tmp[0:-1])

  # start offset of each charge sector within the label-sorted ordering
  col_start_positions = np.cumsum(np.append(0, col_charge_degeneracies))
  row_start_positions = np.cumsum(np.append(0, row_charge_degeneracies))
  column_lookup = np.empty(len(fused_column_charges), dtype=np.uint32)
  row_lookup = np.zeros(len(fused_row_charges), dtype=np.uint32)
  for n in range(len(common_charges)):
    # for each dense column position of sector `n`: its index within the block
    column_lookup[col_ind_sort[col_start_positions[
        comm_col[n]]:col_start_positions[comm_col[n] + 1]]] = np.arange(
            col_charge_degeneracies[comm_col[n]])
    # for each dense row position of sector `n`: the width (column degeneracy)
    # of its block; the cumulative sum below turns widths into start offsets
    row_lookup[
        row_ind_sort[row_start_positions[comm_row[n]]:row_start_positions[
            comm_row[n] + 1]]] = col_charge_degeneracies[comm_col[n]]

  return np.append(0, np.cumsum(row_lookup[0:-1])), column_lookup


def _get_stride_arrays(dims):
  """Return, per leg, the dense offsets contributed by each index value.

  For row-major dims `[d0, d1, ...]` the stride of leg `n` is
  `prod(dims[n+1:])`; element `k` of the returned array for leg `n`
  is `k * stride[n]`.

  Args:
    dims: Sequence of ints, the dimension of each leg.

  Returns:
    List[np.ndarray]: One array of offsets per leg.
  """
  strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::]))))
  return [np.arange(dims[n]) * strides[n] for n in range(len(dims))]


# def combine_indices_reduced(
#     charges:
List[BaseCharge], +# flows: np.ndarray, +# target_charges: np.ndarray, +# return_locactions: Optional[bool] = False, +# strides: Optional[np.ndarray] = np.zeros(0)) -> (SymIndex, np.ndarray): +# """ +# Add quantum numbers arising from combining two or more indices into a +# single index, keeping only the quantum numbers that appear in 'kept_qnums'. +# Equilvalent to using "combine_indices" followed by "reduce", but is +# generally much more efficient. +# Args: +# indices (List[SymIndex]): list of SymIndex. +# arrows (np.ndarray): vector of bools describing index orientations. +# kept_qnums (np.ndarray): n-by-m array describing qauntum numbers of the +# qnums which should be kept with 'n' the number of symmetries. +# return_locs (bool, optional): if True then return the location of the kept +# values of the fused indices +# strides (np.ndarray, optional): index strides with which to compute the +# return_locs of the kept elements. Defaults to trivial strides (based on +# row major order) if ommitted. +# Returns: +# SymIndex: the fused index after reduction. +# np.ndarray: locations of the fused SymIndex qnums that were kept. 
+# """ + +# num_inds = len(charges) +# tensor_dims = [len(c) for c in charges] + +# if len(charges) == 1: +# # reduce single index +# if strides.size == 0: +# strides = np.array([1], dtype=np.uint32) +# return indices[0].dual(arrows[0]).reduce( +# kept_qnums, return_locs=return_locs, strides=strides[0]) + +# else: +# # find size-balanced partition of indices +# partition_loc = find_balanced_partition(tensor_dims) + +# # compute quantum numbers for each partition +# left_ind = combine_indices(indices[:partition_loc], arrows[:partition_loc]) +# right_ind = combine_indices(indices[partition_loc:], arrows[partition_loc:]) + +# # compute combined qnums +# comb_qnums = fuse_qnums(left_ind.unique_qnums, right_ind.unique_qnums, +# indices[0].syms) +# [unique_comb_qnums, comb_labels] = np.unique( +# comb_qnums, return_inverse=True, axis=1) +# num_unique = unique_comb_qnums.shape[1] + +# # intersect combined qnums and kept_qnums +# reduced_qnums, label_to_unique, label_to_kept = intersect2d( +# unique_comb_qnums, kept_qnums, axis=1, return_indices=True) +# map_to_kept = -np.ones(num_unique, dtype=np.int16) +# for n in range(len(label_to_unique)): +# map_to_kept[label_to_unique[n]] = n +# new_comb_labels = map_to_kept[comb_labels].reshape( +# [left_ind.num_unique, right_ind.num_unique]) +# if return_locs: +# if (strides.size != 0): +# # computed locations based on non-trivial strides +# row_pos = combine_index_strides(tensor_dims[:partition_loc], +# strides[:partition_loc]) +# col_pos = combine_index_strides(tensor_dims[partition_loc:], +# strides[partition_loc:]) + +# # reduce combined qnums to include only those in kept_qnums +# reduced_rows = [0] * left_ind.num_unique +# row_locs = [0] * left_ind.num_unique +# for n in range(left_ind.num_unique): +# temp_label = new_comb_labels[n, right_ind.ind_labels] +# temp_keep = temp_label >= 0 +# reduced_rows[n] = temp_label[temp_keep] +# row_locs[n] = col_pos[temp_keep] + +# reduced_labels = np.concatenate( +# [reduced_rows[n] for n 
in left_ind.ind_labels]) +# reduced_locs = np.concatenate([ +# row_pos[n] + row_locs[left_ind.ind_labels[n]] +# for n in range(left_ind.dim) +# ]) + +# return SymIndex(reduced_qnums, reduced_labels, +# indices[0].syms), reduced_locs + +# else: # trivial strides +# # reduce combined qnums to include only those in kept_qnums +# reduced_rows = [0] * left_ind.num_unique +# row_locs = [0] * left_ind.num_unique +# for n in range(left_ind.num_unique): +# temp_label = new_comb_labels[n, right_ind.ind_labels] +# temp_keep = temp_label >= 0 +# reduced_rows[n] = temp_label[temp_keep] +# row_locs[n] = np.where(temp_keep)[0] + +# reduced_labels = np.concatenate( +# [reduced_rows[n] for n in left_ind.ind_labels]) +# reduced_locs = np.concatenate([ +# n * right_ind.dim + row_locs[left_ind.ind_labels[n]] +# for n in range(left_ind.dim) +# ]) + +# return SymIndex(reduced_qnums, reduced_labels, +# indices[0].syms), reduced_locs + +# else: +# # reduce combined qnums to include only those in kept_qnums +# reduced_rows = [0] * left_ind.num_unique +# for n in range(left_ind.num_unique): +# temp_label = new_comb_labels[n, right_ind.ind_labels] +# reduced_rows[n] = temp_label[temp_label >= 0] + +# reduced_labels = np.concatenate( +# [reduced_rows[n] for n in left_ind.ind_labels]) + +# return SymIndex(reduced_qnums, reduced_labels, indices[0].syms) + + +def reduce_to_target_charges( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[int, bool]], + target_charges: Union[BaseCharge, ChargeCollection], + strides: Optional[np.ndarray] = None, + return_positions: Optional[bool] = False) -> np.ndarray: + """ + Find the dense locations of elements (i.e. the index-values within the DENSE tensor) + in the vector of `fused_charges` resulting from fusing all elements of `charges` + that have a value of `target_charge`. 
+ For example, given + ``` + charges = [[-2,0,1,0,0],[-1,0,2,1]] + target_charge = 0 + fused_charges = fuse_charges(charges,[1,1]) + print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] + ``` + we want to find the index-positions of charges + that fuse to `target_charge=0`, i.e. where `fused_charges==0`, + within the dense array. As one additional wrinkle, `charges` + is a subset of the permuted charges of a tensor with rank R > len(charges), + and `stride_arrays` are their corresponding range of strides, i.e. + + ``` + R=5 + D = [2,3,4,5,6] + tensor_flows = np.random.randint(-1,2,R) + tensor_charges = [np.random.randing(-5,5,D[n]) for n in range(R)] + order = np.arange(R) + np.random.shuffle(order) + tensor_strides = [360, 120, 30, 6, 1] + + charges = [tensor_charges[order[n]] for n in range(3)] + flows = [tensor_flows[order[n]] for n in range(len(3))] + strides = [tensor_stride[order[n]] for n in range(3)] + _ = _find_transposed_dense_positions(charges, flows, 0, strides) + + ``` + `_find_transposed_dense_blocks` returns an np.ndarray containing the + index-positions of these elements calculated using `stride_arrays`. + The result only makes sense in conjuction with the complementary + data computed from the complementary + elements in`tensor_charges`, + `tensor_strides` and `tensor_flows`. + This routine is mainly used in `_find_diagonal_dense_blocks`. + + Args: + charges: A list of BaseCharge or ChargeCollection. + flows: The flow directions of the `charges`. + target_charge: The target charge. + strides: The strides for the `charges` subset. + if `None`, natural stride ordering is assumed. + + Returns: + np.ndarray: The index-positions within the dense data array + of the elements fusing to `target_charge`. 
+ """ + + _check_flows(flows) + if len(charges) == 1: + fused_charges = charges[0] * flows[0] + unique, inverse = fused_charges.unique(return_inverse=True) + common, label_to_unique, label_to_target = unique.intersect( + target_charges, return_indices=True) + inds = np.nonzero(np.isin(inverse, label_to_unique))[0] + if strides is not None: + permuted_inds = strides[0] * np.arange(len(charges[0])) + if return_positions: + return fused_charges[permuted_inds[inds]], inds + return fused_charges[permuted_inds[inds]] + + if return_positions: + return fused_charges[inds], inds + return fused_charges[inds] + + partition = _find_best_partition([len(c) for c in charges]) + left_charges = fuse_charges(charges[:partition], flows[:partition]) + right_charges = fuse_charges(charges[partition:], flows[partition:]) + + # unique_target_charges, inds = target_charges.unique(return_index=True) + # target_charges = target_charges[np.sort(inds)] + unique_left, left_inverse = left_charges.unique(return_inverse=True) + unique_right, right_inverse = right_charges.unique(return_inverse=True) + + fused = unique_left + unique_right + unique_fused, unique_fused_labels = fused.unique(return_inverse=True) + + relevant_charges, relevant_labels, _ = unique_fused.intersect( + target_charges, return_indices=True) + + tmp = np.full(len(unique_fused), fill_value=-1, dtype=np.int16) + tmp[relevant_labels] = np.arange(len(relevant_labels), dtype=np.int16) + lookup_target = tmp[unique_fused_labels].reshape( + [len(unique_left), len(unique_right)]) + + if return_positions: + if strides is not None: + stride_arrays = [ + np.arange(len(charges[n])) * strides[n] for n in range(len(charges)) + ] + permuted_left_inds = fuse_ndarrays(stride_arrays[0:partition]) + permuted_right_inds = fuse_ndarrays(stride_arrays[partition:]) + + row_locations = [None] * len(unique_left) + final_relevant_labels = [None] * len(unique_left) + for n in range(len(unique_left)): + labels = lookup_target[n, right_inverse] + lookup = 
labels >= 0 + row_locations[n] = permuted_right_inds[lookup] + final_relevant_labels[n] = labels[lookup] + + charge_labels = np.concatenate( + [final_relevant_labels[n] for n in left_inverse]) + tmp_inds = [ + permuted_left_inds[n] + row_locations[left_inverse[n]] + for n in range(len(left_charges)) + ] + try: + inds = np.concatenate(tmp_inds) + except ValueError: + inds = np.asarray(tmp_inds) + + else: + row_locations = [None] * len(unique_left) + final_relevant_labels = [None] * len(unique_left) + for n in range(len(unique_left)): + labels = lookup_target[n, right_inverse] + lookup = labels >= 0 + row_locations[n] = np.nonzero(lookup)[0] + final_relevant_labels[n] = labels[lookup] + charge_labels = np.concatenate( + [final_relevant_labels[n] for n in left_inverse]) + + inds = np.concatenate([ + n * len(right_charges) + row_locations[left_inverse[n]] + for n in range(len(left_charges)) + ]) + return relevant_charges[charge_labels], inds + + else: + final_relevant_labels = [None] * len(unique_left) + for n in range(len(unique_left)): + labels = lookup_target[n, right_inverse] + lookup = labels >= 0 + final_relevant_labels[n] = labels[lookup] + charge_labels = np.concatenate( + [final_relevant_labels[n] for n in left_inverse]) + return relevant_charges[charge_labels] + + +def find_sparse_positions_new( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[int, bool]], + target_charges: Union[BaseCharge, ChargeCollection], + strides: Optional[np.ndarray] = None, + store_dual: Optional[bool] = False) -> np.ndarray: + """ + Find the dense locations of elements (i.e. the index-values within the DENSE tensor) + in the vector of `fused_charges` resulting from fusing all elements of `charges` + that have a value of `target_charge`. 
+ For example, given + ``` + charges = [[-2,0,1,0,0],[-1,0,2,1]] + target_charge = 0 + fused_charges = fuse_charges(charges,[1,1]) + print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] + ``` + we want to find the index-positions of charges + that fuse to `target_charge=0`, i.e. where `fused_charges==0`, + within the dense array. As one additional wrinkle, `charges` + is a subset of the permuted charges of a tensor with rank R > len(charges), + and `stride_arrays` are their corresponding range of strides, i.e. + + ``` + R=5 + D = [2,3,4,5,6] + tensor_flows = np.random.randint(-1,2,R) + tensor_charges = [np.random.randing(-5,5,D[n]) for n in range(R)] + order = np.arange(R) + np.random.shuffle(order) + tensor_strides = [360, 120, 30, 6, 1] + + charges = [tensor_charges[order[n]] for n in range(3)] + flows = [tensor_flows[order[n]] for n in range(len(3))] + strides = [tensor_stride[order[n]] for n in range(3)] + _ = _find_transposed_dense_positions(charges, flows, 0, strides) + + ``` + `_find_transposed_dense_blocks` returns an np.ndarray containing the + index-positions of these elements calculated using `stride_arrays`. + The result only makes sense in conjuction with the complementary + data computed from the complementary + elements in`tensor_charges`, + `tensor_strides` and `tensor_flows`. + This routine is mainly used in `_find_diagonal_dense_blocks`. + + Args: + charges: A list of BaseCharge or ChargeCollection. + flows: The flow directions of the `charges`. + target_charge: The target charge. + strides: The strides for the `charges` subset. + if `None`, natural stride ordering is assumed. + + Returns: + np.ndarray: The index-positions within the dense data array + of the elements fusing to `target_charge`. 
+ """ + + _check_flows(flows) + if len(charges) == 1: + fused_charges = charges[0] * flows[0] + unique, inverse = fused_charges.unique(return_inverse=True) + common, label_to_unique, label_to_target = unique.intersect( + target_charges, return_indices=True) + inds = np.nonzero(np.isin(inverse, label_to_unique))[0] + if strides is not None: + permuted_inds = strides[0] * np.arange(len(charges[0])) + return fused_charges[permuted_inds[inds]], inds + + return fused_charges[inds], inds + + partition = _find_best_partition([len(c) for c in charges]) + left_charges = fuse_charges(charges[:partition], flows[:partition]) + right_charges = fuse_charges(charges[partition:], flows[partition:]) + + # unique_target_charges, inds = target_charges.unique(return_index=True) + # target_charges = target_charges[np.sort(inds)] + unique_left, left_inverse = left_charges.unique(return_inverse=True) + unique_right, right_inverse, right_degens = right_charges.unique( + return_inverse=True, return_counts=True) + + fused = unique_left + unique_right + + unique_fused, labels_fused = fused.unique(return_inverse=True) + + relevant_charges, label_to_unique_fused, label_to_target = unique_fused.intersect( + target_charges, return_indices=True) + + relevant_fused_positions = np.nonzero( + np.isin(labels_fused, label_to_unique_fused))[0] + relevant_left_labels, relevant_right_labels = np.divmod( + relevant_fused_positions, len(unique_right)) + rel_l_labels = np.unique(relevant_left_labels) + total_degen = { + t: np.sum(right_degens[relevant_right_labels[relevant_left_labels == t]]) + for t in rel_l_labels + } + + relevant_left_inverse = left_inverse[np.isin(left_inverse, rel_l_labels)] + degeneracy_vector = np.empty(len(relevant_left_inverse), dtype=np.uint32) + row_locations = [None] * len(unique_left) + final_relevant_labels = [None] * len(unique_left) + for n in range(len(relevant_left_labels)): + degeneracy_vector[relevant_left_inverse == + relevant_left_labels[n]] = total_degen[ + 
relevant_left_labels[n]] + start_positions = np.cumsum(degeneracy_vector) - degeneracy_vector + tmp = np.full(len(unique_fused), fill_value=-1, dtype=np.int16) + tmp[label_to_unique_fused] = np.arange( + len(label_to_unique_fused), dtype=np.int16) + lookup_target = tmp[labels_fused].reshape( + [len(unique_left), len(unique_right)]) + + final_relevant_labels = [None] * len(unique_left) + for n in range(len(rel_l_labels)): + labels = lookup_target[rel_l_labels[n], right_inverse] + lookup = labels >= 0 + final_relevant_labels[rel_l_labels[n]] = labels[lookup] + charge_labels = np.concatenate( + [final_relevant_labels[n] for n in relevant_left_inverse]) + inds = np.concatenate([ + start_positions[n] + np.arange( + total_degen[relevant_left_inverse[n]], dtype=np.uint32) + for n in range(len(relevant_left_inverse)) + ]) + + return relevant_charges[charge_labels], inds diff --git a/tensornetwork/block_tensor/block_tensor_test.py b/tensornetwork/block_tensor/block_tensor_test.py index 9d889f8de..015c2c9b6 100644 --- a/tensornetwork/block_tensor/block_tensor_test.py +++ b/tensornetwork/block_tensor/block_tensor_test.py @@ -7,143 +7,136 @@ np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128] +# def test_test_num_nonzero_consistency(): +# B = 4 +# D = 100 +# rank = 4 + +# qs = [[ +# np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) +# for _ in range(2) +# ] +# for _ in range(rank)] +# charges1 = [U1Charge(qs[n]) for n in range(rank)] +# charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] +# charges3 = [ +# ChargeCollection([U1Charge(qs[n][m]) +# for m in range(2)]) +# for n in range(rank) +# ] +# flows = [1, 1, 1, -1] +# n1 = compute_num_nonzero(charges1, flows) +# n2 = compute_num_nonzero(charges3, flows) +# n3 = compute_num_nonzero(charges3, flows) +# assert n1 == n2 + +# def test_find_sparse_positions_consistency(): +# B = 4 +# D = 100 +# rank = 4 + +# qs = [[ +# np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) +# 
for _ in range(2) +# ] +# for _ in range(rank)] +# charges1 = [U1Charge(qs[n]) for n in range(rank)] +# charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] +# charges3 = [ +# ChargeCollection([U1Charge(qs[n][m]) +# for m in range(2)]) +# for n in range(rank) +# ] -def test_test_num_nonzero_consistency(): - B = 4 - D = 100 - rank = 4 - - qs = [[ - np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - for _ in range(2) - ] - for _ in range(rank)] - charges1 = [U1Charge(qs[n]) for n in range(rank)] - charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] - charges3 = [ - ChargeCollection([U1Charge(qs[n][m]) - for m in range(2)]) - for n in range(rank) - ] - flows = [1, 1, 1, -1] - n1 = compute_num_nonzero(charges1, flows) - n2 = compute_num_nonzero(charges3, flows) - n3 = compute_num_nonzero(charges3, flows) - assert n1 == n2 - - -def test_find_sparse_positions_consistency(): - B = 4 - D = 100 - rank = 4 - - qs = [[ - np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - for _ in range(2) - ] - for _ in range(rank)] - charges1 = [U1Charge(qs[n]) for n in range(rank)] - charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] - charges3 = [ - ChargeCollection([U1Charge(qs[n][m]) - for m in range(2)]) - for n in range(rank) - ] - - data1 = find_sparse_positions( - charges=charges1, - flows=[1, 1, 1, 1], - target_charges=charges1[0].zero_charge) - data2 = find_sparse_positions( - charges=charges2, - flows=[1, 1, 1, 1], - target_charges=charges2[0].zero_charge) - data3 = find_sparse_positions( - charges=charges3, - flows=[1, 1, 1, 1], - target_charges=charges3[0].zero_charge) - - nz1 = np.asarray(list(data1.values())[0]) - nz2 = np.asarray(list(data2.values())[0]) - nz3 = np.asarray(list(data3.values())[0]) - assert np.all(nz1 == nz2) - assert np.all(nz1 == nz3) - - -def test_find_dense_positions_consistency(): - B = 5 - D = 20 - rank = 4 - - qs = [[ - np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - for _ in 
range(2) - ] - for _ in range(rank)] - charges1 = [U1Charge(qs[n]) for n in range(rank)] - charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] - charges3 = [ - ChargeCollection([U1Charge(qs[n][m]) - for m in range(2)]) - for n in range(rank) - ] - flows = [1, 1, 1, -1] - data1 = find_dense_positions( - charges=charges1, flows=flows, target_charge=charges1[0].zero_charge) - data2 = find_dense_positions( - charges=charges2, flows=flows, target_charge=charges2[0].zero_charge) - data3 = find_dense_positions( - charges=charges3, flows=flows, target_charge=charges3[0].zero_charge) - - nz = compute_num_nonzero(charges1, flows) - assert nz == len(data1) - assert len(data1) == len(data2) - assert len(data1) == len(data3) - - -def test_find_diagonal_sparse_blocks_consistency(): - B = 5 - D = 20 - rank = 4 - - qs = [[ - np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - for _ in range(2) - ] - for _ in range(rank)] - charges1 = [U1Charge(qs[n]) for n in range(rank)] - charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] - charges3 = [ - ChargeCollection([U1Charge(qs[n][m]) - for m in range(2)]) - for n in range(rank) - ] +# data1 = find_sparse_positions( +# charges=charges1, +# flows=[1, 1, 1, 1], +# target_charges=charges1[0].zero_charge) +# data2 = find_sparse_positions( +# charges=charges2, +# flows=[1, 1, 1, 1], +# target_charges=charges2[0].zero_charge) +# data3 = find_sparse_positions( +# charges=charges3, +# flows=[1, 1, 1, 1], +# target_charges=charges3[0].zero_charge) + +# nz1 = np.asarray(list(data1.values())[0]) +# nz2 = np.asarray(list(data2.values())[0]) +# nz3 = np.asarray(list(data3.values())[0]) +# assert np.all(nz1 == nz2) +# assert np.all(nz1 == nz3) + +# def test_find_dense_positions_consistency(): +# B = 5 +# D = 20 +# rank = 4 + +# qs = [[ +# np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) +# for _ in range(2) +# ] +# for _ in range(rank)] +# charges1 = [U1Charge(qs[n]) for n in range(rank)] +# charges2 = 
[ChargeCollection([charges1[n]]) for n in range(rank)] +# charges3 = [ +# ChargeCollection([U1Charge(qs[n][m]) +# for m in range(2)]) +# for n in range(rank) +# ] +# flows = [1, 1, 1, -1] +# data1 = find_dense_positions( +# charges=charges1, flows=flows, target_charge=charges1[0].zero_charge) +# data2 = find_dense_positions( +# charges=charges2, flows=flows, target_charge=charges2[0].zero_charge) +# data3 = find_dense_positions( +# charges=charges3, flows=flows, target_charge=charges3[0].zero_charge) + +# nz = compute_num_nonzero(charges1, flows) +# assert nz == len(data1) +# assert len(data1) == len(data2) +# assert len(data1) == len(data3) + +# def test_find_diagonal_sparse_blocks_consistency(): +# B = 5 +# D = 20 +# rank = 4 + +# qs = [[ +# np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) +# for _ in range(2) +# ] +# for _ in range(rank)] +# charges1 = [U1Charge(qs[n]) for n in range(rank)] +# charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] +# charges3 = [ +# ChargeCollection([U1Charge(qs[n][m]) +# for m in range(2)]) +# for n in range(rank) +# ] - _, _, start_positions1, _, _ = _find_diagonal_sparse_blocks( - data=[], - row_charges=[charges1[0], charges1[1]], - column_charges=[charges1[2], charges1[3]], - row_flows=[1, 1], - column_flows=[1, -1], - return_data=False) - - _, _, start_positions2, _, _ = _find_diagonal_sparse_blocks( - data=[], - row_charges=[charges2[0], charges2[1]], - column_charges=[charges2[2], charges2[3]], - row_flows=[1, 1], - column_flows=[1, -1], - return_data=False) - - _, _, start_positions3, _, _ = _find_diagonal_sparse_blocks( - data=[], - row_charges=[charges3[0], charges3[1]], - column_charges=[charges3[2], charges3[3]], - row_flows=[1, 1], - column_flows=[1, -1], - return_data=False) - assert np.all(start_positions1 == start_positions2) - assert np.all(start_positions1 == start_positions3) +# _, _, start_positions1, _, _ = _find_diagonal_sparse_blocks( +# row_charges=[charges1[0], charges1[1]], +# 
column_charges=[charges1[2], charges1[3]], +# row_flows=[1, 1], +# column_flows=[1, -1], +# return_data=False) + +# _, _, start_positions2, _, _ = _find_diagonal_sparse_blocks( +# row_charges=[charges2[0], charges2[1]], +# column_charges=[charges2[2], charges2[3]], +# row_flows=[1, 1], +# column_flows=[1, -1], +# return_data=False) + +# _, _, start_positions3, _, _ = _find_diagonal_sparse_blocks( +# row_charges=[charges3[0], charges3[1]], +# column_charges=[charges3[2], charges3[3]], +# row_flows=[1, 1], +# column_flows=[1, -1], +# return_data=False) +# assert np.all(start_positions1 == start_positions2) +# assert np.all(start_positions1 == start_positions3) @pytest.mark.parametrize("dtype", np_dtypes) @@ -202,7 +195,7 @@ def test_find_dense_positions(): dense_positions = find_dense_positions( [U1Charge(left_charges), U1Charge(right_charges)], [1, 1], U1Charge(np.asarray([target_charge]))) - np.testing.assert_allclose(dense_positions, + np.testing.assert_allclose(dense_positions[0], np.nonzero(fused_charges == target_charge)[0]) @@ -229,7 +222,7 @@ def test_find_dense_positions_2(): i23 = indices[2] * indices[3] positions = find_dense_positions([i01.charges, i23.charges], [1, 1], U1Charge(np.asarray([0]))) - assert len(positions) == n1 + assert len(positions[0]) == n1 def test_find_sparse_positions(): @@ -330,8 +323,8 @@ def test_find_sparse_positions_3(): # np.testing.assert_allclose(A.data, B.flat) -def test_find_diagonal_dense_blocks(): - R = 2 +@pytest.mark.parametrize("R", [1, 2]) +def test_find_diagonal_dense_blocks(R): rs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] cs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] charges = rs + cs @@ -353,11 +346,34 @@ def test_find_diagonal_dense_blocks(): assert np.all(blocks[comm.charges[n]] == blocks_[n][0]) -def test_find_diagonal_dense_blocks_transposed(): - R = 2 +# #@pytest.mark.parametrize("dtype", np_dtypes) +# def test_find_diagonal_dense_blocks_2(): +# R = 1 +# rs = 
[U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] +# cs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] +# charges = rs + cs + +# left_fused = fuse_charges(charges[0:R], [1] * R) +# right_fused = fuse_charges(charges[R:], [1] * R) +# left_unique = left_fused.unique() +# right_unique = right_fused.unique() +# zero = left_unique.zero_charge +# blocks = {} +# rdim = len(right_fused) +# for lu in left_unique: +# linds = np.nonzero(left_fused == lu)[0] +# rinds = np.nonzero(right_fused == lu * (-1))[0] +# if (len(linds) > 0) and (len(rinds) > 0): +# blocks[lu] = fuse_ndarrays([linds * rdim, rinds]) +# comm, blocks_ = _find_diagonal_dense_blocks(rs, cs, [1] * R, [1] * R) +# for n in range(len(comm)): +# assert np.all(blocks[comm.charges[n]] == blocks_[n][0]) + + +@pytest.mark.parametrize("R", [1, 2]) +def test_find_diagonal_dense_blocks_transposed(R): order = np.arange(2 * R) np.random.shuffle(order) - R = 2 rs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] cs = [U1Charge(np.random.randint(-4, 4, 40)) for _ in range(R)] charges = rs + cs diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 385094f58..9be4be39b 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -24,207 +24,54 @@ from typing import List, Union, Any, Optional, Tuple, Text, Iterable, Type -def _copy_charges(charges): - cs = [] - for n in range(len(charges)): - c = type(charges[n]).__new__(type( - charges[n])) #create a new charge object of type type(other) - c.__init__(charges[n].charges.copy()) - cs.append(c) - return cs - - class BaseCharge: - """ - Base class for fundamental charges (i.e. for symmetries that - are not products of smaller groups) - """ def __init__(self, - charges: Optional[Union[List[np.ndarray], np.ndarray]] = None, - shifts: Optional[Union[List[int], np.ndarray]] = None) -> None: - """ - Initialize a BaseCharge object. 
- Args: - charges: Optional `np.ndarray` or list of `np.ndarray` of type `int` holdingn - the physical charges. If a list of `np,ndarray` is passed, the arrays are merged - into a single `np.ndarray` by `np.left_shift`-ing and adding up charges. The amount - of left-shift per `np,ndarray` is determined by its `dtype`. E.g. an `np,ndarray` of - `dtype=np.int16` is shifted by 16 bits. Charges are shifted and added moving from - small to large indices in `charges`. `BaseCharge` can hold at most 8 individual - charges of `dtype=np.int8` on 64-bit architectures. - shifts: An optional list of shifts, used for initializing a `BaseCharge` object from - an existing `BaseCharge` object. - """ - if charges is not None: - if isinstance(charges, np.ndarray): - charges = [charges] - self._itemsizes = [c.dtype.itemsize for c in charges] - if np.sum(self._itemsizes) > 8: - raise TypeError("number of bits required to store all charges " - "in a single int is larger than 64") - - if len(charges) > 1: - if shifts is not None: - raise ValueError("If `shifts` is passed, only a single charge array " - "can be passed. 
Got len(charges) = {}".format( - len(charges))) - if shifts is None: - dtype = np.int8 - if np.sum(self._itemsizes) > 1: - dtype = np.int16 - if np.sum(self._itemsizes) > 2: - dtype = np.int32 - if np.sum(self._itemsizes) > 4: - dtype = np.int64 - #multiply by eight to get number of bits - self.shifts = 8 * np.flip( - np.append(0, np.cumsum(np.flip( - self._itemsizes[1::])))).astype(dtype) - dtype_charges = [c.astype(dtype) for c in charges] - self.charges = np.sum([ - np.left_shift(dtype_charges[n], self.shifts[n]) - for n in range(len(dtype_charges)) - ], - axis=0).astype(dtype) - else: - if np.max(shifts) >= charges[0].dtype.itemsize * 8: - raise TypeError("shifts {} are incompatible with dtype {}".format( - shifts, charges[0].dtype)) - self.shifts = np.asarray(shifts) - self.charges = charges[0] + charges: np.ndarray, + charge_labels: Optional[np.ndarray] = None) -> None: + if charges.dtype is not np.int16: + raise TypeError("`charges` have to be of dtype `np.int16`") + if charge_labels.dtype is not np.int16: + raise TypeError("`charge_labels` have to be of dtype `np.int16`") + + if charge_labels is None: + self.unique_charges, charge_labels = np.unique( + charges, return_inverse=True) + self.charge_labels = charge_labels.astype(np.uint16) + else: - self.charges = np.asarray([]) - self.shifts = np.asarray([]) + self.unique_charges = charges + self.charge_labels = charge_labels.astype(np.uint16) def __add__(self, other: "BaseCharge") -> "BaseCharge": - """ - Fuse the charges of two `BaseCharge` objects and return a new - `BaseCharge` holding the result. - Args: - other: A `BaseChare` object. - Returns: - BaseCharge: The result of fusing `self` with `other`. - """ - raise NotImplementedError("`__add__` is not implemented for `BaseCharge`") - - def __sub__(self, other: "BaseCharge") -> "BaseCharge": - """ - Subtract the charges of `other` from `self. - Returns a `BaseCharge` holding the result. - Args: - other: A `BaseChare` object. 
- Returns: - BaseCharge: The result subtracting `other` from `self`. - """ - - raise NotImplementedError("`__sub__` is not implemented for `BaseCharge`") - - def __matmul__(self, other: "BaseCharge") -> "BaseCharge": - """ - Build the direct product of two charges and return - it in a new `BaseCharge` object. - Args: - other: A `BaseCharge` object. - Returns: - BaseCharge: The direct product of `self` and `other`. - """ - raise NotImplementedError( - "`__matmul__` is not implemented for `BaseCharge`") - - def get_item(self, n: Union[np.ndarray, int]) -> np.ndarray: - """ - Return the charge-element at position `n`. - Args: - n: An integer or `np.ndarray`. - Returns: - np.ndarray: The charges at `n`. - """ - return self.charges[n] - - def get_item_ndarray(self, n: Union[np.ndarray, int]) -> np.ndarray: - """ - Return the charge-element at position `n`. - Needed to provide a common interface with `ChargeCollection`. - Args: - n: An integer or `np.ndarray`. - Returns: - np.ndarray: The charges at `n`. - - """ - - return self.get_item(n) - - def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": - """ - Return the charge-element at position `n`, wrapped into a `BaseCharge` - object. - Args: - n: An integer or `np.ndarray`. - Returns: - BaseCharge: The charges at `n`. 
- """ - - if isinstance(n, (np.integer, int)): - n = np.asarray([n]) - charges = self.charges[n] + # fuse the unique charges from each index, then compute new unique charges + comb_qnums = self.fuse(self.unique_charges, other.unique_charges) + [unique_charges, new_labels] = np.unique(comb_qnums, return_inverse=True) + new_labels = new_labels.reshape( + len(self.unique_charges), len(other.unique_charges)).astype(np.uint16) + + # find new labels using broadcasting (could use np.tile but less efficient) + charge_labels = new_labels[( + self.charge_labels[:, None] + np.zeros([1, len(other)], dtype=np.uint16) + ).ravel(), (other.charge_labels[None, :] + + np.zeros([len(self), 1], dtype=np.uint16)).ravel()] obj = self.__new__(type(self)) - obj.__init__(charges=[charges], shifts=self.shifts) + obj.__init__(unique_charges, charge_labels) return obj - @property - def num_symmetries(self): - """ - The number of individual symmetries stored in this object. - """ - return len(self.shifts) - - def __len__(self) -> int: - return np.prod(self.charges.shape) - - def __repr__(self): - return str(type(self)) + '\nshifts: ' + self.shifts.__repr__( - ) + '\n' + 'charges: ' + self.charges.__repr__() + '\n' + def __len__(self): + return len(self.charge_labels) @property - def dual_charges(self) -> np.ndarray: - raise NotImplementedError( - "`dual_charges` is not implemented for `BaseCharge`") - - def __mul__(self, number: Union[bool, int]) -> "BaseCharge": - """ - Multiply `self` with `number` from the left. - `number` can take values in `1,-1, 0, True, False`. - This multiplication is used to transform between charges and dual-charges. - Args: - number: Can can take values in `1,-1, 0, True, False`. - If `1,True`, return the original object - If `-1, 0, False` return a new `BaseCharge` holding the - dual-charges. 
- Returns: - BaseCharge: The result of `self * number` - """ - raise NotImplementedError("`__mul__` is not implemented for `BaseCharge`") - - def __rmul__(self, number: Union[bool, int]) -> "BaseCharge": - """ - Multiply `self` with `number` from the right. - `number` can take values in `1,-1, 0, True, False`. - This multiplication is used to transform between charges and dual-charges. - Args: - number: Can can take values in `1,-1, 0, True, False`. - If `1,True`, return the original object - If `-1, 0, False` return a new `BaseCharge` holding the - dual-charges. - Returns: - BaseCharge: The result of `number * self`. - """ - - raise NotImplementedError("`__rmul__` is not implemented for `BaseCharge`") + def charges(self) -> np.ndarray: + return self.unique_charges[self.charge_labels] @property def dtype(self): - return self.charges.dtype + return self.unique_charges.dtype + + def __repr__(self): + return str(type(self)) + '\n' + 'charges: ' + self.charges.__repr__() + '\n' def unique(self, return_index=False, @@ -252,19 +99,26 @@ def unique(self, np.ndarray: The number of times each of the unique values comes up in the original array. Only provided if `return_counts` is True. 
""" - result = np.unique( - self.charges, - return_index=return_index, - return_inverse=return_inverse, - return_counts=return_counts) - if not (return_index or return_inverse or return_counts): - out = self.__new__(type(self)) - out.__init__([result], self.shifts) - return out - else: - out = self.__new__(type(self)) - out.__init__([result[0]], self.shifts) - return tuple([out] + [result[n] for n in range(1, len(result))]) + obj = self.__new__(type(self)) + obj.__init__( + self.unique_charges, + charge_labels=np.arange(len(self.unique_charges), dtype=np.uint16)) + + out = [obj] + if return_index: + _, index = np.unique(self.charge_labels, return_index=True) + out.append(index) + if return_inverse: + out.append(self.charge_labels) + if return_counts: + _, cnts = np.unique(self.charge_labels, return_counts=True) + out.append(cnts) + if len(out) == 1: + return out[0] + if len(out) == 2: + return out[0], out[1] + if len(out) == 3: + return out[0], out[1], out[2] def isin(self, targets: Union[int, Iterable, "BaseCharge"]) -> np.ndarray: """ @@ -276,56 +130,22 @@ def isin(self, targets: Union[int, Iterable, "BaseCharge"]) -> np.ndarray: np.ndarray: An array of `bool` type holding the result of the comparison. """ if isinstance(targets, type(self)): - if not np.all(self.shifts == targets.shifts): - raise ValueError( - "Cannot compare charges with different shifts {} and {}".format( - self.shifts, targets.shifts)) - - targets = targets.charges + targets = targets.unique_charges targets = np.asarray(targets) - return np.isin(self.charges, targets) + common, label_to_unique, label_to_targets = np.intersect1d( + self.unique_charges, targets, return_indices=True) + if len(common) == 0: + return np.full(len(self.charge_labels), fill_value=False, dtype=np.bool) + return np.isin(self.charge_labels, label_to_unique) def __contains__(self, target: Union[int, Iterable, "BaseCharge"]) -> bool: """ - Test each element of `BaseCharge` if it is in `targets`. 
Returns - an `np.ndarray` of `dtype=bool`. - Args: - targets: The test elements - Returns: - np.ndarray: An array of `bool` type holding the result of the comparison. """ if isinstance(target, type(self)): - if not np.all(self.shifts == target.shifts): - raise ValueError( - "Cannot compare charges with different shifts {} and {}".format( - self.shifts, tparget.shifts)) - target = target.charges + target = target.unique_charges target = np.asarray(target) - return target in self.charges - - def equals(self, target_charges: Iterable) -> np.ndarray: - """ - Find indices where `BaseCharge` equals `target_charges`. - `target_charges` has to be an array of the same lenghts - as `BaseCharge.shifts`, containing one integer per symmetry of - `BaseCharge` - Args: - target_charges: np.ndarray of integers encoding charges. - Returns: - np.ndarray: Boolean array with `True` where `BaseCharge` equals - `target_charges` and `False` everywhere else. - """ - if len(target_charges) != len(self.shifts): - raise ValueError("len(target_charges) = {} is different " - "from len(shifts) = {}".format( - len(target_charges), len(self.shifts))) - _target_charges = np.asarray(target_charges).astype(self.charges.dtype) - target = np.sum([ - np.left_shift(_target_charges[n], self.shifts[n]) - for n in range(len(self.shifts)) - ]) - return self.charges == target + return target in self.unique_charges def __eq__(self, target: Union[int, Iterable]) -> np.ndarray: """ @@ -339,39 +159,23 @@ def __eq__(self, target: Union[int, Iterable]) -> np.ndarray: `target` and `False` everywhere else. """ if isinstance(target, type(self)): - return self.charges == target.charges - return self.charges == np.asarray(target) - - def concatenate(self, others: Union["BaseCharge", List["BaseCharge"]]): - """ - Concatenate `self.charges` with `others.charges`. - Args: - others: List of `BaseCharge` objects. - Returns: - BaseCharge: The concatenated charges. 
- """ - if isinstance(others, type(self)): - others = [others] - for o in others: - if not np.all(self.shifts == o.shifts): - raise ValueError( - "Cannot fuse charges with different shifts {} and {}".format( - self.shifts, o.shifts)) - - charges = np.concatenate( - [self.charges] + [o.charges for o in others], axis=0) - out = self.__new__(type(self)) - out.__init__([charges], self.shifts) - return out + target = target.charges + elif isinstance(target, (np.integer, int)): + target = np.asarray([target]) + target = np.asarray(target) + tmp = np.full(len(target), fill_value=-1, dtype=np.int16) - @property - def dtype(self): - return self.charges.dtype + _, label_to_unique, label_to_target = np.intersect1d( + self.unique_charges, target, return_indices=True) + tmp[label_to_target] = label_to_unique + return np.squeeze( + np.expand_dims(self.charge_labels, 1) == np.expand_dims(tmp, 0)) @property def zero_charge(self): obj = self.__new__(type(self)) - obj.__init__(charges=[np.asarray([self.dtype.type(0)])], shifts=self.shifts) + obj.__init__( + np.asarray([self.dtype.type(0)]), np.asarray([0], dtype=np.uint16)) return obj def __iter__(self): @@ -380,10 +184,6 @@ def __iter__(self): def intersect(self, other: "BaseCharge", return_indices: Optional[bool] = False) -> "BaseCharge": - if not np.all(self.shifts == other.shifts): - raise ValueError( - "Cannot intersect charges with different shifts {} and {}".format( - self.shifts, other.shifts)) if return_indices: charges, comm1, comm2 = np.intersect1d( self.charges, other.charges, return_indices=return_indices) @@ -391,99 +191,36 @@ def intersect(self, charges = np.intersect1d(self.charges, other.charges) obj = self.__new__(type(self)) - obj.__init__(charges=[charges], shifts=self.shifts) + obj.__init__(charges, np.arange(len(charges), dtype=np.uint16)) if return_indices: - return obj, comm1, comm2 + return obj, comm1.astype(np.uint16), comm2.astype(np.uint16) return obj - -class U1Charge(BaseCharge): - """ - A simple charge 
class for a single U1 symmetry. - This class can store multiple U1 charges in a single - np.ndarray of integer dtype. Depending on the dtype of - the individual symmetries, this class can store: - * 8 np.int8 - * 4 np.int16 - * 2 np.int32 - * 1 np.int64 - or any suitable combination of dtypes, such that their - bite-sum remains below 64. - """ - - def __init__(self, - charges: List[np.ndarray], - shifts: Optional[np.ndarray] = None) -> None: - super().__init__(charges=charges, shifts=shifts) - - def __add__(self, other: "U1Charge") -> "U1Charge": + def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": """ - Fuse the charges of `self` with charges of `other`, and - return a new `U1Charge` object holding the result. + Return the charge-element at position `n`, wrapped into a `BaseCharge` + object. Args: - other: A `U1Charge` object. + n: An integer or `np.ndarray`. Returns: - U1Charge: The result of fusing `self` with `other`. + BaseCharge: The charges at `n`. """ - if self.num_symmetries != other.num_symmetries: - raise ValueError( - "cannot fuse charges with different number of symmetries") - if not np.all(self.shifts == other.shifts): - raise ValueError( - "Cannot fuse U1-charges with different shifts {} and {}".format( - self.shifts, other.shifts)) - if not isinstance(other, U1Charge): - raise TypeError( - "can only add objects of identical types, found {} and {} instead" - .format(type(self), type(other))) - fused = np.reshape(self.charges[:, None] + other.charges[None, :], - len(self.charges) * len(other.charges)) - return U1Charge(charges=[fused], shifts=self.shifts) + if isinstance(n, (np.integer, int)): + n = np.asarray([n]) + obj = self.__new__(type(self)) + obj.__init__(self.unique_charges, self.charge_labels[n]) + return obj - def __sub__(self, other: "U1Charge") -> "U1Charge": + def get_item(self, n: Union[np.ndarray, int]) -> np.ndarray: """ - Subtract the charges of `other` from charges of `self` and - return a new `U1Charge` object holding 
the result. + Return the charge-element at position `n`. Args: - other: A `U1Charge` object. + n: An integer or `np.ndarray`. Returns: - U1Charge: The result of fusing `self` with `other`. + np.ndarray: The charges at `n`. """ - if self.num_symmetries != other.num_symmetries: - raise ValueError( - "cannot fuse charges with different number of symmetries") - - if not np.all(self.shifts == other.shifts): - raise ValueError( - "Cannot fuse U1-charges with different shifts {} and {}".format( - self.shifts, other.shifts)) - if not isinstance(other, U1Charge): - raise TypeError( - "can only subtract objects of identical types, found {} and {} instead" - .format(type(self), type(other))) - - fused = np.reshape(self.charges[:, None] - other.charges[None, :], - len(self.charges) * len(other.charges)) - return U1Charge(charges=[fused], shifts=self.shifts) - - def __matmul__(self, other: Union["U1Charge", "U1Charge"]) -> "U1Charge": - itemsize = np.sum(self._itemsizes + other._itemsizes) - if itemsize > 8: - raise TypeError("Number of bits required to store all charges " - "in a single int is larger than 64") - dtype = np.int16 #need at least np.int16 to store two charges - if itemsize > 2: - dtype = np.int32 - if itemsize > 4: - dtype = np.int64 - - charges = np.left_shift( - self.charges.astype(dtype), - 8 * np.sum(other._itemsizes)) + other.charges.astype(dtype) - - shifts = np.append(self.shifts + 8 * np.sum(other._itemsizes), other.shifts) - return U1Charge(charges=[charges], shifts=shifts) + return self.charges[n] def __mul__(self, number: Union[bool, int]) -> "U1Charge": if number not in (True, False, 0, 1, -1): @@ -492,27 +229,31 @@ def __mul__(self, number: Union[bool, int]) -> "U1Charge": number)) #outflowing charges if number in (0, False, -1): - charges = self.dtype.type(-1) * self.charges - shifts = self.shifts - return U1Charge(charges=[charges], shifts=shifts) + return U1Charge( + self.dual_charges(self.unique_charges), self.charge_labels) #inflowing charges if 
number in (1, True): - #Note: the returned U1Charge shares its data with self - return U1Charge(charges=[self.charges], shifts=self.shifts) - - # def __rmul__(self, number: Union[bool, int]) -> "U1Charge": - # raise - # print(number not in (True, False, 0, 1, -1)) - # if number not in (True, False, 0, 1, -1): - # raise ValueError( - # "can only multiply by `True`, `False`, `1` or `0`, found {}".format( - # number)) - # return self.__mul__(number) + return U1Charge(self.unique_charges, self.charge_labels) @property - def dual_charges(self) -> np.ndarray: - #the dual of a U1 charge is its negative value - return self.charges * self.dtype.type(-1) + def dual(self, charges): + return self.dual_charges + + +class U1Charge(BaseCharge): + + def __init__(self, + charges: np.ndarray, + charge_labels: Optional[np.ndarray] = None) -> None: + super().__init__(charges, charge_labels) + + @staticmethod + def fuse(charge1, charge2): + return np.add.outer(charge1, charge2).ravel() + + @staticmethod + def dual_charges(charges): + return charges * charges.dtype.type(-1) class Z2Charge(BaseCharge): @@ -915,6 +656,7 @@ def equals(self, target_charges: List[Union[List, np.ndarray]]) -> np.ndarray: ]) def __eq__(self, target_charges: Iterable): + raise NotImplementedError() if isinstance(target_charges, type(self)): target_charges = np.stack([c.charges for c in target_charges.charges], axis=1) @@ -992,9 +734,9 @@ def intersect(self, return tmp_unique[counts == 2] -def fuse_charges(charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]] - ) -> Union[BaseCharge, ChargeCollection]: +def fuse_charges( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[bool, int]]) -> Union[BaseCharge, ChargeCollection]: """ Fuse all `charges` into a new charge. 
Charges are fused from "right to left", From 70871278105526fe608540ca1ad7fff7a323d6d8 Mon Sep 17 00:00:00 2001 From: Jayanth Chandra Date: Sun, 26 Jan 2020 23:10:49 +0530 Subject: [PATCH 180/212] Fix unsafe None checks (#449) * None checks added for constructors * Changes in None check and resolve comments --- tensornetwork/network_components.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tensornetwork/network_components.py b/tensornetwork/network_components.py index 0da188416..17d4549ed 100644 --- a/tensornetwork/network_components.py +++ b/tensornetwork/network_components.py @@ -69,7 +69,7 @@ def __init__(self, """ self.is_disabled = False - if not name: + if name is None: name = '__unnamed_node__' else: if not isinstance(name, str): @@ -554,7 +554,7 @@ def __init__(self, #always use the `Node`'s backend backend = tensor.backend tensor = tensor.tensor - if not backend: + if backend is None: backend = get_default_backend() if isinstance(backend, BaseBackend): backend_obj = backend @@ -715,7 +715,7 @@ def __init__(self, backend with a tf.Dtype=tf.floa32, `dtype` has to be `np.float32`. 
""" - if not backend: + if backend is None: backend = get_default_backend() backend_obj = backend_factory.get_backend(backend) @@ -924,7 +924,7 @@ def __init__(self, raise ValueError( "node2 and axis2 must either be both None or both not be None") self.is_disabled = False - if not name: + if name is None: name = '__unnamed_edge__' else: if not isinstance(name, str): From ce21161539e77cf695437ae3d13cc68136145cb5 Mon Sep 17 00:00:00 2001 From: MichaelMarien Date: Mon, 27 Jan 2020 20:24:47 +0100 Subject: [PATCH 181/212] Backend test (#448) * added test for mps switch backend * added switch backend method to MPS * added test for network operations switch backend * make sure switch_backend not only fixes tensor but also node property * added switch_backend to init * missing test for backend contextmanager * notimplemented tests for base backend * added subtraction test notimplemented * added jax backend index_update test * first missing tests for numpy * actually catched an error in numpy_backend eigs method! 
* more eigs tests * didnt catch an error, unexpected convention * more tests for eigsh_lancszos * added missing pytorch backend tests * added missing tf backend tests * pytype * suppress pytype Co-authored-by: Chase Roberts --- tensornetwork/backends/backend_test.py | 198 ++++++++++++++++++ .../backends/jax/jax_backend_test.py | 24 +++ .../backends/numpy/numpy_backend_test.py | 139 ++++++++++++ .../backends/pytorch/pytorch_backend_test.py | 60 +++++- .../tensorflow/tensorflow_backend_test.py | 11 + .../tests/backend_contextmanager_test.py | 13 ++ 6 files changed, 442 insertions(+), 3 deletions(-) diff --git a/tensornetwork/backends/backend_test.py b/tensornetwork/backends/backend_test.py index e1a96071a..3e30dcffe 100644 --- a/tensornetwork/backends/backend_test.py +++ b/tensornetwork/backends/backend_test.py @@ -4,6 +4,7 @@ import pytest import numpy as np from tensornetwork import connect, contract, Node +from tensornetwork.backends.base_backend import BaseBackend def clean_tensornetwork_modules(): @@ -146,3 +147,200 @@ def test_basic_network_without_backends_raises_error(): Node(np.ones((2, 2)), backend="tensorflow") with pytest.raises(ImportError): Node(np.ones((2, 2)), backend="pytorch") +[] + +def test_base_backend_name(): + backend = BaseBackend() + assert backend.name == "base backend" + + +def test_base_backend_tensordot_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.tensordot(np.ones((2, 2)), np.ones((2, 2)), axes=[[0], [0]]) + + +def test_base_backend_reshape_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.reshape(np.ones((2, 2)), (4, 1)) + + +def test_base_backend_transpose_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.transpose(np.ones((2, 2)), [0, 1]) + + +def test_base_backend_svd_decompositon_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + 
backend.svd_decomposition(np.ones((2, 2)), 0) + + +def test_base_backend_qr_decompositon_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.qr_decomposition(np.ones((2, 2)), 0) + + +def test_base_backend_rq_decompositon_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.rq_decomposition(np.ones((2, 2)), 0) + + +def test_base_backend_shape_concat_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.shape_concat([np.ones((2, 2)), np.ones((2, 2))], 0) + + +def test_base_backend_shape_tensor_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.shape_tensor(np.ones((2, 2))) + + +def test_base_backend_shape_tuple_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.shape_tuple(np.ones((2, 2))) + + +def test_base_backend_shape_prod_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.shape_prod(np.ones((2, 2))) + + +def test_base_backend_sqrt_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.sqrt(np.ones((2, 2))) + + +def test_base_backend_diag_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.diag(np.ones((2, 2))) + + +def test_base_backend_convert_to_tensor_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.convert_to_tensor(np.ones((2, 2))) + + +def test_base_backend_trace_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.trace(np.ones((2, 2))) + + +def test_base_backend_outer_product_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.outer_product(np.ones((2, 2)), np.ones((2, 2))) + + +def test_base_backend_einsul_not_implemented(): + backend = BaseBackend() + with 
pytest.raises(NotImplementedError): + backend.einsum("ii", np.ones((2, 2))) + + +def test_base_backend_norm_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.norm(np.ones((2, 2))) + + +def test_base_backend_eye_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.eye(2, dtype=np.float64) + + +def test_base_backend_ones_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.ones((2, 2), dtype=np.float64) + + +def test_base_backend_zeros_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.zeros((2, 2), dtype=np.float64) + + +def test_base_backend_randn_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.randn((2, 2)) + + +def test_base_backend_random_uniforl_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.random_uniform((2, 2)) + + +def test_base_backend_conj_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.conj(np.ones((2, 2))) + + +def test_base_backend_eigh_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.eigh(np.ones((2, 2))) + + +def test_base_backend_eigs_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.eigs(np.ones((2, 2))) + + +def test_base_backend_eigs_lanczos_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.eigsh_lanczos(np.ones((2, 2))) + + +def test_base_backend_addition_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.addition(np.ones((2, 2)), np.ones((2, 2))) + + +def test_base_backend_subtraction_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.subtraction(np.ones((2, 2)), np.ones((2, 2))) 
+ + +def test_base_backend_multiply_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.multiply(np.ones((2, 2)), np.ones((2, 2))) + + +def test_base_backend_divide_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.divide(np.ones((2, 2)), np.ones((2, 2))) + + +def test_base_backend_index_update_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.index_update(np.ones((2, 2)), np.ones((2, 2)), np.ones((2, 2))) + + +def test_base_backend_inv_not_implemented(): + backend = BaseBackend() + with pytest.raises(NotImplementedError): + backend.inv(np.ones((2, 2))) diff --git a/tensornetwork/backends/jax/jax_backend_test.py b/tensornetwork/backends/jax/jax_backend_test.py index 330dbe224..4c2868734 100644 --- a/tensornetwork/backends/jax/jax_backend_test.py +++ b/tensornetwork/backends/jax/jax_backend_test.py @@ -271,3 +271,27 @@ def index_update(dtype): tensor = np.array(tensor) tensor[tensor > 0.1] = 0.0 np.testing.assert_allclose(tensor, out) + + +def test_base_backend_eigs_not_implemented(): + backend = jax_backend.JaxBackend() + tensor = backend.randn((4, 2, 3), dtype=np.float64) + with pytest.raises(NotImplementedError): + backend.eigs(tensor) + + +def test_base_backend_eigsh_lanczos_not_implemented(): + backend = jax_backend.JaxBackend() + tensor = backend.randn((4, 2, 3), dtype=np.float64) + with pytest.raises(NotImplementedError): + backend.eigsh_lanczos(tensor) + + +@pytest.mark.parametrize("dtype", np_dtypes) +def test_index_update(dtype): + backend = jax_backend.JaxBackend() + tensor = backend.randn((4, 2, 3), dtype=dtype, seed=10) + out = backend.index_update(tensor, tensor > 0.1, 0.0) + np_tensor = np.array(tensor) + np_tensor[np_tensor > 0.1] = 0.0 + np.testing.assert_allclose(out, np_tensor) diff --git a/tensornetwork/backends/numpy/numpy_backend_test.py b/tensornetwork/backends/numpy/numpy_backend_test.py index 
fe73271e8..6a8068c41 100644 --- a/tensornetwork/backends/numpy/numpy_backend_test.py +++ b/tensornetwork/backends/numpy/numpy_backend_test.py @@ -3,6 +3,7 @@ import numpy as np import pytest from tensornetwork.backends.numpy import numpy_backend +from unittest.mock import Mock np_randn_dtypes = [np.float32, np.float16, np.float64] np_dtypes = np_randn_dtypes + [np.complex64, np.complex128] @@ -310,6 +311,35 @@ def __call__(self, x): np.testing.assert_allclose(v1, v2) +@pytest.mark.parametrize("dtype", [np.float64, np.complex128]) +def test_eigsh_lanczos_reorthogonalize(dtype): + backend = numpy_backend.NumPyBackend() + D = 24 + np.random.seed(10) + tmp = backend.randn((D, D), dtype=dtype, seed=10) + H = tmp + backend.transpose(backend.conj(tmp), (1, 0)) + + class LinearOperator: + + def __init__(self, shape, dtype): + self.shape = shape + self.dtype = dtype + + def __call__(self, x): + return np.dot(H, x) + + mv = LinearOperator(shape=((D,), (D,)), dtype=dtype) + eta1, U1 = backend.eigsh_lanczos(mv, reorthogonalize=True, ndiag=1, + tol=10**(-12), delta=10**(-12)) + eta2, U2 = np.linalg.eigh(H) + v2 = U2[:, 0] + v2 = v2 / sum(v2) + v1 = np.reshape(U1[0], (D)) + v1 = v1 / sum(v1) + np.testing.assert_allclose(eta1[0], min(eta2)) + np.testing.assert_allclose(v1, v2, rtol=10**(-5), atol=10**(-5)) + + def test_eigsh_lanczos_raises(): backend = numpy_backend.NumPyBackend() with pytest.raises(AttributeError): @@ -320,6 +350,37 @@ def test_eigsh_lanczos_raises(): backend.eigsh_lanczos(lambda x: x, numeig=2, reorthogonalize=False) +def test_eigsh_lanczos_raises_error_for_incompatible_shapes(): + backend = numpy_backend.NumPyBackend() + A = backend.randn((4, 4), dtype=np.float64) + init = backend.randn((3, ), dtype=np.float64) + with pytest.raises(ValueError): + backend.eigsh_lanczos(A, initial_state=init) + + +def test_eigsh_lanczos_raises_error_for_untyped_A(): + backend = numpy_backend.NumPyBackend() + A = Mock(spec=[]) + A.shape = Mock(return_value=(2, 2)) + err_msg = 
"`A` has no attribute `dtype`. Cannot initialize lanczos. " \ + "Please provide a valid `initial_state` with a `dtype` attribute" + with pytest.raises(AttributeError, match=err_msg): + backend.eigsh_lanczos(A) + + +def test_eigsh_lanczos_raises_error_for_bad_initial_state(): + backend = numpy_backend.NumPyBackend() + D = 16 + init = [1]*D + M = backend.randn((D, D), dtype=np.float64) + + def mv(x): + return np.dot(M, x) + + with pytest.raises(TypeError): + backend.eigsh_lanczos(mv, initial_state=init) + + @pytest.mark.parametrize("a, b, expected", [ pytest.param(1, 1, 2), pytest.param(1., np.ones((1, 2, 3)), 2*np.ones((1, 2, 3))), @@ -430,6 +491,84 @@ def mv(x): np.testing.assert_allclose(v1, v2) +@pytest.mark.parametrize("dtype", [np.float64, np.complex128]) +@pytest.mark.parametrize("which", ['LM', 'LR', 'SM', 'SR']) +def test_eigs_no_init(dtype, which): + backend = numpy_backend.NumPyBackend() + D = 16 + np.random.seed(10) + H = backend.randn((D, D), dtype=dtype, seed=10) + + class LinearOperator: + + def __init__(self, shape, dtype): + self.shape = shape + self.dtype = dtype + + def __call__(self, x): + return np.dot(H, x) + + mv = LinearOperator(shape=((D,), (D,)), dtype=dtype) + eta1, U1 = backend.eigs(mv, numeig=1, which=which) + eta2, U2 = np.linalg.eig(H) + val, index = find(which, eta2) + v2 = U2[:, index] + v2 = v2 / sum(v2) + v1 = np.reshape(U1[0], (D)) + v1 = v1 / sum(v1) + np.testing.assert_allclose(find(which, eta1)[0], val) + np.testing.assert_allclose(v1, v2) + + +@pytest.mark.parametrize("which", ['SI', 'LI']) +def test_eigs_raises_error_for_unsupported_which(which): + backend = numpy_backend.NumPyBackend() + A = backend.randn((4, 4), dtype=np.float64) + with pytest.raises(ValueError): + backend.eigs(A=A, which=which) + + +def test_eigs_raises_error_for_incompatible_shapes(): + backend = numpy_backend.NumPyBackend() + A = backend.randn((4, 4), dtype=np.float64) + init = backend.randn((3, ), dtype=np.float64) + with pytest.raises(ValueError): + 
backend.eigs(A, initial_state=init) + + +def test_eigs_raises_error_for_unshaped_A(): + backend = numpy_backend.NumPyBackend() + A = Mock(spec=[]) + print(hasattr(A, "shape")) + err_msg = "`A` has no attribute `shape`. Cannot initialize lanczos. " \ + "Please provide a valid `initial_state`" + with pytest.raises(AttributeError, match=err_msg): + backend.eigs(A) + + +def test_eigs_raises_error_for_untyped_A(): + backend = numpy_backend.NumPyBackend() + A = Mock(spec=[]) + A.shape = Mock(return_value=(2, 2)) + err_msg = "`A` has no attribute `dtype`. Cannot initialize lanczos. " \ + "Please provide a valid `initial_state` with a `dtype` attribute" + with pytest.raises(AttributeError, match=err_msg): + backend.eigs(A) + + +def test_eigs_raises_error_for_bad_initial_state(): + backend = numpy_backend.NumPyBackend() + D = 16 + init = [1]*D + M = backend.randn((D, D), dtype=np.float64) + + def mv(x): + return np.dot(M, x) + + with pytest.raises(TypeError): + backend.eigs(mv, initial_state=init) + + @pytest.mark.parametrize("dtype", [np.float64, np.complex128]) def test_eigh(dtype): backend = numpy_backend.NumPyBackend() diff --git a/tensornetwork/backends/pytorch/pytorch_backend_test.py b/tensornetwork/backends/pytorch/pytorch_backend_test.py index e25542ac8..f03dc26a3 100644 --- a/tensornetwork/backends/pytorch/pytorch_backend_test.py +++ b/tensornetwork/backends/pytorch/pytorch_backend_test.py @@ -3,6 +3,7 @@ from tensornetwork.backends.pytorch import pytorch_backend import torch import pytest +from unittest.mock import Mock torch_dtypes = [torch.float32, torch.float64, torch.int32] torch_eye_dtypes = [torch.float32, torch.float64, torch.int32, torch.int64] @@ -237,7 +238,7 @@ def test_conj(): def test_eigsh_lanczos_1(): dtype = torch.float64 backend = pytorch_backend.PyTorchBackend() - D = 16 + D = 24 init = backend.randn((D,), dtype=dtype) tmp = backend.randn((D, D), dtype=dtype) H = tmp + backend.transpose(backend.conj(tmp), (1, 0)) @@ -255,10 +256,11 @@ def mv(x): 
np.testing.assert_allclose(v1, v2) -def test_eigsh_lanczos_2(): +def test_eigsh_lanczos_reorthogonalize(): dtype = torch.float64 backend = pytorch_backend.PyTorchBackend() D = 16 + init = backend.randn((D,), dtype=dtype) tmp = backend.randn((D, D), dtype=dtype) H = tmp + backend.transpose(backend.conj(tmp), (1, 0)) @@ -272,7 +274,7 @@ def __call__(self, x): return H.mv(x) mv = LinearOperator(shape=((D,), (D,)), dtype=dtype) - eta1, U1 = backend.eigsh_lanczos(mv) + eta1, U1 = backend.eigsh_lanczos(mv, init) eta2, U2 = H.symeig() v2 = U2[:, 0] v2 = v2 / sum(v2) @@ -282,6 +284,34 @@ def __call__(self, x): np.testing.assert_allclose(v1, v2) +def test_eigsh_lanczos_2(): + dtype = torch.float64 + backend = pytorch_backend.PyTorchBackend() + D = 16 + tmp = backend.randn((D, D), dtype=dtype) + H = tmp + backend.transpose(backend.conj(tmp), (1, 0)) + + class LinearOperator: + + def __init__(self, shape, dtype): + self.shape = shape + self.dtype = dtype + + def __call__(self, x): + return H.mv(x) + + mv = LinearOperator(shape=((D,), (D,)), dtype=dtype) + eta1, U1 = backend.eigsh_lanczos(mv, reorthogonalize=True, ndiag=1, + tol=10**(-12), delta=10**(-12)) + eta2, U2 = H.symeig() + v2 = U2[:, 0] + v2 = v2 / sum(v2) + v1 = np.reshape(U1[0], (D)) + v1 = v1 / sum(v1) + np.testing.assert_allclose(eta1[0], min(eta2)) + np.testing.assert_allclose(v1, v2, rtol=10**(-5), atol=10**(-5)) + + def test_eigsh_lanczos_raises(): backend = pytorch_backend.PyTorchBackend() with pytest.raises(AttributeError): @@ -388,3 +418,27 @@ def test_matrix_inv_raises(dtype): matrix = backend.randn((4, 4, 4), dtype=dtype, seed=10) with pytest.raises(ValueError): backend.inv(matrix) + + +def test_eigs_not_implemented(): + backend = pytorch_backend.PyTorchBackend() + with pytest.raises(NotImplementedError): + backend.eigs(np.ones((2, 2))) + + +def test_eigsh_lanczos_raises_error_for_incompatible_shapes(): + backend = pytorch_backend.PyTorchBackend() + A = backend.randn((4, 4), dtype=torch.float64) + init = 
backend.randn((3, ), dtype=torch.float64) + with pytest.raises(ValueError): + backend.eigsh_lanczos(A, initial_state=init) + + +def test_eigsh_lanczos_raises_error_for_untyped_A(): + backend = pytorch_backend.PyTorchBackend() + A = Mock(spec=[]) + A.shape = Mock(return_value=(2, 2)) + err_msg = "`A` has no attribute `dtype`. Cannot initialize lanczos. " \ + "Please provide a valid `initial_state` with a `dtype` attribute" + with pytest.raises(AttributeError, match=err_msg): + backend.eigsh_lanczos(A) diff --git a/tensornetwork/backends/tensorflow/tensorflow_backend_test.py b/tensornetwork/backends/tensorflow/tensorflow_backend_test.py index 8073a61e2..4916301ce 100644 --- a/tensornetwork/backends/tensorflow/tensorflow_backend_test.py +++ b/tensornetwork/backends/tensorflow/tensorflow_backend_test.py @@ -342,3 +342,14 @@ def test_matrix_inv_raises(dtype): matrix = backend.randn((4, 4, 4), dtype=dtype, seed=10) with pytest.raises(ValueError): backend.inv(matrix) + +def test_eigs_not_implemented(): + backend = tensorflow_backend.TensorFlowBackend() + with pytest.raises(NotImplementedError): + backend.eigs(np.ones((2, 2))) + + +def test_eigsh_lanczos_not_implemented(): + backend = tensorflow_backend.TensorFlowBackend() + with pytest.raises(NotImplementedError): + backend.eigsh_lanczos(np.ones((2, 2))) diff --git a/tensornetwork/tests/backend_contextmanager_test.py b/tensornetwork/tests/backend_contextmanager_test.py index 60f6e833b..644c055d0 100644 --- a/tensornetwork/tests/backend_contextmanager_test.py +++ b/tensornetwork/tests/backend_contextmanager_test.py @@ -3,23 +3,27 @@ import pytest import numpy as np + def test_contextmanager_simple(): with tn.DefaultBackend("tensorflow"): a = tn.Node(np.ones((10,))) b = tn.Node(np.ones((10,))) assert a.backend.name == b.backend.name + def test_contextmanager_default_backend(): tn.set_default_backend("pytorch") with tn.DefaultBackend("numpy"): assert _default_backend_stack.default_backend == "pytorch" + def 
test_contextmanager_interruption(): tn.set_default_backend("pytorch") with pytest.raises(AssertionError): with tn.DefaultBackend("numpy"): tn.set_default_backend("tensorflow") + def test_contextmanager_nested(): with tn.DefaultBackend("tensorflow"): a = tn.Node(np.ones((10,))) @@ -32,15 +36,24 @@ def test_contextmanager_nested(): d = tn.Node(np.ones((10,))) assert d.backend.name == "numpy" + def test_contextmanager_wrong_item(): a = tn.Node(np.ones((10,))) with pytest.raises(ValueError): with tn.DefaultBackend(a): # pytype: disable=wrong-arg-types pass + def test_contextmanager_BaseBackend(): tn.set_default_backend("pytorch") a = tn.Node(np.ones((10,))) with tn.DefaultBackend(a.backend): b = tn.Node(np.ones((10,))) assert b.backend.name == "pytorch" + + +def test_set_default_backend_value_error(): + tn.set_default_backend("pytorch") + with pytest.raises(ValueError, match="Item passed to set_default_backend " + "must be Text or BaseBackend"): + tn.set_default_backend(-1) # pytype: disable=wrong-arg-types From 1dd23c91da0d059ec892470c38ca9c31935ef49a Mon Sep 17 00:00:00 2001 From: Chase Roberts Date: Tue, 28 Jan 2020 09:31:49 -0800 Subject: [PATCH 182/212] Version bump for release --- tensornetwork/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensornetwork/version.py b/tensornetwork/version.py index fb3fd385e..45642f983 100644 --- a/tensornetwork/version.py +++ b/tensornetwork/version.py @@ -12,4 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = '0.2.0' +__version__ = '0.2.1' From f18f70f7b8869aef5d157965ad9bff8c2129a642 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 29 Jan 2020 13:58:11 -0500 Subject: [PATCH 183/212] merging Glen's and my code --- tensornetwork/block_tensor/block_tensor.py | 1485 +++++++++++--------- tensornetwork/block_tensor/charge.py | 1308 +++++++++-------- tensornetwork/block_tensor/index.py | 15 +- 3 files changed, 1556 insertions(+), 1252 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 7f5a50f18..1ad33b5b2 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -21,7 +21,7 @@ # pylint: disable=line-too-long from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index # pylint: disable=line-too-long -from tensornetwork.block_tensor.charge import fuse_degeneracies, fuse_charges, fuse_degeneracies, BaseCharge, ChargeCollection +from tensornetwork.block_tensor.charge import fuse_degeneracies, fuse_charges, fuse_degeneracies, BaseCharge, BaseCharge, fuse_ndarray_charges, intersect import numpy as np import scipy as sp import itertools @@ -30,10 +30,31 @@ Tensor = Any -def compute_sparse_lookup( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: Iterable[Union[bool, int]], - target_charges: Union[BaseCharge, ChargeCollection]) -> np.ndarray: +def combine_index_strides(index_dims: np.ndarray, + strides: np.ndarray) -> np.ndarray: + """ + Combine multiple indices of some dimensions and strides into a single index, + based on row-major order. Used when transposing SymTensors. + Args: + index_dims (np.ndarray): list of dim of each index. + strides (np.ndarray): list of strides of each index. + Returns: + np.ndarray: strides of combined index. 
+ """ + num_ind = len(index_dims) + comb_ind_locs = np.arange( + 0, strides[0] * index_dims[0], strides[0], dtype=np.uint32) + for n in range(1, num_ind): + comb_ind_locs = np.add.outer( + comb_ind_locs, + np.arange(0, strides[n] * index_dims[n], strides[n], + dtype=np.uint32)).ravel() + + return comb_ind_locs + + +def compute_sparse_lookup(charges: List[BaseCharge], flows: Iterable[bool], + target_charges: BaseCharge) -> np.ndarray: """ Compute lookup table for looking up how dense index positions map to sparse index positions for the diagonal blocks a symmetric matrix. @@ -98,9 +119,7 @@ def fuse_ndarrays(arrays: List[Union[List, np.ndarray]]) -> np.ndarray: def _check_flows(flows: List[int]) -> None: - if (set(flows) != {1}) and (set(flows) != {-1}) and (set(flows) != {-1, 1}): - raise ValueError( - "flows = {} contains values different from 1 and -1".format(flows)) + return def _find_best_partition(dims: Iterable[int]) -> int: @@ -114,10 +133,6 @@ def _find_best_partition(dims: Iterable[int]) -> int: np.abs(np.prod(dims[0:n]) - np.prod(dims[n::])) for n in range(1, len(dims)) ] - - # diffs = [ - # np.abs(np.prod(dims[:n]) - np.prod(dims[n:])) for n in range(1, dims) - # ] min_inds = np.nonzero(diffs == np.min(diffs))[0] if len(min_inds) > 1: right_dims = [np.prod(dims[min_ind + 1:]) for min_ind in min_inds] @@ -128,9 +143,8 @@ def _find_best_partition(dims: Iterable[int]) -> int: def compute_fused_charge_degeneracies( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]] -) -> Tuple[Union[BaseCharge, ChargeCollection], np.ndarray]: + charges: List[BaseCharge], + flows: List[bool]) -> Tuple[BaseCharge, np.ndarray]: """ For a list of charges, compute all possible fused charges resulting from fusing `charges`, together with their respective degeneracies @@ -144,7 +158,7 @@ def compute_fused_charge_degeneracies( of the charges on each leg. `1` is inflowing, `-1` is outflowing charge. 
Returns: - Union[BaseCharge, ChargeCollection]: The unique fused charges. + Union[BaseCharge, BaseCharge]: The unique fused charges. np.ndarray of integers: The degeneracies of each unqiue fused charge. """ if len(charges) == 1: @@ -164,16 +178,15 @@ def compute_fused_charge_degeneracies( len(accumulated_charges), dtype=np.uint32) for n in range(len(accumulated_charges)): - accumulated_degeneracies[n] = np.sum( - fused_degeneracies[fused_charges == accumulated_charges[n]]) + accumulated_degeneracies[n] = np.sum(fused_degeneracies[ + fused_charges.charge_labels == accumulated_charges.charge_labels[n]]) return accumulated_charges, accumulated_degeneracies def compute_unique_fused_charges( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]] -) -> Tuple[Union[BaseCharge, ChargeCollection], np.ndarray]: + charges: List[BaseCharge], + flows: List[Union[bool, int]]) -> Tuple[BaseCharge, np.ndarray]: """ For a list of charges, compute all possible fused charges resulting from fusing `charges`. @@ -201,8 +214,7 @@ def compute_unique_fused_charges( return accumulated_charges -def compute_num_nonzero(charges: List[np.ndarray], - flows: List[Union[bool, int]]) -> int: +def compute_num_nonzero(charges: List[BaseCharge], flows: List[bool]) -> int: """ Compute the number of non-zero elements, given the meta-data of a symmetric tensor. 
@@ -220,375 +232,233 @@ def compute_num_nonzero(charges: List[np.ndarray], """ accumulated_charges, accumulated_degeneracies = compute_fused_charge_degeneracies( charges, flows) - res = accumulated_charges == accumulated_charges.zero_charge - - if len(np.nonzero(res)[0]) == 0: + res = accumulated_charges == accumulated_charges.identity_charges + nz_inds = np.nonzero(res)[0] + if len(nz_inds) == 0: raise ValueError( "given leg-charges `charges` and flows `flows` are incompatible " "with a symmetric tensor") - return np.squeeze(accumulated_degeneracies[res][0]) + return np.squeeze(accumulated_degeneracies[nz_inds][0]) def _find_diagonal_sparse_blocks( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]], - partition: int) -> Tuple[Union[BaseCharge, ChargeCollection], List]: + charges: List[BaseCharge], flows: np.ndarray, + partition: int) -> (np.ndarray, np.ndarray, np.ndarray): """ - Given the meta data and underlying data of a symmetric matrix, compute - all diagonal blocks and return them in a dict. - `row_charges` and `column_charges` are lists of np.ndarray. The tensor - is viewed as a matrix with rows given by fusing `row_charges` and - columns given by fusing `column_charges`. - - Args: - charges: A list of charges. - flows: A list of flows. - partition: The location of the partition of `charges` into rows and colums. + Find the location of all non-trivial symmetry blocks from the data vector of + of SymTensor (when viewed as a matrix across some prescribed index + bi-partition). + Args: + charges (List[SymIndex]): list of SymIndex. + flows (np.ndarray): vector of bools describing index orientations. + partition_loc (int): location of tensor partition (i.e. such that the + tensor is viewed as a matrix between first partition_loc charges and + the remaining charges). 
Returns: - return common_charges, blocks, start_positions, row_locations, column_degeneracies - List[Union[BaseCharge, ChargeCollection]]: A list of unique charges, one per block. - List[np.ndarray]: A list containing the blocks. + block_maps (List[np.ndarray]): list of integer arrays, which each + containing the location of a symmetry block in the data vector. + block_qnums (np.ndarray): n-by-m array describing qauntum numbers of each + block, with 'n' the number of symmetries and 'm' the number of blocks. + block_dims (np.ndarray): 2-by-m array describing the dims each block, + with 'm' the number of blocks). """ - _check_flows(flows) - if len(flows) != len(charges): - raise ValueError("`len(flows)` is different from `len(charges)`") - row_charges = charges[:partition] - row_flows = flows[:partition] - column_charges = charges[partition:] - column_flows = flows[partition:] - - #get the unique column-charges - #we only care about their degeneracies, not their order; that's much faster - #to compute since we don't have to fuse all charges explicitly - #`compute_fused_charge_degeneracies` multiplies flows into the column_charges - unique_column_charges, column_dims = compute_fused_charge_degeneracies( - column_charges, column_flows) - unique_row_charges = compute_unique_fused_charges(row_charges, row_flows) - #get the charges common to rows and columns (only those matter) - common_charges, label_to_row, label_to_column = unique_row_charges.intersect( - unique_column_charges * (-1), return_indices=True) - - #convenience container for storing the degeneracies of each - #column charge - #column_degeneracies = dict(zip(unique_column_charges, column_dims)) - column_degeneracies = dict(zip(unique_column_charges * (-1), column_dims)) - - row_locations = find_sparse_positions( - charges=row_charges, flows=row_flows, target_charges=common_charges) + num_inds = len(charges) + num_syms = charges[0].num_symmetries - degeneracy_vector = np.empty( - np.sum([len(v) for v in 
row_locations.values()]), dtype=np.uint32) - #for each charge `c` in `common_charges` we generate a boolean mask - #for indexing the positions where `relevant_column_charges` has a value of `c`. - for c in common_charges: - degeneracy_vector[row_locations[c]] = column_degeneracies[c] - - start_positions = (np.cumsum(degeneracy_vector) - degeneracy_vector).astype( - np.uint32) - blocks = [] - - for c in common_charges: - #numpy broadcasting is substantially faster than kron! - rlocs = row_locations[c] - rlocs.sort() #sort in place (we need it again later) - cdegs = column_degeneracies[c] - inds = (np.add.outer(start_positions[rlocs], np.arange(cdegs))).ravel() - blocks.append([inds, (len(rlocs), cdegs)]) - return common_charges, blocks - - -def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[int, bool]], - target_charges: Union[BaseCharge, ChargeCollection], - strides: Optional[np.ndarray] = None, - store_dual: Optional[bool] = False) -> Dict: - """ - Find the dense locations of elements (i.e. the index-values within the DENSE tensor) - in the vector of `fused_charges` resulting from fusing all elements of `charges` - that have a value of `target_charge`. - For example, given - ``` - charges = [[-2,0,1,0,0],[-1,0,2,1]] - target_charge = 0 - fused_charges = fuse_charges(charges,[1,1]) - print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] - ``` - we want to find the index-positions of charges - that fuse to `target_charge=0`, i.e. where `fused_charges==0`, - within the dense array. As one additional wrinkle, `charges` - is a subset of the permuted charges of a tensor with rank R > len(charges), - and `stride_arrays` are their corresponding range of strides, i.e. 
- - ``` - R=5 - D = [2,3,4,5,6] - tensor_flows = np.random.randint(-1,2,R) - tensor_charges = [np.random.randing(-5,5,D[n]) for n in range(R)] - order = np.arange(R) - np.random.shuffle(order) - tensor_strides = [360, 120, 30, 6, 1] - - charges = [tensor_charges[order[n]] for n in range(3)] - flows = [tensor_flows[order[n]] for n in range(len(3))] - strides = [tensor_stride[order[n]] for n in range(3)] - _ = _find_transposed_dense_positions(charges, flows, 0, strides) - - ``` - `_find_transposed_dense_blocks` returns an np.ndarray containing the - index-positions of these elements calculated using `stride_arrays`. - The result only makes sense in conjuction with the complementary - data computed from the complementary - elements in`tensor_charges`, - `tensor_strides` and `tensor_flows`. - This routine is mainly used in `_find_diagonal_dense_blocks`. + if (partition == 0) or (partition == num_inds): + # special cases (matrix of trivial height or width) + num_nonzero = compute_num_nonzero(charges, flows) + block_maps = [np.arange(0, num_nonzero, dtype=np.uint64).ravel()] + block_qnums = np.zeros([num_syms, 1], dtype=np.int16) + block_dims = np.array([[1], [num_nonzero]]) - Args: - charges: A list of BaseCharge or ChargeCollection. - flows: The flow directions of the `charges`. - target_charge: The target charge. - strides: The strides for the `charges` subset. - if `None`, natural stride ordering is assumed. 
+ if partition == len(flows): + block_dims = np.flipud(block_dims) - Returns: - dict - """ + return block_maps, block_qnums, block_dims - _check_flows(flows) - out = {} - if store_dual: - store_charges = target_charges * (-1) else: - store_charges = target_charges + unique_row_qnums, row_degen = compute_fused_charge_degeneracies( + charges[:partition], flows[:partition]) + + unique_col_qnums, col_degen = compute_fused_charge_degeneracies( + charges[partition:], np.logical_not(flows[partition:])) + + block_qnums, row_to_block, col_to_block = intersect( + unique_row_qnums.unique_charges, + unique_col_qnums.unique_charges, + axis=1, + return_indices=True) + num_blocks = block_qnums.shape[1] + if num_blocks == 0: + obj = charges[0].__new__(type(charges[0])) + obj.__init__( + np.zeros(0, dtype=np.int16), np.arange(0, dtype=np.int16), + charges[0].charge_types) + + return [], obj, [] - if len(charges) == 1: - fused_charges = charges[0] * flows[0] - inds = np.nonzero(fused_charges == target_charges) - if len(target_charges) > 1: - for n in range(len(target_charges)): - i = inds[0][inds[1] == n] - if len(i) == 0: - continue - if strides is not None: - permuted_inds = strides[0] * np.arange(len(charges[0])) - out[store_charges.get_item(n)] = permuted_inds[i] - else: - out[store_charges.get_item(n)] = i - return out else: - if strides is not None: - permuted_inds = strides[0] * np.arange(len(charges[0])) - out[store_charges.get_item(n)] = permuted_inds[inds[0]] + # calculate number of non-zero elements in each row of the matrix + row_ind = reduce_charges(charges[:partition], flows[:partition], + block_qnums) + row_num_nz = col_degen[col_to_block[row_ind.charge_labels]] + cumulate_num_nz = np.insert(np.cumsum(row_num_nz[0:-1]), 0, + 0).astype(np.uint32) + + # calculate mappings for the position in datavector of each block + if num_blocks < 15: + # faster method for small number of blocks + row_locs = np.concatenate( + [(row_ind.charge_labels == n) for n in 
range(num_blocks)]).reshape( + num_blocks, row_ind.dim) else: - out[store_charges.get_item(n)] = inds[0] - return out + # faster method for large number of blocks + row_locs = np.zeros([num_blocks, row_ind.dim], dtype=bool) + row_locs[row_ind + .charge_labels, np.arange(row_ind.dim)] = np.ones( + row_ind.dim, dtype=bool) + + # block_dims = np.array([row_degen[row_to_block],col_degen[col_to_block]], dtype=np.uint32) + block_dims = np.array( + [[row_degen[row_to_block[n]], col_degen[col_to_block[n]]] + for n in range(num_blocks)], + dtype=np.uint32).T + block_maps = [(cumulate_num_nz[row_locs[n, :]][:, None] + np.arange( + block_dims[1, n])[None, :]).ravel() for n in range(num_blocks)] + obj = charges[0].__new__(type(charges[0])) + obj.__init__(block_qnums, np.arange(block_qnums.shape[1], dtype=np.int16), + charges[0].charge_types) + + return block_maps, obj, block_dims - partition = _find_best_partition([len(c) for c in charges]) - left_charges = fuse_charges(charges[:partition], flows[:partition]) - right_charges = fuse_charges(charges[partition:], flows[partition:]) - if strides is not None: - stride_arrays = [ - np.arange(len(charges[n])) * strides[n] for n in range(len(charges)) - ] - permuted_left_inds = fuse_ndarrays(stride_arrays[0:partition]) - permuted_right_inds = fuse_ndarrays(stride_arrays[partition:]) - # unique_target_charges, inds = target_charges.unique(return_index=True) - # target_charges = target_charges[np.sort(inds)] - unique_left, left_inverse = left_charges.unique(return_inverse=True) - unique_right, right_inverse = right_charges.unique(return_inverse=True) - - fused_unique = unique_left + unique_right - unique_inds = np.nonzero(fused_unique == target_charges) - - relevant_positions = unique_inds[0] - tmp_inds_left, tmp_inds_right = np.divmod(relevant_positions, - len(unique_right)) - - relevant_unique_left_inds = np.unique(tmp_inds_left) - left_lookup = np.empty(np.max(relevant_unique_left_inds) + 1, dtype=np.uint32) - 
left_lookup[relevant_unique_left_inds] = np.arange( - len(relevant_unique_left_inds)) - relevant_unique_right_inds = np.unique(tmp_inds_right) - right_lookup = np.empty( - np.max(relevant_unique_right_inds) + 1, dtype=np.uint32) - right_lookup[relevant_unique_right_inds] = np.arange( - len(relevant_unique_right_inds)) - - left_charge_labels = np.nonzero( - np.expand_dims(left_inverse, 1) == np.expand_dims( - relevant_unique_left_inds, 0)) - right_charge_labels = np.nonzero( - np.expand_dims(right_inverse, 1) == np.expand_dims( - relevant_unique_right_inds, 0)) - - len_right = len(right_charges) - - for n in range(len(target_charges)): - if len(unique_inds) > 1: - lis, ris = np.divmod(unique_inds[0][unique_inds[1] == n], - len(unique_right)) - else: - lis, ris = np.divmod(unique_inds[0], len(unique_right)) - dense_positions = [] - left_positions = [] - lookup = [] - for m in range(len(lis)): - li = lis[m] - ri = ris[m] - dense_left_positions = (left_charge_labels[0][ - left_charge_labels[1] == left_lookup[li]]).astype(np.uint32) - dense_right_positions = (right_charge_labels[0][ - right_charge_labels[1] == right_lookup[ri]]).astype(np.uint32) - if strides is None: - positions = np.expand_dims(dense_left_positions * len_right, - 1) + np.expand_dims(dense_right_positions, 0) - else: - positions = np.expand_dims( - permuted_left_inds[dense_left_positions], 1) + np.expand_dims( - permuted_right_inds[dense_right_positions], 0) - - dense_positions.append(positions) - left_positions.append(dense_left_positions) - lookup.append( - np.stack([ - np.arange(len(dense_left_positions), dtype=np.uint32), - np.full(len(dense_left_positions), fill_value=m, dtype=np.uint32) - ], - axis=1)) - - if len(lookup) > 0: - ind_sort = np.argsort(np.concatenate(left_positions)) - it = np.concatenate(lookup, axis=0) - table = it[ind_sort, :] - out[store_charges.get_item(n)] = np.concatenate([ - dense_positions[table[n, 1]][table[n, 0], :].astype(np.uint32) - for n in range(table.shape[0]) - ]) 
- else: - out[store_charges.get_item(n)] = np.array([]) - - return out - - -def find_sparse_positions( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[int, bool]], - target_charges: Union[BaseCharge, ChargeCollection]) -> Dict: +def _find_transposed_diagonal_sparse_blocks( + charges: List[BaseCharge], + flows: np.ndarray, + tr_partition: int, + order: np.ndarray = None) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ - Find the sparse locations of elements (i.e. the index-values within - the SPARSE tensor) in the vector `fused_charges` (resulting from - fusing `left_charges` and `right_charges`) - that have a value of `target_charges`, assuming that all elements - different from `target_charges` are `0`. - For example, given - ``` - left_charges = [-2,0,1,0,0] - right_charges = [-1,0,2,1] - target_charges = [0,1] - fused_charges = fuse_charges([left_charges, right_charges],[1,1]) - print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] - ``` 0 1 2 3 4 5 6 7 8 - we want to find the all different blocks - that fuse to `target_charges=[0,1]`, i.e. where `fused_charges==0` or `1`, - together with their corresponding sparse index-values of the data in the sparse array, - assuming that all elements in `fused_charges` different from `target_charges` are 0. - - `find_sparse_blocks` returns a dict mapping integers `target_charge` - to an array of integers denoting the sparse locations of elements within - `fused_charges`. - For the above example, we get: - * `target_charge=0`: [0,1,3,5,7] - * `target_charge=1`: [2,4,6,8] + Find the location of all non-trivial symmetry blocks from the data vector of + of SymTensor after transposition (and then viewed as a matrix across some + prescribed index bi-tr_partition). Produces and equivalent result to + retrieve_blocks acting on a transposed SymTensor, but is much faster. Args: - charges: An np.ndarray of integer charges. - flows: The flow direction of the left charges. 
- target_charges: The target charges. + charges (List[SymIndex]): list of SymIndex. + flows (np.ndarray): vector of bools describing index orientations. + tr_partition (int): location of tensor partition (i.e. such that the + tensor is viewed as a matrix between first partition charges and + the remaining charges). + order (np.ndarray): order with which to permute the tensor axes. Returns: - dict: Mapping integers to np.ndarray of integers. + block_maps (List[np.ndarray]): list of integer arrays, which each + containing the location of a symmetry block in the data vector. + block_qnums (np.ndarray): n-by-m array describing qauntum numbers of each + block, with 'n' the number of symmetries and 'm' the number of blocks. + block_dims (np.ndarray): 2-by-m array describing the dims each block, + with 'm' the number of blocks). """ - _check_flows(flows) - if len(charges) == 1: - fused_charges = charges[0] * flows[0] - unique_charges = fused_charges.unique() - target_charges = target_charges.unique() - relevant_target_charges = unique_charges.intersect(target_charges) - relevant_fused_charges = fused_charges[fused_charges.isin( - relevant_target_charges)] - return { - c: np.nonzero(relevant_fused_charges == c)[0] - for c in relevant_target_charges - } - partition = _find_best_partition([len(c) for c in charges]) - left_charges = fuse_charges(charges[:partition], flows[:partition]) - right_charges = fuse_charges(charges[partition:], flows[partition:]) - - # unique_target_charges, inds = target_charges.unique(return_index=True) - # target_charges = target_charges[np.sort(inds)] - unique_left, left_inverse = left_charges.unique(return_inverse=True) - unique_right, right_inverse, right_dims = right_charges.unique( - return_inverse=True, return_counts=True) - - fused_unique = unique_left + unique_right - unique_inds = np.nonzero(fused_unique == target_charges) - relevant_positions = unique_inds[0].astype(np.uint32) - tmp_inds_left, tmp_inds_right = 
np.divmod(relevant_positions, - len(unique_right)) - - relevant_unique_left_inds = np.unique(tmp_inds_left) - left_lookup = np.empty(np.max(relevant_unique_left_inds) + 1, dtype=np.uint32) - left_lookup[relevant_unique_left_inds] = np.arange( - len(relevant_unique_left_inds)) - relevant_unique_right_inds = np.unique(tmp_inds_right) - right_lookup = np.empty( - np.max(relevant_unique_right_inds) + 1, dtype=np.uint32) - right_lookup[relevant_unique_right_inds] = np.arange( - len(relevant_unique_right_inds)) - - left_charge_labels = np.nonzero( - np.expand_dims(left_inverse, 1) == np.expand_dims( - relevant_unique_left_inds, 0)) - relevant_left_inverse = np.arange(len(left_charge_labels[0])) - - right_charge_labels = np.expand_dims(right_inverse, 1) == np.expand_dims( - relevant_unique_right_inds, 0) - right_block_information = {} - for n in relevant_unique_left_inds: - ri = np.nonzero((unique_left[n] + unique_right).isin(target_charges))[0] - tmp_inds = np.nonzero(right_charge_labels[:, right_lookup[ri]]) - right_block_information[n] = [ri, np.arange(len(tmp_inds[0])), tmp_inds[1]] - - relevant_right_inverse = np.arange(len(right_charge_labels[0])) - - #generate a degeneracy vector which for each value r in relevant_right_charges - #holds the corresponding number of non-zero elements `relevant_right_charges` - #that can add up to `target_charges`. 
- degeneracy_vector = np.empty(len(left_charge_labels[0]), dtype=np.uint32) - for n in range(len(relevant_unique_left_inds)): - degeneracy_vector[relevant_left_inverse[ - left_charge_labels[1] == n]] = np.sum(right_dims[tmp_inds_right[ - tmp_inds_left == relevant_unique_left_inds[n]]]) - - start_positions = (np.cumsum(degeneracy_vector) - degeneracy_vector).astype( - np.uint32) - out = {} - for n in range(len(target_charges)): - block = [] - if len(unique_inds) > 1: - lis, ris = np.divmod(unique_inds[0][unique_inds[1] == n], - len(unique_right)) - else: - lis, ris = np.divmod(unique_inds[0], len(unique_right)) + flows = np.asarray(flows) + if np.array_equal(order, None) or (np.array_equal( + np.array(order), np.arange(len(charges)))): + # no transpose order + return _find_diagonal_sparse_blocks(charges, flows, tr_partition) - for m in range(len(lis)): - ri_tmp, arange, tmp_inds = right_block_information[lis[m]] - block.append( - np.add.outer( - start_positions[relevant_left_inverse[left_charge_labels[1] == - left_lookup[lis[m]]]], - arange[tmp_inds == np.nonzero( - ri_tmp == ris[m])[0]]).ravel().astype(np.uint32)) - out[target_charges.get_item(n)] = np.concatenate(block) - return out + else: + # non-trivial transposition is required + num_inds = len(charges) + tensor_dims = np.array([charges[n].dim for n in range(num_inds)], dtype=int) + strides = np.append(np.flip(np.cumprod(np.flip(tensor_dims[1:]))), 1) + + # define properties of new tensor resulting from transposition + new_strides = strides[order] + new_row_charges = [charges[n] for n in order[:tr_partition]] + new_col_charges = [charges[n] for n in order[tr_partition:]] + new_row_flows = flows[order[:tr_partition]] + new_col_flows = flows[order[tr_partition:]] + + # compute qnums of row/cols in transposed tensor + unique_row_qnums, new_row_degen = compute_fused_charge_degeneracies( + new_row_charges, new_row_flows) + + # unique_row_qnums, new_row_degen = compute_qnum_degen( + # new_row_charges, new_row_flows) 
+ unique_col_qnums, new_col_degen = compute_fused_charge_degeneracies( + new_col_charges, np.logical_not(new_col_flows)) + # unique_col_qnums, new_col_degen = compute_qnum_degen( + # new_col_charges, np.logical_not(new_col_flows)) + block_qnums, new_row_map, new_col_map = intersect( + unique_row_qnums.unique_charges, + unique_col_qnums.unique_charges, + axis=1, + return_indices=True) + block_dims = np.array( + [new_row_degen[new_row_map], new_col_degen[new_col_map]], + dtype=np.uint32) + num_blocks = len(new_row_map) + row_ind, row_locs = reduce_charges( + new_row_charges, + new_row_flows, + block_qnums, + return_locations=True, + strides=new_strides[:tr_partition]) + + col_ind, col_locs = reduce_charges( + new_col_charges, + np.logical_not(new_col_flows), + block_qnums, + return_locations=True, + strides=new_strides[tr_partition:]) + # compute qnums of row/cols in original tensor + orig_partition = _find_best_partition(tensor_dims) + orig_width = np.prod(tensor_dims[orig_partition:]) + + orig_unique_row_qnums = compute_unique_fused_charges( + charges[:orig_partition], flows[:orig_partition]) + orig_unique_col_qnums, orig_col_degen = compute_fused_charge_degeneracies( + charges[orig_partition:], np.logical_not(flows[orig_partition:])) + orig_block_qnums, row_map, col_map = intersect( + orig_unique_row_qnums.unique_charges, + orig_unique_col_qnums.unique_charges, + axis=1, + return_indices=True) + orig_num_blocks = orig_block_qnums.shape[1] + + orig_row_ind = fuse_charges(charges[:orig_partition], + flows[:orig_partition]) + orig_col_ind = fuse_charges(charges[orig_partition:], + np.logical_not(flows[orig_partition:])) + + # compute row degens (i.e. 
number of non-zero elements per row) + inv_row_map = -np.ones( + orig_unique_row_qnums.unique_charges.shape[1], dtype=np.int16) + for n in range(len(row_map)): + inv_row_map[row_map[n]] = n + + all_degens = np.append(orig_col_degen[col_map], + 0)[inv_row_map[orig_row_ind.charge_labels]] + all_cumul_degens = np.cumsum(np.insert(all_degens[:-1], 0, + 0)).astype(np.uint32) + # generate vector which translates from dense row position to sparse row position + dense_to_sparse = np.zeros(orig_width, dtype=np.uint32) + for n in range(orig_num_blocks): + dense_to_sparse[orig_col_ind.charge_labels == col_map[n]] = np.arange( + orig_col_degen[col_map[n]], dtype=np.uint32) + + block_maps = [0] * num_blocks + for n in range(num_blocks): + orig_row_posL, orig_col_posL = np.divmod( + row_locs[row_ind.charge_labels == n], orig_width) + orig_row_posR, orig_col_posR = np.divmod( + col_locs[col_ind.charge_labels == n], orig_width) + block_maps[n] = ( + all_cumul_degens[np.add.outer(orig_row_posL, orig_row_posR)] + + dense_to_sparse[np.add.outer(orig_col_posL, orig_col_posR)]).ravel() + obj = charges[0].__new__(type(charges[0])) + obj.__init__(block_qnums, np.arange(block_qnums.shape[1], dtype=np.int16), + charges[0].charge_types) + + return block_maps, obj, block_dims class BlockSparseTensor: @@ -625,20 +495,6 @@ def __init__(self, data: np.ndarray, indices: List[Index]) -> None: and `flows` indices: List of `Index` objecst, one for each leg. """ - for n, i in enumerate(indices): - if i is None: - i.name = 'index_{}'.format(n) - - index_names = [ - i.name if i.name else 'index_{}'.format(n) - for n, i in enumerate(indices) - ] - unique, cnts = np.unique(index_names, return_counts=True) - if np.any(cnts > 1): - raise ValueError("Index names {} appeared multiple times. 
" - "Please rename indices uniquely.".format( - unique[cnts > 1])) - self.indices = indices _check_flows(self.flows) num_non_zero_elements = compute_num_nonzero(self.charges, self.flows) @@ -658,7 +514,8 @@ def todense(self) -> np.ndarray: """ out = np.asarray(np.zeros(self.dense_shape, dtype=self.dtype).flat) charges = self.charges - out[np.nonzero(fuse_charges(charges, self.flows) == charges[0].zero_charge) + out[np.nonzero( + fuse_charges(charges, self.flows) == charges[0].identity_charges) [0]] = self.data return np.reshape(out, self.dense_shape) @@ -744,10 +601,6 @@ def init_random(): return cls(data=init_random(), indices=indices) - @property - def index_names(self): - return [i.name for i in self.indices] - @property def rank(self): return len(self.indices) @@ -806,21 +659,19 @@ def transpose( tr_partition = _find_best_partition( [len(flat_charges[n]) for n in flat_order]) - tr_charges, tr_sparse_blocks = _find_transposed_diagonal_sparse_blocks( - flat_charges, flat_flows, flat_order, tr_partition) + tr_sparse_blocks, tr_charges, tr_shapes = _find_transposed_diagonal_sparse_blocks( + flat_charges, flat_flows, tr_partition, flat_order) - charges, sparse_blocks = _find_diagonal_sparse_blocks( + sparse_blocks, charges, shapes = _find_diagonal_sparse_blocks( [flat_charges[n] for n in flat_order], [flat_flows[n] for n in flat_order], tr_partition) - data = np.empty(len(self.data), dtype=self.dtype) for n in range(len(sparse_blocks)): - c = charges.get_item(n) - sparse_block = sparse_blocks[n][0] - ind = np.nonzero(tr_charges == c)[0][0] - permutation = tr_sparse_blocks[ind][0] + sparse_block = sparse_blocks[n] + ind = np.nonzero(tr_charges == charges[n])[0][0] + permutation = tr_sparse_blocks[ind] data[sparse_block] = self.data[permutation] - self.indices = [self.indices[o] for o in order] + self.indices = [self.indices[o] for o in order] self.data = data return self @@ -984,6 +835,38 @@ def transpose(tensor: BlockSparseTensor, return result +def 
_compute_transposed_sparse_blocks( + indices: BlockSparseTensor, + order: Union[List[int], np.ndarray], + transposed_partition: Optional[int] = None) -> Tuple[BaseCharge, Dict, int]: + """ + Args: + indices: A symmetric tensor. + order: The new order of indices. + permutation: An np.ndarray of int for reshuffling the data, + typically the output of a prior call to `transpose`. Passing `permutation` + can greatly speed up the transposition. + return_permutation: If `True`, return the the permutation data. + Returns: + + """ + if len(order) != len(indices): + raise ValueError( + "`len(order)={}` is different form `len(indices)={}`".format( + len(order), len(indices))) + flat_indices, flat_charges, flat_flows, flat_strides, flat_order, transposed_partition = flatten_meta_data( + indices, order, transposed_partition) + if transposed_partition is None: + transposed_partition = _find_best_partition( + [len(flat_charges[n]) for n in flat_order]) + + return _find_transposed_diagonal_sparse_blocks( + flat_charges, + flat_flows, + tr_partition=transposed_partition, + order=flat_order) + + def tensordot( tensor1: BlockSparseTensor, tensor2: BlockSparseTensor, @@ -1028,8 +911,8 @@ def tensordot( raise ValueError("axes1 and axes2 have incompatible elementary" " shapes {} and {}".format(elementary_1, elementary_2)) if not np.all( - np.array([i.flow for i in elementary_1]) == - (-1) * np.array([i.flow for i in elementary_2])): + np.array([i.flow for i in elementary_1]) == np.array( + [not i.flow for i in elementary_2])): raise ValueError("axes1 and axes2 have incompatible elementary" " flows {} and {}".format( np.array([i.flow for i in elementary_1]), @@ -1052,7 +935,6 @@ def tensordot( new_order1 = free_axes1 + list(axes1) new_order2 = list(axes2) + free_axes2 - #t1 = time.time() #get the flattened indices for the output tensor left_indices = [] @@ -1063,29 +945,20 @@ def tensordot( right_indices.extend(tensor2.indices[n].get_elementary_indices()) indices = left_indices + 
right_indices - for n, i in enumerate(indices): - i.name = 'index_{}'.format(n) if i.name is None else i.name - index_names = [i.name for i in indices] - unique = np.unique(index_names) - #rename indices if they are not unique - if len(unique) < len(index_names): - for n, i in enumerate(indices): - i.name = 'index_{}'.format(n) - - t1 = time.time() _, flat_charges1, flat_flows1, flat_strides1, flat_order1, tr_partition1 = flatten_meta_data( tensor1.indices, new_order1, len(free_axes1)) - charges1, tr_sparse_blocks_1 = _find_transposed_diagonal_sparse_blocks( - flat_charges1, flat_flows1, flat_order1, tr_partition1) - + tr_sparse_blocks_1, charges1, shapes_1 = _find_transposed_diagonal_sparse_blocks( + flat_charges1, flat_flows1, tr_partition1, flat_order1) _, flat_charges2, flat_flows2, flat_strides2, flat_order2, tr_partition2 = flatten_meta_data( tensor2.indices, new_order2, len(axes2)) - charges2, tr_sparse_blocks_2 = _find_transposed_diagonal_sparse_blocks( - flat_charges2, flat_flows2, flat_order2, tr_partition2) - - dt1 = time.time() - t1 - print('time spent in _compute_transposition_data: {}'.format(dt1)) - common_charges = charges1.intersect(charges2) + tr_sparse_blocks_2, charges2, shapes_2 = _find_transposed_diagonal_sparse_blocks( + flat_charges2, flat_flows2, tr_partition2, flat_order2) + #common_charges = charges1.intersect(charges2) + common_charges, label_to_common_1, label_to_common_2 = intersect( + charges1.unique_charges, + charges2.unique_charges, + axis=1, + return_indices=True) #initialize the data-vector of the output with zeros; if final_order is not None: @@ -1093,60 +966,58 @@ def tensordot( #as a transposition of the final tensor final_indices = [indices[n] for n in final_order] _, reverse_order = np.unique(final_order, return_index=True) - t1 = time.time() - charges_final, sparse_blocks_final = _compute_transposed_sparse_blocks( + + flat_final_indices, flat_final_charges, flat_final_flows, flat_final_strides, flat_final_order, 
tr_partition = flatten_meta_data( final_indices, reverse_order, len(free_axes1)) - dt2 = time.time() - t1 - print('time spent in _compute_transposition_data: {}'.format(dt2)) - num_nonzero_elements = np.sum([len(t[0]) for t in sparse_blocks_final]) + sparse_blocks_final, charges_final, shapes_final = _find_transposed_diagonal_sparse_blocks( + flat_final_charges, flat_final_flows, tr_partition, flat_final_order) + + num_nonzero_elements = np.sum([len(v) for v in sparse_blocks_final]) data = np.zeros( num_nonzero_elements, dtype=np.result_type(tensor1.dtype, tensor2.dtype)) + label_to_common_final = intersect( + charges_final.unique_charges, + common_charges, + axis=1, + return_indices=True)[1] + + for n in range(common_charges.shape[1]): + n1 = label_to_common_1[n] + n2 = label_to_common_2[n] + nf = label_to_common_final[n] + + data[sparse_blocks_final[nf].ravel()] = ( + tensor1.data[tr_sparse_blocks_1[n1].reshape( + shapes_1[:, n1])] @ tensor2.data[tr_sparse_blocks_2[n2].reshape( + shapes_2[:, n2])]).ravel() - t1 = time.time() - for n in range(len(common_charges)): - c = common_charges.get_item(n) - permutation1 = tr_sparse_blocks_1[np.nonzero(charges1 == c)[0][0]] - permutation2 = tr_sparse_blocks_2[np.nonzero(charges1 == c)[0][0]] - permutationfinal = sparse_blocks_final[np.nonzero( - charges_final == c)[0][0]] - res = np.matmul( - np.reshape(tensor1.data[permutation1[0]], permutation1[1]), - np.reshape(tensor2.data[permutation2[0]], permutation2[1])) - data[permutationfinal[0]] = res.flat - - dt3 = time.time() - t1 - print('time spent doing matmul: {}'.format(dt3)) - - print('total: {}'.format(dt1 + dt2 + dt3)) return BlockSparseTensor(data=data, indices=final_indices) else: #Note: `cs` may contain charges that are not present in `common_charges` - t1 = time.time() charges = [i.charges for i in indices] flows = [i.flow for i in indices] - cs, sparse_blocks = _find_diagonal_sparse_blocks(charges, flows, - len(left_indices)) - print('time spent finding sparse 
blocks: {}'.format(time.time() - t1)) - #print('finding sparse positions', time.time() - t1) - num_nonzero_elements = np.sum([len(v[0]) for v in sparse_blocks]) + sparse_blocks, cs, shapes = _find_diagonal_sparse_blocks( + charges, flows, len(left_indices)) + num_nonzero_elements = np.sum([len(v) for v in sparse_blocks]) #Note that empty is not a viable choice here. data = np.zeros( num_nonzero_elements, dtype=np.result_type(tensor1.dtype, tensor2.dtype)) - t1 = time.time() - for n in range(len(common_charges)): - c = common_charges.get_item(n) - permutation1 = tr_sparse_blocks_1[np.nonzero(charges1 == c)[0][0]] - permutation2 = tr_sparse_blocks_2[np.nonzero(charges2 == c)[0][0]] - sparse_block = sparse_blocks[np.nonzero(cs == c)[0][0]] - b1 = np.reshape(tensor1.data[permutation1[0]], permutation1[1]) - b2 = np.reshape(tensor2.data[permutation2[0]], permutation2[1]) - res = np.matmul(b1, b2) - data[sparse_block[0]] = res.flat - #print('tensordot', time.time() - t1) - print('time spent doing matmul: {}'.format(time.time() - t1)) + + label_to_common_final = intersect( + cs.unique_charges, common_charges, axis=1, return_indices=True)[1] + + for n in range(common_charges.shape[1]): + n1 = label_to_common_1[n] + n2 = label_to_common_2[n] + nf = label_to_common_final[n] + + data[sparse_blocks[nf].ravel()] = ( + tensor1.data[tr_sparse_blocks_1[n1].reshape( + shapes_1[:, n1])] @ tensor2.data[tr_sparse_blocks_2[n2].reshape( + shapes_2[:, n2])]).ravel() return BlockSparseTensor(data=data, indices=indices) @@ -1173,41 +1044,12 @@ def flatten_meta_data(indices, order, partition): return flat_elementary_indices, flat_charges, flat_flows, flat_strides, flat_order, new_partition -def _compute_transposed_sparse_blocks( - indices: BlockSparseTensor, - order: Union[List[int], np.ndarray], - transposed_partition: Optional[int] = None -) -> Tuple[Union[BaseCharge, ChargeCollection], Dict, int]: - """ - Args: - indices: A symmetric tensor. - order: The new order of indices. 
- permutation: An np.ndarray of int for reshuffling the data, - typically the output of a prior call to `transpose`. Passing `permutation` - can greatly speed up the transposition. - return_permutation: If `True`, return the the permutation data. - Returns: - - """ - if len(order) != len(indices): - raise ValueError( - "`len(order)={}` is different form `len(indices)={}`".format( - len(order), len(indices))) - flat_indices, flat_charges, flat_flows, flat_strides, flat_order, transposed_partition = flatten_meta_data( - indices, order, transposed_partition) - if transposed_partition is None: - transposed_partition = _find_best_partition( - [len(flat_charges[n]) for n in flat_order]) - - cs, blocks = _find_transposed_diagonal_sparse_blocks( - flat_charges, flat_flows, flat_order, transposed_partition) - return cs, blocks +##################################################### DEPRECATED ROUTINES ############################ -def _find_transposed_diagonal_sparse_blocks( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]], order: np.ndarray, tr_partition: int -) -> Tuple[Union[BaseCharge, ChargeCollection], List[np.ndarray]]: +def _find_transposed_diagonal_sparse_blocks_old( + charges: List[BaseCharge], flows: List[Union[bool, int]], order: np.ndarray, + tr_partition: int) -> Tuple[BaseCharge, List[np.ndarray]]: """ Given the meta data and underlying data of a symmetric matrix, compute the dense positions of all diagonal blocks and return them in a dict. 
@@ -1245,7 +1087,6 @@ def _find_transposed_diagonal_sparse_blocks( denoting the dense positions of the non-zero elements and `e[1]` is a tuple corresponding to the blocks' matrix shape """ - t11 = time.time() _check_flows(flows) if len(flows) != len(charges): raise ValueError("`len(flows)` is different from `len(charges) ") @@ -1269,22 +1110,23 @@ def _find_transposed_diagonal_sparse_blocks( fused = unique_tr_row_charges + unique_tr_column_charges tr_li, tr_ri = np.divmod( - np.nonzero(fused == unique_tr_column_charges.zero_charge)[0], + np.nonzero(fused == unique_tr_column_charges.identity_charges)[0], len(unique_tr_column_charges)) - t1 = time.time() - row_locations = find_dense_positions( + + row_ind, row_locations = reduce_charges( charges=tr_row_charges, flows=tr_row_flows, - target_charges=unique_tr_row_charges[tr_li], + target_charges=unique_tr_row_charges.charges[:, tr_li], + return_locations=True, strides=tr_row_strides) - column_locations = find_dense_positions( + col_ind, column_locations = reduce_charges( charges=tr_column_charges, flows=tr_column_flows, - target_charges=unique_tr_column_charges[tr_ri], - strides=tr_column_strides, - store_dual=True) - print('find_dense_positions: ', time.time() - t1) + target_charges=unique_tr_column_charges.charges[:, tr_ri], + return_locations=True, + strides=tr_column_strides) + partition = _find_best_partition([len(c) for c in charges]) fused_row_charges = fuse_charges(charges[:partition], flows[:partition]) fused_column_charges = fuse_charges(charges[partition:], flows[partition:]) @@ -1299,13 +1141,13 @@ def _find_transposed_diagonal_sparse_blocks( flows[:partition]) fused = unique_row_charges + unique_column_charges li, ri = np.divmod( - np.nonzero(fused == unique_column_charges.zero_charge)[0], + np.nonzero(fused == unique_column_charges.identity_charges)[0], len(unique_column_charges)) common_charges, label_to_row, label_to_column = unique_row_charges.intersect( - unique_column_charges * (-1), 
return_indices=True) - - tmp = -np.ones(len(unique_column_charges), dtype=np.int16) + unique_column_charges * True, return_indices=True) + num_blocks = len(label_to_row) + tmp = -np.ones(len(unique_row_charges), dtype=np.int16) for n in range(len(label_to_row)): tmp[label_to_row[n]] = n @@ -1320,35 +1162,27 @@ def _find_transposed_diagonal_sparse_blocks( common_charges) blocks = [] - t1 = time.time() - for c in unique_tr_row_charges[tr_li]: - rlocs = row_locations[c] - clocs = column_locations[c] + for n in range(num_blocks): + rlocs = row_locations[row_ind.charge_labels == n] + clocs = column_locations[col_ind.charge_labels == n] orig_row_posL, orig_col_posL = np.divmod(rlocs, np.uint32(column_dimension)) orig_row_posR, orig_col_posR = np.divmod(clocs, np.uint32(column_dimension)) inds = (start_positions[np.add.outer(orig_row_posL, orig_row_posR)] + column_lookup[np.add.outer(orig_col_posL, orig_col_posR)]).ravel() blocks.append([inds, (len(rlocs), len(clocs))]) - print('doing divmods and other: ', time.time() - t1) - t1 = time.time() charges_out = unique_tr_row_charges[tr_li] - print('computing charges: ', time.time() - t1) - print('total in _find_transposed_sparse_blocks: ', time.time() - t11) return charges_out, blocks -##################################################### DEPRECATED ROUTINES ############################ - - def _find_diagonal_dense_blocks( - row_charges: List[Union[BaseCharge, ChargeCollection]], - column_charges: List[Union[BaseCharge, ChargeCollection]], + row_charges: List[BaseCharge], + column_charges: List[BaseCharge], row_flows: List[Union[bool, int]], column_flows: List[Union[bool, int]], row_strides: Optional[np.ndarray] = None, column_strides: Optional[np.ndarray] = None, -) -> Tuple[Union[BaseCharge, ChargeCollection], List[np.ndarray]]: +) -> Tuple[BaseCharge, List[np.ndarray]]: """ Deprecated @@ -1407,9 +1241,8 @@ def _find_diagonal_dense_blocks( #get the charges common to rows and columns (only those matter) fused = 
unique_row_charges + unique_column_charges li, ri = np.divmod( - np.nonzero(fused == unique_column_charges.zero_charge)[0], + np.nonzero(fused == unique_column_charges.identity_charges)[0], len(unique_column_charges)) - #print('_find_diagonal_sparse_blocks: unique charges ', time.time() - t1) if ((row_strides is None) and (column_strides is not None)) or ((row_strides is not None) and (column_strides is None)): @@ -1578,8 +1411,8 @@ def _find_diagonal_dense_blocks( # return out -def _compute_sparse_lookups(row_charges: Union[BaseCharge, ChargeCollection], - row_flows, column_charges, column_flows): +def _compute_sparse_lookups(row_charges: BaseCharge, row_flows, column_charges, + column_flows): """ Compute lookup tables for looking up how dense index positions map to sparse index positions for the diagonal blocks a symmetric matrix. @@ -1629,129 +1462,133 @@ def _get_stride_arrays(dims): return [np.arange(dims[n]) * strides[n] for n in range(len(dims))] -# def combine_indices_reduced( -# charges: List[BaseCharge], -# flows: np.ndarray, -# target_charges: np.ndarray, -# return_locactions: Optional[bool] = False, -# strides: Optional[np.ndarray] = np.zeros(0)) -> (SymIndex, np.ndarray): -# """ -# Add quantum numbers arising from combining two or more indices into a -# single index, keeping only the quantum numbers that appear in 'kept_qnums'. -# Equilvalent to using "combine_indices" followed by "reduce", but is -# generally much more efficient. -# Args: -# indices (List[SymIndex]): list of SymIndex. -# arrows (np.ndarray): vector of bools describing index orientations. -# kept_qnums (np.ndarray): n-by-m array describing qauntum numbers of the -# qnums which should be kept with 'n' the number of symmetries. -# return_locs (bool, optional): if True then return the location of the kept -# values of the fused indices -# strides (np.ndarray, optional): index strides with which to compute the -# return_locs of the kept elements. 
Defaults to trivial strides (based on -# row major order) if ommitted. -# Returns: -# SymIndex: the fused index after reduction. -# np.ndarray: locations of the fused SymIndex qnums that were kept. -# """ - -# num_inds = len(charges) -# tensor_dims = [len(c) for c in charges] +def reduce_charges( + charges: List[BaseCharge], + flows: Iterable[bool], + target_charges: np.ndarray, + return_locations: Optional[bool] = False, + strides: Optional[np.ndarray] = None) -> Tuple[BaseCharge, np.ndarray]: + """ + Add quantum numbers arising from combining two or more charges into a + single index, keeping only the quantum numbers that appear in 'target_charges'. + Equilvalent to using "combine_charges" followed by "reduce", but is + generally much more efficient. + Args: + charges (List[SymIndex]): list of SymIndex. + flows (np.ndarray): vector of bools describing index orientations. + target_charges (np.ndarray): n-by-m array describing qauntum numbers of the + qnums which should be kept with 'n' the number of symmetries. + return_locations (bool, optional): if True then return the location of the kept + values of the fused charges + strides (np.ndarray, optional): index strides with which to compute the + return_locations of the kept elements. Defaults to trivial strides (based on + row major order) if ommitted. + Returns: + SymIndex: the fused index after reduction. + np.ndarray: locations of the fused SymIndex qnums that were kept. 
+ """ -# if len(charges) == 1: -# # reduce single index -# if strides.size == 0: -# strides = np.array([1], dtype=np.uint32) -# return indices[0].dual(arrows[0]).reduce( -# kept_qnums, return_locs=return_locs, strides=strides[0]) + num_inds = len(charges) + tensor_dims = [len(c) for c in charges] -# else: -# # find size-balanced partition of indices -# partition_loc = find_balanced_partition(tensor_dims) - -# # compute quantum numbers for each partition -# left_ind = combine_indices(indices[:partition_loc], arrows[:partition_loc]) -# right_ind = combine_indices(indices[partition_loc:], arrows[partition_loc:]) - -# # compute combined qnums -# comb_qnums = fuse_qnums(left_ind.unique_qnums, right_ind.unique_qnums, -# indices[0].syms) -# [unique_comb_qnums, comb_labels] = np.unique( -# comb_qnums, return_inverse=True, axis=1) -# num_unique = unique_comb_qnums.shape[1] - -# # intersect combined qnums and kept_qnums -# reduced_qnums, label_to_unique, label_to_kept = intersect2d( -# unique_comb_qnums, kept_qnums, axis=1, return_indices=True) -# map_to_kept = -np.ones(num_unique, dtype=np.int16) -# for n in range(len(label_to_unique)): -# map_to_kept[label_to_unique[n]] = n -# new_comb_labels = map_to_kept[comb_labels].reshape( -# [left_ind.num_unique, right_ind.num_unique]) -# if return_locs: -# if (strides.size != 0): -# # computed locations based on non-trivial strides -# row_pos = combine_index_strides(tensor_dims[:partition_loc], -# strides[:partition_loc]) -# col_pos = combine_index_strides(tensor_dims[partition_loc:], -# strides[partition_loc:]) - -# # reduce combined qnums to include only those in kept_qnums -# reduced_rows = [0] * left_ind.num_unique -# row_locs = [0] * left_ind.num_unique -# for n in range(left_ind.num_unique): -# temp_label = new_comb_labels[n, right_ind.ind_labels] -# temp_keep = temp_label >= 0 -# reduced_rows[n] = temp_label[temp_keep] -# row_locs[n] = col_pos[temp_keep] - -# reduced_labels = np.concatenate( -# [reduced_rows[n] for n in 
left_ind.ind_labels]) -# reduced_locs = np.concatenate([ -# row_pos[n] + row_locs[left_ind.ind_labels[n]] -# for n in range(left_ind.dim) -# ]) + if len(charges) == 1: + # reduce single index + if strides is None: + strides = np.array([1], dtype=np.uint32) + return charges[0].dual(flows[0]).reduce( + target_charges, return_locations=return_locations, strides=strides[0]) -# return SymIndex(reduced_qnums, reduced_labels, -# indices[0].syms), reduced_locs - -# else: # trivial strides -# # reduce combined qnums to include only those in kept_qnums -# reduced_rows = [0] * left_ind.num_unique -# row_locs = [0] * left_ind.num_unique -# for n in range(left_ind.num_unique): -# temp_label = new_comb_labels[n, right_ind.ind_labels] -# temp_keep = temp_label >= 0 -# reduced_rows[n] = temp_label[temp_keep] -# row_locs[n] = np.where(temp_keep)[0] - -# reduced_labels = np.concatenate( -# [reduced_rows[n] for n in left_ind.ind_labels]) -# reduced_locs = np.concatenate([ -# n * right_ind.dim + row_locs[left_ind.ind_labels[n]] -# for n in range(left_ind.dim) -# ]) + else: + # find size-balanced partition of charges + partition = _find_best_partition(tensor_dims) + + # compute quantum numbers for each partition + left_ind = fuse_charges(charges[:partition], flows[:partition]) + right_ind = fuse_charges(charges[partition:], flows[partition:]) + + # compute combined qnums + comb_qnums = fuse_ndarray_charges(left_ind.unique_charges, + right_ind.unique_charges, + charges[0].charge_types) + [unique_comb_qnums, comb_labels] = np.unique( + comb_qnums, return_inverse=True, axis=1) + num_unique = unique_comb_qnums.shape[1] + + # intersect combined qnums and target_charges + reduced_qnums, label_to_unique, label_to_kept = intersect( + unique_comb_qnums, target_charges, axis=1, return_indices=True) + map_to_kept = -np.ones(num_unique, dtype=np.int16) + for n in range(len(label_to_unique)): + map_to_kept[label_to_unique[n]] = n + new_comb_labels = map_to_kept[comb_labels].reshape( + 
[left_ind.num_unique, right_ind.num_unique]) + if return_locations: + if strides is not None: + # computed locations based on non-trivial strides + row_pos = combine_index_strides(tensor_dims[:partition], + strides[:partition]) + col_pos = combine_index_strides(tensor_dims[partition:], + strides[partition:]) + + # reduce combined qnums to include only those in target_charges + reduced_rows = [0] * left_ind.num_unique + row_locs = [0] * left_ind.num_unique + for n in range(left_ind.num_unique): + temp_label = new_comb_labels[n, right_ind.charge_labels] + temp_keep = temp_label >= 0 + reduced_rows[n] = temp_label[temp_keep] + row_locs[n] = col_pos[temp_keep] + + reduced_labels = np.concatenate( + [reduced_rows[n] for n in left_ind.charge_labels]) + reduced_locs = np.concatenate([ + row_pos[n] + row_locs[left_ind.charge_labels[n]] + for n in range(left_ind.dim) + ]) + obj = charges[0].__new__(type(charges[0])) + obj.__init__(reduced_qnums, reduced_labels, charges[0].charge_types) + return obj, reduced_locs + + else: # trivial strides + # reduce combined qnums to include only those in target_charges + reduced_rows = [0] * left_ind.num_unique + row_locs = [0] * left_ind.num_unique + for n in range(left_ind.num_unique): + temp_label = new_comb_labels[n, right_ind.charge_labels] + temp_keep = temp_label >= 0 + reduced_rows[n] = temp_label[temp_keep] + row_locs[n] = np.where(temp_keep)[0] + + reduced_labels = np.concatenate( + [reduced_rows[n] for n in left_ind.charge_labels]) + reduced_locs = np.concatenate([ + n * right_ind.dim + row_locs[left_ind.charge_labels[n]] + for n in range(left_ind.dim) + ]) + obj = charges[0].__new__(type(charges[0])) + obj.__init__(reduced_qnums, reduced_labels, charges[0].charge_types) -# return SymIndex(reduced_qnums, reduced_labels, -# indices[0].syms), reduced_locs + return obj, reduced_locs -# else: -# # reduce combined qnums to include only those in kept_qnums -# reduced_rows = [0] * left_ind.num_unique -# for n in 
range(left_ind.num_unique): -# temp_label = new_comb_labels[n, right_ind.ind_labels] -# reduced_rows[n] = temp_label[temp_label >= 0] + else: + # reduce combined qnums to include only those in target_charges + reduced_rows = [0] * left_ind.num_unique + for n in range(left_ind.num_unique): + temp_label = new_comb_labels[n, right_ind.charge_labels] + reduced_rows[n] = temp_label[temp_label >= 0] -# reduced_labels = np.concatenate( -# [reduced_rows[n] for n in left_ind.ind_labels]) + reduced_labels = np.concatenate( + [reduced_rows[n] for n in left_ind.charge_labels]) + obj = charges[0].__new__(type(charges[0])) + obj.__init__(reduced_qnums, reduced_labels, charges[0].charge_types) -# return SymIndex(reduced_qnums, reduced_labels, indices[0].syms) + return obj def reduce_to_target_charges( - charges: List[Union[BaseCharge, ChargeCollection]], + charges: List[BaseCharge], flows: List[Union[int, bool]], - target_charges: Union[BaseCharge, ChargeCollection], + target_charges: BaseCharge, strides: Optional[np.ndarray] = None, return_positions: Optional[bool] = False) -> np.ndarray: """ @@ -1885,7 +1722,10 @@ def reduce_to_target_charges( n * len(right_charges) + row_locations[left_inverse[n]] for n in range(len(left_charges)) ]) - return relevant_charges[charge_labels], inds + obj = charges[0].__new__(type(charges[0])) + obj.__init__(relevant_charges.unique_charges, charge_labels, + charges[0].charge_types) + return obj, inds else: final_relevant_labels = [None] * len(unique_left) @@ -1895,15 +1735,17 @@ def reduce_to_target_charges( final_relevant_labels[n] = labels[lookup] charge_labels = np.concatenate( [final_relevant_labels[n] for n in left_inverse]) - return relevant_charges[charge_labels] + obj = charges[0].__new__(type(charges[0])) + obj.__init__(relevant_charges.unique_charges, charge_labels, + charges[0].charge_types) + return obj -def find_sparse_positions_new( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[int, bool]], - 
target_charges: Union[BaseCharge, ChargeCollection], - strides: Optional[np.ndarray] = None, - store_dual: Optional[bool] = False) -> np.ndarray: +def find_sparse_positions_new(charges: List[BaseCharge], + flows: List[Union[int, bool]], + target_charges: BaseCharge, + strides: Optional[np.ndarray] = None, + store_dual: Optional[bool] = False) -> np.ndarray: """ Find the dense locations of elements (i.e. the index-values within the DENSE tensor) in the vector of `fused_charges` resulting from fusing all elements of `charges` @@ -2025,3 +1867,362 @@ def find_sparse_positions_new( ]) return relevant_charges[charge_labels], inds + + +def find_sparse_positions(charges: List[BaseCharge], + flows: List[Union[int, bool]], + target_charges: BaseCharge) -> Dict: + """ + Find the sparse locations of elements (i.e. the index-values within + the SPARSE tensor) in the vector `fused_charges` (resulting from + fusing `left_charges` and `right_charges`) + that have a value of `target_charges`, assuming that all elements + different from `target_charges` are `0`. + For example, given + ``` + left_charges = [-2,0,1,0,0] + right_charges = [-1,0,2,1] + target_charges = [0,1] + fused_charges = fuse_charges([left_charges, right_charges],[1,1]) + print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] + ``` 0 1 2 3 4 5 6 7 8 + we want to find the all different blocks + that fuse to `target_charges=[0,1]`, i.e. where `fused_charges==0` or `1`, + together with their corresponding sparse index-values of the data in the sparse array, + assuming that all elements in `fused_charges` different from `target_charges` are 0. + + `find_sparse_blocks` returns a dict mapping integers `target_charge` + to an array of integers denoting the sparse locations of elements within + `fused_charges`. + For the above example, we get: + * `target_charge=0`: [0,1,3,5,7] + * `target_charge=1`: [2,4,6,8] + Args: + charges: An np.ndarray of integer charges. 
+ flows: The flow direction of the left charges. + target_charges: The target charges. + Returns: + dict: Mapping integers to np.ndarray of integers. + """ + _check_flows(flows) + if len(charges) == 1: + fused_charges = charges[0] * flows[0] + unique_charges = fused_charges.unique() + target_charges = target_charges.unique() + relevant_target_charges = unique_charges.intersect(target_charges) + relevant_fused_charges = fused_charges[fused_charges.isin( + relevant_target_charges)] + return { + c: np.nonzero(relevant_fused_charges == c)[0] + for c in relevant_target_charges + } + partition = _find_best_partition([len(c) for c in charges]) + left_charges = fuse_charges(charges[:partition], flows[:partition]) + right_charges = fuse_charges(charges[partition:], flows[partition:]) + + # unique_target_charges, inds = target_charges.unique(return_index=True) + # target_charges = target_charges[np.sort(inds)] + unique_left, left_inverse = left_charges.unique(return_inverse=True) + unique_right, right_inverse, right_dims = right_charges.unique( + return_inverse=True, return_counts=True) + + fused_unique = unique_left + unique_right + unique_inds = np.nonzero(fused_unique == target_charges) + relevant_positions = unique_inds[0].astype(np.uint32) + tmp_inds_left, tmp_inds_right = np.divmod(relevant_positions, + len(unique_right)) + + relevant_unique_left_inds = np.unique(tmp_inds_left) + left_lookup = np.empty(np.max(relevant_unique_left_inds) + 1, dtype=np.uint32) + left_lookup[relevant_unique_left_inds] = np.arange( + len(relevant_unique_left_inds)) + relevant_unique_right_inds = np.unique(tmp_inds_right) + right_lookup = np.empty( + np.max(relevant_unique_right_inds) + 1, dtype=np.uint32) + right_lookup[relevant_unique_right_inds] = np.arange( + len(relevant_unique_right_inds)) + + left_charge_labels = np.nonzero( + np.expand_dims(left_inverse, 1) == np.expand_dims( + relevant_unique_left_inds, 0)) + relevant_left_inverse = np.arange(len(left_charge_labels[0])) + + 
right_charge_labels = np.expand_dims(right_inverse, 1) == np.expand_dims( + relevant_unique_right_inds, 0) + right_block_information = {} + for n in relevant_unique_left_inds: + ri = np.nonzero((unique_left[n] + unique_right).isin(target_charges))[0] + tmp_inds = np.nonzero(right_charge_labels[:, right_lookup[ri]]) + right_block_information[n] = [ri, np.arange(len(tmp_inds[0])), tmp_inds[1]] + + relevant_right_inverse = np.arange(len(right_charge_labels[0])) + + #generate a degeneracy vector which for each value r in relevant_right_charges + #holds the corresponding number of non-zero elements `relevant_right_charges` + #that can add up to `target_charges`. + degeneracy_vector = np.empty(len(left_charge_labels[0]), dtype=np.uint32) + for n in range(len(relevant_unique_left_inds)): + degeneracy_vector[relevant_left_inverse[ + left_charge_labels[1] == n]] = np.sum(right_dims[tmp_inds_right[ + tmp_inds_left == relevant_unique_left_inds[n]]]) + + start_positions = (np.cumsum(degeneracy_vector) - degeneracy_vector).astype( + np.uint32) + out = {} + for n in range(len(target_charges)): + block = [] + if len(unique_inds) > 1: + lis, ris = np.divmod(unique_inds[0][unique_inds[1] == n], + len(unique_right)) + else: + lis, ris = np.divmod(unique_inds[0], len(unique_right)) + + for m in range(len(lis)): + ri_tmp, arange, tmp_inds = right_block_information[lis[m]] + block.append( + np.add.outer( + start_positions[relevant_left_inverse[left_charge_labels[1] == + left_lookup[lis[m]]]], + arange[tmp_inds == np.nonzero( + ri_tmp == ris[m])[0]]).ravel().astype(np.uint32)) + out[target_charges[n]] = np.concatenate(block) + return out + + +# def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], +# flows: List[Union[int, bool]], +# target_charges: Union[BaseCharge, ChargeCollection], +# strides: Optional[np.ndarray] = None, +# store_dual: Optional[bool] = False) -> Dict: +# """ +# Find the dense locations of elements (i.e. 
the index-values within the DENSE tensor) +# in the vector of `fused_charges` resulting from fusing all elements of `charges` +# that have a value of `target_charge`. +# For example, given +# ``` +# charges = [[-2,0,1,0,0],[-1,0,2,1]] +# target_charge = 0 +# fused_charges = fuse_charges(charges,[1,1]) +# print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] +# ``` +# we want to find the index-positions of charges +# that fuse to `target_charge=0`, i.e. where `fused_charges==0`, +# within the dense array. As one additional wrinkle, `charges` +# is a subset of the permuted charges of a tensor with rank R > len(charges), +# and `stride_arrays` are their corresponding range of strides, i.e. + +# ``` +# R=5 +# D = [2,3,4,5,6] +# tensor_flows = np.random.randint(-1,2,R) +# tensor_charges = [np.random.randing(-5,5,D[n]) for n in range(R)] +# order = np.arange(R) +# np.random.shuffle(order) +# tensor_strides = [360, 120, 30, 6, 1] + +# charges = [tensor_charges[order[n]] for n in range(3)] +# flows = [tensor_flows[order[n]] for n in range(len(3))] +# strides = [tensor_stride[order[n]] for n in range(3)] +# _ = _find_transposed_dense_positions(charges, flows, 0, strides) + +# ``` +# `_find_transposed_dense_blocks` returns an np.ndarray containing the +# index-positions of these elements calculated using `stride_arrays`. +# The result only makes sense in conjuction with the complementary +# data computed from the complementary +# elements in`tensor_charges`, +# `tensor_strides` and `tensor_flows`. +# This routine is mainly used in `_find_diagonal_dense_blocks`. + +# Args: +# charges: A list of BaseCharge or ChargeCollection. +# flows: The flow directions of the `charges`. +# target_charge: The target charge. +# strides: The strides for the `charges` subset. +# if `None`, natural stride ordering is assumed. 
+ +# Returns: +# dict +# """ + +# _check_flows(flows) +# out = {} +# if store_dual: +# store_charges = target_charges * (-1) +# else: +# store_charges = target_charges + +# if len(charges) == 1: +# fused_charges = charges[0] * flows[0] +# inds = np.nonzero(fused_charges == target_charges) +# if len(target_charges) > 1: +# for n in range(len(target_charges)): +# i = inds[0][inds[1] == n] +# if len(i) == 0: +# continue +# if strides is not None: +# permuted_inds = strides[0] * np.arange(len(charges[0])) +# out[store_charges.get_item(n)] = permuted_inds[i] +# else: +# out[store_charges.get_item(n)] = i +# return out +# else: +# if strides is not None: +# permuted_inds = strides[0] * np.arange(len(charges[0])) +# out[store_charges.get_item(n)] = permuted_inds[inds[0]] +# else: +# out[store_charges.get_item(n)] = inds[0] +# return out + +# partition = _find_best_partition([len(c) for c in charges]) +# left_charges = fuse_charges(charges[:partition], flows[:partition]) +# right_charges = fuse_charges(charges[partition:], flows[partition:]) +# if strides is not None: +# stride_arrays = [ +# np.arange(len(charges[n])) * strides[n] for n in range(len(charges)) +# ] +# permuted_left_inds = fuse_ndarrays(stride_arrays[0:partition]) +# permuted_right_inds = fuse_ndarrays(stride_arrays[partition:]) + +# # unique_target_charges, inds = target_charges.unique(return_index=True) +# # target_charges = target_charges[np.sort(inds)] +# unique_left, left_inverse = left_charges.unique(return_inverse=True) +# unique_right, right_inverse = right_charges.unique(return_inverse=True) + +# fused_unique = unique_left + unique_right +# unique_inds = np.nonzero(fused_unique == target_charges) + +# relevant_positions = unique_inds[0] +# tmp_inds_left, tmp_inds_right = np.divmod(relevant_positions, +# len(unique_right)) + +# relevant_unique_left_inds = np.unique(tmp_inds_left) +# left_lookup = np.empty(np.max(relevant_unique_left_inds) + 1, dtype=np.uint32) +# 
left_lookup[relevant_unique_left_inds] = np.arange( +# len(relevant_unique_left_inds)) +# relevant_unique_right_inds = np.unique(tmp_inds_right) +# right_lookup = np.empty( +# np.max(relevant_unique_right_inds) + 1, dtype=np.uint32) +# right_lookup[relevant_unique_right_inds] = np.arange( +# len(relevant_unique_right_inds)) + +# left_charge_labels = np.nonzero( +# np.expand_dims(left_inverse, 1) == np.expand_dims( +# relevant_unique_left_inds, 0)) +# right_charge_labels = np.nonzero( +# np.expand_dims(right_inverse, 1) == np.expand_dims( +# relevant_unique_right_inds, 0)) + +# len_right = len(right_charges) + +# for n in range(len(target_charges)): +# if len(unique_inds) > 1: +# lis, ris = np.divmod(unique_inds[0][unique_inds[1] == n], +# len(unique_right)) +# else: +# lis, ris = np.divmod(unique_inds[0], len(unique_right)) +# dense_positions = [] +# left_positions = [] +# lookup = [] +# for m in range(len(lis)): +# li = lis[m] +# ri = ris[m] +# dense_left_positions = (left_charge_labels[0][ +# left_charge_labels[1] == left_lookup[li]]).astype(np.uint32) +# dense_right_positions = (right_charge_labels[0][ +# right_charge_labels[1] == right_lookup[ri]]).astype(np.uint32) +# if strides is None: +# positions = np.expand_dims(dense_left_positions * len_right, +# 1) + np.expand_dims(dense_right_positions, 0) +# else: +# positions = np.expand_dims( +# permuted_left_inds[dense_left_positions], 1) + np.expand_dims( +# permuted_right_inds[dense_right_positions], 0) + +# dense_positions.append(positions) +# left_positions.append(dense_left_positions) +# lookup.append( +# np.stack([ +# np.arange(len(dense_left_positions), dtype=np.uint32), +# np.full(len(dense_left_positions), fill_value=m, dtype=np.uint32) +# ], +# axis=1)) + +# if len(lookup) > 0: +# ind_sort = np.argsort(np.concatenate(left_positions)) +# it = np.concatenate(lookup, axis=0) +# table = it[ind_sort, :] +# out[store_charges.get_item(n)] = np.concatenate([ +# dense_positions[table[n, 1]][table[n, 0], 
:].astype(np.uint32) +# for n in range(table.shape[0]) +# ]) +# else: +# out[store_charges.get_item(n)] = np.array([]) + +# return out + +# def _find_diagonal_sparse_blocks_old(charges: List[BaseCharge], +# flows: List[Union[bool, int]], +# partition: int) -> Tuple[BaseCharge, List]: +# """ +# Given the meta data and underlying data of a symmetric matrix, compute +# all diagonal blocks and return them in a dict. +# `row_charges` and `column_charges` are lists of np.ndarray. The tensor +# is viewed as a matrix with rows given by fusing `row_charges` and +# columns given by fusing `column_charges`. + +# Args: +# charges: A list of charges. +# flows: A list of flows. +# partition: The location of the partition of `charges` into rows and colums. +# Returns: +# return common_charges, blocks, start_positions, row_locations, column_degeneracies +# List[Union[BaseCharge, ChargeCollection]]: A list of unique charges, one per block. +# List[np.ndarray]: A list containing the blocks. +# """ +# _check_flows(flows) +# if len(flows) != len(charges): +# raise ValueError("`len(flows)` is different from `len(charges)`") +# row_charges = charges[:partition] +# row_flows = flows[:partition] +# column_charges = charges[partition:] +# column_flows = flows[partition:] + +# #get the unique column-charges +# #we only care about their degeneracies, not their order; that's much faster +# #to compute since we don't have to fuse all charges explicitly +# #`compute_fused_charge_degeneracies` multiplies flows into the column_charges +# unique_column_charges, column_dims = compute_fused_charge_degeneracies( +# column_charges, column_flows) +# unique_row_charges = compute_unique_fused_charges(row_charges, row_flows) +# #get the charges common to rows and columns (only those matter) +# common_charges, label_to_row, label_to_column = unique_row_charges.intersect( +# unique_column_charges * True, return_indices=True) + +# #convenience container for storing the degeneracies of each +# #column charge 
+# #column_degeneracies = dict(zip(unique_column_charges, column_dims)) +# #column_degeneracies = dict(zip(unique_column_charges * True, column_dims)) +# print(common_charges) +# row_locations = find_sparse_positions( +# charges=row_charges, flows=row_flows, target_charges=common_charges) + +# degeneracy_vector = np.empty( +# np.sum([len(v) for v in row_locations.values()]), dtype=np.uint32) +# #for each charge `c` in `common_charges` we generate a boolean mask +# #for indexing the positions where `relevant_column_charges` has a value of `c`. +# for c in common_charges: +# degeneracy_vector[row_locations[c]] = column_degeneracies[c] + +# start_positions = (np.cumsum(degeneracy_vector) - degeneracy_vector).astype( +# np.uint32) +# blocks = [] + +# for c in common_charges: +# #numpy broadcasting is substantially faster than kron! +# rlocs = row_locations[c] +# rlocs.sort() #sort in place (we need it again later) +# cdegs = column_degeneracies[c] +# inds = np.ravel(np.add.outer(start_positions[rlocs], np.arange(cdegs))) +# blocks.append([inds, (len(rlocs), cdegs)]) +# return common_charges, blocks diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 9be4be39b..19fe6a35c 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -28,50 +28,143 @@ class BaseCharge: def __init__(self, charges: np.ndarray, - charge_labels: Optional[np.ndarray] = None) -> None: - if charges.dtype is not np.int16: - raise TypeError("`charges` have to be of dtype `np.int16`") - if charge_labels.dtype is not np.int16: - raise TypeError("`charge_labels` have to be of dtype `np.int16`") - + charge_labels: Optional[np.ndarray] = None, + charge_types: Optional[List[Type["BaseCharge"]]] = None) -> None: + self.charge_types = charge_types + if charges.ndim == 1: + charges = np.expand_dims(charges, 0) if charge_labels is None: - self.unique_charges, charge_labels = np.unique( - charges, return_inverse=True) - 
self.charge_labels = charge_labels.astype(np.uint16) - + self.unique_charges, self.charge_labels = np.unique( + charges.astype(np.int16), return_inverse=True, axis=1) + self.charge_labels = self.charge_labels.astype(np.int16) else: - self.unique_charges = charges - self.charge_labels = charge_labels.astype(np.uint16) + if charge_labels.dtype not in (np.int16, np.int16): + raise TypeError("`charge_labels` have to be of dtype `np.int16`") + + self.unique_charges = charges.astype(np.int16) + self.charge_labels = charge_labels.astype(np.int16) + + @property + def dim(self): + return len(self.charge_labels) + + @property + def num_symmetries(self) -> int: + """ + Return the number of different charges in `ChargeCollection`. + """ + return self.unique_charges.shape[0] + + @property + def num_unique(self) -> int: + """ + Return the number of different charges in `ChargeCollection`. + """ + return self.unique_charges.shape[1] + + @property + def identity_charges(self) -> np.ndarray: + """ + Give the identity charge associated to a symmetries type in `charge_types`. + Args: + charge_types: A list of charge-types. + Returns: + nd.array: vector of identity charges for each symmetry in self + """ + unique_charges = np.expand_dims( + np.asarray([ct.identity_charge() for ct in self.charge_types], + dtype=np.int16), 1) + charge_labels = np.zeros(1, dtype=np.int16) + obj = self.__new__(type(self)) + obj.__init__(unique_charges, charge_labels, self.charge_types) + return obj def __add__(self, other: "BaseCharge") -> "BaseCharge": + """ + Fuse `self` with `other`. + Args: + other: A `BaseCharge` object. + Returns: + Charge: The result of fusing `self` with `other`. 
+ """ + # fuse the unique charges from each index, then compute new unique charges - comb_qnums = self.fuse(self.unique_charges, other.unique_charges) - [unique_charges, new_labels] = np.unique(comb_qnums, return_inverse=True) - new_labels = new_labels.reshape( - len(self.unique_charges), len(other.unique_charges)).astype(np.uint16) - - # find new labels using broadcasting (could use np.tile but less efficient) - charge_labels = new_labels[( - self.charge_labels[:, None] + np.zeros([1, len(other)], dtype=np.uint16) + comb_charges = fuse_ndarray_charges(self.unique_charges, + other.unique_charges, self.charge_types) + [unique_charges, charge_labels] = np.unique( + comb_charges, return_inverse=True, axis=1) + charge_labels = charge_labels.reshape(self.unique_charges.shape[1], + other.unique_charges.shape[1]).astype( + np.int16) + + # find new labels using broadcasting + charge_labels = charge_labels[( + self.charge_labels[:, None] + np.zeros([1, len(other)], dtype=np.int16) ).ravel(), (other.charge_labels[None, :] + - np.zeros([len(self), 1], dtype=np.uint16)).ravel()] + np.zeros([len(self), 1], dtype=np.int16)).ravel()] + obj = self.__new__(type(self)) - obj.__init__(unique_charges, charge_labels) + obj.__init__(unique_charges, charge_labels, self.charge_types) + return obj - def __len__(self): - return len(self.charge_labels) + def dual(self, take_dual: Optional[bool] = False) -> np.ndarray: + if take_dual: + unique_dual_charges = np.stack([ + self.charge_types[n].dual_charges(self.unique_charges[n, :]) + for n in range(len(self.charge_types)) + ], + axis=0) - @property - def charges(self) -> np.ndarray: - return self.unique_charges[self.charge_labels] + obj = self.__new__(type(self)) + obj.__init__(unique_dual_charges, self.charge_labels, self.charge_types) + return obj + return self @property - def dtype(self): - return self.unique_charges.dtype + def charges(self): + return self.unique_charges[:, self.charge_labels] def __repr__(self): - return str(type(self)) + 
'\n' + 'charges: ' + self.charges.__repr__() + '\n' + return str( + type(self)) + '\n' + 'charges: \n' + self.charges.__repr__() + '\n' + + def __len__(self): + return len(self.charge_labels) + + def __mul__(self, number: bool) -> "BaseCharge": + if not isinstance(number, (bool, np.bool_)): + print(type(number)) + raise ValueError( + "can only multiply by `True` or `False`, found {}".format(number)) + return self.dual(number) + + def intersect(self, other, assume_unique=False, + return_indices=False) -> (np.ndarray, np.ndarray, np.ndarray): + if isinstance(other, type(self)): + out = intersect( + self.unique_charges, + other.unique_charges, + assume_unique=True, + axis=1, + return_indices=return_indices) + obj = self.__new__(type(self)) + else: + out = intersect( + self.unique_charges, + np.asarray(other), + axis=1, + assume_unique=assume_unique, + return_indices=return_indices) + obj = self.__new__(type(self)) + if return_indices == True: + obj.__init__( + charges=out[0], + charge_labels=np.arange(len(out[0]), dtype=np.int16), + charge_types=self.charge_types, + ) + return obj, out[1], out[2] + return obj def unique(self, return_index=False, @@ -101,8 +194,9 @@ def unique(self, """ obj = self.__new__(type(self)) obj.__init__( - self.unique_charges, - charge_labels=np.arange(len(self.unique_charges), dtype=np.uint16)) + charges=self.unique_charges, + charge_labels=np.arange(self.unique_charges.shape[1], dtype=np.int16), + charge_types=self.charge_types) out = [obj] if return_index: @@ -120,87 +214,71 @@ def unique(self, if len(out) == 3: return out[0], out[1], out[2] - def isin(self, targets: Union[int, Iterable, "BaseCharge"]) -> np.ndarray: - """ - Test each element of `BaseCharge` if it is in `targets`. Returns - an `np.ndarray` of `dtype=bool`. - Args: - targets: The test elements - Returns: - np.ndarray: An array of `bool` type holding the result of the comparison. 
- """ - if isinstance(targets, type(self)): - targets = targets.unique_charges - targets = np.asarray(targets) - common, label_to_unique, label_to_targets = np.intersect1d( - self.unique_charges, targets, return_indices=True) - if len(common) == 0: - return np.full(len(self.charge_labels), fill_value=False, dtype=np.bool) - return np.isin(self.charge_labels, label_to_unique) - - def __contains__(self, target: Union[int, Iterable, "BaseCharge"]) -> bool: - """ - """ + @property + def dtype(self): + return self.unique_charges.dtype - if isinstance(target, type(self)): - target = target.unique_charges - target = np.asarray(target) - return target in self.unique_charges + @property + def degeneracies(self): + return np.sum( + np.expand_dims(self.charge_labels, 1) == np.expand_dims( + np.arange(self.unique_charges.shape[1], dtype=np.int16), 0), + axis=0) - def __eq__(self, target: Union[int, Iterable]) -> np.ndarray: + def reduce(self, + target_charges: np.ndarray, + return_locations: bool = False, + strides: int = 1) -> ("SymIndex", np.ndarray): """ - Find indices where `BaseCharge` equals `target_charges`. - `target` is a single integer encoding all symmetries of - `BaseCharge` + Reduce the dim of a SymIndex to keep only the index values that intersect target_charges Args: - target: integerger encoding charges. + target_charges (np.ndarray): array of unique quantum numbers to keep. + return_locations (bool, optional): if True, also return the output index + locations of target values. Returns: - np.ndarray: Boolean array with `True` where `BaseCharge.charges` equals - `target` and `False` everywhere else. + SymIndex: index of reduced dimension. + np.ndarray: output index locations of target values. 
""" - if isinstance(target, type(self)): - target = target.charges - elif isinstance(target, (np.integer, int)): - target = np.asarray([target]) - target = np.asarray(target) - tmp = np.full(len(target), fill_value=-1, dtype=np.int16) - - _, label_to_unique, label_to_target = np.intersect1d( - self.unique_charges, target, return_indices=True) - tmp[label_to_target] = label_to_unique - return np.squeeze( - np.expand_dims(self.charge_labels, 1) == np.expand_dims(tmp, 0)) - - @property - def zero_charge(self): + if isinstance(target_charges, (np.integer, int)): + target_charges = np.asarray([target_charges], dtype=np.int16) + if target_charges.ndim == 1: + target_charges = np.expand_dims(target_charges, 0) + target_charges = np.asarray(target_charges, dtype=np.int16) + # find intersection of index charges and target charges + reduced_charges, label_to_unique, label_to_target = intersect( + self.unique_charges, target_charges, axis=1, return_indices=True) + num_unique = len(label_to_unique) + + # construct the map to the reduced charges + map_to_reduced = np.full(self.dim, fill_value=-1, dtype=np.int16) + map_to_reduced[label_to_unique] = np.arange(num_unique, dtype=np.int16) + + # construct the map to the reduced charges + reduced_ind_labels = map_to_reduced[self.charge_labels] + reduced_locs = reduced_ind_labels >= 0 + new_ind_labels = reduced_ind_labels[reduced_locs].astype(np.int16) obj = self.__new__(type(self)) - obj.__init__( - np.asarray([self.dtype.type(0)]), np.asarray([0], dtype=np.uint16)) - return obj - - def __iter__(self): - return iter(self.charges) - - def intersect(self, - other: "BaseCharge", - return_indices: Optional[bool] = False) -> "BaseCharge": - if return_indices: - charges, comm1, comm2 = np.intersect1d( - self.charges, other.charges, return_indices=return_indices) - else: - charges = np.intersect1d(self.charges, other.charges) + obj.__init__(reduced_charges, new_ind_labels, self.charge_types) - obj = self.__new__(type(self)) - 
obj.__init__(charges, np.arange(len(charges), dtype=np.uint16)) - if return_indices: - return obj, comm1.astype(np.uint16), comm2.astype(np.uint16) + if return_locations: + return obj, strides * np.flatnonzero(reduced_locs).astype(np.uint32) return obj + def __matmul__(self, other): + #some checks + if len(self) != len(other): + raise ValueError( + '__matmul__ requires charges to have the same number of elements') + charges = np.concatenate([self.charges, other.charges], axis=0) + charge_types = self.charge_types + other.charge_types + return BaseCharge( + charges=charges, charge_labels=None, charge_types=charge_types) + def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": """ Return the charge-element at position `n`, wrapped into a `BaseCharge` object. - Args: + Args: n: An integer or `np.ndarray`. Returns: BaseCharge: The charges at `n`. @@ -209,43 +287,47 @@ def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": if isinstance(n, (np.integer, int)): n = np.asarray([n]) obj = self.__new__(type(self)) - obj.__init__(self.unique_charges, self.charge_labels[n]) + obj.__init__(self.unique_charges, self.charge_labels[n], self.charge_types) return obj - def get_item(self, n: Union[np.ndarray, int]) -> np.ndarray: - """ - Return the charge-element at position `n`. - Args: - n: An integer or `np.ndarray`. - Returns: - np.ndarray: The charges at `n`. 
- """ - return self.charges[n] + def __eq__(self, + target_charges: Union[np.ndarray, "BaseCharge"]) -> np.ndarray: - def __mul__(self, number: Union[bool, int]) -> "U1Charge": - if number not in (True, False, 0, 1, -1): - raise ValueError( - "can only multiply by `True`, `False`, `1` or `0`, found {}".format( - number)) - #outflowing charges - if number in (0, False, -1): - return U1Charge( - self.dual_charges(self.unique_charges), self.charge_labels) - #inflowing charges - if number in (1, True): - return U1Charge(self.unique_charges, self.charge_labels) + if isinstance(target_charges, type(self)): + targets = np.unique( + target_charges.unique_charges[:, target_charges.charge_labels], + axis=1) + else: + print(isinstance(target_charges, type(self))) + print(type(target_charges), type(self)) + targets = np.unique(target_charges, axis=1) + inds = np.nonzero( + np.logical_and.reduce( + np.expand_dims(self.unique_charges, 2) == np.expand_dims( + targets, 1), + axis=0))[0] + return np.expand_dims(self.charge_labels, 1) == np.expand_dims(inds, 0) + + def isin(self, target_charges: Union[np.ndarray, "BaseCharge"]) -> np.ndarray: - @property - def dual(self, charges): - return self.dual_charges + if isinstance(target_charges, type(self)): + targets = target_charges.unique_charges + else: + targets = np.unique(target_charges, axis=1) + tmp = np.expand_dims(self.unique_charges, 2) == np.expand_dims(targets, 1) + inds = np.nonzero( + np.logical_or.reduce(np.logical_and.reduce(tmp, axis=0), axis=1))[0] + + return np.isin(self.charge_labels, inds) class U1Charge(BaseCharge): def __init__(self, charges: np.ndarray, - charge_labels: Optional[np.ndarray] = None) -> None: - super().__init__(charges, charge_labels) + charge_labels: Optional[np.ndarray] = None, + charge_types: Optional[List[Type["BaseCharge"]]] = None) -> None: + super().__init__(charges, charge_labels, charge_types=[type(self)]) @staticmethod def fuse(charge1, charge2): @@ -255,498 +337,111 @@ def fuse(charge1, 
charge2): def dual_charges(charges): return charges * charges.dtype.type(-1) + @staticmethod + def identity_charge(): + return np.int16(0) -class Z2Charge(BaseCharge): - """ - A simple charge class for Z2 symmetries. - """ - - def __init__(self, - charges: List[np.ndarray], - shifts: Optional[np.ndarray] = None) -> None: - if isinstance(charges, np.ndarray): - charges = [charges] - - if shifts is None: - itemsizes = [c.dtype.itemsize for c in charges] - if not np.all([i == 1 for i in itemsizes]): - # martin: This error could come back at us, but I'll leave it for now - warnings.warn( - "Z2 charges can be entirely stored in " - "np.int8, but found dtypes = {}. Converting to np.int8.".format( - [c.dtype for c in charges])) - - charges = [c.astype(np.int8) for c in charges] - - super().__init__(charges, shifts) - - def __add__(self, other: "Z2Charge") -> "Z2Charge": - """ - Fuse the charges of `self` with charges of `other`, and - return a new `Z2Charge` object holding the result. - Args: - other: A `Z2Charge` object. - Returns: - Z2Charge: The result of fusing `self` with `other`. - """ - if not np.all(self.shifts == other.shifts): - raise ValueError( - "Cannot fuse Z2-charges with different shifts {} and {}".format( - self.shifts, other.shifts)) - if not isinstance(other, Z2Charge): - raise TypeError( - "can only add objects of identical types, found {} and {} instead" - .format(type(self), type(other))) - - fused = np.reshape( - np.bitwise_xor(self.charges[:, None], other.charges[None, :]), - len(self.charges) * len(other.charges)) - - return Z2Charge(charges=[fused], shifts=self.shifts) - - def __sub__(self, other: "Z2Charge") -> "Z2Charge": - """ - Subtract charges of `other` from charges of `self` and - return a new `Z2Charge` object holding the result. - Note that ofr Z2 charges, subtraction and addition are identical - Args: - other: A `Z2Charge` object. - Returns: - Z2Charge: The result of fusing `self` with `other`. 
- """ - if not np.all(self.shifts == other.shifts): - raise ValueError( - "Cannot fuse Z2-charges with different shifts {} and {}".format( - self.shifts, other.shifts)) - if not isinstance(other, Z2Charge): - raise TypeError( - "can only subtract objects of identical types, found {} and {} instead" - .format(type(self), type(other))) - - return self.__add__(other) - - def __matmul__(self, other: Union["Z2Charge", "Z2Charge"]) -> "Z2Charge": - itemsize = np.sum(self._itemsizes + other._itemsizes) - if itemsize > 8: - raise TypeError("Number of bits required to store all charges " - "in a single int is larger than 64") - dtype = np.int16 #need at least np.int16 to store two charges - if itemsize > 2: - dtype = np.int32 - if itemsize > 4: - dtype = np.int64 - - charges = np.left_shift( - self.charges.astype(dtype), - 8 * np.sum(other._itemsizes)) + other.charges.astype(dtype) - - shifts = np.append(self.shifts + 8 * np.sum(other._itemsizes), other.shifts) - return Z2Charge(charges=[charges], shifts=shifts) - - def __mul__(self, number: Union[bool, int]) -> "Z2Charge": - if number not in (True, False, 0, 1, -1): - raise ValueError( - "can only multiply by `True`, `False`, `1` or `0`, found {}".format( - number)) - #Z2 is self-dual - return Z2Charge(charges=[self.charges], shifts=self.shifts) - - def __rmul__(self, number: Union[bool, int]) -> "Z2Charge": - if number not in (True, False, 0, 1, -1): - raise ValueError( - "can only multiply by `True`, `False`, `1` or `0`, found {}".format( - number)) - - return self.__mul__(number) - - @property - def dual_charges(self): - #Z2 charges are self-dual - return self.charges - - def equals(self, target_charges: Iterable) -> np.ndarray: - if not np.all(np.isin(target_charges, np.asarray([0, 1]))): - raise ValueError("Z2-charges can only be 0 or 1, found charges {}".format( - np.unique(target_charges))) - return super().equals(target_charges) + @classmethod + def random(cls, minval: int, maxval: int, dimension: tuple): + charges 
= np.random.randint(minval, maxval, dimension, dtype=np.int16) + return cls(charges=charges) -class ChargeCollection: +def fuse_ndarray_charges(charges_A: np.ndarray, charges_B: np.ndarray, + charge_types: List[Type[BaseCharge]]) -> np.ndarray: """ - + Fuse the quantum numbers of two indices under their kronecker addition. + Args: + charges_A (np.ndarray): n-by-D1 dimensional array integers encoding charges, + with n the number of symmetries and D1 the index dimension. + charges__B (np.ndarray): n-by-D2 dimensional array of charges. + charge_types: A list of types of the charges. + Returns: + np.ndarray: n-by-(D1 * D2) dimensional array of the fused charges. """ + comb_charges = [0] * len(charge_types) + for n in range(len(charge_types)): + comb_charges[n] = charge_types[n].fuse(charges_A[n, :], charges_B[n, :]) - class Iterator: + return np.concatenate( + comb_charges, axis=0).reshape(len(charge_types), len(comb_charges[0])) - def __init__(self, data: np.ndarray): - self.n = 0 - self.data = data - def __next__(self): - if self.n < self.data.shape[0]: - result = self.data[self.n, :] - self.n += 1 - return tuple(result) #this makes a copy! - else: - raise StopIteration +def intersect(A: np.ndarray, + B: np.ndarray, + axis=0, + assume_unique=False, + return_indices=False) -> (np.ndarray, np.ndarray, np.ndarray): + """ + Extends numpy's intersect1d to find the row or column-wise intersection of + two 2d arrays. Takes identical input to numpy intersect1d. + Args: + A, B (np.ndarray): arrays of matching widths and datatypes + Returns: + ndarray: sorted 1D array of common rows/cols between the input arrays + ndarray: the indices of the first occurrences of the common values in A. + Only provided if return_indices is True. + ndarray: the indices of the first occurrences of the common values in B. + Only provided if return_indices is True. 
+ """ + #see https://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays + if A.ndim == 1: + return np.intersect1d( + A, B, assume_unique=assume_unique, return_indices=return_indices) + + elif A.ndim == 2: + if axis == 0: + ncols = A.shape[1] + if A.shape[1] != B.shape[1]: + raise ValueError("array widths must match to intersect") + + dtype = { + 'names': ['f{}'.format(i) for i in range(ncols)], + 'formats': ncols * [A.dtype] + } + if return_indices: + C, A_locs, B_locs = np.intersect1d( + A.view(dtype), + B.view(dtype), + assume_unique=assume_unique, + return_indices=return_indices) + return C.view(A.dtype).reshape(-1, ncols), A_locs, B_locs + C = np.intersect1d( + A.view(dtype), B.view(dtype), assume_unique=assume_unique) + return C.view(A.dtype).reshape(-1, ncols) + + elif axis == 1: + #@Glen: why the copy here? + out = intersect( + A.T.copy(), + B.T.copy(), + axis=0, + assume_unique=assume_unique, + return_indices=return_indices) + if return_indices: + return out[0].T, out[1], out[2] + return out.T - def __init__(self, - charges: List[BaseCharge], - shifts: Optional[List[np.ndarray]] = None, - stacked_charges: Optional[np.ndarray] = None) -> None: - if not isinstance(charges, list): - raise TypeError("only list allowed for argument `charges` " - "in BaseCharge.__init__(charges)") - if (shifts is not None) and (stacked_charges is None): - raise ValueError( - "Found `shifts == None` and `stacked_charges != None`." - "`shifts` and `stacked_charges` can only be passed together.") - if (shifts is None) and (stacked_charges is not None): - raise ValueError( - "Found `shifts != None` and `stacked_charges == None`." - "`shifts` and `stacked_charges` can only be passed together.") - self.charges = [] - if stacked_charges is None: - if not np.all([len(c) == len(charges[0]) for c in charges]): - raise ValueError("not all charges have the same length. 
" - "Got lengths = {}".format([len(c) for c in charges])) - for n in range(len(charges)): - if not isinstance(charges[n], BaseCharge): - raise TypeError( - "`ChargeCollection` can only be initialized " - "with a list of `BaseCharge`. Found {} instead".format( - [type(charges[n]) for n in range(len(charges))])) - - self._stacked_charges = np.stack([c.charges for c in charges], axis=1) - for n in range(len(charges)): - charge = charges[n].__new__(type(charges[n])) - charge.__init__(self._stacked_charges[:, n], shifts=charges[n].shifts) - self.charges.append(charge) else: - if len(shifts) != stacked_charges.shape[1]: - raise ValueError("`len(shifts)` = {} is different from " - "`stacked_charges.shape[1]` = {}".format( - len(shifts), stacked_charges.shape[1])) - - if stacked_charges.shape[1] != len(charges): - raise ValueError("`len(charges) and shape[1] of `stacked_charges` " - "have to be the same.") - for n in range(len(charges)): - charge = charges[n].__new__(type(charges[n])) - charge.__init__(stacked_charges[:, n], shifts=shifts[n]) - self.charges.append(charge) - self._stacked_charges = stacked_charges - - @classmethod - def from_charge_types(cls, charge_types: Type, shifts: List[np.ndarray], - stacked_charges: np.ndarray): - if len(charge_types) != stacked_charges.shape[1]: - raise ValueError("`len(charge_types) and shape[1] of `stacked_charges` " - "have to be the same.") - if len(charge_types) != len(shifts): - raise ValueError( - "`len(charge_types) and `len(shifts)` have to be the same.") - charges = [ - charge_types[n].__new__(charge_types[n]) - for n in range(len(charge_types)) - ] - return cls(charges=charges, stacked_charges=stacked_charges, shifts=shifts) - - @property - def num_charges(self) -> int: - """ - Return the number of different charges in `ChargeCollection`. - """ - return self._stacked_charges.shape[1] - - def get_item(self, n: int) -> Tuple: - """ - Returns the `n-th` charge-tuple of ChargeCollection in a tuple. 
- """ - if isinstance(n, (np.integer, int)): - n = np.asarray([n]) - return tuple(self._stacked_charges[n, :].flat) - - def get_item_ndarray(self, n: Union[np.ndarray, int]) -> np.ndarray: - """ - Returns the `n-th` charge-tuples of ChargeCollection in an np.ndarray. - """ - if isinstance(n, (np.integer, int)): - n = np.asarray([n]) - return self._stacked_charges[n, :] - - def __getitem__(self, n: Union[np.ndarray, int]) -> "ChargeCollection": - - if isinstance(n, (np.integer, int)): - n = np.asarray([n]) - - array = self._stacked_charges[n, :] - - return self.from_charge_types( - charge_types=[type(c) for c in self.charges], - shifts=[c.shifts for c in self.charges], - stacked_charges=array) - # if self.num_charges == 1: - # array = np.expand_dims(array, 0) - - # if len(array.shape) == 2: - # if array.shape[1] == 1: - # array = np.squeeze(array, axis=1) - # if len(array.shape) == 0: - # array = np.asarray([array]) - - # charges = [] - # if np.prod(array.shape) == 0: - # for n in range(len(self.charges)): - # charge = self.charges[n].__new__(type(self.charges[n])) - # charge.__init__( - # charges=[np.empty(0, dtype=self.charges[n].dtype)], - # shifts=self.charges[n].shifts) - # charges.append(charge) - - # obj = self.__new__(type(self)) - # obj.__init__(charges=charges) - # return obj - - # if len(array.shape) == 1: - # array = np.expand_dims(array, 1) - - # for m in range(len(self.charges)): - # charge = self.charges[m].__new__(type(self.charges[m])) - # charge.__init__(charges=[array[:, m]], shifts=self.charges[m].shifts) - # charges.append(charge) - - # obj = self.__new__(type(self)) - # obj.__init__(charges=charges) - # return obj - - def __iter__(self): - return self.Iterator(self._stacked_charges) - - def __add__(self, other: "Charge") -> "Charge": - """ - Fuse `self` with `other`. - Args: - other: A `ChargeCollection` object. - Returns: - Charge: The result of fusing `self` with `other`. 
- """ - return ChargeCollection( - [c1 + c2 for c1, c2 in zip(self.charges, other.charges)]) - - def __sub__(self, other: "Charge") -> "Charge": - """ - Subtract `other` from `self`. - Args: - other: A `ChargeCollection` object. - Returns: - Charge: The result of fusing `self` with `other`. - """ - return ChargeCollection( - [c1 - c2 for c1, c2 in zip(self.charges, other.charges)]) - - def __repr__(self): - text = str(type(self)) + '\n ' - for n in range(len(self.charges)): - tmp = self.charges[n].__repr__() - tmp = tmp.replace('\n', '\n\t') - text += (tmp + '\n') - return text - - def __len__(self): - return len(self.charges[0]) + raise NotImplementedError( + "intersection can only be performed on first or second axis") - def __mul__(self, number: Union[bool, int]) -> "Charge": - if number not in (True, False, 0, 1, -1): - raise ValueError( - "can only multiply by `True`, `False`, `1` or `0`, found {}".format( - number)) - return ChargeCollection(charges=[c * number for c in self.charges]) + else: + raise NotImplementedError( + "intersect is only implemented for 1d or 2d arrays") - def __rmul__(self, number: Union[bool, int]) -> "Charge": - if number not in (True, False, 0, 1, -1): - raise ValueError( - "can only multiply by `True`, `False`, `1` or `0`, found {}".format( - number)) - - return self.__mul__(number) - - def isin(self, targets: Union[Iterable, "ChargeCollection"]): - if isinstance(targets, type(self)): - _targets = [t for t in targets] - return np.logical_or.reduce([ - np.logical_and.reduce([ - np.isin(self._stacked_charges[:, n], _targets[m][n]) - for n in range(len(_targets[m])) - ]) - for m in range(len(_targets)) - ]) - - def __contains__(self, targets: Union[Iterable, "ChargeCollection"]): - if isinstance(targets, type(self)): - if len(targets) > 1: - raise ValueError( - '__contains__ expects a single input, found {} inputs'.format( - len(targets))) - - _targets = targets.get_item(0) - return np.any( - np.logical_and.reduce([ - 
np.isin(self._stacked_charges[:, n], _targets[n]) - for n in range(len(_targets)) - ])) - - def unique( - self, - return_index=False, - return_inverse=False, - return_counts=False, - ) -> Tuple["ChargeCollection", np.ndarray, np.ndarray, np.ndarray]: - """ - Compute the unique charges in `BaseCharge`. - See np.unique for a more detailed explanation. This function - does the same but instead of a np.ndarray, it returns the unique - elements in a `BaseCharge` object. - Args: - return_index: If `True`, also return the indices of `self.charges` (along the specified axis, - if provided, or in the flattened array) that result in the unique array. - return_inverse: If `True`, also return the indices of the unique array (for the specified - axis, if provided) that can be used to reconstruct `self.charges`. - return_counts: If `True`, also return the number of times each unique item appears - in `self.charges`. - Returns: - BaseCharge: The sorted unique values. - np.ndarray: The indices of the first occurrences of the unique values in the - original array. Only provided if `return_index` is True. - np.ndarray: The indices to reconstruct the original array from the - unique array. Only provided if `return_inverse` is True. - np.ndarray: The number of times each of the unique values comes up in the - original array. Only provided if `return_counts` is True. 
- """ - result = np.unique( - np.stack([self.charges[n].charges for n in range(len(self.charges))], - axis=1), - return_index=return_index, - return_inverse=return_inverse, - return_counts=return_counts, - axis=0) - charges = [] - if not (return_index or return_inverse or return_counts): - for n in range(len(self.charges)): - obj = self.charges[n].__new__(type(self.charges[n])) - obj.__init__(charges=[result[:, n]], shifts=self.charges[n].shifts) - charges.append(obj) - return ChargeCollection(charges) - for n in range(len(self.charges)): - obj = self.charges[n].__new__(type(self.charges[n])) - obj.__init__(charges=[result[0][:, n]], shifts=self.charges[n].shifts) - charges.append(obj) - out = ChargeCollection(charges) - return tuple([out] + [result[n] for n in range(1, len(result))]) - - def equals(self, target_charges: List[Union[List, np.ndarray]]) -> np.ndarray: - if len(target_charges) != len(self.charges): - raise ValueError( - "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" - .format(len(target_charges), len(self.charges))) - return np.logical_and.reduce([ - self.charges[n].equals(target_charges[n]) - for n in range(len(target_charges)) - ]) - - def __eq__(self, target_charges: Iterable): - raise NotImplementedError() - if isinstance(target_charges, type(self)): - target_charges = np.stack([c.charges for c in target_charges.charges], - axis=1) - target_charges = np.asarray(target_charges) - if target_charges.ndim == 1: - target_charges = np.expand_dims(target_charges, 0) - if target_charges.shape[1] != len(self.charges): - raise ValueError( - "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" - .format(len(target_charges), len(self.charges))) - return np.logical_and.reduce( - self._stacked_charges == target_charges, axis=1) - - def concatenate(self, - others: Union["ChargeCollection", List["ChargeCollection"]]): - """ - Concatenate `self.charges` with `others.charges`. 
- Args: - others: List of `BaseCharge` objects. - Returns: - BaseCharge: The concatenated charges. - """ - if isinstance(others, type(self)): - others = [others] - - charges = [ - self.charges[n].concatenate([o.charges[n] - for o in others]) - for n in range(len(self.charges)) - ] - return ChargeCollection(charges) - - @property - def dtype(self): - return np.result_type(*[c.dtype for c in self.charges]) - - @property - def zero_charge(self): - obj = self.__new__(type(self)) - obj.__init__(charges=[c.zero_charge for c in self.charges]) - return obj - - def intersect(self, - other: "ChargeCollection", - return_indices: Optional[bool] = False) -> "ChargeCollection": - if return_indices: - ua, ia = self.unique(return_index=True) - ub, ib = other.unique(return_index=True) - conc = ua.concatenate(ub) - uab, iab, cntab = conc.unique(return_index=True, return_counts=True) - intersection = uab[cntab == 2] - comm1 = np.argmax( - np.logical_and.reduce( - np.repeat( - np.expand_dims(self._stacked_charges, 2), - intersection._stacked_charges.shape[0], - axis=2) == np.expand_dims(intersection._stacked_charges.T, 0), - axis=1), - axis=0) - comm2 = np.argmax( - np.logical_and.reduce( - np.repeat( - np.expand_dims(other._stacked_charges, 2), - intersection._stacked_charges.shape[0], - axis=2) == np.expand_dims(intersection._stacked_charges.T, 0), - axis=1), - axis=0) - return intersection, comm1, comm2 - - else: - self_unique = self.unique() - other_unique = other.unique() - concatenated = self_unique.concatenate(other_unique) - tmp_unique, counts = concatenated.unique(return_counts=True) - return tmp_unique[counts == 2] - - -def fuse_charges( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]]) -> Union[BaseCharge, ChargeCollection]: +def fuse_charges(charges: List[BaseCharge], flows: List[bool]) -> BaseCharge: """ Fuse all `charges` into a new charge. 
- Charges are fused from "right to left", + Charges are fused from "right to left", in accordance with row-major order. Args: charges: A list of charges to be fused. flows: A list of flows, one for each element in `charges`. Returns: - ChargeCollection: The result of fusing `charges`. + BaseCharge: The result of fusing `charges`. """ if len(charges) != len(flows): raise ValueError( @@ -761,12 +456,12 @@ def fuse_charges( def fuse_degeneracies(degen1: Union[List, np.ndarray], degen2: Union[List, np.ndarray]) -> np.ndarray: """ - Fuse degeneracies `degen1` and `degen2` of two leg-charges - by simple kronecker product. `degen1` and `degen2` typically belong to two + Fuse degeneracies `degen1` and `degen2` of two leg-charges + by simple kronecker product. `degen1` and `degen2` typically belong to two consecutive legs of `BlockSparseTensor`. Given `degen1 = [1, 2, 3]` and `degen2 = [10, 100]`, this returns `[10, 100, 20, 200, 30, 300]`. - When using row-major ordering of indices in `BlockSparseTensor`, + When using row-major ordering of indices in `BlockSparseTensor`, the position of `degen1` should be "to the left" of the position of `degen2`. 
Args: degen1: Iterable of integers @@ -776,3 +471,416 @@ def fuse_degeneracies(degen1: Union[List, np.ndarray], """ return np.reshape(degen1[:, None] * degen2[None, :], len(degen1) * len(degen2)) + + +# class BaseCharge: + +# def __init__(self, +# charges: np.ndarray, +# charge_labels: Optional[np.ndarray] = None) -> None: +# if charges.dtype != np.int16: +# raise TypeError("`charges` have to be of dtype `np.int16`") + +# if charge_labels is None: + +# self.unique_charges, charge_labels = np.unique( +# charges, return_inverse=True) +# self.charge_labels = charge_labels.astype(np.int16) + +# else: +# if charge_labels.dtype not in (np.int16, np.int16): +# raise TypeError("`charge_labels` have to be of dtype `np.int16`") + +# self.unique_charges = charges +# self.charge_labels = charge_labels.astype(np.int16) + +# def __add__(self, other: "BaseCharge") -> "BaseCharge": +# # fuse the unique charges from each index, then compute new unique charges +# comb_qnums = self.fuse(self.unique_charges, other.unique_charges) +# [unique_charges, charge_labels] = np.unique(comb_qnums, return_inverse=True) +# charge_labels = charge_labels.reshape( +# len(self.unique_charges), len(other.unique_charges)).astype(np.int16) + +# # find new labels using broadcasting (could use np.tile but less efficient) +# charge_labels = charge_labels[( +# self.charge_labels[:, None] + np.zeros([1, len(other)], dtype=np.int16) +# ).ravel(), (other.charge_labels[None, :] + +# np.zeros([len(self), 1], dtype=np.int16)).ravel()] +# obj = self.__new__(type(self)) +# obj.__init__(unique_charges, charge_labels) +# return obj + +# def __len__(self): +# return len(self.charge_labels) + +# def dual(self, take_dual: Optional[bool] = False) -> "BaseCharge": +# if take_dual: +# obj = self.__new__(type(self)) +# obj.__init__(self.dual_charges(self.unique_charges), self.charge_labels) +# return obj +# return self + +# @property +# def num_symmetries(self) -> int: +# """ +# Return the number of different charges in 
`ChargeCollection`. +# """ +# return self.unique_charges.shape[0] + +# def charges(self) -> np.ndarray: +# return self.unique_charges[self.charge_labels] + +# @property +# def dim(self): +# return len(self.charge_labels) + +# @property +# def dtype(self): +# return self.unique_charges.dtype + +# def __repr__(self): +# return str( +# type(self)) + '\n' + 'charges: ' + self.charges().__repr__() + '\n' + +# @property +# def degeneracies(self): +# return np.sum( +# np.expand_dims(self.charge_labels, 1) == np.expand_dims( +# np.arange(len(self.unique_charges), dtype=np.int16), 0), +# axis=0) + +# def unique(self, +# return_index=False, +# return_inverse=False, +# return_counts=False +# ) -> Tuple["BaseCharge", np.ndarray, np.ndarray, np.ndarray]: +# """ +# Compute the unique charges in `BaseCharge`. +# See np.unique for a more detailed explanation. This function +# does the same but instead of a np.ndarray, it returns the unique +# elements in a `BaseCharge` object. +# Args: +# return_index: If `True`, also return the indices of `self.charges` (along the specified axis, +# if provided, or in the flattened array) that result in the unique array. +# return_inverse: If `True`, also return the indices of the unique array (for the specified +# axis, if provided) that can be used to reconstruct `self.charges`. +# return_counts: If `True`, also return the number of times each unique item appears +# in `self.charges`. +# Returns: +# BaseCharge: The sorted unique values. +# np.ndarray: The indices of the first occurrences of the unique values in the +# original array. Only provided if `return_index` is True. +# np.ndarray: The indices to reconstruct the original array from the +# unique array. Only provided if `return_inverse` is True. +# np.ndarray: The number of times each of the unique values comes up in the +# original array. Only provided if `return_counts` is True. 
+# """ +# obj = self.__new__(type(self)) +# obj.__init__( +# self.unique_charges, +# charge_labels=np.arange(len(self.unique_charges), dtype=np.int16)) + +# out = [obj] +# if return_index: +# _, index = np.unique(self.charge_labels, return_index=True) +# out.append(index) +# if return_inverse: +# out.append(self.charge_labels) +# if return_counts: +# out.append(self.degeneracies) +# if len(out) == 1: +# return out[0] +# if len(out) == 2: +# return out[0], out[1] +# if len(out) == 3: +# return out[0], out[1], out[2] + +# def isin(self, targets: Union[int, Iterable, "BaseCharge"]) -> np.ndarray: +# """ +# Test each element of `BaseCharge` if it is in `targets`. Returns +# an `np.ndarray` of `dtype=bool`. +# Args: +# targets: The test elements +# Returns: +# np.ndarray: An array of `bool` type holding the result of the comparison. +# """ +# if isinstance(targets, type(self)): +# targets = targets.unique_charges +# targets = np.asarray(targets) +# common, label_to_unique, label_to_targets = np.intersect1d( +# self.unique_charges, targets, return_indices=True) +# if len(common) == 0: +# return np.full(len(self.charge_labels), fill_value=False, dtype=np.bool) +# return np.isin(self.charge_labels, label_to_unique) + +# def __iter__(self): +# return iter(self.charges()) + +# def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": +# """ +# Return the charge-element at position `n`, wrapped into a `BaseCharge` +# object. +# Args: +# n: An integer or `np.ndarray`. +# Returns: +# BaseCharge: The charges at `n`. 
+# """ + +# if isinstance(n, (np.integer, int)): +# n = np.asarray([n]) +# obj = self.__new__(type(self)) +# obj.__init__(self.unique_charges, self.charge_labels[n]) +# return obj + +# def __mul__(self, number: bool) -> "BaseCharge": +# if not isinstance(number, bool): +# raise ValueError( +# "can only multiply by `True` or `False`, found {}".format(number)) + +# return self.dual(number) + +# def intersect(self, other, return_indices=False): +# out = np.intersect1d( +# self.unique_charges, +# other.unique_charges, +# assume_unique=True, +# return_indices=return_indices) +# obj = self.__new__(type(self)) +# if not return_indices: +# obj.__init__(out, np.arange(len(out), dtype=np.int16)) +# return obj +# obj.__init__(out[0], np.arange(len(out), dtype=np.int16)) +# return obj, out[1], out[2] + +# def reduce(self, +# target_charges: np.ndarray, +# return_locs: bool = False, +# strides: int = 1) -> ("SymIndex", np.ndarray): +# """ +# Reduce the dim of a SymIndex to keep only the index values that intersect target_charges +# Args: +# target_charges (np.ndarray): array of unique quantum numbers to keep. +# return_locs (bool, optional): if True, also return the output index +# locations of target values. +# Returns: +# SymIndex: index of reduced dimension. +# np.ndarray: output index locations of target values. 
+# """ +# if isinstance(target_charges, (int, np.integer)): +# target_charges = np.asarray([target_charges]) +# target_charges = np.asarray(target_charges, dtype=np.int16) +# # find intersection of index charges and target charges +# reduced_charges, label_to_unique, label_to_target = intersect( +# self.unique_charges, target_charges, axis=1, return_indices=True) + +# num_unique = len(label_to_unique) + +# # construct the map to the reduced charges +# map_to_reduced = np.full(self.dim, fill_value=-1, dtype=np.int16) +# map_to_reduced[label_to_unique] = np.arange(num_unique, dtype=np.int16) + +# # construct the map to the reduced charges +# reduced_ind_labels = map_to_reduced[self.charge_labels] +# reduced_locs = reduced_ind_labels >= 0 +# new_ind_labels = reduced_ind_labels[reduced_locs].astype(np.int16) +# obj = self.__new__(type(self)) + +# obj.__init__(reduced_charges, new_ind_labels) +# if return_locs: +# return obj, strides * np.flatnonzero(reduced_locs).astype(np.uint32) +# return obj + +# class ChargeCollection: + +# def __init__(self, +# charge_types: List[Type[BaseCharge]], +# charges: np.ndarray, +# charge_labels: Optional[np.ndarray] = None) -> None: +# self.charge_types = charge_types +# if charges.ndim == 1: +# charges = np.expand_dims(charges, 0) +# if charge_labels is None: +# self.unique_charges, self.charge_labels = np.unique( +# charges.astype(np.int16), return_inverse=True, axis=1) +# self.charge_labels = self.charge_labels.astype(np.int16) +# else: +# if charge_labels.dtype not in (np.int16, np.int16): +# raise TypeError("`charge_labels` have to be of dtype `np.int16`") + +# self.unique_charges = charges.astype(np.int16) +# self.charge_labels = charge_labels.astype(np.int16) + +# @property +# def dim(self): +# return len(self.charge_labels) + +# @property +# def num_symmetries(self) -> int: +# """ +# Return the number of different charges in `ChargeCollection`. 
+# """ +# return self.unique_charges.shape[0] + +# @property +# def identity_charges(self) -> np.ndarray: +# """ +# Give the identity charge associated to a symmetries type in `charge_types`. +# Args: +# charge_types: A list of charge-types. +# Returns: +# nd.array: vector of identity charges for each symmetry in self +# """ +# unique_charges = np.expand_dims( +# np.asarray([ct.identity for ct in self.charge_types], dtype=np.int16), +# 1) +# charge_labels = np.zeros(1, dtype=np.int16) +# obj = self.__new__(type(self)) +# obj.__init__(unique_charges, charge_labels) +# return obj + +# def __add__(self, other: "ChargeCollection") -> "ChargeCollection": +# """ +# Fuse `self` with `other`. +# Args: +# other: A `ChargeCollection` object. +# Returns: +# Charge: The result of fusing `self` with `other`. +# """ + +# # fuse the unique charges from each index, then compute new unique charges +# comb_charges = fuse_ndarray_charges(self.unique_charges, +# other.unique_charges, self.charge_types) +# [unique_charges, charge_labels] = np.unique( +# comb_charges, return_inverse=True, axis=1) +# charge_labels = charge_labels.reshape(self.unique_charges.shape[1], +# other.unique_charges.shape[1]).astype( +# np.int16) + +# # find new labels using broadcasting +# charge_labels = charge_labels[( +# self.charge_labels[:, None] + np.zeros([1, len(other)], dtype=np.int16) +# ).ravel(), (other.charge_labels[None, :] + +# np.zeros([len(self), 1], dtype=np.int16)).ravel()] +# return ChargeCollection(self.charge_types, unique_charges, charge_labels) + +# def dual(self, take_dual: Optional[bool] = False) -> np.ndarray: +# if take_dual: +# unique_dual_charges = np.stack([ +# self.charge_types[n].dual_charges(self.unique_charges[n, :]) +# for n in range(len(self.charge_types)) +# ], +# axis=0) +# return ChargeCollection(self.charge_types, unique_dual_charges, +# self.charge_labels) +# return self + +# def charges(self): +# return self.unique_charges[:, self.charge_labels] + +# def __repr__(self): 
+# return str( +# type(self)) + '\n' + 'charges: \n' + self.charges().__repr__() + '\n' + +# def __len__(self): +# return len(self.charge_labels) + +# def __mul__(self, number: bool) -> "ChargeCollection": +# if not isinstance(number, bool): +# raise ValueError( +# "can only multiply by `True` or `False`, found {}".format(number)) +# return self.dual(number) + +# def unique(self, +# return_index=False, +# return_inverse=False, +# return_counts=False +# ) -> Tuple["BaseCharge", np.ndarray, np.ndarray, np.ndarray]: +# """ +# Compute the unique charges in `BaseCharge`. +# See np.unique for a more detailed explanation. This function +# does the same but instead of a np.ndarray, it returns the unique +# elements in a `BaseCharge` object. +# Args: +# return_index: If `True`, also return the indices of `self.charges` (along the specified axis, +# if provided, or in the flattened array) that result in the unique array. +# return_inverse: If `True`, also return the indices of the unique array (for the specified +# axis, if provided) that can be used to reconstruct `self.charges`. +# return_counts: If `True`, also return the number of times each unique item appears +# in `self.charges`. +# Returns: +# BaseCharge: The sorted unique values. +# np.ndarray: The indices of the first occurrences of the unique values in the +# original array. Only provided if `return_index` is True. +# np.ndarray: The indices to reconstruct the original array from the +# unique array. Only provided if `return_inverse` is True. +# np.ndarray: The number of times each of the unique values comes up in the +# original array. Only provided if `return_counts` is True. 
+# """ + +# obj = ChargeCollection( +# self.charge_types, +# self.unique_charges, +# charge_labels=np.arange(self.unique_charges.shape[1], dtype=np.int16)) + +# out = [obj] +# if return_index: +# _, index = np.unique(self.charge_labels, return_index=True) +# out.append(index) +# if return_inverse: +# out.append(self.charge_labels) +# if return_counts: +# _, cnts = np.unique(self.charge_labels, return_counts=True) +# out.append(cnts) +# if len(out) == 1: +# return out[0] +# if len(out) == 2: +# return out[0], out[1] +# if len(out) == 3: +# return out[0], out[1], out[2] + +# @property +# def dtype(self): +# return self.unique_charges.dtype + +# @property +# def degeneracies(self): +# return np.sum( +# np.expand_dims(self.charge_labels, 1) == np.expand_dims( +# np.arange(self.unique_charges.shape[1], dtype=np.int16), 0), +# axis=0) + +# def reduce(self, +# target_charges: np.ndarray, +# return_locs: bool = False, +# strides: int = 1) -> ("SymIndex", np.ndarray): +# """ +# Reduce the dim of a SymIndex to keep only the index values that intersect target_charges +# Args: +# target_charges (np.ndarray): array of unique quantum numbers to keep. +# return_locs (bool, optional): if True, also return the output index +# locations of target values. +# Returns: +# SymIndex: index of reduced dimension. +# np.ndarray: output index locations of target values. 
+# """ +# target_charges = np.asarray(target_charges, dtype=np.int16) +# # find intersection of index charges and target charges +# reduced_charges, label_to_unique, label_to_target = intersect( +# self.unique_charges, target_charges, axis=1, return_indices=True) +# num_unique = len(label_to_unique) + +# # construct the map to the reduced charges +# map_to_reduced = np.full(self.dim, fill_value=-1, dtype=np.int16) +# map_to_reduced[label_to_unique] = np.arange(num_unique, dtype=np.int16) + +# # construct the map to the reduced charges +# reduced_ind_labels = map_to_reduced[self.charge_labels] +# reduced_locs = reduced_ind_labels >= 0 +# new_ind_labels = reduced_ind_labels[reduced_locs].astype(np.int16) + +# if return_locs: +# return ChargeCollection( +# self.charge_types, reduced_charges, +# new_ind_labels), strides * np.flatnonzero(reduced_locs).astype( +# np.uint32) +# return ChargeCollection(self.charge_types, reduced_charges, new_ind_labels) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index b5e8ec339..fc2d033d9 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -16,7 +16,7 @@ from __future__ import division from __future__ import print_function import numpy as np -from tensornetwork.block_tensor.charge import BaseCharge, ChargeCollection +from tensornetwork.block_tensor.charge import BaseCharge import copy from typing import List, Union, Any, Optional, Tuple, Text @@ -29,17 +29,12 @@ class Index: """ def __init__(self, - charges: Union[ChargeCollection, BaseCharge], + charges: BaseCharge, flow: int, name: Optional[Text] = None, left_child: Optional["Index"] = None, right_child: Optional["Index"] = None): - if isinstance(charges, BaseCharge): - self._charges = charges #ChargeCollection([charges]) - elif isinstance(charges, ChargeCollection) or (charges is None): - self._charges = charges - else: - raise TypeError("Unknown type {}".format(type(charges))) + self._charges = 
charges #ChargeCollection([charges]) self.flow = flow self.left_child = left_child self.right_child = right_child @@ -126,7 +121,7 @@ def charges(self): def fuse_index_pair(left_index: Index, right_index: Index, - flow: Optional[int] = 1) -> Index: + flow: Optional[int] = False) -> Index: """ Fuse two consecutive indices (legs) of a symmetric tensor. Args: @@ -145,7 +140,7 @@ def fuse_index_pair(left_index: Index, charges=None, flow=flow, left_child=left_index, right_child=right_index) -def fuse_indices(indices: List[Index], flow: Optional[int] = 1) -> Index: +def fuse_indices(indices: List[Index], flow: Optional[int] = False) -> Index: """ Fuse a list of indices (legs) of a symmetric tensor. Args: From 6f7caac91ba4bf4c4e33309d893fc2d403f93280 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 29 Jan 2020 14:45:13 -0500 Subject: [PATCH 184/212] fix bug in unique --- tensornetwork/block_tensor/charge.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 19fe6a35c..760453b46 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -197,7 +197,6 @@ def unique(self, charges=self.unique_charges, charge_labels=np.arange(self.unique_charges.shape[1], dtype=np.int16), charge_types=self.charge_types) - out = [obj] if return_index: _, index = np.unique(self.charge_labels, return_index=True) @@ -213,6 +212,8 @@ def unique(self, return out[0], out[1] if len(out) == 3: return out[0], out[1], out[2] + if len(out) == 4: + return out[0], out[1], out[2], out[3] @property def dtype(self): From 2646f26fd6e12f185bf3721894035e7168222220 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 29 Jan 2020 14:45:17 -0500 Subject: [PATCH 185/212] adding/removing tests --- .../block_tensor/block_tensor_test.py | 529 ++++++--------- tensornetwork/block_tensor/charge_test.py | 608 +++--------------- tensornetwork/block_tensor/index_test.py | 104 +-- 3 files 
changed, 323 insertions(+), 918 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_test.py b/tensornetwork/block_tensor/block_tensor_test.py index 015c2c9b6..c941c4544 100644 --- a/tensornetwork/block_tensor/block_tensor_test.py +++ b/tensornetwork/block_tensor/block_tensor_test.py @@ -1,151 +1,20 @@ import numpy as np import pytest -from tensornetwork.block_tensor.charge import U1Charge, ChargeCollection, fuse_charges +from tensornetwork.block_tensor.charge import U1Charge, fuse_charges from tensornetwork.block_tensor.index import Index -from tensornetwork.block_tensor.block_tensor import _find_diagonal_dense_blocks, _find_diagonal_sparse_blocks, compute_num_nonzero, find_sparse_positions, find_dense_positions, BlockSparseTensor, fuse_ndarrays, _find_values_in_fused +from tensornetwork.block_tensor.block_tensor import compute_num_nonzero, reduce_charges, BlockSparseTensor, fuse_ndarrays np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128] -# def test_test_num_nonzero_consistency(): -# B = 4 -# D = 100 -# rank = 4 - -# qs = [[ -# np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) -# for _ in range(2) -# ] -# for _ in range(rank)] -# charges1 = [U1Charge(qs[n]) for n in range(rank)] -# charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] -# charges3 = [ -# ChargeCollection([U1Charge(qs[n][m]) -# for m in range(2)]) -# for n in range(rank) -# ] -# flows = [1, 1, 1, -1] -# n1 = compute_num_nonzero(charges1, flows) -# n2 = compute_num_nonzero(charges3, flows) -# n3 = compute_num_nonzero(charges3, flows) -# assert n1 == n2 - -# def test_find_sparse_positions_consistency(): -# B = 4 -# D = 100 -# rank = 4 - -# qs = [[ -# np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) -# for _ in range(2) -# ] -# for _ in range(rank)] -# charges1 = [U1Charge(qs[n]) for n in range(rank)] -# charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] -# charges3 = [ -# ChargeCollection([U1Charge(qs[n][m]) -# for 
m in range(2)]) -# for n in range(rank) -# ] - -# data1 = find_sparse_positions( -# charges=charges1, -# flows=[1, 1, 1, 1], -# target_charges=charges1[0].zero_charge) -# data2 = find_sparse_positions( -# charges=charges2, -# flows=[1, 1, 1, 1], -# target_charges=charges2[0].zero_charge) -# data3 = find_sparse_positions( -# charges=charges3, -# flows=[1, 1, 1, 1], -# target_charges=charges3[0].zero_charge) - -# nz1 = np.asarray(list(data1.values())[0]) -# nz2 = np.asarray(list(data2.values())[0]) -# nz3 = np.asarray(list(data3.values())[0]) -# assert np.all(nz1 == nz2) -# assert np.all(nz1 == nz3) - -# def test_find_dense_positions_consistency(): -# B = 5 -# D = 20 -# rank = 4 - -# qs = [[ -# np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) -# for _ in range(2) -# ] -# for _ in range(rank)] -# charges1 = [U1Charge(qs[n]) for n in range(rank)] -# charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] -# charges3 = [ -# ChargeCollection([U1Charge(qs[n][m]) -# for m in range(2)]) -# for n in range(rank) -# ] -# flows = [1, 1, 1, -1] -# data1 = find_dense_positions( -# charges=charges1, flows=flows, target_charge=charges1[0].zero_charge) -# data2 = find_dense_positions( -# charges=charges2, flows=flows, target_charge=charges2[0].zero_charge) -# data3 = find_dense_positions( -# charges=charges3, flows=flows, target_charge=charges3[0].zero_charge) - -# nz = compute_num_nonzero(charges1, flows) -# assert nz == len(data1) -# assert len(data1) == len(data2) -# assert len(data1) == len(data3) - -# def test_find_diagonal_sparse_blocks_consistency(): -# B = 5 -# D = 20 -# rank = 4 - -# qs = [[ -# np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) -# for _ in range(2) -# ] -# for _ in range(rank)] -# charges1 = [U1Charge(qs[n]) for n in range(rank)] -# charges2 = [ChargeCollection([charges1[n]]) for n in range(rank)] -# charges3 = [ -# ChargeCollection([U1Charge(qs[n][m]) -# for m in range(2)]) -# for n in range(rank) -# ] - -# _, _, 
start_positions1, _, _ = _find_diagonal_sparse_blocks( -# row_charges=[charges1[0], charges1[1]], -# column_charges=[charges1[2], charges1[3]], -# row_flows=[1, 1], -# column_flows=[1, -1], -# return_data=False) - -# _, _, start_positions2, _, _ = _find_diagonal_sparse_blocks( -# row_charges=[charges2[0], charges2[1]], -# column_charges=[charges2[2], charges2[3]], -# row_flows=[1, 1], -# column_flows=[1, -1], -# return_data=False) - -# _, _, start_positions3, _, _ = _find_diagonal_sparse_blocks( -# row_charges=[charges3[0], charges3[1]], -# column_charges=[charges3[2], charges3[3]], -# row_flows=[1, 1], -# column_flows=[1, -1], -# return_data=False) -# assert np.all(start_positions1 == start_positions2) -# assert np.all(start_positions1 == start_positions3) - @pytest.mark.parametrize("dtype", np_dtypes) def test_block_sparse_init(dtype): D = 10 #bond dimension B = 10 #number of blocks rank = 4 - flows = np.asarray([1 for _ in range(rank)]) - flows[-2::] = -1 + flows = np.asarray([False for _ in range(rank)]) + flows[-2::] = True charges = [ U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16)) for _ in range(rank) @@ -164,191 +33,141 @@ def test_block_sparse_init(dtype): assert len(A.data) == num_elements -@pytest.mark.parametrize("dtype", np_dtypes) -def test_get_diagonal_blocks(dtype): - D = 10 #bond dimension - B = 10 #number of blocks - rank = 4 - flows = np.asarray([1 for _ in range(rank)]) - flows[-2::] = -1 - charges = [ - U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16)) - for _ in range(rank) - ] - indices = [ - Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) - for n in range(rank) - ] - num_elements = compute_num_nonzero([i.charges for i in indices], - [i.flow for i in indices]) - A = BlockSparseTensor.random(indices=indices, dtype=dtype) - A.reshape((100, 100)) - _, blocks, _, _, _ = A._get_diagonal_blocks(return_data=False) - assert num_elements == np.sum([len(v[0]) for v in blocks]) - - def 
test_find_dense_positions(): left_charges = np.asarray([-2, 0, 1, 0, 0]).astype(np.int16) right_charges = np.asarray([-1, 0, 2, 1]).astype(np.int16) - target_charge = 0 + target_charge = np.zeros((1, 1), dtype=np.int16) fused_charges = fuse_ndarrays([left_charges, right_charges]) - dense_positions = find_dense_positions( - [U1Charge(left_charges), U1Charge(right_charges)], [1, 1], - U1Charge(np.asarray([target_charge]))) - np.testing.assert_allclose(dense_positions[0], - np.nonzero(fused_charges == target_charge)[0]) - - -def test_find_dense_positions_2(): - D = 40 #bond dimension - B = 4 #number of blocks - dtype = np.int16 #the dtype of the quantum numbers - rank = 4 - flows = np.asarray([1 for _ in range(rank)]) - flows[-2::] = -1 - charges = [ - np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - for _ in range(rank) - ] - indices = [ - Index( - charges=U1Charge(charges[n]), flow=flows[n], name='index{}'.format(n)) - for n in range(rank) - ] - n1 = compute_num_nonzero([i.charges for i in indices], - [i.flow for i in indices]) - - i01 = indices[0] * indices[1] - i23 = indices[2] * indices[3] - positions = find_dense_positions([i01.charges, i23.charges], [1, 1], - U1Charge(np.asarray([0]))) - assert len(positions[0]) == n1 - - -def test_find_sparse_positions(): - D = 40 #bond dimension - B = 4 #number of blocks - dtype = np.int16 #the dtype of the quantum numbers - rank = 4 - flows = np.asarray([1 for _ in range(rank)]) - flows[-2::] = -1 - charges = [ - np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - for _ in range(rank) - ] - indices = [ - Index( - charges=U1Charge(charges[n]), flow=flows[n], name='index{}'.format(n)) - for n in range(rank) - ] - n1 = compute_num_nonzero([i.charges for i in indices], - [i.flow for i in indices]) - i01 = indices[0] * indices[1] - i23 = indices[2] * indices[3] - unique_row_charges = np.unique(i01.charges.charges) - unique_column_charges = np.unique(i23.charges.charges) - common_charges = np.intersect1d( - 
unique_row_charges, -unique_column_charges, assume_unique=True) - blocks = find_sparse_positions([i01.charges, i23.charges], [1, 1], - target_charges=U1Charge(np.asarray([0]))) - assert sum([len(v) for v in blocks.values()]) == n1 - np.testing.assert_allclose(np.sort(blocks[0]), np.arange(n1)) - - -def test_find_sparse_positions_2(): - D = 1000 #bond dimension - B = 4 #number of blocks - dtype = np.int16 #the dtype of the quantum numbers - charges = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - index = Index(charges=U1Charge(charges), flow=1, name='index0') - targets = np.asarray([-1, 0, 1]) - blocks = find_sparse_positions([index.charges], [index.flow], - target_charges=U1Charge(targets)) - - inds = np.isin(charges, targets) - relevant_charges = charges[inds] - blocks_ = {t: np.nonzero(relevant_charges == t)[0] for t in targets} - assert np.all( - np.asarray(list(blocks.keys())) == np.asarray(list(blocks_.keys()))) - for k in blocks.keys(): - assert np.all(blocks[k] == blocks_[k]) - - -def test_find_sparse_positions_3(): - D = 40 #bond dimension - B = 4 #number of blocks - dtype = np.int16 #the dtype of the quantum numbers - flows = [1, -1] - - rank = len(flows) - charges = [ - np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - for _ in range(rank) - ] - indices = [ - Index( - charges=U1Charge(charges[n]), flow=flows[n], name='index{}'.format(n)) - for n in range(rank) - ] - i1, i2 = indices - common_charges = np.intersect1d(i1.charges.charges, i2.charges.charges) - row_locations = find_sparse_positions( - charges=[i1.charges, i2.charges], - flows=flows, - target_charges=U1Charge(common_charges)) - fused = (i1 * i2).charges - relevant = fused.charges[np.isin(fused.charges, common_charges)] - for k, v in row_locations.items(): - np.testing.assert_allclose(np.nonzero(relevant == k)[0], np.sort(v)) - - -# def test_dense_transpose(): -# Ds = [10, 11, 12] #bond dimension -# rank = len(Ds) + dense_positions = reduce_charges( + [U1Charge(left_charges), 
U1Charge(right_charges)], [False, False], + target_charge, + return_locations=True) + np.testing.assert_allclose( + dense_positions[1], + np.nonzero(fused_charges == target_charge[0, 0])[0]) + + +# def test_find_dense_positions_2(): +# D = 40 #bond dimension +# B = 4 #number of blocks +# dtype = np.int16 #the dtype of the quantum numbers +# rank = 4 # flows = np.asarray([1 for _ in range(rank)]) # flows[-2::] = -1 -# charges = [U1Charge(np.zeros(Ds[n], dtype=np.int16)) for n in range(rank)] +# charges = [ +# np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) +# for _ in range(rank) +# ] # indices = [ -# Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) +# Index( +# charges=U1Charge(charges[n]), flow=flows[n], name='index{}'.format(n)) # for n in range(rank) # ] -# A = BlockSparseTensor.random(indices=indices, dtype=np.float64) -# B = np.transpose(np.reshape(A.data.copy(), Ds), (1, 0, 2)) -# A.transpose((1, 0, 2)) -# np.testing.assert_allclose(A.data, B.flat) - -# B = np.transpose(np.reshape(A.data.copy(), [11, 10, 12]), (1, 0, 2)) -# A.transpose((1, 0, 2)) - -# np.testing.assert_allclose(A.data, B.flat) - - -@pytest.mark.parametrize("R", [1, 2]) -def test_find_diagonal_dense_blocks(R): - rs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] - cs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] - charges = rs + cs - - left_fused = fuse_charges(charges[0:R], [1] * R) - right_fused = fuse_charges(charges[R:], [1] * R) - left_unique = left_fused.unique() - right_unique = right_fused.unique() - zero = left_unique.zero_charge - blocks = {} - rdim = len(right_fused) - for lu in left_unique: - linds = np.nonzero(left_fused == lu)[0] - rinds = np.nonzero(right_fused == lu * (-1))[0] - if (len(linds) > 0) and (len(rinds) > 0): - blocks[lu] = fuse_ndarrays([linds * rdim, rinds]) - comm, blocks_ = _find_diagonal_dense_blocks(rs, cs, [1] * R, [1] * R) - for n in range(len(comm)): - assert np.all(blocks[comm.charges[n]] == 
blocks_[n][0]) - - -# #@pytest.mark.parametrize("dtype", np_dtypes) -# def test_find_diagonal_dense_blocks_2(): -# R = 1 +# n1 = compute_num_nonzero([i.charges for i in indices], +# [i.flow for i in indices]) + +# i01 = indices[0] * indices[1] +# i23 = indices[2] * indices[3] +# positions = find_dense_positions([i01.charges, i23.charges], [1, 1], +# U1Charge(np.asarray([0]))) +# assert len(positions[0]) == n1 + +# def test_find_sparse_positions(): +# D = 40 #bond dimension +# B = 4 #number of blocks +# dtype = np.int16 #the dtype of the quantum numbers +# rank = 4 +# flows = np.asarray([1 for _ in range(rank)]) +# flows[-2::] = -1 +# charges = [ +# np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) +# for _ in range(rank) +# ] +# indices = [ +# Index( +# charges=U1Charge(charges[n]), flow=flows[n], name='index{}'.format(n)) +# for n in range(rank) +# ] +# n1 = compute_num_nonzero([i.charges for i in indices], +# [i.flow for i in indices]) +# i01 = indices[0] * indices[1] +# i23 = indices[2] * indices[3] +# unique_row_charges = np.unique(i01.charges.charges) +# unique_column_charges = np.unique(i23.charges.charges) +# common_charges = np.intersect1d( +# unique_row_charges, -unique_column_charges, assume_unique=True) +# blocks = find_sparse_positions([i01.charges, i23.charges], [1, 1], +# target_charges=U1Charge(np.asarray([0]))) +# assert sum([len(v) for v in blocks.values()]) == n1 +# np.testing.assert_allclose(np.sort(blocks[0]), np.arange(n1)) + +# def test_find_sparse_positions_2(): +# D = 1000 #bond dimension +# B = 4 #number of blocks +# dtype = np.int16 #the dtype of the quantum numbers +# charges = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) +# index = Index(charges=U1Charge(charges), flow=1, name='index0') +# targets = np.asarray([-1, 0, 1]) +# blocks = find_sparse_positions([index.charges], [index.flow], +# target_charges=U1Charge(targets)) + +# inds = np.isin(charges, targets) +# relevant_charges = charges[inds] +# blocks_ = {t: 
np.nonzero(relevant_charges == t)[0] for t in targets} +# assert np.all( +# np.asarray(list(blocks.keys())) == np.asarray(list(blocks_.keys()))) +# for k in blocks.keys(): +# assert np.all(blocks[k] == blocks_[k]) + +# def test_find_sparse_positions_3(): +# D = 40 #bond dimension +# B = 4 #number of blocks +# dtype = np.int16 #the dtype of the quantum numbers +# flows = [1, -1] + +# rank = len(flows) +# charges = [ +# np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) +# for _ in range(rank) +# ] +# indices = [ +# Index( +# charges=U1Charge(charges[n]), flow=flows[n], name='index{}'.format(n)) +# for n in range(rank) +# ] +# i1, i2 = indices +# common_charges = np.intersect1d(i1.charges.charges, i2.charges.charges) +# row_locations = find_sparse_positions( +# charges=[i1.charges, i2.charges], +# flows=flows, +# target_charges=U1Charge(common_charges)) +# fused = (i1 * i2).charges +# relevant = fused.charges[np.isin(fused.charges, common_charges)] +# for k, v in row_locations.items(): +# np.testing.assert_allclose(np.nonzero(relevant == k)[0], np.sort(v)) + +# # def test_dense_transpose(): +# # Ds = [10, 11, 12] #bond dimension +# # rank = len(Ds) +# # flows = np.asarray([1 for _ in range(rank)]) +# # flows[-2::] = -1 +# # charges = [U1Charge(np.zeros(Ds[n], dtype=np.int16)) for n in range(rank)] +# # indices = [ +# # Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) +# # for n in range(rank) +# # ] +# # A = BlockSparseTensor.random(indices=indices, dtype=np.float64) +# # B = np.transpose(np.reshape(A.data.copy(), Ds), (1, 0, 2)) +# # A.transpose((1, 0, 2)) +# # np.testing.assert_allclose(A.data, B.flat) + +# # B = np.transpose(np.reshape(A.data.copy(), [11, 10, 12]), (1, 0, 2)) +# # A.transpose((1, 0, 2)) + +# # np.testing.assert_allclose(A.data, B.flat) + +# @pytest.mark.parametrize("R", [1, 2]) +# def test_find_diagonal_dense_blocks(R): # rs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] # cs = [U1Charge(np.random.randint(-4, 
4, 50)) for _ in range(R)] # charges = rs + cs @@ -369,38 +188,60 @@ def test_find_diagonal_dense_blocks(R): # for n in range(len(comm)): # assert np.all(blocks[comm.charges[n]] == blocks_[n][0]) +# # #@pytest.mark.parametrize("dtype", np_dtypes) +# # def test_find_diagonal_dense_blocks_2(): +# # R = 1 +# # rs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] +# # cs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] +# # charges = rs + cs + +# # left_fused = fuse_charges(charges[0:R], [1] * R) +# # right_fused = fuse_charges(charges[R:], [1] * R) +# # left_unique = left_fused.unique() +# # right_unique = right_fused.unique() +# # zero = left_unique.zero_charge +# # blocks = {} +# # rdim = len(right_fused) +# # for lu in left_unique: +# # linds = np.nonzero(left_fused == lu)[0] +# # rinds = np.nonzero(right_fused == lu * (-1))[0] +# # if (len(linds) > 0) and (len(rinds) > 0): +# # blocks[lu] = fuse_ndarrays([linds * rdim, rinds]) +# # comm, blocks_ = _find_diagonal_dense_blocks(rs, cs, [1] * R, [1] * R) +# # for n in range(len(comm)): +# # assert np.all(blocks[comm.charges[n]] == blocks_[n][0]) + +# @pytest.mark.parametrize("R", [1, 2]) +# def test_find_diagonal_dense_blocks_transposed(R): +# order = np.arange(2 * R) +# np.random.shuffle(order) +# rs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] +# cs = [U1Charge(np.random.randint(-4, 4, 40)) for _ in range(R)] +# charges = rs + cs +# dims = np.asarray([len(c) for c in charges]) +# strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) +# stride_arrays = [np.arange(dims[n]) * strides[n] for n in range(2 * R)] -@pytest.mark.parametrize("R", [1, 2]) -def test_find_diagonal_dense_blocks_transposed(R): - order = np.arange(2 * R) - np.random.shuffle(order) - rs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] - cs = [U1Charge(np.random.randint(-4, 4, 40)) for _ in range(R)] - charges = rs + cs - dims = np.asarray([len(c) for c in charges]) - strides = 
np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) - stride_arrays = [np.arange(dims[n]) * strides[n] for n in range(2 * R)] - - left_fused = fuse_charges([charges[n] for n in order[0:R]], [1] * R) - right_fused = fuse_charges([charges[n] for n in order[R:]], [1] * R) - lstrides = fuse_ndarrays([stride_arrays[n] for n in order[0:R]]) - rstrides = fuse_ndarrays([stride_arrays[n] for n in order[R:]]) - - left_unique = left_fused.unique() - right_unique = right_fused.unique() - blocks = {} - rdim = len(right_fused) - for lu in left_unique: - linds = np.nonzero(left_fused == lu)[0] - rinds = np.nonzero(right_fused == lu * (-1))[0] - if (len(linds) > 0) and (len(rinds) > 0): - tmp = fuse_ndarrays([linds * rdim, rinds]) - blocks[lu] = _find_values_in_fused(tmp, lstrides, rstrides) +# left_fused = fuse_charges([charges[n] for n in order[0:R]], [1] * R) +# right_fused = fuse_charges([charges[n] for n in order[R:]], [1] * R) +# lstrides = fuse_ndarrays([stride_arrays[n] for n in order[0:R]]) +# rstrides = fuse_ndarrays([stride_arrays[n] for n in order[R:]]) - comm, blocks_ = _find_diagonal_dense_blocks([charges[n] for n in order[0:R]], - [charges[n] for n in order[R:]], - [1] * R, [1] * R, - row_strides=strides[order[0:R]], - column_strides=strides[order[R:]]) - for n in range(len(comm)): - assert np.all(blocks[comm.charges[n]] == blocks_[n][0]) +# left_unique = left_fused.unique() +# right_unique = right_fused.unique() +# blocks = {} +# rdim = len(right_fused) +# for lu in left_unique: +# linds = np.nonzero(left_fused == lu)[0] +# rinds = np.nonzero(right_fused == lu * (-1))[0] +# if (len(linds) > 0) and (len(rinds) > 0): +# tmp = fuse_ndarrays([linds * rdim, rinds]) +# blocks[lu] = _find_values_in_fused(tmp, lstrides, rstrides) + +# comm, blocks_ = _find_diagonal_dense_blocks([charges[n] for n in order[0:R]], +# [charges[n] for n in order[R:]], +# [1] * R, [1] * R, +# row_strides=strides[order[0:R]], +# column_strides=strides[order[R:]]) +# for n in range(len(comm)): 
+# assert np.all(blocks[comm.charges[n]] == blocks_[n][0]) diff --git a/tensornetwork/block_tensor/charge_test.py b/tensornetwork/block_tensor/charge_test.py index 2094a68b4..3e975a9cd 100644 --- a/tensornetwork/block_tensor/charge_test.py +++ b/tensornetwork/block_tensor/charge_test.py @@ -1,7 +1,7 @@ import numpy as np import pytest # pylint: disable=line-too-long -from tensornetwork.block_tensor.charge import ChargeCollection, BaseCharge, U1Charge, Z2Charge, fuse_degeneracies +from tensornetwork.block_tensor.charge import BaseCharge, U1Charge, fuse_degeneracies from tensornetwork.block_tensor.block_tensor import fuse_ndarrays @@ -15,48 +15,19 @@ def test_fuse_degeneracies(): def test_U1Charge_charges(): D = 100 B = 6 - charges = [ - np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - for _ in range(2) - ] - - merged_charges = np.left_shift(charges[0].astype(np.int64), - 16) + charges[1].astype(np.int64) + charges = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) q1 = U1Charge(charges) - assert np.all(q1.charges == merged_charges) + assert np.all(q1.charges == charges) def test_U1Charge_dual(): D = 100 B = 6 - charges = [ - np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - for _ in range(2) - ] - merged_charges = np.left_shift(charges[0].astype(np.int64), - 16) + charges[1].astype(np.int64) + charges = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) q1 = U1Charge(charges) - assert np.all(q1.dual_charges == -merged_charges) - - -def test_BaseCharge_raises(): - D = 100 - B = 6 - with pytest.raises(TypeError): - q1 = BaseCharge([ - np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int64) - for _ in range(2) - ]) - with pytest.raises(ValueError): - q1 = BaseCharge([ - np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - for _ in range(2) - ], - shifts=[16, 0]) - with pytest.raises(TypeError): - BaseCharge(np.random.randint(0, 4, 10).astype(np.int16), shifts=[16, 0]) + assert np.all(q1.dual(True).charges == -charges) 
def test_U1Charge_fusion(): @@ -64,12 +35,12 @@ def test_U1Charge_fusion(): def run_test(): D = 2000 B = 6 - O1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) - O2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) + O1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + O2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) P1 = np.random.randint(0, B + 1, D).astype(np.int16) P2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - Q1 = np.random.randint(1, B + 1, D).astype(np.int8) - Q2 = np.random.randint(1, B + 1, D).astype(np.int8) + Q1 = np.random.randint(1, B + 1, D).astype(np.int16) + Q2 = np.random.randint(1, B + 1, D).astype(np.int16) charges_1 = [O1, O2] charges_2 = [P1, P2] @@ -78,17 +49,20 @@ def run_test(): fused_1 = fuse_ndarrays(charges_1) fused_2 = fuse_ndarrays(charges_2) fused_3 = fuse_ndarrays(charges_3) - q1 = U1Charge([O1, P1, Q1]) - q2 = U1Charge([O2, P2, Q2]) + q1 = U1Charge(O1) @ U1Charge(P1) @ U1Charge(Q1) + q2 = U1Charge(O2) @ U1Charge(P2) @ U1Charge(Q2) - target = np.random.randint(-B // 2, B // 2 + 1, 3) + target = BaseCharge( + charges=np.random.randint(-B, B, (3, 1), dtype=np.int16), + charge_labels=None, + charge_types=[U1Charge, U1Charge, U1Charge]) q12 = q1 + q2 - nz_1 = np.nonzero(q12.equals(target))[0] - i1 = fused_1 == target[0] - i2 = fused_2 == target[1] - i3 = fused_3 == target[2] - nz_2 = np.nonzero(np.logical_and(np.logical_and(i1, i2), i3))[0] + nz_1 = np.nonzero(q12 == target)[0] + i1 = fused_1 == target.charges[0, 0] + i2 = fused_2 == target.charges[1, 0] + i3 = fused_3 == target.charges[2, 0] + nz_2 = np.nonzero(np.logical_and.reduce([i1, i2, i3]))[0] return nz_1, nz_2 nz_1, nz_2 = run_test() @@ -103,15 +77,15 @@ def test_U1Charge_multiple_fusion(): def run_test(): D = 300 B = 4 - O1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) - O2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) - O3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) 
+ O1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + O2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + O3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) P1 = np.random.randint(0, B + 1, D).astype(np.int16) P2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) P3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - Q1 = np.random.randint(1, B + 1, D).astype(np.int8) - Q2 = np.random.randint(0, B + 1, D).astype(np.int8) - Q3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) + Q1 = np.random.randint(1, B + 1, D).astype(np.int16) + Q2 = np.random.randint(0, B + 1, D).astype(np.int16) + Q3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) charges_1 = [O1, O2, O3] charges_2 = [P1, P2, P3] @@ -120,18 +94,22 @@ def run_test(): fused_1 = fuse_ndarrays(charges_1) fused_2 = fuse_ndarrays(charges_2) fused_3 = fuse_ndarrays(charges_3) - q1 = U1Charge([O1, P1, Q1]) - q2 = U1Charge([O2, P2, Q2]) - q3 = U1Charge([O3, P3, Q3]) + q1 = U1Charge(O1) @ U1Charge(P1) @ U1Charge(Q1) + q2 = U1Charge(O2) @ U1Charge(P2) @ U1Charge(Q2) + q3 = U1Charge(O3) @ U1Charge(P3) @ U1Charge(Q3) + + target = BaseCharge( + charges=np.random.randint(-B, B, (3, 1), dtype=np.int16), + charge_labels=None, + charge_types=[U1Charge, U1Charge, U1Charge]) - target = np.random.randint(-B // 2, B // 2 + 1, 3) q123 = q1 + q2 + q3 - nz_1 = np.nonzero(q123.equals(target))[0] - i1 = fused_1 == target[0] - i2 = fused_2 == target[1] - i3 = fused_3 == target[2] - nz_2 = np.nonzero(np.logical_and(np.logical_and(i1, i2), i3))[0] + nz_1 = np.nonzero(q123 == target)[0] + i1 = fused_1 == target.charges[0, 0] + i2 = fused_2 == target.charges[1, 0] + i3 = fused_3 == target.charges[2, 0] + nz_2 = np.nonzero(np.logical_and.reduce([i1, i2, i3]))[0] return nz_1, nz_2 nz_1, nz_2 = run_test() @@ -162,18 +140,20 @@ def run_test(): fused_1 = fuse_ndarrays(charges_1) fused_2 = fuse_ndarrays(charges_2) fused_3 = fuse_ndarrays(charges_3) - q1 = 
U1Charge([O1, P1, Q1]) - q2 = U1Charge([O2, P2, Q2]) - q3 = U1Charge([O3, P3, Q3]) - - target = np.random.randint(-B // 2, B // 2 + 1, 3) - q123 = q1 + q2 * (-1) + q3 - - nz_1 = np.nonzero(q123.equals(target))[0] - i1 = fused_1 == target[0] - i2 = fused_2 == target[1] - i3 = fused_3 == target[2] - nz_2 = np.nonzero(np.logical_and(np.logical_and(i1, i2), i3))[0] + q1 = U1Charge(O1) @ U1Charge(P1) @ U1Charge(Q1) + q2 = U1Charge(O2) @ U1Charge(P2) @ U1Charge(Q2) + q3 = U1Charge(O3) @ U1Charge(P3) @ U1Charge(Q3) + + target = BaseCharge( + charges=np.random.randint(-B, B, (3, 1), dtype=np.int16), + charge_labels=None, + charge_types=[U1Charge, U1Charge, U1Charge]) + q123 = q1 + q2 * True + q3 + nz_1 = np.nonzero(q123 == target)[0] + i1 = fused_1 == target.charges[0, 0] + i2 = fused_2 == target.charges[1, 0] + i3 = fused_3 == target.charges[2, 0] + nz_2 = np.nonzero(np.logical_and.reduce([i1, i2, i3]))[0] return nz_1, nz_2 nz_1, nz_2 = run_test() @@ -201,55 +181,21 @@ def run_test(): fused_1 = fuse_ndarrays(charges_1) fused_2 = fuse_ndarrays(charges_2) fused_3 = fuse_ndarrays(charges_3) - q1 = U1Charge([O1, P1, Q1]) - q2 = U1Charge([O2, P2, Q2]) - - target = np.random.randint(-B // 2, B // 2 + 1, 3) - q12 = q1 + q2 * (-1) - - nz_1 = np.nonzero(q12.equals(target))[0] - i1 = fused_1 == target[0] - i2 = fused_2 == target[1] - i3 = fused_3 == target[2] - nz_2 = np.nonzero(np.logical_and(np.logical_and(i1, i2), i3))[0] - return nz_1, nz_2 - - nz_1, nz_2 = run_test() - while len(nz_1) == 0: - nz_1, nz_2 = run_test() - assert np.all(nz_1 == nz_2) - - -def test_U1Charge_sub(): - - def run_test(): - D = 2000 - B = 6 - O1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) - O2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int8) - P1 = np.random.randint(0, B + 1, D).astype(np.int16) - P2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - Q1 = np.random.randint(1, B + 1, D).astype(np.int8) - Q2 = np.random.randint(1, B + 1, D).astype(np.int8) - - 
charges_1 = [O1, -O2] - charges_2 = [P1, -P2] - charges_3 = [Q1, -Q2] - fused_1 = fuse_ndarrays(charges_1) - fused_2 = fuse_ndarrays(charges_2) - fused_3 = fuse_ndarrays(charges_3) - q1 = U1Charge([O1, P1, Q1]) - q2 = U1Charge([O2, P2, Q2]) + q1 = U1Charge(O1) @ U1Charge(P1) @ U1Charge(Q1) + q2 = U1Charge(O2) @ U1Charge(P2) @ U1Charge(Q2) - target = np.random.randint(-B // 2, B // 2 + 1, 3) - q12 = q1 - q2 + target = BaseCharge( + charges=np.random.randint(-B, B, (3, 1), dtype=np.int16), + charge_labels=None, + charge_types=[U1Charge, U1Charge, U1Charge]) + q12 = q1 + q2 * True - nz_1 = np.nonzero(q12.equals(target))[0] - i1 = fused_1 == target[0] - i2 = fused_2 == target[1] - i3 = fused_3 == target[2] - nz_2 = np.nonzero(np.logical_and(np.logical_and(i1, i2), i3))[0] + nz_1 = np.nonzero(q12 == target)[0] + i1 = fused_1 == target.charges[0, 0] + i2 = fused_2 == target.charges[1, 0] + i3 = fused_3 == target.charges[2, 0] + nz_2 = np.nonzero(np.logical_and.reduce([i1, i2, i3]))[0] return nz_1, nz_2 nz_1, nz_2 = run_test() @@ -265,428 +211,46 @@ def test_U1Charge_matmul(): C2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) C3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - q1 = U1Charge([C1]) - q2 = U1Charge([C2]) - q3 = U1Charge([C3]) + q1 = U1Charge(C1) + q2 = U1Charge(C2) + q3 = U1Charge(C3) Q = q1 @ q2 @ q3 - Q_ = U1Charge([C1, C2, C3]) + Q_ = BaseCharge( + np.stack([C1, C2, C3], axis=0), + charge_labels=None, + charge_types=[U1Charge, U1Charge, U1Charge]) assert np.all(Q.charges == Q_.charges) - #assert Q.offsets == Q_.offsets - assert np.all(Q.shifts == Q_.shifts) - - -def test_Z2Charge_fusion(): - - def fuse_z2_charges(c1, c2): - return np.reshape( - np.bitwise_xor(c1[:, None], c2[None, :]), - len(c1) * len(c2)) - - def run_test(): - D = 1000 - O1 = np.random.randint(0, 2, D).astype(np.int8) - O2 = np.random.randint(0, 2, D).astype(np.int8) - P1 = np.random.randint(0, 2, D).astype(np.int8) - P2 = np.random.randint(0, 2, 
D).astype(np.int8) - Q1 = np.random.randint(0, 2, D).astype(np.int8) - Q2 = np.random.randint(0, 2, D).astype(np.int8) - - charges_1 = [O1, O2] - charges_2 = [P1, P2] - charges_3 = [Q1, Q2] - - fused_1 = fuse_z2_charges(*charges_1) - fused_2 = fuse_z2_charges(*charges_2) - fused_3 = fuse_z2_charges(*charges_3) - - q1 = Z2Charge([O1, P1, Q1]) - q2 = Z2Charge([O2, P2, Q2]) - - target = np.random.randint(0, 2, 3) - q12 = q1 + q2 - - nz_1 = np.nonzero(q12.equals(target))[0] - i1 = fused_1 == target[0] - i2 = fused_2 == target[1] - i3 = fused_3 == target[2] - nz_2 = np.nonzero(np.logical_and(np.logical_and(i1, i2), i3))[0] - return nz_1, nz_2 - - nz_1, nz_2 = run_test() - while len(nz_1) == 0: - nz_1, nz_2 = run_test() - assert np.all(nz_1 == nz_2) - - -def test_Z2Charge_sub(): - - def fuse_z2_charges(c1, c2): - return np.reshape( - np.bitwise_xor(c1[:, None], c2[None, :]), - len(c1) * len(c2)) - - def run_test(): - D = 1000 - O1 = np.random.randint(0, 2, D).astype(np.int8) - O2 = np.random.randint(0, 2, D).astype(np.int8) - P1 = np.random.randint(0, 2, D).astype(np.int8) - P2 = np.random.randint(0, 2, D).astype(np.int8) - Q1 = np.random.randint(0, 2, D).astype(np.int8) - Q2 = np.random.randint(0, 2, D).astype(np.int8) - - charges_1 = [O1, O2] - charges_2 = [P1, P2] - charges_3 = [Q1, Q2] - - fused_1 = fuse_z2_charges(*charges_1) - fused_2 = fuse_z2_charges(*charges_2) - fused_3 = fuse_z2_charges(*charges_3) - - q1 = Z2Charge([O1, P1, Q1]) - q2 = Z2Charge([O2, P2, Q2]) - - target = np.random.randint(0, 2, 3) - q12 = q1 - q2 - - nz_1 = np.nonzero(q12.equals(target))[0] - i1 = fused_1 == target[0] - i2 = fused_2 == target[1] - i3 = fused_3 == target[2] - nz_2 = np.nonzero(np.logical_and(np.logical_and(i1, i2), i3))[0] - return nz_1, nz_2 - - nz_1, nz_2 = run_test() - while len(nz_1) == 0: - nz_1, nz_2 = run_test() - assert np.all(nz_1 == nz_2) - - -def test_Z2Charge_matmul(): - D = 1000 - C1 = np.random.randint(0, 2, D).astype(np.int8) - C2 = np.random.randint(0, 2, 
D).astype(np.int8) - C3 = np.random.randint(0, 2, D).astype(np.int8) - - q1 = Z2Charge([C1]) - q2 = Z2Charge([C2]) - q3 = Z2Charge([C3]) - - Q = q1 @ q2 @ q3 - Q_ = Z2Charge([C1, C2, C3]) - assert np.all(Q.charges == Q_.charges) - assert np.all(Q.shifts == Q_.shifts) - - -def test_ChargeCollection_init_from_stacked(): - c = ChargeCollection( - [BaseCharge(None, None), BaseCharge(None, None)], - shifts=[[0], [0]], - stacked_charges=np.random.randint(0, 10, (10, 2))) - - -def test_Charge_U1_add(): - q1 = ChargeCollection( - [U1Charge([np.asarray([0, 1])]), - U1Charge([np.asarray([-2, 3])])]) - q2 = ChargeCollection( - [U1Charge([np.asarray([2, 3])]), - U1Charge([np.asarray([-1, 4])])]) - expected = [np.asarray([2, 3, 3, 4]), np.asarray([-3, 2, 2, 7])] - q12 = q1 + q2 - for n in range(len(q12.charges)): - np.testing.assert_allclose(expected[n], q12.charges[n].charges) - - -def test_Charge_U1_sub(): - q1 = ChargeCollection( - [U1Charge([np.asarray([0, 1])]), - U1Charge([np.asarray([-2, 3])])]) - q2 = ChargeCollection( - [U1Charge([np.asarray([2, 3])]), - U1Charge([np.asarray([-1, 4])])]) - expected = [np.asarray([-2, -3, -1, -2]), np.asarray([-1, -6, 4, -1])] - q12 = q1 - q2 - for n in range(len(q12.charges)): - np.testing.assert_allclose(expected[n], q12.charges[n].charges) - - -def test_Charge_Z2_add(): - q1 = ChargeCollection([ - Z2Charge([np.asarray([0, 1]).astype(np.int8)]), - Z2Charge([np.asarray([1, 0]).astype(np.int8)]) - ]) - q2 = ChargeCollection([ - Z2Charge([np.asarray([0, 0]).astype(np.int8)]), - Z2Charge([np.asarray([1, 1]).astype(np.int8)]) - ]) - expected = [np.asarray([0, 0, 1, 1]), np.asarray([0, 0, 1, 1])] - q12 = q1 + q2 - for n in range(len(q12.charges)): - np.testing.assert_allclose(expected[n], q12.charges[n].charges) - - -def test_Charge_Z2_sub(): - q1 = ChargeCollection([ - Z2Charge([np.asarray([0, 1]).astype(np.int8)]), - Z2Charge([np.asarray([1, 0]).astype(np.int8)]) - ]) - q2 = ChargeCollection([ - Z2Charge([np.asarray([0, 
0]).astype(np.int8)]), - Z2Charge([np.asarray([1, 1]).astype(np.int8)]) - ]) - expected = [np.asarray([0, 0, 1, 1]), np.asarray([0, 0, 1, 1])] - q12 = q1 - q2 - for n in range(len(q12.charges)): - np.testing.assert_allclose(expected[n], q12.charges[n].charges) - - -def test_Charge_Z2_U1_add(): - q1 = ChargeCollection([ - Z2Charge([np.asarray([0, 1]).astype(np.int8)]), - U1Charge([np.asarray([-2, 3]).astype(np.int8)]) - ]) - q2 = ChargeCollection([ - Z2Charge([np.asarray([0, 0]).astype(np.int8)]), - U1Charge([np.asarray([-1, 4]).astype(np.int8)]) - ]) - expected = [np.asarray([0, 0, 1, 1]), np.asarray([-3, 2, 2, 7])] - - q12 = q1 + q2 - for n in range(len(q12.charges)): - np.testing.assert_allclose(expected[n], q12.charges[n].charges) - - -def test_Charge_add_Z2_U1_raises(): - q1 = ChargeCollection([ - Z2Charge([np.asarray([0, 1]).astype(np.int8)]), - Z2Charge([np.asarray([-2, 3]).astype(np.int8)]) - ]) - q2 = ChargeCollection( - [U1Charge([np.asarray([0, 0])]), - U1Charge([np.asarray([-1, 4])])]) - expected = [np.asarray([0, 0, 1, 1]), np.asarray([-3, 2, 2, 7])] - with pytest.raises(TypeError): - q12 = q1 + q2 - - -def test_Charge_sub_Z2_U1_raises(): - q1 = ChargeCollection([ - Z2Charge([np.asarray([0, 1]).astype(np.int8)]), - Z2Charge([np.asarray([-2, 3]).astype(np.int8)]) - ]) - q2 = ChargeCollection( - [U1Charge([np.asarray([0, 0])]), - U1Charge([np.asarray([-1, 4])])]) - expected = [np.asarray([0, 0, 1, 1]), np.asarray([-3, 2, 2, 7])] - with pytest.raises(TypeError): - q12 = q1 - q2 def test_BaseCharge_eq(): D = 3000 B = 5 - q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) - Q = BaseCharge(charges=[q1, q2]) - target_charge = np.asarray([ - np.random.randint(-B // 2, B // 2 + 1), - np.random.randint(-B // 2 - 1, B // 2 + 2) - ]) - assert np.all( - (Q == np.left_shift(target_charge[0], 16) + target_charge[1] - ) == np.logical_and(q1 == target_charge[0], q2 == target_charge[1])) - 
- -def test_BaseCharge_equals(): - D = 3000 - B = 5 - q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) - Q = BaseCharge(charges=[q1, q2]) + c1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) + c2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) + q1 = U1Charge(c1) + q2 = U1Charge(c2) + Q = q1 @ q2 target_charge = np.asarray([ - np.random.randint(-B // 2, B // 2 + 1), - np.random.randint(-B // 2 - 1, B // 2 + 2) + np.random.randint(-B // 2, B // 2 + 1, dtype=np.int16), + np.random.randint(-B // 2 - 1, B // 2 + 2, dtype=np.int16) ]) + T = U1Charge(np.asarray([target_charge[0]])) @ U1Charge( + np.asarray([target_charge[1]])) assert np.all( - (Q.equals(target_charge) - ) == np.logical_and(q1 == target_charge[0], q2 == target_charge[1])) + (np.squeeze(Q == T) + ) == np.logical_and(c1 == target_charge[0], c2 == target_charge[1])) def test_BaseCharge_unique(): D = 3000 B = 5 - q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) - Q = BaseCharge(charges=[q1, q2]) + q = np.random.randint(-B // 2, B // 2 + 1, (2, D)).astype(np.int16) + Q = BaseCharge(charges=q, charge_types=[U1Charge, U1Charge]) expected = np.unique( - Q.charges, - return_index=True, - return_inverse=True, - return_counts=True, - axis=0) + q, return_index=True, return_inverse=True, return_counts=True, axis=1) actual = Q.unique(return_index=True, return_inverse=True, return_counts=True) assert np.all(actual[0].charges == expected[0]) assert np.all(actual[1] == expected[1]) assert np.all(actual[2] == expected[2]) assert np.all(actual[3] == expected[3]) - - -def test_Charge_U1_U1_equals(): - D = 3000 - B = 5 - q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) - p1 = np.random.randint(-B // 2 - 2, B // 2 + 3, D).astype(np.int16) - Q = 
ChargeCollection(charges=[U1Charge([q1, q2]), U1Charge(p1)]) - target_q = [ - np.random.randint(-B // 2, B // 2 + 1), - np.random.randint(-B // 2 - 1, B // 2 + 2) - ] - target_p = [np.random.randint(-B // 2 - 2, B // 2 + 3)] - target_charge = [target_q, target_p] - assert np.all((Q.equals(target_charge)) == np.logical_and.reduce( - [q1 == target_q[0], q2 == target_q[1], p1 == target_p[0]])) - - -def test_Charge_U1_U1_eq(): - D = 3000 - B = 5 - q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) - p1 = np.random.randint(-B // 2 - 2, B // 2 + 3, D).astype(np.int16) - Q = ChargeCollection(charges=[U1Charge([q1, q2]), U1Charge(p1)]) - target_q = [ - np.random.randint(-B // 2, B // 2 + 1), - np.random.randint(-B // 2 - 1, B // 2 + 2) - ] - target_q_shifted = np.left_shift(target_q[0], 16) + target_q[1] - target_p = np.random.randint(-B // 2 - 2, B // 2 + 3) - target_charge = [target_q_shifted, target_p] - assert np.all((Q == target_charge) == np.logical_and.reduce( - [q1 == target_q[0], q2 == target_q[1], p1 == target_p])) - - -def test_Charge_Z2_Z2_equals(): - D = 3000 - q1 = np.random.randint(0, 2, D).astype(np.int8) - q2 = np.random.randint(0, 2, D).astype(np.int8) - p1 = np.random.randint(0, 2, D).astype(np.int8) - Q = ChargeCollection(charges=[Z2Charge([q1, q2]), Z2Charge(p1)]) - target_q = [np.random.randint(0, 2), np.random.randint(0, 2)] - target_p = [np.random.randint(0, 2)] - target_charge = [target_q, target_p] - assert np.all((Q.equals(target_charge)) == np.logical_and.reduce( - [q1 == target_q[0], q2 == target_q[1], p1 == target_p[0]])) - - -def test_Charge_Z2_Z2_eq(): - D = 3000 - q1 = np.random.randint(0, 2, D).astype(np.int8) - q2 = np.random.randint(0, 2, D).astype(np.int8) - p1 = np.random.randint(0, 2, D).astype(np.int8) - Q = ChargeCollection(charges=[Z2Charge([q1, q2]), Z2Charge(p1)]) - target_q = [np.random.randint(0, 2), np.random.randint(0, 2)] - target_q_shifted = 
np.left_shift(target_q[0], 8) + target_q[1] - target_p = np.random.randint(0, 2) - target_charge = [target_q_shifted, target_p] - assert np.all((Q == target_charge) == np.logical_and.reduce( - [q1 == target_q[0], q2 == target_q[1], p1 == target_p])) - - -def test_Charge_U1_Z2_equals(): - D = 3000 - B = 5 - q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) - p1 = np.random.randint(0, 2, D).astype(np.int8) - Q = ChargeCollection(charges=[U1Charge([q1, q2]), Z2Charge(p1)]) - target_q = [ - np.random.randint(-B // 2, B // 2 + 1), - np.random.randint(-B // 2 - 1, B // 2 + 2) - ] - target_p = [np.random.randint(0, 2)] - target_charge = [target_q, target_p] - assert np.all((Q.equals(target_charge)) == np.logical_and.reduce( - [q1 == target_q[0], q2 == target_q[1], p1 == target_p[0]])) - - -def test_Charge_U1_Z2_eq(): - D = 3000 - B = 5 - q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) - p1 = np.random.randint(0, 2, D).astype(np.int8) - Q = ChargeCollection(charges=[U1Charge([q1, q2]), Z2Charge(p1)]) - target_q = [ - np.random.randint(-B // 2, B // 2 + 1), - np.random.randint(-B // 2 - 1, B // 2 + 2) - ] - target_q_shifted = np.left_shift(target_q[0], 16) + target_q[1] - target_p = np.random.randint(0, 2) - target_charge = [target_q_shifted, target_p] - assert np.all((Q == target_charge) == np.logical_and.reduce( - [q1 == target_q[0], q2 == target_q[1], p1 == target_p])) - - -def test_Charge_U1_U1_unique(): - D = 3000 - B = 5 - q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) - p1 = np.random.randint(-B // 2 - 2, B // 2 + 3, D).astype(np.int16) - Q = ChargeCollection(charges=[U1Charge([q1, q2]), U1Charge(p1)]) - expected = np.unique( - np.stack([Q.charges[0].charges, Q.charges[1].charges], axis=1), - return_index=True, - 
return_inverse=True, - return_counts=True, - axis=0) - actual = Q.unique(return_index=True, return_inverse=True, return_counts=True) - assert np.all(actual[0].charges[0].charges == expected[0][:, 0]) - assert np.all(actual[0].charges[1].charges == expected[0][:, 1]) - assert np.all(actual[1] == expected[1]) - assert np.all(actual[2] == expected[2]) - assert np.all(actual[3] == expected[3]) - - -def test_Charge_Z2_Z2_unique(): - D = 3000 - B = 5 - q1 = np.random.randint(0, 2, D).astype(np.int8) - q2 = np.random.randint(0, 2, D).astype(np.int8) - p1 = np.random.randint(0, 2, D).astype(np.int8) - Q = ChargeCollection(charges=[Z2Charge([q1, q2]), Z2Charge(p1)]) - expected = np.unique( - np.stack([Q.charges[0].charges, Q.charges[1].charges], axis=1), - return_index=True, - return_inverse=True, - return_counts=True, - axis=0) - actual = Q.unique(return_index=True, return_inverse=True, return_counts=True) - assert np.all(actual[0].charges[0].charges == expected[0][:, 0]) - assert np.all(actual[0].charges[1].charges == expected[0][:, 1]) - assert np.all(actual[1] == expected[1]) - assert np.all(actual[2] == expected[2]) - assert np.all(actual[3] == expected[3]) - - -def test_Charge_U1_Z2_unique(): - D = 3000 - B = 5 - q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - q2 = np.random.randint(-B // 2 - 1, B // 2 + 2, D).astype(np.int16) - p1 = np.random.randint(0, 2, D).astype(np.int8) - Q = ChargeCollection(charges=[U1Charge([q1, q2]), Z2Charge(p1)]) - expected = np.unique( - np.stack([Q.charges[0].charges, Q.charges[1].charges], axis=1), - return_index=True, - return_inverse=True, - return_counts=True, - axis=0) - actual = Q.unique(return_index=True, return_inverse=True, return_counts=True) - assert np.all(actual[0].charges[0].charges == expected[0][:, 0]) - assert np.all(actual[0].charges[1].charges == expected[0][:, 1]) - assert np.all(actual[1] == expected[1]) - assert np.all(actual[2] == expected[2]) - assert np.all(actual[3] == expected[3]) diff --git 
a/tensornetwork/block_tensor/index_test.py b/tensornetwork/block_tensor/index_test.py index 3d2e1c391..438984952 100644 --- a/tensornetwork/block_tensor/index_test.py +++ b/tensornetwork/block_tensor/index_test.py @@ -1,66 +1,66 @@ import numpy as np # pylint: disable=line-too-long from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_indices -from tensornetwork.block_tensor.charge import U1Charge, Z2Charge, ChargeCollection +from tensornetwork.block_tensor.charge import U1Charge, BaseCharge def test_index_fusion_mul(): D = 10 B = 4 dtype = np.int16 - q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 - q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 + q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)) #quantum numbers on leg 1 + q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)) #quantum numbers on leg 1 - i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 - i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + i1 = Index(charges=q1, flow=False, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=False, name='index2') #index on leg 2 i12 = i1 * i2 assert i12.left_child is i1 assert i12.right_child is i2 for n in range(len(i12.charges.charges)): - assert np.all(i12.charges == (q1 + q2).charges) + assert np.all(i12.charges.charges == (q1 + q2).charges) def test_fuse_indices(): D = 10 B = 4 dtype = np.int16 - q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 - q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 - i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 - i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)) #quantum numbers on leg 1 + q2 = 
U1Charge(np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)) #quantum numbers on leg 1 + i1 = Index(charges=q1, flow=False, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=False, name='index2') #index on leg 2 i12 = fuse_indices([i1, i2]) assert i12.left_child is i1 assert i12.right_child is i2 for n in range(len(i12.charges.charges)): - assert np.all(i12.charges == (q1 + q2).charges) + assert np.all(i12.charges.charges == (q1 + q2).charges) def test_split_index(): D = 10 B = 4 dtype = np.int16 - q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 - q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 + q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)) #quantum numbers on leg 1 + q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)) #quantum numbers on leg 1 - i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 - i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 + i1 = Index(charges=q1, flow=False, name='index1') #index on leg 1 + i2 = Index(charges=q2, flow=False, name='index2') #index on leg 2 i12 = i1 * i2 i1_, i2_ = split_index(i12) assert i1 is i1_ assert i2 is i2_ - np.testing.assert_allclose(q1.charges, i1.charges) - np.testing.assert_allclose(q2.charges, i2.charges) - np.testing.assert_allclose(q1.charges, i1_.charges) - np.testing.assert_allclose(q2.charges, i2_.charges) + np.testing.assert_allclose(q1.charges, i1.charges.charges) + np.testing.assert_allclose(q2.charges, i2.charges.charges) + np.testing.assert_allclose(q1.charges, i1_.charges.charges) + np.testing.assert_allclose(q2.charges, i2_.charges.charges) assert i1_.name == 'index1' assert i2_.name == 'index2' assert i1_.flow == i1.flow @@ -71,14 +71,14 @@ def test_elementary_indices(): D = 10 B = 4 dtype = np.int16 - q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)]) - q2 = 
U1Charge([np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)]) - q3 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)]) - q4 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)]) - i1 = Index(charges=q1, flow=1, name='index1') - i2 = Index(charges=q2, flow=1, name='index2') - i3 = Index(charges=q3, flow=1, name='index3') - i4 = Index(charges=q4, flow=1, name='index4') + q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) + q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) + q3 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) + q4 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) + i1 = Index(charges=q1, flow=False, name='index1') + i2 = Index(charges=q2, flow=False, name='index2') + i3 = Index(charges=q3, flow=False, name='index3') + i4 = Index(charges=q4, flow=False, name='index4') i12 = i1 * i2 i34 = i3 * i4 @@ -101,23 +101,23 @@ def test_elementary_indices(): assert elmt1234[2].flow == i3.flow assert elmt1234[3].flow == i4.flow - np.testing.assert_allclose(q1.charges, i1.charges) - np.testing.assert_allclose(q2.charges, i2.charges) - np.testing.assert_allclose(q3.charges, i3.charges) - np.testing.assert_allclose(q4.charges, i4.charges) + np.testing.assert_allclose(q1.charges, i1.charges.charges) + np.testing.assert_allclose(q2.charges, i2.charges.charges) + np.testing.assert_allclose(q3.charges, i3.charges.charges) + np.testing.assert_allclose(q4.charges, i4.charges.charges) def test_leave(): D = 10 B = 4 dtype = np.int16 - q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 - q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 + q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)) #quantum numbers on leg 1 + q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)) #quantum numbers on leg 1 - i1 = Index(charges=q1, 
flow=1, name='index1') - i2 = Index(charges=q2, flow=1, name='index2') + i1 = Index(charges=q1, flow=False, name='index1') + i2 = Index(charges=q2, flow=False, name='index2') assert i1.is_leave assert i2.is_leave @@ -129,15 +129,15 @@ def test_copy(): D = 10 B = 4 dtype = np.int16 - q1 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 - q2 = U1Charge([np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)]) #quantum numbers on leg 1 - - i1 = Index(charges=q1, flow=1, name='index1') - i2 = Index(charges=q2, flow=1, name='index2') - i3 = Index(charges=q1, flow=-1, name='index3') - i4 = Index(charges=q2, flow=-1, name='index4') + q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)) #quantum numbers on leg 1 + q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, + D).astype(dtype)) #quantum numbers on leg 1 + + i1 = Index(charges=q1, flow=False, name='index1') + i2 = Index(charges=q2, flow=False, name='index2') + i3 = Index(charges=q1, flow=True, name='index3') + i4 = Index(charges=q2, flow=True, name='index4') i12 = i1 * i2 i34 = i3 * i4 From 71e542b1e10cc4e7175c8705ebf083c99220f976 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 29 Jan 2020 14:59:14 -0500 Subject: [PATCH 186/212] add benchmark file --- tensornetwork/block_tensor/benchmarks.py | 171 +++++++++++++++++++++++ 1 file changed, 171 insertions(+) create mode 100644 tensornetwork/block_tensor/benchmarks.py diff --git a/tensornetwork/block_tensor/benchmarks.py b/tensornetwork/block_tensor/benchmarks.py new file mode 100644 index 000000000..710c75f7b --- /dev/null +++ b/tensornetwork/block_tensor/benchmarks.py @@ -0,0 +1,171 @@ +import tensornetwork as tn +import numpy as np +import time +from tensornetwork.block_tensor.block_tensor import BlockSparseTensor, tensordot +from tensornetwork.block_tensor.index import Index +from tensornetwork.block_tensor.charge import U1Charge + + +def benchmark_1_U1(): + R = 6 + charges = [ + U1Charge( + 
np.asarray([-2, -1, -1, 0, -1, 0, 0, 1, -1, 0, 0, 1, 0, 1, 1, 2], + dtype=np.int16)) for n in range(R) + ] + + indsA = np.random.choice(np.arange(R), R // 2, replace=False) + indsB = np.random.choice(np.arange(R), R // 2, replace=False) + + flowsA = np.asarray([False] * R) + flowsB = np.asarray([False] * R) + + flowsB[indsB] = True + A = BlockSparseTensor.random(indices=[ + Index(charges[n], flowsA[n], name='a{}'.format(n)) for n in range(R) + ]) + B = BlockSparseTensor.random(indices=[ + Index(charges[n], flowsB[n], name='b{}'.format(n)) for n in range(R) + ]) + + final_order = np.arange(R) + np.random.shuffle(final_order) + t1 = time.time() + res = tensordot(A, B, (indsA, indsB), final_order) + print('BM 1- U1: {}s'.format(time.time() - t1)) + + +def benchmark_1_U1xU1(): + + R = 6 + charges = [ + U1Charge( + np.asarray([-2, -1, -1, 0, -1, 0, 0, 1, -1, 0, 0, 1, 0, 1, 1, 2], + dtype=np.int16)) + @ U1Charge( + np.asarray([0, -1, 1, 0, -1, -2, 0, -1, 1, 0, 2, 0, 0, -1, 1, 0], + dtype=np.int16)) for n in range(R) + ] + + indsA = np.random.choice(np.arange(R), R // 2, replace=False) + indsB = np.random.choice(np.arange(R), R // 2, replace=False) + + flowsA = np.asarray([False] * R) + flowsB = np.asarray([False] * R) + + flowsB[indsB] = True + A = BlockSparseTensor.random(indices=[ + Index(charges[n], flowsA[n], name='a{}'.format(n)) for n in range(R) + ]) + B = BlockSparseTensor.random(indices=[ + Index(charges[n], flowsB[n], name='b{}'.format(n)) for n in range(R) + ]) + + final_order = np.arange(R) + np.random.shuffle(final_order) + t1 = time.time() + res = tensordot(A, B, (indsA, indsB), final_order) + print('BM 1- U1xU1: {}s'.format(time.time() - t1)) + + +def benchmark_2_U1(): + R = 12 + charges = [ + U1Charge(np.asarray([-1, 0, 0, 1], dtype=np.int16)) for n in range(R) + ] + indsA = np.random.choice(np.arange(R), R // 2, replace=False) + indsB = np.random.choice(np.arange(R), R // 2, replace=False) + flowsA = np.asarray([False] * R) + flowsB = np.asarray([False] 
* R) + flowsB[indsB] = True + A = BlockSparseTensor.random(indices=[ + Index(charges[n], flowsA[n], name='a{}'.format(n)) for n in range(R) + ]) + B = BlockSparseTensor.random(indices=[ + Index(charges[n], flowsB[n], name='b{}'.format(n)) for n in range(R) + ]) + final_order = np.arange(R) + np.random.shuffle(final_order) + t1 = time.time() + res = tensordot(A, B, (indsA, indsB), final_order) + print('BM 2- U1: {}s'.format(time.time() - t1)) + + +def benchmark_2_U1xU1(): + R = 12 + charges = [ + U1Charge(np.asarray([-1, 0, 0, 1], dtype=np.int16)) @ U1Charge( + np.asarray([0, -1, 1, 0], dtype=np.int16)) for n in range(R) + ] + indsA = np.random.choice(np.arange(R), R // 2, replace=False) + indsB = np.random.choice(np.arange(R), R // 2, replace=False) + flowsA = np.asarray([False] * R) + flowsB = np.asarray([False] * R) + flowsB[indsB] = True + A = BlockSparseTensor.random(indices=[ + Index(charges[n], flowsA[n], name='a{}'.format(n)) for n in range(R) + ]) + B = BlockSparseTensor.random(indices=[ + Index(charges[n], flowsB[n], name='b{}'.format(n)) for n in range(R) + ]) + final_order = np.arange(R) + np.random.shuffle(final_order) + t1 = time.time() + res = tensordot(A, B, (indsA, indsB), final_order) + print('BM 2- U1xU1: {}s'.format(time.time() - t1)) + + +def benchmark_3_U1(): + R = 14 + charges = [ + U1Charge(np.asarray([-1, 0, 0, 1], dtype=np.int16)) for n in range(R) + ] + + indsA = np.random.choice(np.arange(R), R // 2, replace=False) + indsB = np.random.choice(np.arange(R), R // 2, replace=False) + flowsA = np.asarray([False] * R) + flowsB = np.asarray([False] * R) + flowsB[indsB] = True + A = BlockSparseTensor.random(indices=[ + Index(charges[n], flowsA[n], name='a{}'.format(n)) for n in range(R) + ]) + B = BlockSparseTensor.random(indices=[ + Index(charges[n], flowsB[n], name='b{}'.format(n)) for n in range(R) + ]) + final_order = np.arange(R) + np.random.shuffle(final_order) + t1 = time.time() + res = tensordot(A, B, (indsA, indsB), final_order) + 
print('BM 3- U1: {}s'.format(time.time() - t1)) + + +def benchmark_3_U1xU1(): + R = 14 + charges = [ + U1Charge(np.asarray([-1, 0, 0, 1], dtype=np.int16)) @ U1Charge( + np.asarray([0, -1, 1, 0], dtype=np.int16)) for n in range(R) + ] + indsA = np.random.choice(np.arange(R), R // 2, replace=False) + indsB = np.random.choice(np.arange(R), R // 2, replace=False) + flowsA = np.asarray([False] * R) + flowsB = np.asarray([False] * R) + flowsB[indsB] = True + A = BlockSparseTensor.random(indices=[ + Index(charges[n], flowsA[n], name='a{}'.format(n)) for n in range(R) + ]) + B = BlockSparseTensor.random(indices=[ + Index(charges[n], flowsB[n], name='b{}'.format(n)) for n in range(R) + ]) + final_order = np.arange(R) + np.random.shuffle(final_order) + t1 = time.time() + res = tensordot(A, B, (indsA, indsB), final_order) + print('BM 3- U1xU1: {}s'.format(time.time() - t1)) + + +benchmark_1_U1() +benchmark_1_U1xU1() +benchmark_2_U1() +benchmark_2_U1xU1() +benchmark_3_U1() +benchmark_3_U1xU1() From b3165440beef5898dcbae1458189aa8b920e2865 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 29 Jan 2020 15:00:18 -0500 Subject: [PATCH 187/212] adding files --- .../block_tensor/block_tensor_test.py | 65 +- tensornetwork/block_tensor/chargebkp.py | 1040 +++++++++++++++++ tensornetwork/block_tensor/chargebkp2.py | 778 ++++++++++++ 3 files changed, 1882 insertions(+), 1 deletion(-) create mode 100644 tensornetwork/block_tensor/chargebkp.py create mode 100644 tensornetwork/block_tensor/chargebkp2.py diff --git a/tensornetwork/block_tensor/block_tensor_test.py b/tensornetwork/block_tensor/block_tensor_test.py index c941c4544..2c8ada9c0 100644 --- a/tensornetwork/block_tensor/block_tensor_test.py +++ b/tensornetwork/block_tensor/block_tensor_test.py @@ -3,7 +3,7 @@ from tensornetwork.block_tensor.charge import U1Charge, fuse_charges from tensornetwork.block_tensor.index import Index -from tensornetwork.block_tensor.block_tensor import compute_num_nonzero, reduce_charges, 
BlockSparseTensor, fuse_ndarrays +from tensornetwork.block_tensor.block_tensor import compute_num_nonzero, reduce_charges, BlockSparseTensor, fuse_ndarrays, tensordot np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128] @@ -47,6 +47,69 @@ def test_find_dense_positions(): np.nonzero(fused_charges == target_charge[0, 0])[0]) +def test_transpose(): + R = 4 + Ds = [20, 3, 4, 5] + final_order = np.arange(R) + np.random.shuffle(final_order) + charges = [U1Charge(np.random.randint(-5, 5, Ds[n])) for n in range(R)] + flows = np.full(R, fill_value=False, dtype=np.bool) + indices = [Index(charges[n], flows[n]) for n in range(R)] + A = BlockSparseTensor.random(indices=indices) + Adense = A.todense() + dense_res = np.transpose(Adense, final_order) + A.transpose(final_order) + np.testing.assert_allclose(dense_res, A.todense()) + + +def test_tensordot(): + R = 4 + DsA = [10, 12, 14, 16] + DsB = [14, 16, 18, 20] + chargesA = [U1Charge(np.random.randint(-5, 5, DsA[n])) for n in range(R // 2)] + commoncharges = [ + U1Charge(np.random.randint(-5, 5, DsA[n + R // 2])) for n in range(R // 2) + ] + chargesB = [ + U1Charge(np.random.randint(-5, 5, DsB[n + R // 2])) for n in range(R // 2) + ] + indsA = np.random.choice(np.arange(R), R // 2, replace=False) + indsB = np.random.choice(np.arange(R), R // 2, replace=False) + flowsA = np.full(R, False, dtype=np.bool) + flowsB = np.full(R, False, dtype=np.bool) + + flowsB[indsB] = True + indicesA = [None for _ in range(R)] + indicesB = [None for _ in range(R)] + for n in range(len(indsA)): + indicesA[indsA[n]] = Index(commoncharges[n], flowsA[indsA[n]]) + indicesB[indsB[n]] = Index(commoncharges[n], flowsB[indsB[n]]) + compA = list(set(np.arange(R)) - set(indsA)) + compB = list(set(np.arange(R)) - set(indsB)) + + for n in range(len(compA)): + indicesA[compA[n]] = Index(chargesA[n], flowsA[compA[n]]) + indicesB[compB[n]] = Index(chargesB[n], flowsB[compB[n]]) + indices_final = [] + for n in sorted(compA): + 
indices_final.append(indicesA[n]) + for n in sorted(compB): + indices_final.append(indicesB[n]) + shapes = tuple([i.dimension for i in indices_final]) + A = BlockSparseTensor.random(indices=indicesA) + B = BlockSparseTensor.random(indices=indicesB) + + final_order = np.arange(R) + np.random.shuffle(final_order) + Adense = A.todense() + Bdense = B.todense() + dense_res = np.transpose( + np.tensordot(Adense, Bdense, (indsA, indsB)), final_order) + + res = tensordot(A, B, (indsA, indsB), final_order=final_order) + np.testing.assert_allclose(dense_res, res.todense()) + + # def test_find_dense_positions_2(): # D = 40 #bond dimension # B = 4 #number of blocks diff --git a/tensornetwork/block_tensor/chargebkp.py b/tensornetwork/block_tensor/chargebkp.py new file mode 100644 index 000000000..46b9ea4d1 --- /dev/null +++ b/tensornetwork/block_tensor/chargebkp.py @@ -0,0 +1,1040 @@ +# Copyright 2019 The TensorNetwork Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +from tensornetwork.network_components import Node, contract, contract_between +# pylint: disable=line-too-long +from tensornetwork.backends import backend_factory +import copy +import warnings +from typing import List, Union, Any, Optional, Tuple, Text, Iterable, Type + + +def _copy_charges(charges): + cs = [] + for n in range(len(charges)): + c = type(charges[n]).__new__(type( + charges[n])) #create a new charge object of type type(other) + c.__init__(charges[n].charges.copy()) + cs.append(c) + return cs + + +class BaseCharge: + """ + Base class for fundamental charges (i.e. for symmetries that + are not products of smaller groups) + """ + + def __init__(self, + charges: Optional[Union[List[np.ndarray], np.ndarray]] = None, + shifts: Optional[Union[List[int], np.ndarray]] = None) -> None: + """ + Initialize a BaseCharge object. + Args: + charges: Optional `np.ndarray` or list of `np.ndarray` of type `int` holdingn + the physical charges. If a list of `np,ndarray` is passed, the arrays are merged + into a single `np.ndarray` by `np.left_shift`-ing and adding up charges. The amount + of left-shift per `np,ndarray` is determined by its `dtype`. E.g. an `np,ndarray` of + `dtype=np.int16` is shifted by 16 bits. Charges are shifted and added moving from + small to large indices in `charges`. `BaseCharge` can hold at most 8 individual + charges of `dtype=np.int8` on 64-bit architectures. + shifts: An optional list of shifts, used for initializing a `BaseCharge` object from + an existing `BaseCharge` object. 
+ """ + if charges is not None: + if isinstance(charges, np.ndarray): + charges = [charges] + self._itemsizes = [c.dtype.itemsize for c in charges] + if np.sum(self._itemsizes) > 8: + raise TypeError("number of bits required to store all charges " + "in a single int is larger than 64") + + if len(charges) > 1: + if shifts is not None: + raise ValueError("If `shifts` is passed, only a single charge array " + "can be passed. Got len(charges) = {}".format( + len(charges))) + if shifts is None: + dtype = np.int8 + if np.sum(self._itemsizes) > 1: + dtype = np.int16 + if np.sum(self._itemsizes) > 2: + dtype = np.int32 + if np.sum(self._itemsizes) > 4: + dtype = np.int64 + #multiply by eight to get number of bits + self.shifts = 8 * np.flip( + np.append(0, np.cumsum(np.flip( + self._itemsizes[1::])))).astype(dtype) + dtype_charges = [c.astype(dtype) for c in charges] + self.charges = np.sum([ + np.left_shift(dtype_charges[n], self.shifts[n]) + for n in range(len(dtype_charges)) + ], + axis=0).astype(dtype) + else: + if np.max(shifts) >= charges[0].dtype.itemsize * 8: + raise TypeError("shifts {} are incompatible with dtype {}".format( + shifts, charges[0].dtype)) + self.shifts = np.asarray(shifts) + self.charges = charges[0] + else: + self.charges = np.asarray([]) + self.shifts = np.asarray([]) + + def __add__(self, other: "BaseCharge") -> "BaseCharge": + """ + Fuse the charges of two `BaseCharge` objects and return a new + `BaseCharge` holding the result. + Args: + other: A `BaseChare` object. + Returns: + BaseCharge: The result of fusing `self` with `other`. + """ + raise NotImplementedError("`__add__` is not implemented for `BaseCharge`") + + def __sub__(self, other: "BaseCharge") -> "BaseCharge": + """ + Subtract the charges of `other` from `self. + Returns a `BaseCharge` holding the result. + Args: + other: A `BaseChare` object. + Returns: + BaseCharge: The result subtracting `other` from `self`. 
+ """ + + raise NotImplementedError("`__sub__` is not implemented for `BaseCharge`") + + def __matmul__(self, other: "BaseCharge") -> "BaseCharge": + """ + Build the direct product of two charges and return + it in a new `BaseCharge` object. + Args: + other: A `BaseCharge` object. + Returns: + BaseCharge: The direct product of `self` and `other`. + """ + raise NotImplementedError( + "`__matmul__` is not implemented for `BaseCharge`") + + def get_item(self, n: Union[np.ndarray, int]) -> np.ndarray: + """ + Return the charge-element at position `n`. + Args: + n: An integer or `np.ndarray`. + Returns: + np.ndarray: The charges at `n`. + """ + return self.charges[n] + + def get_item_ndarray(self, n: Union[np.ndarray, int]) -> np.ndarray: + """ + Return the charge-element at position `n`. + Needed to provide a common interface with `ChargeCollection`. + Args: + n: An integer or `np.ndarray`. + Returns: + np.ndarray: The charges at `n`. + + """ + + return self.get_item(n) + + def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": + """ + Return the charge-element at position `n`, wrapped into a `BaseCharge` + object. + Args: + n: An integer or `np.ndarray`. + Returns: + BaseCharge: The charges at `n`. + """ + + if isinstance(n, (np.integer, int)): + n = np.asarray([n]) + charges = self.charges[n] + obj = self.__new__(type(self)) + obj.__init__(charges=[charges], shifts=self.shifts) + return obj + + @property + def num_symmetries(self): + """ + The number of individual symmetries stored in this object. 
+ """ + return len(self.shifts) + + def __len__(self) -> int: + return np.prod(self.charges.shape) + + def __repr__(self): + return str(type(self)) + '\nshifts: ' + self.shifts.__repr__( + ) + '\n' + 'charges: ' + self.charges.__repr__() + '\n' + + @property + def dual_charges(self) -> np.ndarray: + raise NotImplementedError( + "`dual_charges` is not implemented for `BaseCharge`") + + def __mul__(self, number: Union[bool, int]) -> "BaseCharge": + """ + Multiply `self` with `number` from the left. + `number` can take values in `1,-1, 0, True, False`. + This multiplication is used to transform between charges and dual-charges. + Args: + number: Can can take values in `1,-1, 0, True, False`. + If `1,True`, return the original object + If `-1, 0, False` return a new `BaseCharge` holding the + dual-charges. + Returns: + BaseCharge: The result of `self * number` + """ + raise NotImplementedError("`__mul__` is not implemented for `BaseCharge`") + + def __rmul__(self, number: Union[bool, int]) -> "BaseCharge": + """ + Multiply `self` with `number` from the right. + `number` can take values in `1,-1, 0, True, False`. + This multiplication is used to transform between charges and dual-charges. + Args: + number: Can can take values in `1,-1, 0, True, False`. + If `1,True`, return the original object + If `-1, 0, False` return a new `BaseCharge` holding the + dual-charges. + Returns: + BaseCharge: The result of `number * self`. + """ + + raise NotImplementedError("`__rmul__` is not implemented for `BaseCharge`") + + @property + def dtype(self): + return self.charges.dtype + + def unique(self, + return_index=False, + return_inverse=False, + return_counts=False + ) -> Tuple["BaseCharge", np.ndarray, np.ndarray, np.ndarray]: + """ + Compute the unique charges in `BaseCharge`. + See np.unique for a more detailed explanation. This function + does the same but instead of a np.ndarray, it returns the unique + elements in a `BaseCharge` object. 
+ Args: + return_index: If `True`, also return the indices of `self.charges` (along the specified axis, + if provided, or in the flattened array) that result in the unique array. + return_inverse: If `True`, also return the indices of the unique array (for the specified + axis, if provided) that can be used to reconstruct `self.charges`. + return_counts: If `True`, also return the number of times each unique item appears + in `self.charges`. + Returns: + BaseCharge: The sorted unique values. + np.ndarray: The indices of the first occurrences of the unique values in the + original array. Only provided if `return_index` is True. + np.ndarray: The indices to reconstruct the original array from the + unique array. Only provided if `return_inverse` is True. + np.ndarray: The number of times each of the unique values comes up in the + original array. Only provided if `return_counts` is True. + """ + result = np.unique( + self.charges, + return_index=return_index, + return_inverse=return_inverse, + return_counts=return_counts) + if not (return_index or return_inverse or return_counts): + out = self.__new__(type(self)) + out.__init__([result], self.shifts) + return out + else: + out = self.__new__(type(self)) + out.__init__([result[0]], self.shifts) + return tuple([out] + [result[n] for n in range(1, len(result))]) + + def isin(self, targets: Union[int, Iterable, "BaseCharge"]) -> np.ndarray: + """ + Test each element of `BaseCharge` if it is in `targets`. Returns + an `np.ndarray` of `dtype=bool`. + Args: + targets: The test elements + Returns: + np.ndarray: An array of `bool` type holding the result of the comparison. 
+ """ + if isinstance(targets, type(self)): + if not np.all(self.shifts == targets.shifts): + raise ValueError( + "Cannot compare charges with different shifts {} and {}".format( + self.shifts, targets.shifts)) + + targets = targets.charges + targets = np.asarray(targets) + return np.isin(self.charges, targets) + + def __contains__(self, target: Union[int, Iterable, "BaseCharge"]) -> bool: + """ + Test each element of `BaseCharge` if it is in `targets`. Returns + an `np.ndarray` of `dtype=bool`. + Args: + targets: The test elements + Returns: + np.ndarray: An array of `bool` type holding the result of the comparison. + """ + + if isinstance(target, type(self)): + if not np.all(self.shifts == target.shifts): + raise ValueError( + "Cannot compare charges with different shifts {} and {}".format( + self.shifts, tparget.shifts)) + target = target.charges + target = np.asarray(target) + return target in self.charges + + def equals(self, target_charges: Iterable) -> np.ndarray: + """ + Find indices where `BaseCharge` equals `target_charges`. + `target_charges` has to be an array of the same lenghts + as `BaseCharge.shifts`, containing one integer per symmetry of + `BaseCharge` + Args: + target_charges: np.ndarray of integers encoding charges. + Returns: + np.ndarray: Boolean array with `True` where `BaseCharge` equals + `target_charges` and `False` everywhere else. + """ + if len(target_charges) != len(self.shifts): + raise ValueError("len(target_charges) = {} is different " + "from len(shifts) = {}".format( + len(target_charges), len(self.shifts))) + _target_charges = np.asarray(target_charges).astype(self.charges.dtype) + target = np.sum([ + np.left_shift(_target_charges[n], self.shifts[n]) + for n in range(len(self.shifts)) + ]) + return self.charges == target + + def __eq__(self, target: Union[int, Iterable]) -> np.ndarray: + """ + Find indices where `BaseCharge` equals `target_charges`. 
+ `target` is a single integer encoding all symmetries of + `BaseCharge` + Args: + target: integerger encoding charges. + Returns: + np.ndarray: Boolean array with `True` where `BaseCharge.charges` equals + `target` and `False` everywhere else. + """ + if isinstance(target, type(self)): + return np.squeeze( + np.expand_dims(self.charges, 1) == np.expand_dims(target.charges, 0)) + return np.squeeze( + np.expand_dims(self.charges, 1) == np.expand_dims( + np.asarray(target), 0)) + + def concatenate(self, others: Union["BaseCharge", List["BaseCharge"]]): + """ + Concatenate `self.charges` with `others.charges`. + Args: + others: List of `BaseCharge` objects. + Returns: + BaseCharge: The concatenated charges. + """ + if isinstance(others, type(self)): + others = [others] + for o in others: + if not np.all(self.shifts == o.shifts): + raise ValueError( + "Cannot fuse charges with different shifts {} and {}".format( + self.shifts, o.shifts)) + + charges = np.concatenate( + [self.charges] + [o.charges for o in others], axis=0) + out = self.__new__(type(self)) + out.__init__([charges], self.shifts) + return out + + @property + def dtype(self): + return self.charges.dtype + + @property + def zero_charge(self): + obj = self.__new__(type(self)) + obj.__init__(charges=[np.asarray([self.dtype.type(0)])], shifts=self.shifts) + return obj + + def __iter__(self): + return iter(self.charges) + + def intersect(self, + other: "BaseCharge", + return_indices: Optional[bool] = False) -> "BaseCharge": + if not np.all(self.shifts == other.shifts): + raise ValueError( + "Cannot intersect charges with different shifts {} and {}".format( + self.shifts, other.shifts)) + if return_indices: + charges, comm1, comm2 = np.intersect1d( + self.charges, other.charges, return_indices=return_indices) + else: + charges = np.intersect1d(self.charges, other.charges) + + obj = self.__new__(type(self)) + obj.__init__(charges=[charges], shifts=self.shifts) + if return_indices: + return obj, comm1, comm2 + 
return obj + + +class U1Charge(BaseCharge): + """ + A simple charge class for a single U1 symmetry. + This class can store multiple U1 charges in a single + np.ndarray of integer dtype. Depending on the dtype of + the individual symmetries, this class can store: + * 8 np.int8 + * 4 np.int16 + * 2 np.int32 + * 1 np.int64 + or any suitable combination of dtypes, such that their + bite-sum remains below 64. + """ + + def __init__(self, + charges: List[np.ndarray], + shifts: Optional[np.ndarray] = None) -> None: + super().__init__(charges=charges, shifts=shifts) + + def __add__(self, other: "U1Charge") -> "U1Charge": + """ + Fuse the charges of `self` with charges of `other`, and + return a new `U1Charge` object holding the result. + Args: + other: A `U1Charge` object. + Returns: + U1Charge: The result of fusing `self` with `other`. + """ + if self.num_symmetries != other.num_symmetries: + raise ValueError( + "cannot fuse charges with different number of symmetries") + + if not np.all(self.shifts == other.shifts): + raise ValueError( + "Cannot fuse U1-charges with different shifts {} and {}".format( + self.shifts, other.shifts)) + if not isinstance(other, U1Charge): + raise TypeError( + "can only add objects of identical types, found {} and {} instead" + .format(type(self), type(other))) + fused = np.reshape(self.charges[:, None] + other.charges[None, :], + len(self.charges) * len(other.charges)) + return U1Charge(charges=[fused], shifts=self.shifts) + + def __sub__(self, other: "U1Charge") -> "U1Charge": + """ + Subtract the charges of `other` from charges of `self` and + return a new `U1Charge` object holding the result. + Args: + other: A `U1Charge` object. + Returns: + U1Charge: The result of fusing `self` with `other`. 
+ """ + if self.num_symmetries != other.num_symmetries: + raise ValueError( + "cannot fuse charges with different number of symmetries") + + if not np.all(self.shifts == other.shifts): + raise ValueError( + "Cannot fuse U1-charges with different shifts {} and {}".format( + self.shifts, other.shifts)) + if not isinstance(other, U1Charge): + raise TypeError( + "can only subtract objects of identical types, found {} and {} instead" + .format(type(self), type(other))) + + fused = np.reshape(self.charges[:, None] - other.charges[None, :], + len(self.charges) * len(other.charges)) + return U1Charge(charges=[fused], shifts=self.shifts) + + def __matmul__(self, other: Union["U1Charge", "U1Charge"]) -> "U1Charge": + itemsize = np.sum(self._itemsizes + other._itemsizes) + if itemsize > 8: + raise TypeError("Number of bits required to store all charges " + "in a single int is larger than 64") + dtype = np.int16 #need at least np.int16 to store two charges + if itemsize > 2: + dtype = np.int32 + if itemsize > 4: + dtype = np.int64 + + charges = np.left_shift( + self.charges.astype(dtype), + 8 * np.sum(other._itemsizes)) + other.charges.astype(dtype) + + shifts = np.append(self.shifts + 8 * np.sum(other._itemsizes), other.shifts) + return U1Charge(charges=[charges], shifts=shifts) + + def __mul__(self, number: Union[bool, int]) -> "U1Charge": + if number not in (True, False, 0, 1, -1): + raise ValueError( + "can only multiply by `True`, `False`, `1` or `0`, found {}".format( + number)) + #outflowing charges + if number in (0, False, -1): + charges = self.dtype.type(-1) * self.charges + shifts = self.shifts + return U1Charge(charges=[charges], shifts=shifts) + #inflowing charges + if number in (1, True): + #Note: the returned U1Charge shares its data with self + return U1Charge(charges=[self.charges], shifts=self.shifts) + + # def __rmul__(self, number: Union[bool, int]) -> "U1Charge": + # raise + # print(number not in (True, False, 0, 1, -1)) + # if number not in (True, False, 
0, 1, -1): + # raise ValueError( + # "can only multiply by `True`, `False`, `1` or `0`, found {}".format( + # number)) + # return self.__mul__(number) + + @property + def dual_charges(self) -> np.ndarray: + #the dual of a U1 charge is its negative value + return self.charges * self.dtype.type(-1) + + +class Z2Charge(BaseCharge): + """ + A simple charge class for Z2 symmetries. + """ + + def __init__(self, + charges: List[np.ndarray], + shifts: Optional[np.ndarray] = None) -> None: + if isinstance(charges, np.ndarray): + charges = [charges] + + if shifts is None: + itemsizes = [c.dtype.itemsize for c in charges] + if not np.all([i == 1 for i in itemsizes]): + # martin: This error could come back at us, but I'll leave it for now + warnings.warn( + "Z2 charges can be entirely stored in " + "np.int8, but found dtypes = {}. Converting to np.int8.".format( + [c.dtype for c in charges])) + + charges = [c.astype(np.int8) for c in charges] + + super().__init__(charges, shifts) + + def __add__(self, other: "Z2Charge") -> "Z2Charge": + """ + Fuse the charges of `self` with charges of `other`, and + return a new `Z2Charge` object holding the result. + Args: + other: A `Z2Charge` object. + Returns: + Z2Charge: The result of fusing `self` with `other`. + """ + if not np.all(self.shifts == other.shifts): + raise ValueError( + "Cannot fuse Z2-charges with different shifts {} and {}".format( + self.shifts, other.shifts)) + if not isinstance(other, Z2Charge): + raise TypeError( + "can only add objects of identical types, found {} and {} instead" + .format(type(self), type(other))) + + fused = np.reshape( + np.bitwise_xor(self.charges[:, None], other.charges[None, :]), + len(self.charges) * len(other.charges)) + + return Z2Charge(charges=[fused], shifts=self.shifts) + + def __sub__(self, other: "Z2Charge") -> "Z2Charge": + """ + Subtract charges of `other` from charges of `self` and + return a new `Z2Charge` object holding the result. 
+ Note that ofr Z2 charges, subtraction and addition are identical + Args: + other: A `Z2Charge` object. + Returns: + Z2Charge: The result of fusing `self` with `other`. + """ + if not np.all(self.shifts == other.shifts): + raise ValueError( + "Cannot fuse Z2-charges with different shifts {} and {}".format( + self.shifts, other.shifts)) + if not isinstance(other, Z2Charge): + raise TypeError( + "can only subtract objects of identical types, found {} and {} instead" + .format(type(self), type(other))) + + return self.__add__(other) + + def __matmul__(self, other: Union["Z2Charge", "Z2Charge"]) -> "Z2Charge": + itemsize = np.sum(self._itemsizes + other._itemsizes) + if itemsize > 8: + raise TypeError("Number of bits required to store all charges " + "in a single int is larger than 64") + dtype = np.int16 #need at least np.int16 to store two charges + if itemsize > 2: + dtype = np.int32 + if itemsize > 4: + dtype = np.int64 + + charges = np.left_shift( + self.charges.astype(dtype), + 8 * np.sum(other._itemsizes)) + other.charges.astype(dtype) + + shifts = np.append(self.shifts + 8 * np.sum(other._itemsizes), other.shifts) + return Z2Charge(charges=[charges], shifts=shifts) + + def __mul__(self, number: Union[bool, int]) -> "Z2Charge": + if number not in (True, False, 0, 1, -1): + raise ValueError( + "can only multiply by `True`, `False`, `1` or `0`, found {}".format( + number)) + #Z2 is self-dual + return Z2Charge(charges=[self.charges], shifts=self.shifts) + + def __rmul__(self, number: Union[bool, int]) -> "Z2Charge": + if number not in (True, False, 0, 1, -1): + raise ValueError( + "can only multiply by `True`, `False`, `1` or `0`, found {}".format( + number)) + + return self.__mul__(number) + + @property + def dual_charges(self): + #Z2 charges are self-dual + return self.charges + + def equals(self, target_charges: Iterable) -> np.ndarray: + if not np.all(np.isin(target_charges, np.asarray([0, 1]))): + raise ValueError("Z2-charges can only be 0 or 1, found charges 
{}".format( + np.unique(target_charges))) + return super().equals(target_charges) + + +class ChargeCollection: + """ + + """ + + class Iterator: + + def __init__(self, data: np.ndarray): + self.n = 0 + self.data = data + + def __next__(self): + if self.n < self.data.shape[0]: + result = self.data[self.n, :] + self.n += 1 + return tuple(result) #this makes a copy! + else: + raise StopIteration + + def __init__(self, + charges: List[BaseCharge], + shifts: Optional[List[np.ndarray]] = None, + stacked_charges: Optional[np.ndarray] = None) -> None: + if not isinstance(charges, list): + raise TypeError("only list allowed for argument `charges` " + "in BaseCharge.__init__(charges)") + if (shifts is not None) and (stacked_charges is None): + raise ValueError( + "Found `shifts == None` and `stacked_charges != None`." + "`shifts` and `stacked_charges` can only be passed together.") + if (shifts is None) and (stacked_charges is not None): + raise ValueError( + "Found `shifts != None` and `stacked_charges == None`." + "`shifts` and `stacked_charges` can only be passed together.") + self.charges = [] + if stacked_charges is None: + if not np.all([len(c) == len(charges[0]) for c in charges]): + raise ValueError("not all charges have the same length. " + "Got lengths = {}".format([len(c) for c in charges])) + for n in range(len(charges)): + if not isinstance(charges[n], BaseCharge): + raise TypeError( + "`ChargeCollection` can only be initialized " + "with a list of `BaseCharge`. 
Found {} instead".format( + [type(charges[n]) for n in range(len(charges))])) + + self._stacked_charges = np.stack([c.charges for c in charges], axis=1) + for n in range(len(charges)): + charge = charges[n].__new__(type(charges[n])) + charge.__init__(self._stacked_charges[:, n], shifts=charges[n].shifts) + self.charges.append(charge) + else: + if len(shifts) != stacked_charges.shape[1]: + raise ValueError("`len(shifts)` = {} is different from " + "`stacked_charges.shape[1]` = {}".format( + len(shifts), stacked_charges.shape[1])) + + if stacked_charges.shape[1] != len(charges): + raise ValueError("`len(charges) and shape[1] of `stacked_charges` " + "have to be the same.") + for n in range(len(charges)): + charge = charges[n].__new__(type(charges[n])) + charge.__init__(stacked_charges[:, n], shifts=shifts[n]) + self.charges.append(charge) + self._stacked_charges = stacked_charges + + @classmethod + def from_charge_types(cls, charge_types: Type, shifts: List[np.ndarray], + stacked_charges: np.ndarray): + if len(charge_types) != stacked_charges.shape[1]: + raise ValueError("`len(charge_types) and shape[1] of `stacked_charges` " + "have to be the same.") + if len(charge_types) != len(shifts): + raise ValueError( + "`len(charge_types) and `len(shifts)` have to be the same.") + charges = [ + charge_types[n].__new__(charge_types[n]) + for n in range(len(charge_types)) + ] + return cls(charges=charges, stacked_charges=stacked_charges, shifts=shifts) + + @property + def num_charges(self) -> int: + """ + Return the number of different charges in `ChargeCollection`. + """ + return self._stacked_charges.shape[1] + + def get_item(self, n: int) -> Tuple: + """ + Returns the `n-th` charge-tuple of ChargeCollection in a tuple. 
+ """ + if isinstance(n, (np.integer, int)): + n = np.asarray([n]) + return tuple(self._stacked_charges[n, :].flat) + + def get_item_ndarray(self, n: Union[np.ndarray, int]) -> np.ndarray: + """ + Returns the `n-th` charge-tuples of ChargeCollection in an np.ndarray. + """ + if isinstance(n, (np.integer, int)): + n = np.asarray([n]) + return self._stacked_charges[n, :] + + def __getitem__(self, n: Union[np.ndarray, int]) -> "ChargeCollection": + + if isinstance(n, (np.integer, int)): + n = np.asarray([n]) + + array = self._stacked_charges[n, :] + + return self.from_charge_types( + charge_types=[type(c) for c in self.charges], + shifts=[c.shifts for c in self.charges], + stacked_charges=array) + # if self.num_charges == 1: + # array = np.expand_dims(array, 0) + + # if len(array.shape) == 2: + # if array.shape[1] == 1: + # array = np.squeeze(array, axis=1) + # if len(array.shape) == 0: + # array = np.asarray([array]) + + # charges = [] + # if np.prod(array.shape) == 0: + # for n in range(len(self.charges)): + # charge = self.charges[n].__new__(type(self.charges[n])) + # charge.__init__( + # charges=[np.empty(0, dtype=self.charges[n].dtype)], + # shifts=self.charges[n].shifts) + # charges.append(charge) + + # obj = self.__new__(type(self)) + # obj.__init__(charges=charges) + # return obj + + # if len(array.shape) == 1: + # array = np.expand_dims(array, 1) + + # for m in range(len(self.charges)): + # charge = self.charges[m].__new__(type(self.charges[m])) + # charge.__init__(charges=[array[:, m]], shifts=self.charges[m].shifts) + # charges.append(charge) + + # obj = self.__new__(type(self)) + # obj.__init__(charges=charges) + # return obj + + def __iter__(self): + return self.Iterator(self._stacked_charges) + + def __add__(self, other: "Charge") -> "Charge": + """ + Fuse `self` with `other`. + Args: + other: A `ChargeCollection` object. + Returns: + Charge: The result of fusing `self` with `other`. 
+ """ + return ChargeCollection( + [c1 + c2 for c1, c2 in zip(self.charges, other.charges)]) + + def __sub__(self, other: "Charge") -> "Charge": + """ + Subtract `other` from `self`. + Args: + other: A `ChargeCollection` object. + Returns: + Charge: The result of fusing `self` with `other`. + """ + return ChargeCollection( + [c1 - c2 for c1, c2 in zip(self.charges, other.charges)]) + + def __repr__(self): + text = str(type(self)) + '\n ' + for n in range(len(self.charges)): + tmp = self.charges[n].__repr__() + tmp = tmp.replace('\n', '\n\t') + text += (tmp + '\n') + return text + + def __len__(self): + return len(self.charges[0]) + + def __mul__(self, number: Union[bool, int]) -> "Charge": + if number not in (True, False, 0, 1, -1): + raise ValueError( + "can only multiply by `True`, `False`, `1` or `0`, found {}".format( + number)) + return ChargeCollection(charges=[c * number for c in self.charges]) + + def __rmul__(self, number: Union[bool, int]) -> "Charge": + if number not in (True, False, 0, 1, -1): + raise ValueError( + "can only multiply by `True`, `False`, `1` or `0`, found {}".format( + number)) + + return self.__mul__(number) + + def isin(self, targets: Union[Iterable, "ChargeCollection"]): + if isinstance(targets, type(self)): + _targets = [t for t in targets] + return np.logical_or.reduce([ + np.logical_and.reduce([ + np.isin(self._stacked_charges[:, n], _targets[m][n]) + for n in range(len(_targets[m])) + ]) + for m in range(len(_targets)) + ]) + + def __contains__(self, targets: Union[Iterable, "ChargeCollection"]): + if isinstance(targets, type(self)): + if len(targets) > 1: + raise ValueError( + '__contains__ expects a single input, found {} inputs'.format( + len(targets))) + + _targets = targets.get_item(0) + return np.any( + np.logical_and.reduce([ + np.isin(self._stacked_charges[:, n], _targets[n]) + for n in range(len(_targets)) + ])) + + def unique( + self, + return_index=False, + return_inverse=False, + return_counts=False, + ) -> 
Tuple["ChargeCollection", np.ndarray, np.ndarray, np.ndarray]: + """ + Compute the unique charges in `BaseCharge`. + See np.unique for a more detailed explanation. This function + does the same but instead of a np.ndarray, it returns the unique + elements in a `BaseCharge` object. + Args: + return_index: If `True`, also return the indices of `self.charges` (along the specified axis, + if provided, or in the flattened array) that result in the unique array. + return_inverse: If `True`, also return the indices of the unique array (for the specified + axis, if provided) that can be used to reconstruct `self.charges`. + return_counts: If `True`, also return the number of times each unique item appears + in `self.charges`. + Returns: + BaseCharge: The sorted unique values. + np.ndarray: The indices of the first occurrences of the unique values in the + original array. Only provided if `return_index` is True. + np.ndarray: The indices to reconstruct the original array from the + unique array. Only provided if `return_inverse` is True. + np.ndarray: The number of times each of the unique values comes up in the + original array. Only provided if `return_counts` is True. 
+ """ + + result = np.unique( + np.stack([self.charges[n].charges for n in range(len(self.charges))], + axis=1), + return_index=return_index, + return_inverse=return_inverse, + return_counts=return_counts, + axis=0) + charges = [] + if not (return_index or return_inverse or return_counts): + for n in range(len(self.charges)): + obj = self.charges[n].__new__(type(self.charges[n])) + obj.__init__(charges=[result[:, n]], shifts=self.charges[n].shifts) + charges.append(obj) + return ChargeCollection(charges) + for n in range(len(self.charges)): + obj = self.charges[n].__new__(type(self.charges[n])) + obj.__init__(charges=[result[0][:, n]], shifts=self.charges[n].shifts) + charges.append(obj) + out = ChargeCollection(charges) + return tuple([out] + [result[n] for n in range(1, len(result))]) + + def equals(self, target_charges: List[Union[List, np.ndarray]]) -> np.ndarray: + if len(target_charges) != len(self.charges): + raise ValueError( + "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" + .format(len(target_charges), len(self.charges))) + return np.logical_and.reduce([ + self.charges[n].equals(target_charges[n]) + for n in range(len(target_charges)) + ]) + + def __eq__(self, target_charges: Iterable): + raise NotImplementedError() + if isinstance(target_charges, type(self)): + target_charges = np.stack([c.charges for c in target_charges.charges], + axis=1) + target_charges = np.asarray(target_charges) + if target_charges.ndim == 1: + target_charges = np.expand_dims(target_charges, 0) + if target_charges.shape[1] != len(self.charges): + raise ValueError( + "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" + .format(len(target_charges), len(self.charges))) + return np.logical_and.reduce( + self._stacked_charges == target_charges, axis=1) + + def concatenate(self, + others: Union["ChargeCollection", List["ChargeCollection"]]): + """ + Concatenate `self.charges` with `others.charges`. 
+ Args: + others: List of `BaseCharge` objects. + Returns: + BaseCharge: The concatenated charges. + """ + if isinstance(others, type(self)): + others = [others] + + charges = [ + self.charges[n].concatenate([o.charges[n] + for o in others]) + for n in range(len(self.charges)) + ] + return ChargeCollection(charges) + + @property + def dtype(self): + return np.result_type(*[c.dtype for c in self.charges]) + + @property + def zero_charge(self): + obj = self.__new__(type(self)) + obj.__init__(charges=[c.zero_charge for c in self.charges]) + return obj + + def intersect(self, + other: "ChargeCollection", + return_indices: Optional[bool] = False) -> "ChargeCollection": + if return_indices: + ua, ia = self.unique(return_index=True) + ub, ib = other.unique(return_index=True) + conc = ua.concatenate(ub) + uab, iab, cntab = conc.unique(return_index=True, return_counts=True) + intersection = uab[cntab == 2] + comm1 = np.argmax( + np.logical_and.reduce( + np.repeat( + np.expand_dims(self._stacked_charges, 2), + intersection._stacked_charges.shape[0], + axis=2) == np.expand_dims(intersection._stacked_charges.T, 0), + axis=1), + axis=0) + comm2 = np.argmax( + np.logical_and.reduce( + np.repeat( + np.expand_dims(other._stacked_charges, 2), + intersection._stacked_charges.shape[0], + axis=2) == np.expand_dims(intersection._stacked_charges.T, 0), + axis=1), + axis=0) + return intersection, comm1, comm2 + + else: + self_unique = self.unique() + other_unique = other.unique() + concatenated = self_unique.concatenate(other_unique) + tmp_unique, counts = concatenated.unique(return_counts=True) + return tmp_unique[counts == 2] + + +def fuse_charges( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[bool, int]]) -> Union[BaseCharge, ChargeCollection]: + """ + Fuse all `charges` into a new charge. + Charges are fused from "right to left", + in accordance with row-major order. + + Args: + charges: A list of charges to be fused. 
+ flows: A list of flows, one for each element in `charges`. + Returns: + ChargeCollection: The result of fusing `charges`. + """ + if len(charges) != len(flows): + raise ValueError( + "`charges` and `flows` are of unequal lengths {} != {}".format( + len(charges), len(flows))) + fused_charges = charges[0] * flows[0] + for n in range(1, len(charges)): + fused_charges = fused_charges + charges[n] * flows[n] + return fused_charges + + +def fuse_degeneracies(degen1: Union[List, np.ndarray], + degen2: Union[List, np.ndarray]) -> np.ndarray: + """ + Fuse degeneracies `degen1` and `degen2` of two leg-charges + by simple kronecker product. `degen1` and `degen2` typically belong to two + consecutive legs of `BlockSparseTensor`. + Given `degen1 = [1, 2, 3]` and `degen2 = [10, 100]`, this returns + `[10, 100, 20, 200, 30, 300]`. + When using row-major ordering of indices in `BlockSparseTensor`, + the position of `degen1` should be "to the left" of the position of `degen2`. + Args: + degen1: Iterable of integers + degen2: Iterable of integers + Returns: + np.ndarray: The result of fusing `dege1` with `degen2`. + """ + return np.reshape(degen1[:, None] * degen2[None, :], + len(degen1) * len(degen2)) diff --git a/tensornetwork/block_tensor/chargebkp2.py b/tensornetwork/block_tensor/chargebkp2.py new file mode 100644 index 000000000..9be4be39b --- /dev/null +++ b/tensornetwork/block_tensor/chargebkp2.py @@ -0,0 +1,778 @@ +# Copyright 2019 The TensorNetwork Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +from tensornetwork.network_components import Node, contract, contract_between +# pylint: disable=line-too-long +from tensornetwork.backends import backend_factory +import copy +import warnings +from typing import List, Union, Any, Optional, Tuple, Text, Iterable, Type + + +class BaseCharge: + + def __init__(self, + charges: np.ndarray, + charge_labels: Optional[np.ndarray] = None) -> None: + if charges.dtype is not np.int16: + raise TypeError("`charges` have to be of dtype `np.int16`") + if charge_labels.dtype is not np.int16: + raise TypeError("`charge_labels` have to be of dtype `np.int16`") + + if charge_labels is None: + self.unique_charges, charge_labels = np.unique( + charges, return_inverse=True) + self.charge_labels = charge_labels.astype(np.uint16) + + else: + self.unique_charges = charges + self.charge_labels = charge_labels.astype(np.uint16) + + def __add__(self, other: "BaseCharge") -> "BaseCharge": + # fuse the unique charges from each index, then compute new unique charges + comb_qnums = self.fuse(self.unique_charges, other.unique_charges) + [unique_charges, new_labels] = np.unique(comb_qnums, return_inverse=True) + new_labels = new_labels.reshape( + len(self.unique_charges), len(other.unique_charges)).astype(np.uint16) + + # find new labels using broadcasting (could use np.tile but less efficient) + charge_labels = new_labels[( + self.charge_labels[:, None] + np.zeros([1, len(other)], dtype=np.uint16) + ).ravel(), (other.charge_labels[None, :] + + np.zeros([len(self), 1], dtype=np.uint16)).ravel()] + obj = self.__new__(type(self)) + obj.__init__(unique_charges, charge_labels) + return obj + + def __len__(self): + return len(self.charge_labels) + + @property + def charges(self) -> np.ndarray: + return 
self.unique_charges[self.charge_labels] + + @property + def dtype(self): + return self.unique_charges.dtype + + def __repr__(self): + return str(type(self)) + '\n' + 'charges: ' + self.charges.__repr__() + '\n' + + def unique(self, + return_index=False, + return_inverse=False, + return_counts=False + ) -> Tuple["BaseCharge", np.ndarray, np.ndarray, np.ndarray]: + """ + Compute the unique charges in `BaseCharge`. + See np.unique for a more detailed explanation. This function + does the same but instead of a np.ndarray, it returns the unique + elements in a `BaseCharge` object. + Args: + return_index: If `True`, also return the indices of `self.charges` (along the specified axis, + if provided, or in the flattened array) that result in the unique array. + return_inverse: If `True`, also return the indices of the unique array (for the specified + axis, if provided) that can be used to reconstruct `self.charges`. + return_counts: If `True`, also return the number of times each unique item appears + in `self.charges`. + Returns: + BaseCharge: The sorted unique values. + np.ndarray: The indices of the first occurrences of the unique values in the + original array. Only provided if `return_index` is True. + np.ndarray: The indices to reconstruct the original array from the + unique array. Only provided if `return_inverse` is True. + np.ndarray: The number of times each of the unique values comes up in the + original array. Only provided if `return_counts` is True. 
+ """ + obj = self.__new__(type(self)) + obj.__init__( + self.unique_charges, + charge_labels=np.arange(len(self.unique_charges), dtype=np.uint16)) + + out = [obj] + if return_index: + _, index = np.unique(self.charge_labels, return_index=True) + out.append(index) + if return_inverse: + out.append(self.charge_labels) + if return_counts: + _, cnts = np.unique(self.charge_labels, return_counts=True) + out.append(cnts) + if len(out) == 1: + return out[0] + if len(out) == 2: + return out[0], out[1] + if len(out) == 3: + return out[0], out[1], out[2] + + def isin(self, targets: Union[int, Iterable, "BaseCharge"]) -> np.ndarray: + """ + Test each element of `BaseCharge` if it is in `targets`. Returns + an `np.ndarray` of `dtype=bool`. + Args: + targets: The test elements + Returns: + np.ndarray: An array of `bool` type holding the result of the comparison. + """ + if isinstance(targets, type(self)): + targets = targets.unique_charges + targets = np.asarray(targets) + common, label_to_unique, label_to_targets = np.intersect1d( + self.unique_charges, targets, return_indices=True) + if len(common) == 0: + return np.full(len(self.charge_labels), fill_value=False, dtype=np.bool) + return np.isin(self.charge_labels, label_to_unique) + + def __contains__(self, target: Union[int, Iterable, "BaseCharge"]) -> bool: + """ + """ + + if isinstance(target, type(self)): + target = target.unique_charges + target = np.asarray(target) + return target in self.unique_charges + + def __eq__(self, target: Union[int, Iterable]) -> np.ndarray: + """ + Find indices where `BaseCharge` equals `target_charges`. + `target` is a single integer encoding all symmetries of + `BaseCharge` + Args: + target: integerger encoding charges. + Returns: + np.ndarray: Boolean array with `True` where `BaseCharge.charges` equals + `target` and `False` everywhere else. 
+ """ + if isinstance(target, type(self)): + target = target.charges + elif isinstance(target, (np.integer, int)): + target = np.asarray([target]) + target = np.asarray(target) + tmp = np.full(len(target), fill_value=-1, dtype=np.int16) + + _, label_to_unique, label_to_target = np.intersect1d( + self.unique_charges, target, return_indices=True) + tmp[label_to_target] = label_to_unique + return np.squeeze( + np.expand_dims(self.charge_labels, 1) == np.expand_dims(tmp, 0)) + + @property + def zero_charge(self): + obj = self.__new__(type(self)) + obj.__init__( + np.asarray([self.dtype.type(0)]), np.asarray([0], dtype=np.uint16)) + return obj + + def __iter__(self): + return iter(self.charges) + + def intersect(self, + other: "BaseCharge", + return_indices: Optional[bool] = False) -> "BaseCharge": + if return_indices: + charges, comm1, comm2 = np.intersect1d( + self.charges, other.charges, return_indices=return_indices) + else: + charges = np.intersect1d(self.charges, other.charges) + + obj = self.__new__(type(self)) + obj.__init__(charges, np.arange(len(charges), dtype=np.uint16)) + if return_indices: + return obj, comm1.astype(np.uint16), comm2.astype(np.uint16) + return obj + + def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": + """ + Return the charge-element at position `n`, wrapped into a `BaseCharge` + object. + Args: + n: An integer or `np.ndarray`. + Returns: + BaseCharge: The charges at `n`. + """ + + if isinstance(n, (np.integer, int)): + n = np.asarray([n]) + obj = self.__new__(type(self)) + obj.__init__(self.unique_charges, self.charge_labels[n]) + return obj + + def get_item(self, n: Union[np.ndarray, int]) -> np.ndarray: + """ + Return the charge-element at position `n`. + Args: + n: An integer or `np.ndarray`. + Returns: + np.ndarray: The charges at `n`. 
+ """ + return self.charges[n] + + def __mul__(self, number: Union[bool, int]) -> "U1Charge": + if number not in (True, False, 0, 1, -1): + raise ValueError( + "can only multiply by `True`, `False`, `1` or `0`, found {}".format( + number)) + #outflowing charges + if number in (0, False, -1): + return U1Charge( + self.dual_charges(self.unique_charges), self.charge_labels) + #inflowing charges + if number in (1, True): + return U1Charge(self.unique_charges, self.charge_labels) + + @property + def dual(self, charges): + return self.dual_charges + + +class U1Charge(BaseCharge): + + def __init__(self, + charges: np.ndarray, + charge_labels: Optional[np.ndarray] = None) -> None: + super().__init__(charges, charge_labels) + + @staticmethod + def fuse(charge1, charge2): + return np.add.outer(charge1, charge2).ravel() + + @staticmethod + def dual_charges(charges): + return charges * charges.dtype.type(-1) + + +class Z2Charge(BaseCharge): + """ + A simple charge class for Z2 symmetries. + """ + + def __init__(self, + charges: List[np.ndarray], + shifts: Optional[np.ndarray] = None) -> None: + if isinstance(charges, np.ndarray): + charges = [charges] + + if shifts is None: + itemsizes = [c.dtype.itemsize for c in charges] + if not np.all([i == 1 for i in itemsizes]): + # martin: This error could come back at us, but I'll leave it for now + warnings.warn( + "Z2 charges can be entirely stored in " + "np.int8, but found dtypes = {}. Converting to np.int8.".format( + [c.dtype for c in charges])) + + charges = [c.astype(np.int8) for c in charges] + + super().__init__(charges, shifts) + + def __add__(self, other: "Z2Charge") -> "Z2Charge": + """ + Fuse the charges of `self` with charges of `other`, and + return a new `Z2Charge` object holding the result. + Args: + other: A `Z2Charge` object. + Returns: + Z2Charge: The result of fusing `self` with `other`. 
+ """ + if not np.all(self.shifts == other.shifts): + raise ValueError( + "Cannot fuse Z2-charges with different shifts {} and {}".format( + self.shifts, other.shifts)) + if not isinstance(other, Z2Charge): + raise TypeError( + "can only add objects of identical types, found {} and {} instead" + .format(type(self), type(other))) + + fused = np.reshape( + np.bitwise_xor(self.charges[:, None], other.charges[None, :]), + len(self.charges) * len(other.charges)) + + return Z2Charge(charges=[fused], shifts=self.shifts) + + def __sub__(self, other: "Z2Charge") -> "Z2Charge": + """ + Subtract charges of `other` from charges of `self` and + return a new `Z2Charge` object holding the result. + Note that ofr Z2 charges, subtraction and addition are identical + Args: + other: A `Z2Charge` object. + Returns: + Z2Charge: The result of fusing `self` with `other`. + """ + if not np.all(self.shifts == other.shifts): + raise ValueError( + "Cannot fuse Z2-charges with different shifts {} and {}".format( + self.shifts, other.shifts)) + if not isinstance(other, Z2Charge): + raise TypeError( + "can only subtract objects of identical types, found {} and {} instead" + .format(type(self), type(other))) + + return self.__add__(other) + + def __matmul__(self, other: Union["Z2Charge", "Z2Charge"]) -> "Z2Charge": + itemsize = np.sum(self._itemsizes + other._itemsizes) + if itemsize > 8: + raise TypeError("Number of bits required to store all charges " + "in a single int is larger than 64") + dtype = np.int16 #need at least np.int16 to store two charges + if itemsize > 2: + dtype = np.int32 + if itemsize > 4: + dtype = np.int64 + + charges = np.left_shift( + self.charges.astype(dtype), + 8 * np.sum(other._itemsizes)) + other.charges.astype(dtype) + + shifts = np.append(self.shifts + 8 * np.sum(other._itemsizes), other.shifts) + return Z2Charge(charges=[charges], shifts=shifts) + + def __mul__(self, number: Union[bool, int]) -> "Z2Charge": + if number not in (True, False, 0, 1, -1): + raise 
ValueError( + "can only multiply by `True`, `False`, `1` or `0`, found {}".format( + number)) + #Z2 is self-dual + return Z2Charge(charges=[self.charges], shifts=self.shifts) + + def __rmul__(self, number: Union[bool, int]) -> "Z2Charge": + if number not in (True, False, 0, 1, -1): + raise ValueError( + "can only multiply by `True`, `False`, `1` or `0`, found {}".format( + number)) + + return self.__mul__(number) + + @property + def dual_charges(self): + #Z2 charges are self-dual + return self.charges + + def equals(self, target_charges: Iterable) -> np.ndarray: + if not np.all(np.isin(target_charges, np.asarray([0, 1]))): + raise ValueError("Z2-charges can only be 0 or 1, found charges {}".format( + np.unique(target_charges))) + return super().equals(target_charges) + + +class ChargeCollection: + """ + + """ + + class Iterator: + + def __init__(self, data: np.ndarray): + self.n = 0 + self.data = data + + def __next__(self): + if self.n < self.data.shape[0]: + result = self.data[self.n, :] + self.n += 1 + return tuple(result) #this makes a copy! + else: + raise StopIteration + + def __init__(self, + charges: List[BaseCharge], + shifts: Optional[List[np.ndarray]] = None, + stacked_charges: Optional[np.ndarray] = None) -> None: + if not isinstance(charges, list): + raise TypeError("only list allowed for argument `charges` " + "in BaseCharge.__init__(charges)") + if (shifts is not None) and (stacked_charges is None): + raise ValueError( + "Found `shifts == None` and `stacked_charges != None`." + "`shifts` and `stacked_charges` can only be passed together.") + if (shifts is None) and (stacked_charges is not None): + raise ValueError( + "Found `shifts != None` and `stacked_charges == None`." + "`shifts` and `stacked_charges` can only be passed together.") + self.charges = [] + if stacked_charges is None: + if not np.all([len(c) == len(charges[0]) for c in charges]): + raise ValueError("not all charges have the same length. 
" + "Got lengths = {}".format([len(c) for c in charges])) + for n in range(len(charges)): + if not isinstance(charges[n], BaseCharge): + raise TypeError( + "`ChargeCollection` can only be initialized " + "with a list of `BaseCharge`. Found {} instead".format( + [type(charges[n]) for n in range(len(charges))])) + + self._stacked_charges = np.stack([c.charges for c in charges], axis=1) + for n in range(len(charges)): + charge = charges[n].__new__(type(charges[n])) + charge.__init__(self._stacked_charges[:, n], shifts=charges[n].shifts) + self.charges.append(charge) + else: + if len(shifts) != stacked_charges.shape[1]: + raise ValueError("`len(shifts)` = {} is different from " + "`stacked_charges.shape[1]` = {}".format( + len(shifts), stacked_charges.shape[1])) + + if stacked_charges.shape[1] != len(charges): + raise ValueError("`len(charges) and shape[1] of `stacked_charges` " + "have to be the same.") + for n in range(len(charges)): + charge = charges[n].__new__(type(charges[n])) + charge.__init__(stacked_charges[:, n], shifts=shifts[n]) + self.charges.append(charge) + self._stacked_charges = stacked_charges + + @classmethod + def from_charge_types(cls, charge_types: Type, shifts: List[np.ndarray], + stacked_charges: np.ndarray): + if len(charge_types) != stacked_charges.shape[1]: + raise ValueError("`len(charge_types) and shape[1] of `stacked_charges` " + "have to be the same.") + if len(charge_types) != len(shifts): + raise ValueError( + "`len(charge_types) and `len(shifts)` have to be the same.") + charges = [ + charge_types[n].__new__(charge_types[n]) + for n in range(len(charge_types)) + ] + return cls(charges=charges, stacked_charges=stacked_charges, shifts=shifts) + + @property + def num_charges(self) -> int: + """ + Return the number of different charges in `ChargeCollection`. + """ + return self._stacked_charges.shape[1] + + def get_item(self, n: int) -> Tuple: + """ + Returns the `n-th` charge-tuple of ChargeCollection in a tuple. 
+ """ + if isinstance(n, (np.integer, int)): + n = np.asarray([n]) + return tuple(self._stacked_charges[n, :].flat) + + def get_item_ndarray(self, n: Union[np.ndarray, int]) -> np.ndarray: + """ + Returns the `n-th` charge-tuples of ChargeCollection in an np.ndarray. + """ + if isinstance(n, (np.integer, int)): + n = np.asarray([n]) + return self._stacked_charges[n, :] + + def __getitem__(self, n: Union[np.ndarray, int]) -> "ChargeCollection": + + if isinstance(n, (np.integer, int)): + n = np.asarray([n]) + + array = self._stacked_charges[n, :] + + return self.from_charge_types( + charge_types=[type(c) for c in self.charges], + shifts=[c.shifts for c in self.charges], + stacked_charges=array) + # if self.num_charges == 1: + # array = np.expand_dims(array, 0) + + # if len(array.shape) == 2: + # if array.shape[1] == 1: + # array = np.squeeze(array, axis=1) + # if len(array.shape) == 0: + # array = np.asarray([array]) + + # charges = [] + # if np.prod(array.shape) == 0: + # for n in range(len(self.charges)): + # charge = self.charges[n].__new__(type(self.charges[n])) + # charge.__init__( + # charges=[np.empty(0, dtype=self.charges[n].dtype)], + # shifts=self.charges[n].shifts) + # charges.append(charge) + + # obj = self.__new__(type(self)) + # obj.__init__(charges=charges) + # return obj + + # if len(array.shape) == 1: + # array = np.expand_dims(array, 1) + + # for m in range(len(self.charges)): + # charge = self.charges[m].__new__(type(self.charges[m])) + # charge.__init__(charges=[array[:, m]], shifts=self.charges[m].shifts) + # charges.append(charge) + + # obj = self.__new__(type(self)) + # obj.__init__(charges=charges) + # return obj + + def __iter__(self): + return self.Iterator(self._stacked_charges) + + def __add__(self, other: "Charge") -> "Charge": + """ + Fuse `self` with `other`. + Args: + other: A `ChargeCollection` object. + Returns: + Charge: The result of fusing `self` with `other`. 
+ """ + return ChargeCollection( + [c1 + c2 for c1, c2 in zip(self.charges, other.charges)]) + + def __sub__(self, other: "Charge") -> "Charge": + """ + Subtract `other` from `self`. + Args: + other: A `ChargeCollection` object. + Returns: + Charge: The result of fusing `self` with `other`. + """ + return ChargeCollection( + [c1 - c2 for c1, c2 in zip(self.charges, other.charges)]) + + def __repr__(self): + text = str(type(self)) + '\n ' + for n in range(len(self.charges)): + tmp = self.charges[n].__repr__() + tmp = tmp.replace('\n', '\n\t') + text += (tmp + '\n') + return text + + def __len__(self): + return len(self.charges[0]) + + def __mul__(self, number: Union[bool, int]) -> "Charge": + if number not in (True, False, 0, 1, -1): + raise ValueError( + "can only multiply by `True`, `False`, `1` or `0`, found {}".format( + number)) + return ChargeCollection(charges=[c * number for c in self.charges]) + + def __rmul__(self, number: Union[bool, int]) -> "Charge": + if number not in (True, False, 0, 1, -1): + raise ValueError( + "can only multiply by `True`, `False`, `1` or `0`, found {}".format( + number)) + + return self.__mul__(number) + + def isin(self, targets: Union[Iterable, "ChargeCollection"]): + if isinstance(targets, type(self)): + _targets = [t for t in targets] + return np.logical_or.reduce([ + np.logical_and.reduce([ + np.isin(self._stacked_charges[:, n], _targets[m][n]) + for n in range(len(_targets[m])) + ]) + for m in range(len(_targets)) + ]) + + def __contains__(self, targets: Union[Iterable, "ChargeCollection"]): + if isinstance(targets, type(self)): + if len(targets) > 1: + raise ValueError( + '__contains__ expects a single input, found {} inputs'.format( + len(targets))) + + _targets = targets.get_item(0) + return np.any( + np.logical_and.reduce([ + np.isin(self._stacked_charges[:, n], _targets[n]) + for n in range(len(_targets)) + ])) + + def unique( + self, + return_index=False, + return_inverse=False, + return_counts=False, + ) -> 
Tuple["ChargeCollection", np.ndarray, np.ndarray, np.ndarray]: + """ + Compute the unique charges in `BaseCharge`. + See np.unique for a more detailed explanation. This function + does the same but instead of a np.ndarray, it returns the unique + elements in a `BaseCharge` object. + Args: + return_index: If `True`, also return the indices of `self.charges` (along the specified axis, + if provided, or in the flattened array) that result in the unique array. + return_inverse: If `True`, also return the indices of the unique array (for the specified + axis, if provided) that can be used to reconstruct `self.charges`. + return_counts: If `True`, also return the number of times each unique item appears + in `self.charges`. + Returns: + BaseCharge: The sorted unique values. + np.ndarray: The indices of the first occurrences of the unique values in the + original array. Only provided if `return_index` is True. + np.ndarray: The indices to reconstruct the original array from the + unique array. Only provided if `return_inverse` is True. + np.ndarray: The number of times each of the unique values comes up in the + original array. Only provided if `return_counts` is True. 
+ """ + + result = np.unique( + np.stack([self.charges[n].charges for n in range(len(self.charges))], + axis=1), + return_index=return_index, + return_inverse=return_inverse, + return_counts=return_counts, + axis=0) + charges = [] + if not (return_index or return_inverse or return_counts): + for n in range(len(self.charges)): + obj = self.charges[n].__new__(type(self.charges[n])) + obj.__init__(charges=[result[:, n]], shifts=self.charges[n].shifts) + charges.append(obj) + return ChargeCollection(charges) + for n in range(len(self.charges)): + obj = self.charges[n].__new__(type(self.charges[n])) + obj.__init__(charges=[result[0][:, n]], shifts=self.charges[n].shifts) + charges.append(obj) + out = ChargeCollection(charges) + return tuple([out] + [result[n] for n in range(1, len(result))]) + + def equals(self, target_charges: List[Union[List, np.ndarray]]) -> np.ndarray: + if len(target_charges) != len(self.charges): + raise ValueError( + "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" + .format(len(target_charges), len(self.charges))) + return np.logical_and.reduce([ + self.charges[n].equals(target_charges[n]) + for n in range(len(target_charges)) + ]) + + def __eq__(self, target_charges: Iterable): + raise NotImplementedError() + if isinstance(target_charges, type(self)): + target_charges = np.stack([c.charges for c in target_charges.charges], + axis=1) + target_charges = np.asarray(target_charges) + if target_charges.ndim == 1: + target_charges = np.expand_dims(target_charges, 0) + if target_charges.shape[1] != len(self.charges): + raise ValueError( + "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" + .format(len(target_charges), len(self.charges))) + return np.logical_and.reduce( + self._stacked_charges == target_charges, axis=1) + + def concatenate(self, + others: Union["ChargeCollection", List["ChargeCollection"]]): + """ + Concatenate `self.charges` with `others.charges`. 
+ Args: + others: List of `BaseCharge` objects. + Returns: + BaseCharge: The concatenated charges. + """ + if isinstance(others, type(self)): + others = [others] + + charges = [ + self.charges[n].concatenate([o.charges[n] + for o in others]) + for n in range(len(self.charges)) + ] + return ChargeCollection(charges) + + @property + def dtype(self): + return np.result_type(*[c.dtype for c in self.charges]) + + @property + def zero_charge(self): + obj = self.__new__(type(self)) + obj.__init__(charges=[c.zero_charge for c in self.charges]) + return obj + + def intersect(self, + other: "ChargeCollection", + return_indices: Optional[bool] = False) -> "ChargeCollection": + if return_indices: + ua, ia = self.unique(return_index=True) + ub, ib = other.unique(return_index=True) + conc = ua.concatenate(ub) + uab, iab, cntab = conc.unique(return_index=True, return_counts=True) + intersection = uab[cntab == 2] + comm1 = np.argmax( + np.logical_and.reduce( + np.repeat( + np.expand_dims(self._stacked_charges, 2), + intersection._stacked_charges.shape[0], + axis=2) == np.expand_dims(intersection._stacked_charges.T, 0), + axis=1), + axis=0) + comm2 = np.argmax( + np.logical_and.reduce( + np.repeat( + np.expand_dims(other._stacked_charges, 2), + intersection._stacked_charges.shape[0], + axis=2) == np.expand_dims(intersection._stacked_charges.T, 0), + axis=1), + axis=0) + return intersection, comm1, comm2 + + else: + self_unique = self.unique() + other_unique = other.unique() + concatenated = self_unique.concatenate(other_unique) + tmp_unique, counts = concatenated.unique(return_counts=True) + return tmp_unique[counts == 2] + + +def fuse_charges( + charges: List[Union[BaseCharge, ChargeCollection]], + flows: List[Union[bool, int]]) -> Union[BaseCharge, ChargeCollection]: + """ + Fuse all `charges` into a new charge. + Charges are fused from "right to left", + in accordance with row-major order. + + Args: + charges: A list of charges to be fused. 
+ flows: A list of flows, one for each element in `charges`. + Returns: + ChargeCollection: The result of fusing `charges`. + """ + if len(charges) != len(flows): + raise ValueError( + "`charges` and `flows` are of unequal lengths {} != {}".format( + len(charges), len(flows))) + fused_charges = charges[0] * flows[0] + for n in range(1, len(charges)): + fused_charges = fused_charges + charges[n] * flows[n] + return fused_charges + + +def fuse_degeneracies(degen1: Union[List, np.ndarray], + degen2: Union[List, np.ndarray]) -> np.ndarray: + """ + Fuse degeneracies `degen1` and `degen2` of two leg-charges + by simple kronecker product. `degen1` and `degen2` typically belong to two + consecutive legs of `BlockSparseTensor`. + Given `degen1 = [1, 2, 3]` and `degen2 = [10, 100]`, this returns + `[10, 100, 20, 200, 30, 300]`. + When using row-major ordering of indices in `BlockSparseTensor`, + the position of `degen1` should be "to the left" of the position of `degen2`. + Args: + degen1: Iterable of integers + degen2: Iterable of integers + Returns: + np.ndarray: The result of fusing `dege1` with `degen2`. 
+ """ + return np.reshape(degen1[:, None] * degen2[None, :], + len(degen1) * len(degen2)) From 8fad2586478e769f89f8cd8b06f68d652b3fe7e5 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 29 Jan 2020 15:07:20 -0500 Subject: [PATCH 188/212] deleted some files --- .../block_tensor/block_tensor_old.py | 1675 ----------------- .../block_tensor/block_tensor_old_test.py | 176 -- tensornetwork/block_tensor/chargebkp.py | 1040 ---------- tensornetwork/block_tensor/chargebkp2.py | 778 -------- tensornetwork/block_tensor/index_old.py | 294 --- tensornetwork/block_tensor/index_old_test.py | 171 -- 6 files changed, 4134 deletions(-) delete mode 100644 tensornetwork/block_tensor/block_tensor_old.py delete mode 100644 tensornetwork/block_tensor/block_tensor_old_test.py delete mode 100644 tensornetwork/block_tensor/chargebkp.py delete mode 100644 tensornetwork/block_tensor/chargebkp2.py delete mode 100644 tensornetwork/block_tensor/index_old.py delete mode 100644 tensornetwork/block_tensor/index_old_test.py diff --git a/tensornetwork/block_tensor/block_tensor_old.py b/tensornetwork/block_tensor/block_tensor_old.py deleted file mode 100644 index c552a184a..000000000 --- a/tensornetwork/block_tensor/block_tensor_old.py +++ /dev/null @@ -1,1675 +0,0 @@ -# Copyright 2019 The TensorNetwork Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -import numpy as np -#from tensornetwork.block_tensor.lookup import lookup -# pylint: disable=line-too-long -from tensornetwork.network_components import Node, contract, contract_between -from tensornetwork.backends import backend_factory -# pylint: disable=line-too-long -from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charge_pair, fuse_degeneracies, fuse_charges, unfuse -import numpy as np -import scipy as sp -import itertools -import time -from typing import List, Union, Any, Tuple, Type, Optional, Dict, Iterable -Tensor = Any - - -def _check_flows(flows) -> None: - if (set(flows) != {1}) and (set(flows) != {-1}) and (set(flows) != {-1, 1}): - raise ValueError( - "flows = {} contains values different from 1 and -1".format(flows)) - - -def _find_best_partition(charges, flows): - if len(charges) == 1: - raise ValueError( - '_expecting `charges` with a length of at least 2, got `len(charges)={}`' - .format(len(charges))) - dims = np.asarray([len(c) for c in charges]) - min_ind = np.argmin([ - np.abs(np.prod(dims[0:n]) - np.prod(dims[n::])) - for n in range(1, len(charges)) - ]) - fused_left_charges = fuse_charges(charges[0:min_ind + 1], - flows[0:min_ind + 1]) - fused_right_charges = fuse_charges(charges[min_ind + 1::], - flows[min_ind + 1::]) - - return fused_left_charges, fused_right_charges, min_ind + 1 - - -def map_to_integer(dims: Union[List, np.ndarray], - table: np.ndarray, - dtype: Optional[Type[np.number]] = np.int64): - """ - Map a `table` of integers of shape (N, r) bijectively into - an np.ndarray `integers` of length N of unique numbers. - The mapping is done using - ``` - `integers[n] = table[n,0] * np.prod(dims[1::]) + table[n,1] * np.prod(dims[2::]) + ... + table[n,r-1] * 1` - - Args: - dims: An iterable of integers. - table: An array of shape (N,r) of integers. 
- dtype: An optional dtype used for the conversion. - Care should be taken when choosing this to avoid overflow issues. - Returns: - np.ndarray: An array of integers. - """ - converter_table = np.expand_dims( - np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))), 0) - tmp = table * converter_table - integers = np.sum(tmp, axis=1) - return integers - - -def compute_fused_charge_degeneracies(charges: List[np.ndarray], - flows: List[Union[bool, int]]) -> Dict: - """ - For a list of charges, compute all possible fused charges resulting - from fusing `charges`, together with their respective degeneracyn - Args: - charges: List of np.ndarray of int, one for each leg of the - underlying tensor. Each np.ndarray `charges[leg]` - is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - Returns: - dict: Mapping fused charges (int) to degeneracies (int) - """ - if len(charges) == 1: - return np.unique(flows[0] * charges[0], return_counts=True) - - # get unique charges and their degeneracies on the first leg. - # We are fusing from "left" to "right". - accumulated_charges, accumulated_degeneracies = np.unique( - charges[0], return_counts=True) - #multiply the flow into the charges of first leg - accumulated_charges *= flows[0] - for n in range(1, len(charges)): - #list of unique charges and list of their degeneracies - #on the next unfused leg of the tensor - leg_charges, leg_degeneracies = np.unique(charges[n], return_counts=True) - - #fuse the unique charges - #Note: entries in `fused_charges` are not unique anymore. 
- #flow1 = 1 because the flow of leg 0 has already been - #mulitplied above - fused_charges = fuse_charge_pair( - q1=accumulated_charges, flow1=1, q2=leg_charges, flow2=flows[n]) - #compute the degeneracies of `fused_charges` charges - #`fused_degeneracies` is a list of degeneracies such that - # `fused_degeneracies[n]` is the degeneracy of of - # charge `c = fused_charges[n]`. - fused_degeneracies = fuse_degeneracies(accumulated_degeneracies, - leg_degeneracies) - #compute the new degeneracies resulting from fusing - #`accumulated_charges` and `leg_charges_2` - accumulated_charges = np.unique(fused_charges) - accumulated_degeneracies = np.empty( - len(accumulated_charges), dtype=np.int64) - for n in range(len(accumulated_charges)): - accumulated_degeneracies[n] = np.sum( - fused_degeneracies[fused_charges == accumulated_charges[n]]) - return accumulated_charges, accumulated_degeneracies - - -def compute_num_nonzero(charges: List[np.ndarray], - flows: List[Union[bool, int]]) -> int: - """ - Compute the number of non-zero elements, given the meta-data of - a symmetric tensor. - Args: - charges: List of np.ndarray of int, one for each leg of the - underlying tensor. Each np.ndarray `charges[leg]` - is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - Returns: - int: The number of non-zero elements. 
- """ - accumulated_charges, accumulated_degeneracies = compute_fused_charge_degeneracies( - charges, flows) - if len(np.nonzero(accumulated_charges == 0)[0]) == 0: - raise ValueError( - "given leg-charges `charges` and flows `flows` are incompatible " - "with a symmetric tensor") - return accumulated_degeneracies[accumulated_charges == 0][0] - - -def compute_nonzero_block_shapes(charges: List[np.ndarray], - flows: List[Union[bool, int]]) -> Dict: - """ - Compute the blocks and their respective shapes of a symmetric tensor, - given its meta-data. - Args: - charges: List of np.ndarray, one for each leg. - Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - Returns: - dict: Dictionary mapping a tuple of charges to a shape tuple. - Each element corresponds to a non-zero valued block of the tensor. - """ - #FIXME: this routine is slow - _check_flows(flows) - degeneracies = [] - unique_charges = [] - rank = len(charges) - #find the unique quantum numbers and their degeneracy on each leg - for leg in range(rank): - c, d = np.unique(charges[leg], return_counts=True) - unique_charges.append(c) - degeneracies.append(dict(zip(c, d))) - - #find all possible combination of leg charges c0, c1, ... - #(with one charge per leg 0, 1, ...) 
- #such that sum([flows[0] * c0, flows[1] * c1, ...]) = 0 - charge_combinations = list( - itertools.product(*[ - unique_charges[leg] * flows[leg] - for leg in range(len(unique_charges)) - ])) - net_charges = np.array([np.sum(c) for c in charge_combinations]) - zero_idxs = np.nonzero(net_charges == 0)[0] - charge_shape_dict = {} - for idx in zero_idxs: - c = charge_combinations[idx] - shapes = [degeneracies[leg][flows[leg] * c[leg]] for leg in range(rank)] - charge_shape_dict[c] = shapes - return charge_shape_dict - - -def find_diagonal_sparse_blocks(data: np.ndarray, - row_charges: List[Union[List, np.ndarray]], - column_charges: List[Union[List, np.ndarray]], - row_flows: List[Union[bool, int]], - column_flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: - """ - Given the meta data and underlying data of a symmetric matrix, compute - all diagonal blocks and return them in a dict. - `row_charges` and `column_charges` are lists of np.ndarray. The tensor - is viewed as a matrix with rows given by fusing `row_charges` and - columns given by fusing `column_charges`. Note that `column_charges` - are never explicitly fused (`row_charges` are). - Args: - data: An np.ndarray of the data. The number of elements in `data` - has to match the number of non-zero elements defined by `charges` - and `flows` - row_charges: List of np.ndarray, one for each leg of the row-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - column_charges: List of np.ndarray, one for each leg of the column-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - row_flows: A list of integers, one for each entry in `row_charges`. - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - column_flows: A list of integers, one for each entry in `column_charges`. 
- with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the sparse locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. - - Returns: - dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray - or a python list of locations and shapes, depending on the value of `return_data`. - """ - flows = row_flows.copy() - flows.extend(column_flows) - _check_flows(flows) - if len(flows) != (len(row_charges) + len(column_charges)): - raise ValueError( - "`len(flows)` is different from `len(row_charges) + len(column_charges)`" - ) - - #get the unique column-charges - #we only care about their degeneracies, not their order; that's much faster - #to compute since we don't have to fuse all charges explicitly - unique_column_charges, column_dims = compute_fused_charge_degeneracies( - column_charges, column_flows) - #convenience container for storing the degeneracies of each - #column charge - column_degeneracies = dict(zip(unique_column_charges, column_dims)) - - if len(row_charges) > 1: - left_row_charges, right_row_charges, _ = _find_best_partition( - row_charges, row_flows) - unique_left = np.unique(left_row_charges) - unique_right = np.unique(right_row_charges) - unique_row_charges = np.unique( - fuse_charges(charges=[unique_left, unique_right], flows=[1, 1])) - - #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - - row_locations = 
find_sparse_positions( - left_charges=left_row_charges, - left_flow=1, - right_charges=right_row_charges, - right_flow=1, - target_charges=common_charges) - elif len(row_charges) == 1: - fused_row_charges = fuse_charges(row_charges, row_flows) - - #get the unique row-charges - unique_row_charges, row_dims = np.unique( - fused_row_charges, return_counts=True) - #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - relevant_fused_row_charges = fused_row_charges[np.isin( - fused_row_charges, common_charges)] - row_locations = {} - for c in common_charges: - row_locations[c] = np.nonzero(relevant_fused_row_charges == c)[0] - else: - raise ValueError('Found an empty sequence for `row_charges`') - #some numpy magic to get the index locations of the blocks - degeneracy_vector = np.empty( - np.sum([len(v) for v in row_locations.values()]), dtype=np.int64) - #for each charge `c` in `common_charges` we generate a boolean mask - #for indexing the positions where `relevant_column_charges` has a value of `c`. - masks = {} - for c in common_charges: - degeneracy_vector[row_locations[c]] = column_degeneracies[-c] - - # the result of the cumulative sum is a vector containing - # the stop positions of the non-zero values of each row - # within the data vector. - # E.g. 
for `relevant_row_charges` = [0,1,0,0,3], and - # column_degeneracies[0] = 10 - # column_degeneracies[1] = 20 - # column_degeneracies[3] = 30 - # we have - # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] - # The starting positions of consecutive elements (in row-major order) in - # each row with charge `c=0` within the data vector are then simply obtained using - # masks[0] = [True, False, True, True, False] - # and `stop_positions[masks[0]] - column_degeneracies[0]` - stop_positions = np.cumsum(degeneracy_vector) - start_positions = stop_positions - degeneracy_vector - blocks = {} - - for c in common_charges: - #numpy broadcasting is substantially faster than kron! - a = np.expand_dims(start_positions[np.sort(row_locations[c])], 1) - b = np.expand_dims(np.arange(column_degeneracies[-c]), 0) - inds = np.reshape(a + b, len(row_locations[c]) * column_degeneracies[-c]) - if not return_data: - blocks[c] = [inds, (len(row_locations[c]), column_degeneracies[-c])] - else: - blocks[c] = np.reshape(data[inds], - (len(row_locations[c]), column_degeneracies[-c])) - return blocks - - -def find_diagonal_sparse_blocks_depreacated_1( - data: np.ndarray, - row_charges: List[Union[List, np.ndarray]], - column_charges: List[Union[List, np.ndarray]], - row_flows: List[Union[bool, int]], - column_flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: - """ - Deprecated - - This version is slow for matrices with shape[0] >> shape[1], but fast otherwise. - - Given the meta data and underlying data of a symmetric matrix, compute - all diagonal blocks and return them in a dict. - `row_charges` and `column_charges` are lists of np.ndarray. The tensor - is viewed as a matrix with rows given by fusing `row_charges` and - columns given by fusing `column_charges`. Note that `column_charges` - are never explicitly fused (`row_charges` are). - Args: - data: An np.ndarray of the data. 
The number of elements in `data` - has to match the number of non-zero elements defined by `charges` - and `flows` - row_charges: List of np.ndarray, one for each leg of the row-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - column_charges: List of np.ndarray, one for each leg of the column-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - row_flows: A list of integers, one for each entry in `row_charges`. - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - column_flows: A list of integers, one for each entry in `column_charges`. - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the sparse locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. - - Returns: - dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray - or a python list of locations and shapes, depending on the value of `return_data`. - """ - flows = row_flows.copy() - flows.extend(column_flows) - _check_flows(flows) - if len(flows) != (len(row_charges) + len(column_charges)): - raise ValueError( - "`len(flows)` is different from `len(row_charges) + len(column_charges)`" - ) - - #since we are using row-major we have to fuse the row charges anyway. 
- fused_row_charges = fuse_charges(row_charges, row_flows) - #get the unique row-charges - unique_row_charges, row_dims = np.unique( - fused_row_charges, return_counts=True) - - #get the unique column-charges - #we only care about their degeneracies, not their order; that's much faster - #to compute since we don't have to fuse all charges explicitly - unique_column_charges, column_dims = compute_fused_charge_degeneracies( - column_charges, column_flows) - #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - - #convenience container for storing the degeneracies of each - #row and column charge - row_degeneracies = dict(zip(unique_row_charges, row_dims)) - column_degeneracies = dict(zip(unique_column_charges, column_dims)) - - # we only care about charges common to row and columns - mask = np.isin(fused_row_charges, common_charges) - relevant_row_charges = fused_row_charges[mask] - #some numpy magic to get the index locations of the blocks - #we generate a vector of `len(relevant_row_charges) which, - #for each charge `c` in `relevant_row_charges` holds the - #column-degeneracy of charge `c` - degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64) - #for each charge `c` in `common_charges` we generate a boolean mask - #for indexing the positions where `relevant_column_charges` has a value of `c`. - masks = {} - for c in common_charges: - mask = relevant_row_charges == c - masks[c] = mask - degeneracy_vector[mask] = column_degeneracies[-c] - - # the result of the cumulative sum is a vector containing - # the stop positions of the non-zero values of each row - # within the data vector. - # E.g. 
for `relevant_row_charges` = [0,1,0,0,3], and - # column_degeneracies[0] = 10 - # column_degeneracies[1] = 20 - # column_degeneracies[3] = 30 - # we have - # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] - # The starting positions of consecutive elements (in row-major order) in - # each row with charge `c=0` within the data vector are then simply obtained using - # masks[0] = [True, False, True, True, False] - # and `stop_positions[masks[0]] - column_degeneracies[0]` - stop_positions = np.cumsum(degeneracy_vector) - start_positions = stop_positions - degeneracy_vector - blocks = {} - - for c in common_charges: - #numpy broadcasting is substantially faster than kron! - a = np.expand_dims(start_positions[masks[c]], 1) - b = np.expand_dims(np.arange(column_degeneracies[-c]), 0) - if not return_data: - blocks[c] = [ - np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), - (row_degeneracies[c], column_degeneracies[-c]) - ] - else: - blocks[c] = np.reshape( - data[np.reshape(a + b, - row_degeneracies[c] * column_degeneracies[-c])], - (row_degeneracies[c], column_degeneracies[-c])) - return blocks - - -def find_diagonal_sparse_blocks_deprecated_0( - data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: - """ - Deprecated: this version is about 2 times slower (worst case) than the current used - implementation - Given the meta data and underlying data of a symmetric matrix, compute - all diagonal blocks and return them in a dict. - Args: - data: An np.ndarray of the data. The number of elements in `data` - has to match the number of non-zero elements defined by `charges` - and `flows` - charges: List of np.ndarray, one for each leg. - Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. 
- flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. - - Returns: - dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray - or a python list of locations and shapes, depending on the value of `return_data`. - """ - if len(charges) != 2: - raise ValueError("input has to be a two-dimensional symmetric matrix") - _check_flows(flows) - if len(flows) != len(charges): - raise ValueError("`len(flows)` is different from `len(charges)`") - - #we multiply the flows into the charges - row_charges = flows[0] * charges[0] # a list of charges on each row - column_charges = flows[1] * charges[1] # a list of charges on each column - - #get the unique charges - unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) - unique_column_charges, column_dims = np.unique( - column_charges, return_counts=True) - #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - - #convenience container for storing the degeneracies of each - #row and column charge - row_degeneracies = dict(zip(unique_row_charges, row_dims)) - column_degeneracies = dict(zip(unique_column_charges, column_dims)) - - # we only care about charges common to row and columns - mask = np.isin(row_charges, common_charges) - relevant_row_charges = row_charges[mask] - - 
#some numpy magic to get the index locations of the blocks - #we generate a vector of `len(relevant_row_charges) which, - #for each charge `c` in `relevant_row_charges` holds the - #column-degeneracy of charge `c` - degeneracy_vector = np.empty(len(relevant_row_charges), dtype=np.int64) - #for each charge `c` in `common_charges` we generate a boolean mask - #for indexing the positions where `relevant_column_charges` has a value of `c`. - masks = {} - for c in common_charges: - mask = relevant_row_charges == c - masks[c] = mask - degeneracy_vector[mask] = column_degeneracies[-c] - - # the result of the cumulative sum is a vector containing - # the stop positions of the non-zero values of each row - # within the data vector. - # E.g. for `relevant_row_charges` = [0,1,0,0,3], and - # column_degeneracies[0] = 10 - # column_degeneracies[1] = 20 - # column_degeneracies[3] = 30 - # we have - # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] - # The starting positions of consecutive elements (in row-major order) in - # each row with charge `c=0` within the data vector are then simply obtained using - # masks[0] = [True, False, True, True, False] - # and `stop_positions[masks[0]] - column_degeneracies[0]` - stop_positions = np.cumsum(degeneracy_vector) - blocks = {} - - for c in common_charges: - #numpy broadcasting is substantially faster than kron! 
- a = np.expand_dims(stop_positions[masks[c]] - column_degeneracies[-c], 0) - b = np.expand_dims(np.arange(column_degeneracies[-c]), 1) - if not return_data: - blocks[c] = [ - np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), - (row_degeneracies[c], column_degeneracies[-c]) - ] - else: - blocks[c] = np.reshape( - data[np.reshape(a + b, - row_degeneracies[c] * column_degeneracies[-c])], - (row_degeneracies[c], column_degeneracies[-c])) - return blocks - - -def find_diagonal_sparse_blocks_column_major( - data: np.ndarray, - charges: List[np.ndarray], - flows: List[Union[bool, int]], - return_data: Optional[bool] = True) -> Dict: - """ - Deprecated - - Given the meta data and underlying data of a symmetric matrix, compute - all diagonal blocks and return them in a dict, assuming column-major - ordering. - Args: - data: An np.ndarray of the data. The number of elements in `data` - has to match the number of non-zero elements defined by `charges` - and `flows` - charges: List of np.ndarray, one for each leg. - Each np.ndarray `charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. - - Returns: - dict: Dictionary mapping quantum numbers (integers) to either an np.ndarray - or a python list of locations and shapes, depending on the value of `return_data`. 
- """ - if len(charges) != 2: - raise ValueError("input has to be a two-dimensional symmetric matrix") - _check_flows(flows) - if len(flows) != len(charges): - raise ValueError("`len(flows)` is different from `len(charges)`") - - #we multiply the flows into the charges - row_charges = flows[0] * charges[0] # a list of charges on each row - column_charges = flows[1] * charges[1] # a list of charges on each column - - #get the unique charges - unique_row_charges, row_dims = np.unique(row_charges, return_counts=True) - unique_column_charges, column_dims = np.unique( - column_charges, return_counts=True) - #get the charges common to rows and columns (only those matter) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - - #convenience container for storing the degeneracies of each - #row and column charge - row_degeneracies = dict(zip(unique_row_charges, row_dims)) - column_degeneracies = dict(zip(unique_column_charges, column_dims)) - - # we only care about charges common to row and columns - mask = np.isin(column_charges, -common_charges) - relevant_column_charges = column_charges[mask] - - #some numpy magic to get the index locations of the blocks - #we generate a vector of `len(relevant_column_charges) which, - #for each charge `c` in `relevant_column_charges` holds the - #row-degeneracy of charge `c` - degeneracy_vector = np.empty(len(relevant_column_charges), dtype=np.int64) - #for each charge `c` in `common_charges` we generate a boolean mask - #for indexing the positions where `relevant_column_charges` has a value of `c`. - masks = {} - for c in common_charges: - mask = relevant_column_charges == -c - masks[c] = mask - degeneracy_vector[mask] = row_degeneracies[c] - - # the result of the cumulative sum is a vector containing - # the stop positions of the non-zero values of each column - # within the data vector. - # E.g. 
for `relevant_column_charges` = [0,1,0,0,3], and - # row_degeneracies[0] = 10 - # row_degeneracies[1] = 20 - # row_degeneracies[3] = 30 - # we have - # `stop_positions` = [10, 10+20, 10+20+10, 10+20+10+10, 10+20+10+10+30] - # The starting positions of consecutive elements (in column-major order) in - # each column with charge `c=0` within the data vector are then simply obtained using - # masks[0] = [True, False, True, True, False] - # and `stop_positions[masks[0]] - row_degeneracies[0]` - stop_positions = np.cumsum(degeneracy_vector) - blocks = {} - - for c in common_charges: - #numpy broadcasting is substantially faster than kron! - a = np.expand_dims(stop_positions[masks[c]] - row_degeneracies[c], 0) - b = np.expand_dims(np.arange(row_degeneracies[c]), 1) - if not return_data: - blocks[c] = [ - np.reshape(a + b, row_degeneracies[c] * column_degeneracies[-c]), - (row_degeneracies[c], column_degeneracies[-c]) - ] - else: - blocks[c] = np.reshape( - data[np.reshape(a + b, - row_degeneracies[c] * column_degeneracies[-c])], - (row_degeneracies[c], column_degeneracies[-c])) - return blocks - - -def find_dense_positions_deprecated(left_charges: np.ndarray, left_flow: int, - right_charges: np.ndarray, right_flow: int, - target_charge: int) -> Dict: - """ - Find the dense locations of elements (i.e. the index-values within the DENSE tensor) - in the vector `fused_charges` (resulting from fusing np.ndarrays - `left_charges` and `right_charges`) that have a value of `target_charge`. - For example, given - ``` - left_charges = [-2,0,1,0,0] - right_charges = [-1,0,2,1] - target_charge = 0 - fused_charges = fuse_charges([left_charges, right_charges],[1,1]) - print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] - ``` - we want to find the all different blocks - that fuse to `target_charge=0`, i.e. where `fused_charges==0`, - together with their corresponding index-values of the data in the dense array. 
- `find_dense_blocks` returns a dict mapping tuples `(left_charge, right_charge)` - to an array of integers. - For the above example, we get: - * for `left_charge` = -2 and `right_charge` = 2 we get an array [2]. Thus, `fused_charges[2]` - was obtained from fusing -2 and 2. - * for `left_charge` = 0 and `right_charge` = 0 we get an array [5, 13, 17]. Thus, - `fused_charges[5,13,17]` were obtained from fusing 0 and 0. - * for `left_charge` = 1 and `right_charge` = -1 we get an array [8]. Thus, `fused_charges[8]` - was obtained from fusing 1 and -1. - Args: - left_charges: An np.ndarray of integer charges. - left_flow: The flow direction of the left charges. - right_charges: An np.ndarray of integer charges. - right_flow: The flow direction of the right charges. - target_charge: The target charge. - Returns: - dict: Mapping tuples of integers to np.ndarray of integers. - """ - _check_flows([left_flow, right_flow]) - unique_left = np.unique(left_charges) - unique_right = np.unique(right_charges) - fused = fuse_charges([unique_left, unique_right], [left_flow, right_flow]) - left_inds, right_inds = unfuse( - np.nonzero(fused == target_charge)[0], len(unique_left), - len(unique_right)) - left_c = unique_left[left_inds] - right_c = unique_right[right_inds] - len_right_charges = len(right_charges) - linear_positions = {} - for left_charge, right_charge in zip(left_c, right_c): - left_positions = np.nonzero(left_charges == left_charge)[0] - left_offsets = np.expand_dims(left_positions * len_right_charges, 1) - right_offsets = np.expand_dims( - np.nonzero(right_charges == right_charge)[0], 0) - linear_positions[(left_charge, right_charge)] = np.reshape( - left_offsets + right_offsets, - left_offsets.shape[0] * right_offsets.shape[1]) - return np.sort(np.concatenate(list(linear_positions.values()))) - - -def find_dense_positions(left_charges: np.ndarray, left_flow: int, - right_charges: np.ndarray, right_flow: int, - target_charge: int) -> Dict: - """ - Find the dense 
locations of elements (i.e. the index-values within the DENSE tensor) - in the vector `fused_charges` (resulting from fusing np.ndarrays - `left_charges` and `right_charges`) that have a value of `target_charge`. - For example, given - ``` - left_charges = [-2,0,1,0,0] - right_charges = [-1,0,2,1] - target_charge = 0 - fused_charges = fuse_charges([left_charges, right_charges],[1,1]) - print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] - ``` - we want to find the all different blocks - that fuse to `target_charge=0`, i.e. where `fused_charges==0`, - together with their corresponding index-values of the data in the dense array. - `find_dense_blocks` returns a dict mapping tuples `(left_charge, right_charge)` - to an array of integers. - For the above example, we get: - * for `left_charge` = -2 and `right_charge` = 2 we get an array [2]. Thus, `fused_charges[2]` - was obtained from fusing -2 and 2. - * for `left_charge` = 0 and `right_charge` = 0 we get an array [5, 13, 17]. Thus, - `fused_charges[5,13,17]` were obtained from fusing 0 and 0. - * for `left_charge` = 1 and `right_charge` = -1 we get an array [8]. Thus, `fused_charges[8]` - was obtained from fusing 1 and -1. - Args: - left_charges: An np.ndarray of integer charges. - left_flow: The flow direction of the left charges. - right_charges: An np.ndarray of integer charges. - right_flow: The flow direction of the right charges. - target_charge: The target charge. - Returns: - dict: Mapping tuples of integers to np.ndarray of integers. 
- """ - _check_flows([left_flow, right_flow]) - unique_left, left_degeneracies = np.unique(left_charges, return_counts=True) - unique_right, right_degeneracies = np.unique( - right_charges, return_counts=True) - - common_charges = np.intersect1d( - unique_left, (target_charge - right_flow * unique_right) * left_flow, - assume_unique=True) - right_locations = {} - for c in common_charges: - - right_locations[(target_charge - left_flow * c) * right_flow] = np.nonzero( - right_charges == (target_charge - left_flow * c) * right_flow)[0] - - len_right_charges = len(right_charges) - indices = [] - for n in range(len(left_charges)): - c = left_charges[n] - if c not in common_charges: - continue - indices.append(n * len_right_charges + right_locations[ - (target_charge - left_flow * c) * right_flow]) - return np.concatenate(indices) - - -def find_sparse_positions(left_charges: np.ndarray, left_flow: int, - right_charges: np.ndarray, right_flow: int, - target_charges: Union[List[int], np.ndarray]) -> Dict: - """ - Find the sparse locations of elements (i.e. the index-values within the SPARSE tensor) - in the vector `fused_charges` (resulting from fusing np.ndarrays - `left_charges` and `right_charges`) that have a value of `target_charge`, - assuming that all elements different from `target_charges` are `0`. - For example, given - ``` - left_charges = [-2,0,1,0,0] - right_charges = [-1,0,2,1] - target_charges = [0,1] - fused_charges = fuse_charges([left_charges, right_charges],[1,1]) - print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] - ``` 0 1 2 3 4 5 6 7 8 - we want to find the all different blocks - that fuse to `target_charges=[0,1]`, i.e. where `fused_charges==0` or `1`, - together with their corresponding sparse index-values of the data in the sparse array, - assuming that all elements in `fused_charges` different from `target_charges` are 0. 
- - `find_sparse_blocks` returns a dict mapping integers `target_charge` - to an array of integers denoting the sparse locations of elements within - `fused_charges`. - For the above example, we get: - * `target_charge=0`: [0,1,3,5,7] - * `target_charge=1`: [2,4,6,8] - Args: - left_charges: An np.ndarray of integer charges. - left_flow: The flow direction of the left charges. - right_charges: An np.ndarray of integer charges. - right_flow: The flow direction of the right charges. - target_charge: The target charge. - Returns: - dict: Mapping integers to np.ndarray of integers. - """ - #FIXME: this is probably still not optimal - - _check_flows([left_flow, right_flow]) - target_charges = np.unique(target_charges) - unique_left = np.unique(left_charges) - unique_right = np.unique(right_charges) - fused = fuse_charges([unique_left, unique_right], [left_flow, right_flow]) - - #compute all unique charges that can add up to - #target_charges - left_inds, right_inds = [], [] - for target_charge in target_charges: - li, ri = unfuse( - np.nonzero(fused == target_charge)[0], len(unique_left), - len(unique_right)) - left_inds.append(li) - right_inds.append(ri) - - #now compute the relevant unique left and right charges - unique_left_charges = unique_left[np.unique(np.concatenate(left_inds))] - unique_right_charges = unique_right[np.unique(np.concatenate(right_inds))] - - #only keep those charges that are relevant - relevant_left_charges = left_charges[np.isin(left_charges, - unique_left_charges)] - relevant_right_charges = right_charges[np.isin(right_charges, - unique_right_charges)] - - unique_right_charges, right_dims = np.unique( - relevant_right_charges, return_counts=True) - right_degeneracies = dict(zip(unique_right_charges, right_dims)) - #generate a degeneracy vector which for each value r in relevant_right_charges - #holds the corresponding number of non-zero elements `relevant_right_charges` - #that can add up to `target_charges`. 
- degeneracy_vector = np.empty(len(relevant_left_charges), dtype=np.int64) - right_indices = {} - for left_charge in unique_left_charges: - total_degeneracy = np.sum(right_dims[np.isin( - left_flow * left_charge + right_flow * unique_right_charges, - target_charges)]) - tmp_relevant_right_charges = relevant_right_charges[np.isin( - relevant_right_charges, - (target_charges - left_flow * left_charge) * right_flow)] - - for target_charge in target_charges: - right_indices[(left_charge, target_charge)] = np.nonzero( - tmp_relevant_right_charges == - (target_charge - left_flow * left_charge) * right_flow)[0] - - degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy - - stop_positions = np.cumsum(degeneracy_vector) - start_positions = stop_positions - degeneracy_vector - blocks = {t: [] for t in target_charges} - for left_charge in unique_left_charges: - a = np.expand_dims(start_positions[relevant_left_charges == left_charge], 0) - for target_charge in target_charges: - ri = right_indices[(left_charge, target_charge)] - if len(ri) != 0: - b = np.expand_dims(ri, 1) - tmp = a + b - blocks[target_charge].append(np.reshape(tmp, np.prod(tmp.shape))) - out = {} - for target_charge in target_charges: - out[target_charge] = np.concatenate(blocks[target_charge]) - return out - - -def compute_dense_to_sparse_mapping_deprecated(charges: List[np.ndarray], - flows: List[Union[bool, int]], - target_charge: int) -> int: - """ - Compute the mapping from multi-index positions to the linear positions - within the sparse data container, given the meta-data of a symmetric tensor. - This function returns a list of np.ndarray `index_positions`, with - `len(index_positions)=len(charges)` (equal to the rank of the tensor). - When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. 
- ` - multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) - ` - with `r` the rank of the tensor and `N` the number of non-zero elements of - the symmetric tensor, then the element at position `n` within the linear - data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], - i.e. `data[n]` has the multi-index `multi_indices[n,:]`, and the total charges - can for example be obtained using - ``` - index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) - total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) - for n in range(len(charges)): - total_charges += flows[n]*charges[n][index_positions[n]] - np.testing.assert_allclose(total_charges, 0) - ``` - Args: - charges: List of np.ndarray of int, one for each leg of the - underlying tensor. Each np.ndarray `charges[leg]` - is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - target_charge: The total target charge of the blocks to be calculated. - Returns: - np.ndarray: An (N, r) np.ndarray of dtype np.int16, - with `N` the number of non-zero elements, and `r` - the rank of the tensor. 
- """ - #find the best partition (the one where left and right dimensions are - #closest - dims = np.asarray([len(c) for c in charges]) - t1 = time.time() - fused_charges = fuse_charges(charges, flows) - nz_indices = np.nonzero(fused_charges == target_charge)[0] - if len(nz_indices) == 0: - raise ValueError( - "`charges` do not add up to a total charge {}".format(target_charge)) - - index_locations = [] - for n in reversed(range(len(charges))): - t1 = time.time() - nz_indices, right_indices = unfuse(nz_indices, np.prod(dims[0:n]), dims[n]) - index_locations.insert(0, right_indices) - print(time.time() - t1) - return index_locations - - -def compute_dense_to_sparse_mapping_2(charges: List[np.ndarray], - flows: List[Union[bool, int]], - target_charge: int) -> int: - """ - Compute the mapping from multi-index positions to the linear positions - within the sparse data container, given the meta-data of a symmetric tensor. - This function returns a list of np.ndarray `index_positions`, with - `len(index_positions)=len(charges)` (equal to the rank of the tensor). - When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. - ` - multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) - ` - with `r` the rank of the tensor and `N` the number of non-zero elements of - the symmetric tensor, then the element at position `n` within the linear - data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], - i.e. `data[n]` has the multi-index `multi_indices[n,:]`, and the total charges - can for example be obtained using - ``` - index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) - total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) - for n in range(len(charges)): - total_charges += flows[n]*charges[n][index_positions[n]] - np.testing.assert_allclose(total_charges, 0) - ``` - Args: - charges: List of np.ndarray of int, one for each leg of the - underlying tensor. 
Each np.ndarray `charges[leg]` - is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - target_charge: The total target charge of the blocks to be calculated. - Returns: - np.ndarray: An (N, r) np.ndarray of dtype np.int16, - with `N` the number of non-zero elements, and `r` - the rank of the tensor. - """ - #find the best partition (the one where left and right dimensions are - #closest - dims = np.asarray([len(c) for c in charges]) - - #note: left_charges and right_charges have been fused from RIGHT to LEFT - left_charges, right_charges, partition = _find_best_partition(charges, flows) - t1 = time.time() - nz_indices = find_dense_positions( - left_charges, 1, right_charges, 1, target_charge=target_charge) - print(time.time() - t1) - if len(nz_indices) == 0: - raise ValueError( - "`charges` do not add up to a total charge {}".format(target_charge)) - t1 = time.time() - nz_left_indices, nz_right_indices = unfuse(nz_indices, len(left_charges), - len(right_charges)) - print(time.time() - t1) - index_locations = [] - #first unfuse left charges - for n in range(partition): - t1 = time.time() - indices, nz_left_indices = unfuse(nz_left_indices, dims[n], - np.prod(dims[n + 1:partition])) - index_locations.append(indices) - print(time.time() - t1) - for n in range(partition, len(dims)): - t1 = time.time() - indices, nz_right_indices = unfuse(nz_right_indices, dims[n], - np.prod(dims[n + 1::])) - index_locations.append(indices) - print(time.time() - t1) - - return index_locations - - -def compute_dense_to_sparse_mapping(charges: List[np.ndarray], - flows: List[Union[bool, int]], - target_charge: int) -> int: - """ - Compute the mapping from multi-index positions to the linear positions - within the sparse data container, given the meta-data of a symmetric tensor. 
- This function returns a list of np.ndarray `index_positions`, with - `len(index_positions)=len(charges)` (equal to the rank of the tensor). - When stacked into a `(N,r)` np.ndarray `multi_indices`, i.e. - ` - multi_indices = np.stack(index_positions, axis=1) #np.ndarray of shape (N,r) - ` - with `r` the rank of the tensor and `N` the number of non-zero elements of - the symmetric tensor, then the element at position `n` within the linear - data-array `data` of the tensor have multi-indices given by `multi_indices[n,:], - i.e. `data[n]` has the multi-index `multi_indices[n,:]`, and the total charges - can for example be obtained using - ``` - index_positions = compute_dense_to_sparse_mapping(charges, flows, target_charge=0) - total_charges = np.zeros(len(index_positions[0]), dtype=np.int16) - for n in range(len(charges)): - total_charges += flows[n]*charges[n][index_positions[n]] - np.testing.assert_allclose(total_charges, 0) - ``` - Args: - charges: List of np.ndarray of int, one for each leg of the - underlying tensor. Each np.ndarray `charges[leg]` - is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - flows: A list of integers, one for each leg, - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - target_charge: The total target charge of the blocks to be calculated. - Returns: - list of np.ndarray: A list of length `r`, with `r` the rank of the tensor. - Each element in the list is an N-dimensional np.ndarray of int, - with `N` the number of non-zero elements. 
- """ - #find the best partition (the one where left and right dimensions are - #closest - dims = np.asarray([len(c) for c in charges]) - #note: left_charges and right_charges have been fused from RIGHT to LEFT - left_charges, right_charges, partition = _find_best_partition(charges, flows) - nz_indices = find_dense_positions( - left_charges, 1, right_charges, 1, target_charge=target_charge) - - if len(nz_indices) == 0: - raise ValueError( - "`charges` do not add up to a total charge {}".format(target_charge)) - return np.unravel_index(nz_indices, dims) - - -class BlockSparseTensor: - """ - Minimal class implementation of block sparsity. - The class design follows Glen's proposal (Design 0). - The class currently only supports a single U(1) symmetry - and only numpy.ndarray. - - Attributes: - * self.data: A 1d np.ndarray storing the underlying - data of the tensor - * self.charges: A list of `np.ndarray` of shape - (D,), where D is the bond dimension. Once we go beyond - a single U(1) symmetry, this has to be updated. - - * self.flows: A list of integers of length `k`. - `self.flows` determines the flows direction of charges - on each leg of the tensor. A value of `-1` denotes - outflowing charge, a value of `1` denotes inflowing - charge. - - The tensor data is stored in self.data, a 1d np.ndarray. - """ - - def __init__(self, data: np.ndarray, indices: List[Index]) -> None: - """ - Args: - data: An np.ndarray of the data. The number of elements in `data` - has to match the number of non-zero elements defined by `charges` - and `flows` - indices: List of `Index` objecst, one for each leg. 
- """ - self.indices = indices - _check_flows(self.flows) - num_non_zero_elements = compute_num_nonzero(self.charges, self.flows) - - if num_non_zero_elements != len(data.flat): - raise ValueError("number of tensor elements defined " - "by `charges` is different from" - " len(data)={}".format(len(data.flat))) - - self.data = np.asarray(data.flat) #do not copy data - - @classmethod - def randn(cls, indices: List[Index], - dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": - """ - Initialize a random symmetric tensor from random normal distribution. - Args: - indices: List of `Index` objecst, one for each leg. - dtype: An optional numpy dtype. The dtype of the tensor - Returns: - BlockSparseTensor - """ - charges = [i.charges for i in indices] - flows = [i.flow for i in indices] - num_non_zero_elements = compute_num_nonzero(charges, flows) - backend = backend_factory.get_backend('numpy') - data = backend.randn((num_non_zero_elements,), dtype=dtype) - return cls(data=data, indices=indices) - - @classmethod - def random(cls, indices: List[Index], - dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": - """ - Initialize a random symmetric tensor from random normal distribution. - Args: - indices: List of `Index` objecst, one for each leg. - dtype: An optional numpy dtype. 
The dtype of the tensor - Returns: - BlockSparseTensor - """ - charges = [i.charges for i in indices] - flows = [i.flow for i in indices] - num_non_zero_elements = compute_num_nonzero(charges, flows) - dtype = dtype if dtype is not None else self.np.float64 - - def init_random(): - if ((np.dtype(dtype) is np.dtype(np.complex128)) or - (np.dtype(dtype) is np.dtype(np.complex64))): - return np.random.rand(num_non_zero_elements).astype( - dtype) - 0.5 + 1j * ( - np.random.rand(num_non_zero_elements).astype(dtype) - 0.5) - return np.random.randn(num_non_zero_elements).astype(dtype) - 0.5 - - return cls(data=init_random(), indices=indices) - - @property - def rank(self): - return len(self.indices) - - @property - def dense_shape(self) -> Tuple: - """ - The dense shape of the tensor. - Returns: - Tuple: A tuple of `int`. - """ - return tuple([i.dimension for i in self.indices]) - - @property - def shape(self) -> Tuple: - """ - The sparse shape of the tensor. - Returns: - Tuple: A tuple of `Index` objects. - """ - return tuple(self.indices) - - @property - def dtype(self) -> Type[np.number]: - return self.data.dtype - - @property - def flows(self): - return [i.flow for i in self.indices] - - @property - def charges(self): - return [i.charges for i in self.indices] - - def transpose(self, - order: Union[List[int], np.ndarray], - transposed_linear_positions: Optional[np.ndarray] = None - ) -> "BlockSparseTensor": - """ - Transpose the tensor into the new order `order`. This routine currently shuffles - data. - Args: - order: The new order of indices. - transposed_linear_positions: An np.ndarray of int for reshuffling the data, - typically the output of a prior call to `transpose`. Passing `transposed_linear_positions` - can greatly speed up the transposition. - Returns: - BlockSparseTensor: The transposed tensor. - """ - #FIXME: this implementation uses scipy.sparse.csr_matrix to generate the - #lookup-table from dense to sparse indices. 
According to some quick - #testing, the final lookup is currently the bottleneck. - #FIXME: transpose currently shuffles data. This can in principle be postponed - #until `tensordot` or `find_diagonal_sparsenn_blocks`, at the cost of - #maintaining two lookup tables for sparse-to-dense positions and dense-to-sparse - #positions - if len(order) != self.rank: - raise ValueError( - "`len(order)={}` is different form `self.rank={}`".format( - len(order), self.rank)) - #transpose is the only function using self.dense_to_sparse_table - #so we can initialize it here. This will change if we are implementing - #lazy shuffling of data. In this case, `find_diagonal_sparse_blocks` - #also needs - - #we use elementary indices here because it is - #more efficient to get the fused charges using - #the best partition - if transposed_linear_positions is None: - elementary_indices = {} - flat_elementary_indices = [] - - for n in range(self.rank): - elementary_indices[n] = self.indices[n].get_elementary_indices() - flat_elementary_indices.extend(elementary_indices[n]) - flat_index_list = np.arange(len(flat_elementary_indices)) - cum_num_legs = np.append( - 0, np.cumsum([len(elementary_indices[n]) for n in range(self.rank)])) - flat_order = np.concatenate( - [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) - - flat_charges = [i.charges for i in flat_elementary_indices] - flat_flows = [i.flow for i in flat_elementary_indices] - flat_dims = [len(c) for c in flat_charges] - flat_strides = np.flip(np.append(1, np.cumprod(np.flip(flat_dims[1::])))) - if not hasattr(self, 'dense_to_sparse_table'): - #find the best partition into left and right charges - left_charges, right_charges, _ = _find_best_partition( - flat_charges, flat_flows) - #find the index-positions of the elements in the fusion - #of `left_charges` and `right_charges` that have `0` - #total charge (those are the only non-zero elements). 
- linear_positions = find_dense_positions( - left_charges, 1, right_charges, 1, target_charge=0) - - self.dense_to_sparse_table = sp.sparse.csr_matrix((np.arange( - len(self.data)), (linear_positions, - np.zeros(len(self.data), dtype=np.int64)))) - - flat_tr_charges = [flat_charges[n] for n in flat_order] - flat_tr_flows = [flat_flows[n] for n in flat_order] - flat_tr_strides = [flat_strides[n] for n in flat_order] - flat_tr_dims = [flat_dims[n] for n in flat_order] - - tr_left_charges, tr_right_charges, _ = _find_best_partition( - flat_tr_charges, flat_tr_flows) - #FIXME: this should be done without fully fusing the strides - tr_dense_linear_positions = fuse_charges([ - np.arange(flat_tr_dims[n]) * flat_tr_strides[n] - for n in range(len(flat_tr_dims)) - ], - flows=[1] * len(flat_tr_dims)) - tr_linear_positions = find_dense_positions(tr_left_charges, 1, - tr_right_charges, 1, 0) - - inds = np.squeeze(self.dense_to_sparse_table[ - tr_dense_linear_positions[tr_linear_positions], 0].toarray()) - else: - inds = transposed_linear_positions - self.data = self.data[inds] - return inds - - def transpose_intersect1d( - self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": - """ - Transpose the tensor into the new order `order` - Args: pp - order: The new order of indices. - Returns: - BlockSparseTensor: The transposed tensor. - """ - #FIXME: this implementation uses scipy.sparse.csr_matrix to generate the - #lookup-table from dense to sparse indices. According to some quick - #testing, the final lookup is currently the bottleneck. - #FIXME: transpose currently shuffles data. 
This can in principle be postponed - #until `tensordot` or `find_diagonal_sparse_blocks` - if len(order) != self.rank: - raise ValueError(len(order), self.rank) - charges = self.charges #call only once in case some of the indices are merged indices - dims = [len(c) for c in charges] - - strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) - #find the best partition into left and right charges - left_charges, right_charges, _ = _find_best_partition(charges, self.flows) - #find the index-positions of the elements in the fusion - #of `left_charges` and `right_charges` that have `0` - #total charge (those are the only non-zero elements). - linear_positions = find_dense_positions( - left_charges, 1, right_charges, 1, target_charge=0) - - tr_charges = [charges[n] for n in order] - tr_flows = [self.flows[n] for n in order] - tr_strides = [strides[n] for n in order] - tr_dims = [dims[n] for n in order] - tr_left_charges, tr_right_charges, _ = _find_best_partition( - tr_charges, tr_flows) - - tr_dense_linear_positions = fuse_charges( - [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], - flows=[1] * len(tr_dims)) - tr_linear_positions = find_dense_positions(tr_left_charges, 1, - tr_right_charges, 1, 0) - new_linear_positions = tr_dense_linear_positions[tr_linear_positions] - _, _, inds = np.intersect1d( - linear_positions, - new_linear_positions, - return_indices=True, - assume_unique=True) - self.data = self.data[inds] - - # def transpose_lookup(self, order: Union[List[int], np.ndarray] - # ) -> "BlockSparseTensor": - # """ - # Deprecated - - # Transpose the tensor into the new order `order`. Uses a simple cython std::map - # for the lookup - # Args: - # order: The new order of indices. - # Returns: - # BlockSparseTensor: The transposed tensor. 
- # """ - # if len(order) != self.rank: - # raise ValueError( - # "`len(order)={}` is different form `self.rank={}`".format( - # len(order), self.rank)) - # charges = self.charges #call only once in case some of the indices are merged indices - # dims = [len(c) for c in charges] - - # strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) - # #find the best partition into left and right charges - # left_charges, right_charges, _ = _find_best_partition(charges, self.flows) - # #find the index-positions of the elements in the fusion - # #of `left_charges` and `right_charges` that have `0` - # #total charge (those are the only non-zero elements). - # linear_positions = find_dense_positions( - # left_charges, 1, right_charges, 1, target_charge=0) - - # tr_charges = [charges[n] for n in order] - # tr_flows = [self.flows[n] for n in order] - # tr_strides = [strides[n] for n in order] - # tr_dims = [dims[n] for n in order] - # tr_left_charges, tr_right_charges, _ = _find_best_partition( - # tr_charges, tr_flows) - # #FIXME: this should be done without fully fusing the strides - # tr_dense_linear_positions = fuse_charges( - # [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], - # flows=[1] * len(tr_dims)) - # tr_linear_positions = find_dense_positions(tr_left_charges, 1, - # tr_right_charges, 1, 0) - # inds = lookup(linear_positions, - # tr_dense_linear_positions[tr_linear_positions]) - # self.data = self.data[inds] - - def transpose_searchsorted( - self, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": - """ - Deprecated: - - Transpose the tensor into the new order `order`. Uses `np.searchsorted` - for the lookup. - Args: - order: The new order of indices. - Returns: - BlockSparseTensor: The transposed tensor. 
- """ - if len(order) != self.rank: - raise ValueError( - "`len(order)={}` is different form `self.rank={}`".format( - len(order), self.rank)) - charges = self.charges #call only once in case some of the indices are merged indices - dims = [len(c) for c in charges] - - strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) - #find the best partition into left and right charges - left_charges, right_charges, _ = _find_best_partition(charges, self.flows) - #find the index-positions of the elements in the fusion - #of `left_charges` and `right_charges` that have `0` - #total charge (those are the only non-zero elements). - linear_positions = find_dense_positions( - left_charges, 1, right_charges, 1, target_charge=0) - - tr_charges = [charges[n] for n in order] - tr_flows = [self.flows[n] for n in order] - tr_strides = [strides[n] for n in order] - tr_dims = [dims[n] for n in order] - tr_left_charges, tr_right_charges, _ = _find_best_partition( - tr_charges, tr_flows) - #FIXME: this should be done without fully fusing the strides - tr_dense_linear_positions = fuse_charges( - [np.arange(tr_dims[n]) * tr_strides[n] for n in range(len(tr_dims))], - flows=[1] * len(tr_dims)) - tr_linear_positions = find_dense_positions(tr_left_charges, 1, - tr_right_charges, 1, 0) - - inds = np.searchsorted(linear_positions, - tr_dense_linear_positions[tr_linear_positions]) - self.data = self.data[inds] - - def reset_shape(self) -> None: - """ - Bring the tensor back into its elementary shape. - """ - self.indices = self.get_elementary_indices() - - def get_elementary_indices(self) -> List: - """ - Compute the elementary indices of the array. - """ - elementary_indices = [] - for i in self.indices: - elementary_indices.extend(i.get_elementary_indices()) - - return elementary_indices - - def reshape(self, shape: Union[Iterable[Index], Iterable[int]]) -> None: - """ - Reshape `tensor` into `shape` in place. 
- `BlockSparseTensor.reshape` works essentially the same as the dense - version, with the notable exception that the tensor can only be - reshaped into a form compatible with its elementary indices. - The elementary indices are the indices at the leaves of the `Index` - objects `tensors.indices`. - For example, while the following reshaping is possible for regular - dense numpy tensor, - ``` - A = np.random.rand(6,6,6) - np.reshape(A, (2,3,6,6)) - ``` - the same code for BlockSparseTensor - ``` - q1 = np.random.randint(0,10,6) - q2 = np.random.randint(0,10,6) - q3 = np.random.randint(0,10,6) - i1 = Index(charges=q1,flow=1) - i2 = Index(charges=q2,flow=-1) - i3 = Index(charges=q3,flow=1) - A=BlockSparseTensor.randn(indices=[i1,i2,i3]) - print(A.shape) #prints (6,6,6) - A.reshape((2,3,6,6)) #raises ValueError - ``` - raises a `ValueError` since (2,3,6,6) - is incompatible with the elementary shape (6,6,6) of the tensor. - - Args: - tensor: A symmetric tensor. - shape: The new shape. Can either be a list of `Index` - or a list of `int`. - Returns: - BlockSparseTensor: A new tensor reshaped into `shape` - """ - dense_shape = [] - for s in shape: - if isinstance(s, Index): - dense_shape.append(s.dimension) - else: - dense_shape.append(s) - # a few simple checks - if np.prod(dense_shape) != np.prod(self.dense_shape): - raise ValueError("A tensor with {} elements cannot be " - "reshaped into a tensor with {} elements".format( - np.prod(self.shape), np.prod(dense_shape))) - - #keep a copy of the old indices for the case where reshaping fails - #FIXME: this is pretty hacky! - index_copy = [i.copy() for i in self.indices] - - def raise_error(): - #if this error is raised then `shape` is incompatible - #with the elementary indices. We then reset the shape - #to what is was before the call to `reshape`. 
- self.indices = index_copy - elementary_indices = [] - for i in self.indices: - elementary_indices.extend(i.get_elementary_indices()) - raise ValueError("The shape {} is incompatible with the " - "elementary shape {} of the tensor.".format( - dense_shape, - tuple([e.dimension for e in elementary_indices]))) - - self.reset_shape() #bring tensor back into its elementary shape - for n in range(len(dense_shape)): - if dense_shape[n] > self.dense_shape[n]: - while dense_shape[n] > self.dense_shape[n]: - #fuse indices - i1, i2 = self.indices.pop(n), self.indices.pop(n) - #note: the resulting flow is set to one since the flow - #is multiplied into the charges. As a result the tensor - #will then be invariant in any case. - self.indices.insert(n, fuse_index_pair(i1, i2)) - if self.dense_shape[n] > dense_shape[n]: - raise_error() - elif dense_shape[n] < self.dense_shape[n]: - raise_error() - #at this point the first len(dense_shape) indices of the tensor - #match the `dense_shape`. - while len(dense_shape) < len(self.indices): - i2, i1 = self.indices.pop(), self.indices.pop() - self.indices.append(fuse_index_pair(i1, i2)) - - def get_diagonal_blocks(self, return_data: Optional[bool] = True) -> Dict: - """ - Obtain the diagonal blocks of symmetric matrix. - BlockSparseTensor has to be a matrix. - For matrices with shape[0] << shape[1], this routine avoids explicit fusion - of column charges. - - Args: - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. 
- Returns: - dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) - - """ - if self.rank != 2: - raise ValueError( - "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" - .format(self.rank)) - - row_indices = self.indices[0].get_elementary_indices() - column_indices = self.indices[1].get_elementary_indices() - - return find_diagonal_sparse_blocks( - data=self.data, - row_charges=[i.charges for i in row_indices], - column_charges=[i.charges for i in column_indices], - row_flows=[i.flow for i in row_indices], - column_flows=[i.flow for i in column_indices], - return_data=return_data) - - def get_diagonal_blocks_deprecated_1( - self, return_data: Optional[bool] = True) -> Dict: - """ - Obtain the diagonal blocks of symmetric matrix. - BlockSparseTensor has to be a matrix. - For matrices with shape[0] << shape[1], this routine avoids explicit fusion - of column charges. - - Args: - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. 
- Returns: - dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) - - """ - if self.rank != 2: - raise ValueError( - "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" - .format(self.rank)) - - row_indices = self.indices[0].get_elementary_indices() - column_indices = self.indices[1].get_elementary_indices() - - return find_diagonal_sparse_blocks_deprecated_1( - data=self.data, - row_charges=[i.charges for i in row_indices], - column_charges=[i.charges for i in column_indices], - row_flows=[i.flow for i in row_indices], - column_flows=[i.flow for i in column_indices], - return_data=return_data) - - def get_diagonal_blocks_deprecated_0( - self, return_data: Optional[bool] = True) -> Dict: - """ - Deprecated - - Obtain the diagonal blocks of symmetric matrix. - BlockSparseTensor has to be a matrix. - Args: - return_data: If `True`, the return dictionary maps quantum numbers `q` to - actual `np.ndarray` with the data. This involves a copy of data. - If `False`, the returned dict maps quantum numbers of a list - [locations, shape], where `locations` is an np.ndarray of type np.int64 - containing the locations of the tensor elements within A.data, i.e. - `A.data[locations]` contains the elements belonging to the tensor with - quantum numbers `(q,q). `shape` is the shape of the corresponding array. - Returns: - dict: Dictionary mapping charge to np.ndarray of rank 2 (a matrix) - - """ - if self.rank != 2: - raise ValueError( - "`get_diagonal_blocks` can only be called on a matrix, but found rank={}" - .format(self.rank)) - - return find_diagonal_sparse_blocks_deprecated_0( - data=self.data, - charges=self.charges, - flows=self.flows, - return_data=return_data) - - -def reshape(tensor: BlockSparseTensor, - shape: Union[Iterable[Index], Iterable[int]]) -> BlockSparseTensor: - """ - Reshape `tensor` into `shape`. 
- `reshape` works essentially the same as the dense version, with the - notable exception that the tensor can only be reshaped into a form - compatible with its elementary indices. The elementary indices are - the indices at the leaves of the `Index` objects `tensors.indices`. - For example, while the following reshaping is possible for regular - dense numpy tensor, - ``` - A = np.random.rand(6,6,6) - np.reshape(A, (2,3,6,6)) - ``` - the same code for BlockSparseTensor - ``` - q1 = np.random.randint(0,10,6) - q2 = np.random.randint(0,10,6) - q3 = np.random.randint(0,10,6) - i1 = Index(charges=q1,flow=1) - i2 = Index(charges=q2,flow=-1) - i3 = Index(charges=q3,flow=1) - A=BlockSparseTensor.randn(indices=[i1,i2,i3]) - print(A.shape) #prints (6,6,6) - reshape(A, (2,3,6,6)) #raises ValueError - ``` - raises a `ValueError` since (2,3,6,6) - is incompatible with the elementary shape (6,6,6) of the tensor. - - Args: - tensor: A symmetric tensor. - shape: The new shape. Can either be a list of `Index` - or a list of `int`. 
- Returns: - BlockSparseTensor: A new tensor reshaped into `shape` - """ - result = BlockSparseTensor( - data=tensor.data.copy(), indices=[i.copy() for i in tensor.indices]) - result.reshape(shape) - return result diff --git a/tensornetwork/block_tensor/block_tensor_old_test.py b/tensornetwork/block_tensor/block_tensor_old_test.py deleted file mode 100644 index 9f11bec6e..000000000 --- a/tensornetwork/block_tensor/block_tensor_old_test.py +++ /dev/null @@ -1,176 +0,0 @@ -import numpy as np -import pytest -# pylint: disable=line-too-long -from tensornetwork.block_tensor.block_tensor import BlockSparseTensor, compute_num_nonzero, find_sparse_positions, find_dense_positions -from index import Index, fuse_charges - -np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128] - - -@pytest.mark.parametrize("dtype", np_dtypes) -def test_block_sparse_init(dtype): - D = 10 #bond dimension - B = 10 #number of blocks - rank = 4 - flows = np.asarray([1 for _ in range(rank)]) - flows[-2::] = -1 - charges = [ - np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16) - for _ in range(rank) - ] - indices = [ - Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) - for n in range(rank) - ] - num_elements = compute_num_nonzero([i.charges for i in indices], - [i.flow for i in indices]) - A = BlockSparseTensor.random(indices=indices, dtype=dtype) - assert A.dtype == dtype - for r in range(rank): - assert A.indices[r].name == 'index{}'.format(r) - assert A.dense_shape == tuple([D] * rank) - assert len(A.data) == num_elements - - -def test_find_dense_positions(): - left_charges = np.asarray([-2, 0, 1, 0, 0]).astype(np.int16) - right_charges = np.asarray([-1, 0, 2, 1]).astype(np.int16) - target_charge = 0 - fused_charges = fuse_charges([left_charges, right_charges], [1, 1]) - dense_positions = find_dense_positions(left_charges, 1, right_charges, 1, - target_charge) - np.testing.assert_allclose(dense_positions, - np.nonzero(fused_charges == 
target_charge)[0]) - - -def test_find_dense_positions_2(): - D = 40 #bond dimension - B = 4 #number of blocks - dtype = np.int16 #the dtype of the quantum numbers - rank = 4 - flows = np.asarray([1 for _ in range(rank)]) - flows[-2::] = -1 - charges = [ - np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - for _ in range(rank) - ] - indices = [ - Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) - for n in range(rank) - ] - n1 = compute_num_nonzero([i.charges for i in indices], - [i.flow for i in indices]) - row_charges = fuse_charges([indices[n].charges for n in range(rank // 2)], - [1 for _ in range(rank // 2)]) - column_charges = fuse_charges( - [indices[n].charges for n in range(rank // 2, rank)], - [1 for _ in range(rank // 2, rank)]) - - i01 = indices[0] * indices[1] - i23 = indices[2] * indices[3] - positions = find_dense_positions(i01.charges, 1, i23.charges, 1, 0) - assert len(positions) == n1 - - -def test_find_sparse_positions(): - D = 40 #bond dimension - B = 4 #number of blocks - dtype = np.int16 #the dtype of the quantum numbers - rank = 4 - flows = np.asarray([1 for _ in range(rank)]) - flows[-2::] = -1 - charges = [ - np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - for _ in range(rank) - ] - indices = [ - Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) - for n in range(rank) - ] - n1 = compute_num_nonzero([i.charges for i in indices], - [i.flow for i in indices]) - row_charges = fuse_charges([indices[n].charges for n in range(rank // 2)], - [1 for _ in range(rank // 2)]) - column_charges = fuse_charges( - [indices[n].charges for n in range(rank // 2, rank)], - [1 for _ in range(rank // 2, rank)]) - - i01 = indices[0] * indices[1] - i23 = indices[2] * indices[3] - unique_row_charges = np.unique(i01.charges) - unique_column_charges = np.unique(i23.charges) - common_charges = np.intersect1d( - unique_row_charges, -unique_column_charges, assume_unique=True) - blocks = find_sparse_positions( - 
i01.charges, 1, i23.charges, 1, target_charges=[0]) - assert sum([len(v) for v in blocks.values()]) == n1 - np.testing.assert_allclose(np.sort(blocks[0]), np.arange(n1)) - - -def test_find_sparse_positions_2(): - D = 40 #bond dimension - B = 4 #number of blocks - dtype = np.int16 #the dtype of the quantum numbers - flows = [1, -1] - - rank = len(flows) - charges = [ - np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - for _ in range(rank) - ] - indices = [ - Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) - for n in range(rank) - ] - i1, i2 = indices - common_charges = np.intersect1d(i1.charges, i2.charges) - row_locations = find_sparse_positions( - left_charges=i1.charges, - left_flow=flows[0], - right_charges=i2.charges, - right_flow=flows[1], - target_charges=common_charges) - fused = (i1 * i2).charges - relevant = fused[np.isin(fused, common_charges)] - for k, v in row_locations.items(): - np.testing.assert_allclose(np.nonzero(relevant == k)[0], np.sort(v)) - - -def test_get_diagonal_blocks(): - D = 40 #bond dimension - B = 4 #number of blocks - dtype = np.int16 #the dtype of the quantum numbers - rank = 4 - flows = np.asarray([1 for _ in range(rank)]) - flows[-2::] = -1 - charges = [ - np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - for _ in range(rank) - ] - indices = [ - Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) - for n in range(rank) - ] - common_charges = np.intersect1d(indices[0].charges, indices[1].charges) - row_locations = find_sparse_positions( - left_charges=indices[0].charges, - left_flow=1, - right_charges=indices[1].charges, - right_flow=1, - target_charges=common_charges) - - -def test_dense_transpose(): - Ds = [10, 11, 12] #bond dimension - rank = len(Ds) - flows = np.asarray([1 for _ in range(rank)]) - flows[-2::] = -1 - charges = [np.zeros(Ds[n], dtype=np.int16) for n in range(rank)] - indices = [ - Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) - for n in 
range(rank) - ] - A = BlockSparseTensor.random(indices=indices, dtype=np.float64) - B = np.transpose(np.reshape(A.data.copy(), Ds), (1, 0, 2)) - A.transpose((1, 0, 2)) - np.testing.assert_allclose(A.data, B.flat) diff --git a/tensornetwork/block_tensor/chargebkp.py b/tensornetwork/block_tensor/chargebkp.py deleted file mode 100644 index 46b9ea4d1..000000000 --- a/tensornetwork/block_tensor/chargebkp.py +++ /dev/null @@ -1,1040 +0,0 @@ -# Copyright 2019 The TensorNetwork Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -import numpy as np -from tensornetwork.network_components import Node, contract, contract_between -# pylint: disable=line-too-long -from tensornetwork.backends import backend_factory -import copy -import warnings -from typing import List, Union, Any, Optional, Tuple, Text, Iterable, Type - - -def _copy_charges(charges): - cs = [] - for n in range(len(charges)): - c = type(charges[n]).__new__(type( - charges[n])) #create a new charge object of type type(other) - c.__init__(charges[n].charges.copy()) - cs.append(c) - return cs - - -class BaseCharge: - """ - Base class for fundamental charges (i.e. 
for symmetries that - are not products of smaller groups) - """ - - def __init__(self, - charges: Optional[Union[List[np.ndarray], np.ndarray]] = None, - shifts: Optional[Union[List[int], np.ndarray]] = None) -> None: - """ - Initialize a BaseCharge object. - Args: - charges: Optional `np.ndarray` or list of `np.ndarray` of type `int` holdingn - the physical charges. If a list of `np,ndarray` is passed, the arrays are merged - into a single `np.ndarray` by `np.left_shift`-ing and adding up charges. The amount - of left-shift per `np,ndarray` is determined by its `dtype`. E.g. an `np,ndarray` of - `dtype=np.int16` is shifted by 16 bits. Charges are shifted and added moving from - small to large indices in `charges`. `BaseCharge` can hold at most 8 individual - charges of `dtype=np.int8` on 64-bit architectures. - shifts: An optional list of shifts, used for initializing a `BaseCharge` object from - an existing `BaseCharge` object. - """ - if charges is not None: - if isinstance(charges, np.ndarray): - charges = [charges] - self._itemsizes = [c.dtype.itemsize for c in charges] - if np.sum(self._itemsizes) > 8: - raise TypeError("number of bits required to store all charges " - "in a single int is larger than 64") - - if len(charges) > 1: - if shifts is not None: - raise ValueError("If `shifts` is passed, only a single charge array " - "can be passed. 
Got len(charges) = {}".format( - len(charges))) - if shifts is None: - dtype = np.int8 - if np.sum(self._itemsizes) > 1: - dtype = np.int16 - if np.sum(self._itemsizes) > 2: - dtype = np.int32 - if np.sum(self._itemsizes) > 4: - dtype = np.int64 - #multiply by eight to get number of bits - self.shifts = 8 * np.flip( - np.append(0, np.cumsum(np.flip( - self._itemsizes[1::])))).astype(dtype) - dtype_charges = [c.astype(dtype) for c in charges] - self.charges = np.sum([ - np.left_shift(dtype_charges[n], self.shifts[n]) - for n in range(len(dtype_charges)) - ], - axis=0).astype(dtype) - else: - if np.max(shifts) >= charges[0].dtype.itemsize * 8: - raise TypeError("shifts {} are incompatible with dtype {}".format( - shifts, charges[0].dtype)) - self.shifts = np.asarray(shifts) - self.charges = charges[0] - else: - self.charges = np.asarray([]) - self.shifts = np.asarray([]) - - def __add__(self, other: "BaseCharge") -> "BaseCharge": - """ - Fuse the charges of two `BaseCharge` objects and return a new - `BaseCharge` holding the result. - Args: - other: A `BaseChare` object. - Returns: - BaseCharge: The result of fusing `self` with `other`. - """ - raise NotImplementedError("`__add__` is not implemented for `BaseCharge`") - - def __sub__(self, other: "BaseCharge") -> "BaseCharge": - """ - Subtract the charges of `other` from `self. - Returns a `BaseCharge` holding the result. - Args: - other: A `BaseChare` object. - Returns: - BaseCharge: The result subtracting `other` from `self`. - """ - - raise NotImplementedError("`__sub__` is not implemented for `BaseCharge`") - - def __matmul__(self, other: "BaseCharge") -> "BaseCharge": - """ - Build the direct product of two charges and return - it in a new `BaseCharge` object. - Args: - other: A `BaseCharge` object. - Returns: - BaseCharge: The direct product of `self` and `other`. 
- """ - raise NotImplementedError( - "`__matmul__` is not implemented for `BaseCharge`") - - def get_item(self, n: Union[np.ndarray, int]) -> np.ndarray: - """ - Return the charge-element at position `n`. - Args: - n: An integer or `np.ndarray`. - Returns: - np.ndarray: The charges at `n`. - """ - return self.charges[n] - - def get_item_ndarray(self, n: Union[np.ndarray, int]) -> np.ndarray: - """ - Return the charge-element at position `n`. - Needed to provide a common interface with `ChargeCollection`. - Args: - n: An integer or `np.ndarray`. - Returns: - np.ndarray: The charges at `n`. - - """ - - return self.get_item(n) - - def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": - """ - Return the charge-element at position `n`, wrapped into a `BaseCharge` - object. - Args: - n: An integer or `np.ndarray`. - Returns: - BaseCharge: The charges at `n`. - """ - - if isinstance(n, (np.integer, int)): - n = np.asarray([n]) - charges = self.charges[n] - obj = self.__new__(type(self)) - obj.__init__(charges=[charges], shifts=self.shifts) - return obj - - @property - def num_symmetries(self): - """ - The number of individual symmetries stored in this object. - """ - return len(self.shifts) - - def __len__(self) -> int: - return np.prod(self.charges.shape) - - def __repr__(self): - return str(type(self)) + '\nshifts: ' + self.shifts.__repr__( - ) + '\n' + 'charges: ' + self.charges.__repr__() + '\n' - - @property - def dual_charges(self) -> np.ndarray: - raise NotImplementedError( - "`dual_charges` is not implemented for `BaseCharge`") - - def __mul__(self, number: Union[bool, int]) -> "BaseCharge": - """ - Multiply `self` with `number` from the left. - `number` can take values in `1,-1, 0, True, False`. - This multiplication is used to transform between charges and dual-charges. - Args: - number: Can can take values in `1,-1, 0, True, False`. - If `1,True`, return the original object - If `-1, 0, False` return a new `BaseCharge` holding the - dual-charges. 
- Returns: - BaseCharge: The result of `self * number` - """ - raise NotImplementedError("`__mul__` is not implemented for `BaseCharge`") - - def __rmul__(self, number: Union[bool, int]) -> "BaseCharge": - """ - Multiply `self` with `number` from the right. - `number` can take values in `1,-1, 0, True, False`. - This multiplication is used to transform between charges and dual-charges. - Args: - number: Can can take values in `1,-1, 0, True, False`. - If `1,True`, return the original object - If `-1, 0, False` return a new `BaseCharge` holding the - dual-charges. - Returns: - BaseCharge: The result of `number * self`. - """ - - raise NotImplementedError("`__rmul__` is not implemented for `BaseCharge`") - - @property - def dtype(self): - return self.charges.dtype - - def unique(self, - return_index=False, - return_inverse=False, - return_counts=False - ) -> Tuple["BaseCharge", np.ndarray, np.ndarray, np.ndarray]: - """ - Compute the unique charges in `BaseCharge`. - See np.unique for a more detailed explanation. This function - does the same but instead of a np.ndarray, it returns the unique - elements in a `BaseCharge` object. - Args: - return_index: If `True`, also return the indices of `self.charges` (along the specified axis, - if provided, or in the flattened array) that result in the unique array. - return_inverse: If `True`, also return the indices of the unique array (for the specified - axis, if provided) that can be used to reconstruct `self.charges`. - return_counts: If `True`, also return the number of times each unique item appears - in `self.charges`. - Returns: - BaseCharge: The sorted unique values. - np.ndarray: The indices of the first occurrences of the unique values in the - original array. Only provided if `return_index` is True. - np.ndarray: The indices to reconstruct the original array from the - unique array. Only provided if `return_inverse` is True. 
- np.ndarray: The number of times each of the unique values comes up in the - original array. Only provided if `return_counts` is True. - """ - result = np.unique( - self.charges, - return_index=return_index, - return_inverse=return_inverse, - return_counts=return_counts) - if not (return_index or return_inverse or return_counts): - out = self.__new__(type(self)) - out.__init__([result], self.shifts) - return out - else: - out = self.__new__(type(self)) - out.__init__([result[0]], self.shifts) - return tuple([out] + [result[n] for n in range(1, len(result))]) - - def isin(self, targets: Union[int, Iterable, "BaseCharge"]) -> np.ndarray: - """ - Test each element of `BaseCharge` if it is in `targets`. Returns - an `np.ndarray` of `dtype=bool`. - Args: - targets: The test elements - Returns: - np.ndarray: An array of `bool` type holding the result of the comparison. - """ - if isinstance(targets, type(self)): - if not np.all(self.shifts == targets.shifts): - raise ValueError( - "Cannot compare charges with different shifts {} and {}".format( - self.shifts, targets.shifts)) - - targets = targets.charges - targets = np.asarray(targets) - return np.isin(self.charges, targets) - - def __contains__(self, target: Union[int, Iterable, "BaseCharge"]) -> bool: - """ - Test each element of `BaseCharge` if it is in `targets`. Returns - an `np.ndarray` of `dtype=bool`. - Args: - targets: The test elements - Returns: - np.ndarray: An array of `bool` type holding the result of the comparison. - """ - - if isinstance(target, type(self)): - if not np.all(self.shifts == target.shifts): - raise ValueError( - "Cannot compare charges with different shifts {} and {}".format( - self.shifts, tparget.shifts)) - target = target.charges - target = np.asarray(target) - return target in self.charges - - def equals(self, target_charges: Iterable) -> np.ndarray: - """ - Find indices where `BaseCharge` equals `target_charges`. 
- `target_charges` has to be an array of the same lenghts - as `BaseCharge.shifts`, containing one integer per symmetry of - `BaseCharge` - Args: - target_charges: np.ndarray of integers encoding charges. - Returns: - np.ndarray: Boolean array with `True` where `BaseCharge` equals - `target_charges` and `False` everywhere else. - """ - if len(target_charges) != len(self.shifts): - raise ValueError("len(target_charges) = {} is different " - "from len(shifts) = {}".format( - len(target_charges), len(self.shifts))) - _target_charges = np.asarray(target_charges).astype(self.charges.dtype) - target = np.sum([ - np.left_shift(_target_charges[n], self.shifts[n]) - for n in range(len(self.shifts)) - ]) - return self.charges == target - - def __eq__(self, target: Union[int, Iterable]) -> np.ndarray: - """ - Find indices where `BaseCharge` equals `target_charges`. - `target` is a single integer encoding all symmetries of - `BaseCharge` - Args: - target: integerger encoding charges. - Returns: - np.ndarray: Boolean array with `True` where `BaseCharge.charges` equals - `target` and `False` everywhere else. - """ - if isinstance(target, type(self)): - return np.squeeze( - np.expand_dims(self.charges, 1) == np.expand_dims(target.charges, 0)) - return np.squeeze( - np.expand_dims(self.charges, 1) == np.expand_dims( - np.asarray(target), 0)) - - def concatenate(self, others: Union["BaseCharge", List["BaseCharge"]]): - """ - Concatenate `self.charges` with `others.charges`. - Args: - others: List of `BaseCharge` objects. - Returns: - BaseCharge: The concatenated charges. 
- """ - if isinstance(others, type(self)): - others = [others] - for o in others: - if not np.all(self.shifts == o.shifts): - raise ValueError( - "Cannot fuse charges with different shifts {} and {}".format( - self.shifts, o.shifts)) - - charges = np.concatenate( - [self.charges] + [o.charges for o in others], axis=0) - out = self.__new__(type(self)) - out.__init__([charges], self.shifts) - return out - - @property - def dtype(self): - return self.charges.dtype - - @property - def zero_charge(self): - obj = self.__new__(type(self)) - obj.__init__(charges=[np.asarray([self.dtype.type(0)])], shifts=self.shifts) - return obj - - def __iter__(self): - return iter(self.charges) - - def intersect(self, - other: "BaseCharge", - return_indices: Optional[bool] = False) -> "BaseCharge": - if not np.all(self.shifts == other.shifts): - raise ValueError( - "Cannot intersect charges with different shifts {} and {}".format( - self.shifts, other.shifts)) - if return_indices: - charges, comm1, comm2 = np.intersect1d( - self.charges, other.charges, return_indices=return_indices) - else: - charges = np.intersect1d(self.charges, other.charges) - - obj = self.__new__(type(self)) - obj.__init__(charges=[charges], shifts=self.shifts) - if return_indices: - return obj, comm1, comm2 - return obj - - -class U1Charge(BaseCharge): - """ - A simple charge class for a single U1 symmetry. - This class can store multiple U1 charges in a single - np.ndarray of integer dtype. Depending on the dtype of - the individual symmetries, this class can store: - * 8 np.int8 - * 4 np.int16 - * 2 np.int32 - * 1 np.int64 - or any suitable combination of dtypes, such that their - bite-sum remains below 64. 
- """ - - def __init__(self, - charges: List[np.ndarray], - shifts: Optional[np.ndarray] = None) -> None: - super().__init__(charges=charges, shifts=shifts) - - def __add__(self, other: "U1Charge") -> "U1Charge": - """ - Fuse the charges of `self` with charges of `other`, and - return a new `U1Charge` object holding the result. - Args: - other: A `U1Charge` object. - Returns: - U1Charge: The result of fusing `self` with `other`. - """ - if self.num_symmetries != other.num_symmetries: - raise ValueError( - "cannot fuse charges with different number of symmetries") - - if not np.all(self.shifts == other.shifts): - raise ValueError( - "Cannot fuse U1-charges with different shifts {} and {}".format( - self.shifts, other.shifts)) - if not isinstance(other, U1Charge): - raise TypeError( - "can only add objects of identical types, found {} and {} instead" - .format(type(self), type(other))) - fused = np.reshape(self.charges[:, None] + other.charges[None, :], - len(self.charges) * len(other.charges)) - return U1Charge(charges=[fused], shifts=self.shifts) - - def __sub__(self, other: "U1Charge") -> "U1Charge": - """ - Subtract the charges of `other` from charges of `self` and - return a new `U1Charge` object holding the result. - Args: - other: A `U1Charge` object. - Returns: - U1Charge: The result of fusing `self` with `other`. 
- """ - if self.num_symmetries != other.num_symmetries: - raise ValueError( - "cannot fuse charges with different number of symmetries") - - if not np.all(self.shifts == other.shifts): - raise ValueError( - "Cannot fuse U1-charges with different shifts {} and {}".format( - self.shifts, other.shifts)) - if not isinstance(other, U1Charge): - raise TypeError( - "can only subtract objects of identical types, found {} and {} instead" - .format(type(self), type(other))) - - fused = np.reshape(self.charges[:, None] - other.charges[None, :], - len(self.charges) * len(other.charges)) - return U1Charge(charges=[fused], shifts=self.shifts) - - def __matmul__(self, other: Union["U1Charge", "U1Charge"]) -> "U1Charge": - itemsize = np.sum(self._itemsizes + other._itemsizes) - if itemsize > 8: - raise TypeError("Number of bits required to store all charges " - "in a single int is larger than 64") - dtype = np.int16 #need at least np.int16 to store two charges - if itemsize > 2: - dtype = np.int32 - if itemsize > 4: - dtype = np.int64 - - charges = np.left_shift( - self.charges.astype(dtype), - 8 * np.sum(other._itemsizes)) + other.charges.astype(dtype) - - shifts = np.append(self.shifts + 8 * np.sum(other._itemsizes), other.shifts) - return U1Charge(charges=[charges], shifts=shifts) - - def __mul__(self, number: Union[bool, int]) -> "U1Charge": - if number not in (True, False, 0, 1, -1): - raise ValueError( - "can only multiply by `True`, `False`, `1` or `0`, found {}".format( - number)) - #outflowing charges - if number in (0, False, -1): - charges = self.dtype.type(-1) * self.charges - shifts = self.shifts - return U1Charge(charges=[charges], shifts=shifts) - #inflowing charges - if number in (1, True): - #Note: the returned U1Charge shares its data with self - return U1Charge(charges=[self.charges], shifts=self.shifts) - - # def __rmul__(self, number: Union[bool, int]) -> "U1Charge": - # raise - # print(number not in (True, False, 0, 1, -1)) - # if number not in (True, False, 
0, 1, -1): - # raise ValueError( - # "can only multiply by `True`, `False`, `1` or `0`, found {}".format( - # number)) - # return self.__mul__(number) - - @property - def dual_charges(self) -> np.ndarray: - #the dual of a U1 charge is its negative value - return self.charges * self.dtype.type(-1) - - -class Z2Charge(BaseCharge): - """ - A simple charge class for Z2 symmetries. - """ - - def __init__(self, - charges: List[np.ndarray], - shifts: Optional[np.ndarray] = None) -> None: - if isinstance(charges, np.ndarray): - charges = [charges] - - if shifts is None: - itemsizes = [c.dtype.itemsize for c in charges] - if not np.all([i == 1 for i in itemsizes]): - # martin: This error could come back at us, but I'll leave it for now - warnings.warn( - "Z2 charges can be entirely stored in " - "np.int8, but found dtypes = {}. Converting to np.int8.".format( - [c.dtype for c in charges])) - - charges = [c.astype(np.int8) for c in charges] - - super().__init__(charges, shifts) - - def __add__(self, other: "Z2Charge") -> "Z2Charge": - """ - Fuse the charges of `self` with charges of `other`, and - return a new `Z2Charge` object holding the result. - Args: - other: A `Z2Charge` object. - Returns: - Z2Charge: The result of fusing `self` with `other`. - """ - if not np.all(self.shifts == other.shifts): - raise ValueError( - "Cannot fuse Z2-charges with different shifts {} and {}".format( - self.shifts, other.shifts)) - if not isinstance(other, Z2Charge): - raise TypeError( - "can only add objects of identical types, found {} and {} instead" - .format(type(self), type(other))) - - fused = np.reshape( - np.bitwise_xor(self.charges[:, None], other.charges[None, :]), - len(self.charges) * len(other.charges)) - - return Z2Charge(charges=[fused], shifts=self.shifts) - - def __sub__(self, other: "Z2Charge") -> "Z2Charge": - """ - Subtract charges of `other` from charges of `self` and - return a new `Z2Charge` object holding the result. 
- Note that ofr Z2 charges, subtraction and addition are identical - Args: - other: A `Z2Charge` object. - Returns: - Z2Charge: The result of fusing `self` with `other`. - """ - if not np.all(self.shifts == other.shifts): - raise ValueError( - "Cannot fuse Z2-charges with different shifts {} and {}".format( - self.shifts, other.shifts)) - if not isinstance(other, Z2Charge): - raise TypeError( - "can only subtract objects of identical types, found {} and {} instead" - .format(type(self), type(other))) - - return self.__add__(other) - - def __matmul__(self, other: Union["Z2Charge", "Z2Charge"]) -> "Z2Charge": - itemsize = np.sum(self._itemsizes + other._itemsizes) - if itemsize > 8: - raise TypeError("Number of bits required to store all charges " - "in a single int is larger than 64") - dtype = np.int16 #need at least np.int16 to store two charges - if itemsize > 2: - dtype = np.int32 - if itemsize > 4: - dtype = np.int64 - - charges = np.left_shift( - self.charges.astype(dtype), - 8 * np.sum(other._itemsizes)) + other.charges.astype(dtype) - - shifts = np.append(self.shifts + 8 * np.sum(other._itemsizes), other.shifts) - return Z2Charge(charges=[charges], shifts=shifts) - - def __mul__(self, number: Union[bool, int]) -> "Z2Charge": - if number not in (True, False, 0, 1, -1): - raise ValueError( - "can only multiply by `True`, `False`, `1` or `0`, found {}".format( - number)) - #Z2 is self-dual - return Z2Charge(charges=[self.charges], shifts=self.shifts) - - def __rmul__(self, number: Union[bool, int]) -> "Z2Charge": - if number not in (True, False, 0, 1, -1): - raise ValueError( - "can only multiply by `True`, `False`, `1` or `0`, found {}".format( - number)) - - return self.__mul__(number) - - @property - def dual_charges(self): - #Z2 charges are self-dual - return self.charges - - def equals(self, target_charges: Iterable) -> np.ndarray: - if not np.all(np.isin(target_charges, np.asarray([0, 1]))): - raise ValueError("Z2-charges can only be 0 or 1, found charges 
{}".format( - np.unique(target_charges))) - return super().equals(target_charges) - - -class ChargeCollection: - """ - - """ - - class Iterator: - - def __init__(self, data: np.ndarray): - self.n = 0 - self.data = data - - def __next__(self): - if self.n < self.data.shape[0]: - result = self.data[self.n, :] - self.n += 1 - return tuple(result) #this makes a copy! - else: - raise StopIteration - - def __init__(self, - charges: List[BaseCharge], - shifts: Optional[List[np.ndarray]] = None, - stacked_charges: Optional[np.ndarray] = None) -> None: - if not isinstance(charges, list): - raise TypeError("only list allowed for argument `charges` " - "in BaseCharge.__init__(charges)") - if (shifts is not None) and (stacked_charges is None): - raise ValueError( - "Found `shifts == None` and `stacked_charges != None`." - "`shifts` and `stacked_charges` can only be passed together.") - if (shifts is None) and (stacked_charges is not None): - raise ValueError( - "Found `shifts != None` and `stacked_charges == None`." - "`shifts` and `stacked_charges` can only be passed together.") - self.charges = [] - if stacked_charges is None: - if not np.all([len(c) == len(charges[0]) for c in charges]): - raise ValueError("not all charges have the same length. " - "Got lengths = {}".format([len(c) for c in charges])) - for n in range(len(charges)): - if not isinstance(charges[n], BaseCharge): - raise TypeError( - "`ChargeCollection` can only be initialized " - "with a list of `BaseCharge`. 
Found {} instead".format( - [type(charges[n]) for n in range(len(charges))])) - - self._stacked_charges = np.stack([c.charges for c in charges], axis=1) - for n in range(len(charges)): - charge = charges[n].__new__(type(charges[n])) - charge.__init__(self._stacked_charges[:, n], shifts=charges[n].shifts) - self.charges.append(charge) - else: - if len(shifts) != stacked_charges.shape[1]: - raise ValueError("`len(shifts)` = {} is different from " - "`stacked_charges.shape[1]` = {}".format( - len(shifts), stacked_charges.shape[1])) - - if stacked_charges.shape[1] != len(charges): - raise ValueError("`len(charges) and shape[1] of `stacked_charges` " - "have to be the same.") - for n in range(len(charges)): - charge = charges[n].__new__(type(charges[n])) - charge.__init__(stacked_charges[:, n], shifts=shifts[n]) - self.charges.append(charge) - self._stacked_charges = stacked_charges - - @classmethod - def from_charge_types(cls, charge_types: Type, shifts: List[np.ndarray], - stacked_charges: np.ndarray): - if len(charge_types) != stacked_charges.shape[1]: - raise ValueError("`len(charge_types) and shape[1] of `stacked_charges` " - "have to be the same.") - if len(charge_types) != len(shifts): - raise ValueError( - "`len(charge_types) and `len(shifts)` have to be the same.") - charges = [ - charge_types[n].__new__(charge_types[n]) - for n in range(len(charge_types)) - ] - return cls(charges=charges, stacked_charges=stacked_charges, shifts=shifts) - - @property - def num_charges(self) -> int: - """ - Return the number of different charges in `ChargeCollection`. - """ - return self._stacked_charges.shape[1] - - def get_item(self, n: int) -> Tuple: - """ - Returns the `n-th` charge-tuple of ChargeCollection in a tuple. 
- """ - if isinstance(n, (np.integer, int)): - n = np.asarray([n]) - return tuple(self._stacked_charges[n, :].flat) - - def get_item_ndarray(self, n: Union[np.ndarray, int]) -> np.ndarray: - """ - Returns the `n-th` charge-tuples of ChargeCollection in an np.ndarray. - """ - if isinstance(n, (np.integer, int)): - n = np.asarray([n]) - return self._stacked_charges[n, :] - - def __getitem__(self, n: Union[np.ndarray, int]) -> "ChargeCollection": - - if isinstance(n, (np.integer, int)): - n = np.asarray([n]) - - array = self._stacked_charges[n, :] - - return self.from_charge_types( - charge_types=[type(c) for c in self.charges], - shifts=[c.shifts for c in self.charges], - stacked_charges=array) - # if self.num_charges == 1: - # array = np.expand_dims(array, 0) - - # if len(array.shape) == 2: - # if array.shape[1] == 1: - # array = np.squeeze(array, axis=1) - # if len(array.shape) == 0: - # array = np.asarray([array]) - - # charges = [] - # if np.prod(array.shape) == 0: - # for n in range(len(self.charges)): - # charge = self.charges[n].__new__(type(self.charges[n])) - # charge.__init__( - # charges=[np.empty(0, dtype=self.charges[n].dtype)], - # shifts=self.charges[n].shifts) - # charges.append(charge) - - # obj = self.__new__(type(self)) - # obj.__init__(charges=charges) - # return obj - - # if len(array.shape) == 1: - # array = np.expand_dims(array, 1) - - # for m in range(len(self.charges)): - # charge = self.charges[m].__new__(type(self.charges[m])) - # charge.__init__(charges=[array[:, m]], shifts=self.charges[m].shifts) - # charges.append(charge) - - # obj = self.__new__(type(self)) - # obj.__init__(charges=charges) - # return obj - - def __iter__(self): - return self.Iterator(self._stacked_charges) - - def __add__(self, other: "Charge") -> "Charge": - """ - Fuse `self` with `other`. - Args: - other: A `ChargeCollection` object. - Returns: - Charge: The result of fusing `self` with `other`. 
- """ - return ChargeCollection( - [c1 + c2 for c1, c2 in zip(self.charges, other.charges)]) - - def __sub__(self, other: "Charge") -> "Charge": - """ - Subtract `other` from `self`. - Args: - other: A `ChargeCollection` object. - Returns: - Charge: The result of fusing `self` with `other`. - """ - return ChargeCollection( - [c1 - c2 for c1, c2 in zip(self.charges, other.charges)]) - - def __repr__(self): - text = str(type(self)) + '\n ' - for n in range(len(self.charges)): - tmp = self.charges[n].__repr__() - tmp = tmp.replace('\n', '\n\t') - text += (tmp + '\n') - return text - - def __len__(self): - return len(self.charges[0]) - - def __mul__(self, number: Union[bool, int]) -> "Charge": - if number not in (True, False, 0, 1, -1): - raise ValueError( - "can only multiply by `True`, `False`, `1` or `0`, found {}".format( - number)) - return ChargeCollection(charges=[c * number for c in self.charges]) - - def __rmul__(self, number: Union[bool, int]) -> "Charge": - if number not in (True, False, 0, 1, -1): - raise ValueError( - "can only multiply by `True`, `False`, `1` or `0`, found {}".format( - number)) - - return self.__mul__(number) - - def isin(self, targets: Union[Iterable, "ChargeCollection"]): - if isinstance(targets, type(self)): - _targets = [t for t in targets] - return np.logical_or.reduce([ - np.logical_and.reduce([ - np.isin(self._stacked_charges[:, n], _targets[m][n]) - for n in range(len(_targets[m])) - ]) - for m in range(len(_targets)) - ]) - - def __contains__(self, targets: Union[Iterable, "ChargeCollection"]): - if isinstance(targets, type(self)): - if len(targets) > 1: - raise ValueError( - '__contains__ expects a single input, found {} inputs'.format( - len(targets))) - - _targets = targets.get_item(0) - return np.any( - np.logical_and.reduce([ - np.isin(self._stacked_charges[:, n], _targets[n]) - for n in range(len(_targets)) - ])) - - def unique( - self, - return_index=False, - return_inverse=False, - return_counts=False, - ) -> 
Tuple["ChargeCollection", np.ndarray, np.ndarray, np.ndarray]: - """ - Compute the unique charges in `BaseCharge`. - See np.unique for a more detailed explanation. This function - does the same but instead of a np.ndarray, it returns the unique - elements in a `BaseCharge` object. - Args: - return_index: If `True`, also return the indices of `self.charges` (along the specified axis, - if provided, or in the flattened array) that result in the unique array. - return_inverse: If `True`, also return the indices of the unique array (for the specified - axis, if provided) that can be used to reconstruct `self.charges`. - return_counts: If `True`, also return the number of times each unique item appears - in `self.charges`. - Returns: - BaseCharge: The sorted unique values. - np.ndarray: The indices of the first occurrences of the unique values in the - original array. Only provided if `return_index` is True. - np.ndarray: The indices to reconstruct the original array from the - unique array. Only provided if `return_inverse` is True. - np.ndarray: The number of times each of the unique values comes up in the - original array. Only provided if `return_counts` is True. 
- """ - - result = np.unique( - np.stack([self.charges[n].charges for n in range(len(self.charges))], - axis=1), - return_index=return_index, - return_inverse=return_inverse, - return_counts=return_counts, - axis=0) - charges = [] - if not (return_index or return_inverse or return_counts): - for n in range(len(self.charges)): - obj = self.charges[n].__new__(type(self.charges[n])) - obj.__init__(charges=[result[:, n]], shifts=self.charges[n].shifts) - charges.append(obj) - return ChargeCollection(charges) - for n in range(len(self.charges)): - obj = self.charges[n].__new__(type(self.charges[n])) - obj.__init__(charges=[result[0][:, n]], shifts=self.charges[n].shifts) - charges.append(obj) - out = ChargeCollection(charges) - return tuple([out] + [result[n] for n in range(1, len(result))]) - - def equals(self, target_charges: List[Union[List, np.ndarray]]) -> np.ndarray: - if len(target_charges) != len(self.charges): - raise ValueError( - "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" - .format(len(target_charges), len(self.charges))) - return np.logical_and.reduce([ - self.charges[n].equals(target_charges[n]) - for n in range(len(target_charges)) - ]) - - def __eq__(self, target_charges: Iterable): - raise NotImplementedError() - if isinstance(target_charges, type(self)): - target_charges = np.stack([c.charges for c in target_charges.charges], - axis=1) - target_charges = np.asarray(target_charges) - if target_charges.ndim == 1: - target_charges = np.expand_dims(target_charges, 0) - if target_charges.shape[1] != len(self.charges): - raise ValueError( - "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" - .format(len(target_charges), len(self.charges))) - return np.logical_and.reduce( - self._stacked_charges == target_charges, axis=1) - - def concatenate(self, - others: Union["ChargeCollection", List["ChargeCollection"]]): - """ - Concatenate `self.charges` with `others.charges`. 
- Args: - others: List of `BaseCharge` objects. - Returns: - BaseCharge: The concatenated charges. - """ - if isinstance(others, type(self)): - others = [others] - - charges = [ - self.charges[n].concatenate([o.charges[n] - for o in others]) - for n in range(len(self.charges)) - ] - return ChargeCollection(charges) - - @property - def dtype(self): - return np.result_type(*[c.dtype for c in self.charges]) - - @property - def zero_charge(self): - obj = self.__new__(type(self)) - obj.__init__(charges=[c.zero_charge for c in self.charges]) - return obj - - def intersect(self, - other: "ChargeCollection", - return_indices: Optional[bool] = False) -> "ChargeCollection": - if return_indices: - ua, ia = self.unique(return_index=True) - ub, ib = other.unique(return_index=True) - conc = ua.concatenate(ub) - uab, iab, cntab = conc.unique(return_index=True, return_counts=True) - intersection = uab[cntab == 2] - comm1 = np.argmax( - np.logical_and.reduce( - np.repeat( - np.expand_dims(self._stacked_charges, 2), - intersection._stacked_charges.shape[0], - axis=2) == np.expand_dims(intersection._stacked_charges.T, 0), - axis=1), - axis=0) - comm2 = np.argmax( - np.logical_and.reduce( - np.repeat( - np.expand_dims(other._stacked_charges, 2), - intersection._stacked_charges.shape[0], - axis=2) == np.expand_dims(intersection._stacked_charges.T, 0), - axis=1), - axis=0) - return intersection, comm1, comm2 - - else: - self_unique = self.unique() - other_unique = other.unique() - concatenated = self_unique.concatenate(other_unique) - tmp_unique, counts = concatenated.unique(return_counts=True) - return tmp_unique[counts == 2] - - -def fuse_charges( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]]) -> Union[BaseCharge, ChargeCollection]: - """ - Fuse all `charges` into a new charge. - Charges are fused from "right to left", - in accordance with row-major order. - - Args: - charges: A list of charges to be fused. 
- flows: A list of flows, one for each element in `charges`. - Returns: - ChargeCollection: The result of fusing `charges`. - """ - if len(charges) != len(flows): - raise ValueError( - "`charges` and `flows` are of unequal lengths {} != {}".format( - len(charges), len(flows))) - fused_charges = charges[0] * flows[0] - for n in range(1, len(charges)): - fused_charges = fused_charges + charges[n] * flows[n] - return fused_charges - - -def fuse_degeneracies(degen1: Union[List, np.ndarray], - degen2: Union[List, np.ndarray]) -> np.ndarray: - """ - Fuse degeneracies `degen1` and `degen2` of two leg-charges - by simple kronecker product. `degen1` and `degen2` typically belong to two - consecutive legs of `BlockSparseTensor`. - Given `degen1 = [1, 2, 3]` and `degen2 = [10, 100]`, this returns - `[10, 100, 20, 200, 30, 300]`. - When using row-major ordering of indices in `BlockSparseTensor`, - the position of `degen1` should be "to the left" of the position of `degen2`. - Args: - degen1: Iterable of integers - degen2: Iterable of integers - Returns: - np.ndarray: The result of fusing `dege1` with `degen2`. - """ - return np.reshape(degen1[:, None] * degen2[None, :], - len(degen1) * len(degen2)) diff --git a/tensornetwork/block_tensor/chargebkp2.py b/tensornetwork/block_tensor/chargebkp2.py deleted file mode 100644 index 9be4be39b..000000000 --- a/tensornetwork/block_tensor/chargebkp2.py +++ /dev/null @@ -1,778 +0,0 @@ -# Copyright 2019 The TensorNetwork Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -import numpy as np -from tensornetwork.network_components import Node, contract, contract_between -# pylint: disable=line-too-long -from tensornetwork.backends import backend_factory -import copy -import warnings -from typing import List, Union, Any, Optional, Tuple, Text, Iterable, Type - - -class BaseCharge: - - def __init__(self, - charges: np.ndarray, - charge_labels: Optional[np.ndarray] = None) -> None: - if charges.dtype is not np.int16: - raise TypeError("`charges` have to be of dtype `np.int16`") - if charge_labels.dtype is not np.int16: - raise TypeError("`charge_labels` have to be of dtype `np.int16`") - - if charge_labels is None: - self.unique_charges, charge_labels = np.unique( - charges, return_inverse=True) - self.charge_labels = charge_labels.astype(np.uint16) - - else: - self.unique_charges = charges - self.charge_labels = charge_labels.astype(np.uint16) - - def __add__(self, other: "BaseCharge") -> "BaseCharge": - # fuse the unique charges from each index, then compute new unique charges - comb_qnums = self.fuse(self.unique_charges, other.unique_charges) - [unique_charges, new_labels] = np.unique(comb_qnums, return_inverse=True) - new_labels = new_labels.reshape( - len(self.unique_charges), len(other.unique_charges)).astype(np.uint16) - - # find new labels using broadcasting (could use np.tile but less efficient) - charge_labels = new_labels[( - self.charge_labels[:, None] + np.zeros([1, len(other)], dtype=np.uint16) - ).ravel(), (other.charge_labels[None, :] + - np.zeros([len(self), 1], dtype=np.uint16)).ravel()] - obj = self.__new__(type(self)) - obj.__init__(unique_charges, charge_labels) - return obj - - def __len__(self): - return len(self.charge_labels) - - @property - def charges(self) -> np.ndarray: - return 
self.unique_charges[self.charge_labels] - - @property - def dtype(self): - return self.unique_charges.dtype - - def __repr__(self): - return str(type(self)) + '\n' + 'charges: ' + self.charges.__repr__() + '\n' - - def unique(self, - return_index=False, - return_inverse=False, - return_counts=False - ) -> Tuple["BaseCharge", np.ndarray, np.ndarray, np.ndarray]: - """ - Compute the unique charges in `BaseCharge`. - See np.unique for a more detailed explanation. This function - does the same but instead of a np.ndarray, it returns the unique - elements in a `BaseCharge` object. - Args: - return_index: If `True`, also return the indices of `self.charges` (along the specified axis, - if provided, or in the flattened array) that result in the unique array. - return_inverse: If `True`, also return the indices of the unique array (for the specified - axis, if provided) that can be used to reconstruct `self.charges`. - return_counts: If `True`, also return the number of times each unique item appears - in `self.charges`. - Returns: - BaseCharge: The sorted unique values. - np.ndarray: The indices of the first occurrences of the unique values in the - original array. Only provided if `return_index` is True. - np.ndarray: The indices to reconstruct the original array from the - unique array. Only provided if `return_inverse` is True. - np.ndarray: The number of times each of the unique values comes up in the - original array. Only provided if `return_counts` is True. 
- """ - obj = self.__new__(type(self)) - obj.__init__( - self.unique_charges, - charge_labels=np.arange(len(self.unique_charges), dtype=np.uint16)) - - out = [obj] - if return_index: - _, index = np.unique(self.charge_labels, return_index=True) - out.append(index) - if return_inverse: - out.append(self.charge_labels) - if return_counts: - _, cnts = np.unique(self.charge_labels, return_counts=True) - out.append(cnts) - if len(out) == 1: - return out[0] - if len(out) == 2: - return out[0], out[1] - if len(out) == 3: - return out[0], out[1], out[2] - - def isin(self, targets: Union[int, Iterable, "BaseCharge"]) -> np.ndarray: - """ - Test each element of `BaseCharge` if it is in `targets`. Returns - an `np.ndarray` of `dtype=bool`. - Args: - targets: The test elements - Returns: - np.ndarray: An array of `bool` type holding the result of the comparison. - """ - if isinstance(targets, type(self)): - targets = targets.unique_charges - targets = np.asarray(targets) - common, label_to_unique, label_to_targets = np.intersect1d( - self.unique_charges, targets, return_indices=True) - if len(common) == 0: - return np.full(len(self.charge_labels), fill_value=False, dtype=np.bool) - return np.isin(self.charge_labels, label_to_unique) - - def __contains__(self, target: Union[int, Iterable, "BaseCharge"]) -> bool: - """ - """ - - if isinstance(target, type(self)): - target = target.unique_charges - target = np.asarray(target) - return target in self.unique_charges - - def __eq__(self, target: Union[int, Iterable]) -> np.ndarray: - """ - Find indices where `BaseCharge` equals `target_charges`. - `target` is a single integer encoding all symmetries of - `BaseCharge` - Args: - target: integerger encoding charges. - Returns: - np.ndarray: Boolean array with `True` where `BaseCharge.charges` equals - `target` and `False` everywhere else. 
- """ - if isinstance(target, type(self)): - target = target.charges - elif isinstance(target, (np.integer, int)): - target = np.asarray([target]) - target = np.asarray(target) - tmp = np.full(len(target), fill_value=-1, dtype=np.int16) - - _, label_to_unique, label_to_target = np.intersect1d( - self.unique_charges, target, return_indices=True) - tmp[label_to_target] = label_to_unique - return np.squeeze( - np.expand_dims(self.charge_labels, 1) == np.expand_dims(tmp, 0)) - - @property - def zero_charge(self): - obj = self.__new__(type(self)) - obj.__init__( - np.asarray([self.dtype.type(0)]), np.asarray([0], dtype=np.uint16)) - return obj - - def __iter__(self): - return iter(self.charges) - - def intersect(self, - other: "BaseCharge", - return_indices: Optional[bool] = False) -> "BaseCharge": - if return_indices: - charges, comm1, comm2 = np.intersect1d( - self.charges, other.charges, return_indices=return_indices) - else: - charges = np.intersect1d(self.charges, other.charges) - - obj = self.__new__(type(self)) - obj.__init__(charges, np.arange(len(charges), dtype=np.uint16)) - if return_indices: - return obj, comm1.astype(np.uint16), comm2.astype(np.uint16) - return obj - - def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": - """ - Return the charge-element at position `n`, wrapped into a `BaseCharge` - object. - Args: - n: An integer or `np.ndarray`. - Returns: - BaseCharge: The charges at `n`. - """ - - if isinstance(n, (np.integer, int)): - n = np.asarray([n]) - obj = self.__new__(type(self)) - obj.__init__(self.unique_charges, self.charge_labels[n]) - return obj - - def get_item(self, n: Union[np.ndarray, int]) -> np.ndarray: - """ - Return the charge-element at position `n`. - Args: - n: An integer or `np.ndarray`. - Returns: - np.ndarray: The charges at `n`. 
- """ - return self.charges[n] - - def __mul__(self, number: Union[bool, int]) -> "U1Charge": - if number not in (True, False, 0, 1, -1): - raise ValueError( - "can only multiply by `True`, `False`, `1` or `0`, found {}".format( - number)) - #outflowing charges - if number in (0, False, -1): - return U1Charge( - self.dual_charges(self.unique_charges), self.charge_labels) - #inflowing charges - if number in (1, True): - return U1Charge(self.unique_charges, self.charge_labels) - - @property - def dual(self, charges): - return self.dual_charges - - -class U1Charge(BaseCharge): - - def __init__(self, - charges: np.ndarray, - charge_labels: Optional[np.ndarray] = None) -> None: - super().__init__(charges, charge_labels) - - @staticmethod - def fuse(charge1, charge2): - return np.add.outer(charge1, charge2).ravel() - - @staticmethod - def dual_charges(charges): - return charges * charges.dtype.type(-1) - - -class Z2Charge(BaseCharge): - """ - A simple charge class for Z2 symmetries. - """ - - def __init__(self, - charges: List[np.ndarray], - shifts: Optional[np.ndarray] = None) -> None: - if isinstance(charges, np.ndarray): - charges = [charges] - - if shifts is None: - itemsizes = [c.dtype.itemsize for c in charges] - if not np.all([i == 1 for i in itemsizes]): - # martin: This error could come back at us, but I'll leave it for now - warnings.warn( - "Z2 charges can be entirely stored in " - "np.int8, but found dtypes = {}. Converting to np.int8.".format( - [c.dtype for c in charges])) - - charges = [c.astype(np.int8) for c in charges] - - super().__init__(charges, shifts) - - def __add__(self, other: "Z2Charge") -> "Z2Charge": - """ - Fuse the charges of `self` with charges of `other`, and - return a new `Z2Charge` object holding the result. - Args: - other: A `Z2Charge` object. - Returns: - Z2Charge: The result of fusing `self` with `other`. 
- """ - if not np.all(self.shifts == other.shifts): - raise ValueError( - "Cannot fuse Z2-charges with different shifts {} and {}".format( - self.shifts, other.shifts)) - if not isinstance(other, Z2Charge): - raise TypeError( - "can only add objects of identical types, found {} and {} instead" - .format(type(self), type(other))) - - fused = np.reshape( - np.bitwise_xor(self.charges[:, None], other.charges[None, :]), - len(self.charges) * len(other.charges)) - - return Z2Charge(charges=[fused], shifts=self.shifts) - - def __sub__(self, other: "Z2Charge") -> "Z2Charge": - """ - Subtract charges of `other` from charges of `self` and - return a new `Z2Charge` object holding the result. - Note that ofr Z2 charges, subtraction and addition are identical - Args: - other: A `Z2Charge` object. - Returns: - Z2Charge: The result of fusing `self` with `other`. - """ - if not np.all(self.shifts == other.shifts): - raise ValueError( - "Cannot fuse Z2-charges with different shifts {} and {}".format( - self.shifts, other.shifts)) - if not isinstance(other, Z2Charge): - raise TypeError( - "can only subtract objects of identical types, found {} and {} instead" - .format(type(self), type(other))) - - return self.__add__(other) - - def __matmul__(self, other: Union["Z2Charge", "Z2Charge"]) -> "Z2Charge": - itemsize = np.sum(self._itemsizes + other._itemsizes) - if itemsize > 8: - raise TypeError("Number of bits required to store all charges " - "in a single int is larger than 64") - dtype = np.int16 #need at least np.int16 to store two charges - if itemsize > 2: - dtype = np.int32 - if itemsize > 4: - dtype = np.int64 - - charges = np.left_shift( - self.charges.astype(dtype), - 8 * np.sum(other._itemsizes)) + other.charges.astype(dtype) - - shifts = np.append(self.shifts + 8 * np.sum(other._itemsizes), other.shifts) - return Z2Charge(charges=[charges], shifts=shifts) - - def __mul__(self, number: Union[bool, int]) -> "Z2Charge": - if number not in (True, False, 0, 1, -1): - raise 
ValueError( - "can only multiply by `True`, `False`, `1` or `0`, found {}".format( - number)) - #Z2 is self-dual - return Z2Charge(charges=[self.charges], shifts=self.shifts) - - def __rmul__(self, number: Union[bool, int]) -> "Z2Charge": - if number not in (True, False, 0, 1, -1): - raise ValueError( - "can only multiply by `True`, `False`, `1` or `0`, found {}".format( - number)) - - return self.__mul__(number) - - @property - def dual_charges(self): - #Z2 charges are self-dual - return self.charges - - def equals(self, target_charges: Iterable) -> np.ndarray: - if not np.all(np.isin(target_charges, np.asarray([0, 1]))): - raise ValueError("Z2-charges can only be 0 or 1, found charges {}".format( - np.unique(target_charges))) - return super().equals(target_charges) - - -class ChargeCollection: - """ - - """ - - class Iterator: - - def __init__(self, data: np.ndarray): - self.n = 0 - self.data = data - - def __next__(self): - if self.n < self.data.shape[0]: - result = self.data[self.n, :] - self.n += 1 - return tuple(result) #this makes a copy! - else: - raise StopIteration - - def __init__(self, - charges: List[BaseCharge], - shifts: Optional[List[np.ndarray]] = None, - stacked_charges: Optional[np.ndarray] = None) -> None: - if not isinstance(charges, list): - raise TypeError("only list allowed for argument `charges` " - "in BaseCharge.__init__(charges)") - if (shifts is not None) and (stacked_charges is None): - raise ValueError( - "Found `shifts == None` and `stacked_charges != None`." - "`shifts` and `stacked_charges` can only be passed together.") - if (shifts is None) and (stacked_charges is not None): - raise ValueError( - "Found `shifts != None` and `stacked_charges == None`." - "`shifts` and `stacked_charges` can only be passed together.") - self.charges = [] - if stacked_charges is None: - if not np.all([len(c) == len(charges[0]) for c in charges]): - raise ValueError("not all charges have the same length. 
" - "Got lengths = {}".format([len(c) for c in charges])) - for n in range(len(charges)): - if not isinstance(charges[n], BaseCharge): - raise TypeError( - "`ChargeCollection` can only be initialized " - "with a list of `BaseCharge`. Found {} instead".format( - [type(charges[n]) for n in range(len(charges))])) - - self._stacked_charges = np.stack([c.charges for c in charges], axis=1) - for n in range(len(charges)): - charge = charges[n].__new__(type(charges[n])) - charge.__init__(self._stacked_charges[:, n], shifts=charges[n].shifts) - self.charges.append(charge) - else: - if len(shifts) != stacked_charges.shape[1]: - raise ValueError("`len(shifts)` = {} is different from " - "`stacked_charges.shape[1]` = {}".format( - len(shifts), stacked_charges.shape[1])) - - if stacked_charges.shape[1] != len(charges): - raise ValueError("`len(charges) and shape[1] of `stacked_charges` " - "have to be the same.") - for n in range(len(charges)): - charge = charges[n].__new__(type(charges[n])) - charge.__init__(stacked_charges[:, n], shifts=shifts[n]) - self.charges.append(charge) - self._stacked_charges = stacked_charges - - @classmethod - def from_charge_types(cls, charge_types: Type, shifts: List[np.ndarray], - stacked_charges: np.ndarray): - if len(charge_types) != stacked_charges.shape[1]: - raise ValueError("`len(charge_types) and shape[1] of `stacked_charges` " - "have to be the same.") - if len(charge_types) != len(shifts): - raise ValueError( - "`len(charge_types) and `len(shifts)` have to be the same.") - charges = [ - charge_types[n].__new__(charge_types[n]) - for n in range(len(charge_types)) - ] - return cls(charges=charges, stacked_charges=stacked_charges, shifts=shifts) - - @property - def num_charges(self) -> int: - """ - Return the number of different charges in `ChargeCollection`. - """ - return self._stacked_charges.shape[1] - - def get_item(self, n: int) -> Tuple: - """ - Returns the `n-th` charge-tuple of ChargeCollection in a tuple. 
- """ - if isinstance(n, (np.integer, int)): - n = np.asarray([n]) - return tuple(self._stacked_charges[n, :].flat) - - def get_item_ndarray(self, n: Union[np.ndarray, int]) -> np.ndarray: - """ - Returns the `n-th` charge-tuples of ChargeCollection in an np.ndarray. - """ - if isinstance(n, (np.integer, int)): - n = np.asarray([n]) - return self._stacked_charges[n, :] - - def __getitem__(self, n: Union[np.ndarray, int]) -> "ChargeCollection": - - if isinstance(n, (np.integer, int)): - n = np.asarray([n]) - - array = self._stacked_charges[n, :] - - return self.from_charge_types( - charge_types=[type(c) for c in self.charges], - shifts=[c.shifts for c in self.charges], - stacked_charges=array) - # if self.num_charges == 1: - # array = np.expand_dims(array, 0) - - # if len(array.shape) == 2: - # if array.shape[1] == 1: - # array = np.squeeze(array, axis=1) - # if len(array.shape) == 0: - # array = np.asarray([array]) - - # charges = [] - # if np.prod(array.shape) == 0: - # for n in range(len(self.charges)): - # charge = self.charges[n].__new__(type(self.charges[n])) - # charge.__init__( - # charges=[np.empty(0, dtype=self.charges[n].dtype)], - # shifts=self.charges[n].shifts) - # charges.append(charge) - - # obj = self.__new__(type(self)) - # obj.__init__(charges=charges) - # return obj - - # if len(array.shape) == 1: - # array = np.expand_dims(array, 1) - - # for m in range(len(self.charges)): - # charge = self.charges[m].__new__(type(self.charges[m])) - # charge.__init__(charges=[array[:, m]], shifts=self.charges[m].shifts) - # charges.append(charge) - - # obj = self.__new__(type(self)) - # obj.__init__(charges=charges) - # return obj - - def __iter__(self): - return self.Iterator(self._stacked_charges) - - def __add__(self, other: "Charge") -> "Charge": - """ - Fuse `self` with `other`. - Args: - other: A `ChargeCollection` object. - Returns: - Charge: The result of fusing `self` with `other`. 
- """ - return ChargeCollection( - [c1 + c2 for c1, c2 in zip(self.charges, other.charges)]) - - def __sub__(self, other: "Charge") -> "Charge": - """ - Subtract `other` from `self`. - Args: - other: A `ChargeCollection` object. - Returns: - Charge: The result of fusing `self` with `other`. - """ - return ChargeCollection( - [c1 - c2 for c1, c2 in zip(self.charges, other.charges)]) - - def __repr__(self): - text = str(type(self)) + '\n ' - for n in range(len(self.charges)): - tmp = self.charges[n].__repr__() - tmp = tmp.replace('\n', '\n\t') - text += (tmp + '\n') - return text - - def __len__(self): - return len(self.charges[0]) - - def __mul__(self, number: Union[bool, int]) -> "Charge": - if number not in (True, False, 0, 1, -1): - raise ValueError( - "can only multiply by `True`, `False`, `1` or `0`, found {}".format( - number)) - return ChargeCollection(charges=[c * number for c in self.charges]) - - def __rmul__(self, number: Union[bool, int]) -> "Charge": - if number not in (True, False, 0, 1, -1): - raise ValueError( - "can only multiply by `True`, `False`, `1` or `0`, found {}".format( - number)) - - return self.__mul__(number) - - def isin(self, targets: Union[Iterable, "ChargeCollection"]): - if isinstance(targets, type(self)): - _targets = [t for t in targets] - return np.logical_or.reduce([ - np.logical_and.reduce([ - np.isin(self._stacked_charges[:, n], _targets[m][n]) - for n in range(len(_targets[m])) - ]) - for m in range(len(_targets)) - ]) - - def __contains__(self, targets: Union[Iterable, "ChargeCollection"]): - if isinstance(targets, type(self)): - if len(targets) > 1: - raise ValueError( - '__contains__ expects a single input, found {} inputs'.format( - len(targets))) - - _targets = targets.get_item(0) - return np.any( - np.logical_and.reduce([ - np.isin(self._stacked_charges[:, n], _targets[n]) - for n in range(len(_targets)) - ])) - - def unique( - self, - return_index=False, - return_inverse=False, - return_counts=False, - ) -> 
Tuple["ChargeCollection", np.ndarray, np.ndarray, np.ndarray]: - """ - Compute the unique charges in `BaseCharge`. - See np.unique for a more detailed explanation. This function - does the same but instead of a np.ndarray, it returns the unique - elements in a `BaseCharge` object. - Args: - return_index: If `True`, also return the indices of `self.charges` (along the specified axis, - if provided, or in the flattened array) that result in the unique array. - return_inverse: If `True`, also return the indices of the unique array (for the specified - axis, if provided) that can be used to reconstruct `self.charges`. - return_counts: If `True`, also return the number of times each unique item appears - in `self.charges`. - Returns: - BaseCharge: The sorted unique values. - np.ndarray: The indices of the first occurrences of the unique values in the - original array. Only provided if `return_index` is True. - np.ndarray: The indices to reconstruct the original array from the - unique array. Only provided if `return_inverse` is True. - np.ndarray: The number of times each of the unique values comes up in the - original array. Only provided if `return_counts` is True. 
- """ - - result = np.unique( - np.stack([self.charges[n].charges for n in range(len(self.charges))], - axis=1), - return_index=return_index, - return_inverse=return_inverse, - return_counts=return_counts, - axis=0) - charges = [] - if not (return_index or return_inverse or return_counts): - for n in range(len(self.charges)): - obj = self.charges[n].__new__(type(self.charges[n])) - obj.__init__(charges=[result[:, n]], shifts=self.charges[n].shifts) - charges.append(obj) - return ChargeCollection(charges) - for n in range(len(self.charges)): - obj = self.charges[n].__new__(type(self.charges[n])) - obj.__init__(charges=[result[0][:, n]], shifts=self.charges[n].shifts) - charges.append(obj) - out = ChargeCollection(charges) - return tuple([out] + [result[n] for n in range(1, len(result))]) - - def equals(self, target_charges: List[Union[List, np.ndarray]]) -> np.ndarray: - if len(target_charges) != len(self.charges): - raise ValueError( - "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" - .format(len(target_charges), len(self.charges))) - return np.logical_and.reduce([ - self.charges[n].equals(target_charges[n]) - for n in range(len(target_charges)) - ]) - - def __eq__(self, target_charges: Iterable): - raise NotImplementedError() - if isinstance(target_charges, type(self)): - target_charges = np.stack([c.charges for c in target_charges.charges], - axis=1) - target_charges = np.asarray(target_charges) - if target_charges.ndim == 1: - target_charges = np.expand_dims(target_charges, 0) - if target_charges.shape[1] != len(self.charges): - raise ValueError( - "len(target_charges) ={} is different from len(ChargeCollection.charges) = {}" - .format(len(target_charges), len(self.charges))) - return np.logical_and.reduce( - self._stacked_charges == target_charges, axis=1) - - def concatenate(self, - others: Union["ChargeCollection", List["ChargeCollection"]]): - """ - Concatenate `self.charges` with `others.charges`. 
- Args: - others: List of `BaseCharge` objects. - Returns: - BaseCharge: The concatenated charges. - """ - if isinstance(others, type(self)): - others = [others] - - charges = [ - self.charges[n].concatenate([o.charges[n] - for o in others]) - for n in range(len(self.charges)) - ] - return ChargeCollection(charges) - - @property - def dtype(self): - return np.result_type(*[c.dtype for c in self.charges]) - - @property - def zero_charge(self): - obj = self.__new__(type(self)) - obj.__init__(charges=[c.zero_charge for c in self.charges]) - return obj - - def intersect(self, - other: "ChargeCollection", - return_indices: Optional[bool] = False) -> "ChargeCollection": - if return_indices: - ua, ia = self.unique(return_index=True) - ub, ib = other.unique(return_index=True) - conc = ua.concatenate(ub) - uab, iab, cntab = conc.unique(return_index=True, return_counts=True) - intersection = uab[cntab == 2] - comm1 = np.argmax( - np.logical_and.reduce( - np.repeat( - np.expand_dims(self._stacked_charges, 2), - intersection._stacked_charges.shape[0], - axis=2) == np.expand_dims(intersection._stacked_charges.T, 0), - axis=1), - axis=0) - comm2 = np.argmax( - np.logical_and.reduce( - np.repeat( - np.expand_dims(other._stacked_charges, 2), - intersection._stacked_charges.shape[0], - axis=2) == np.expand_dims(intersection._stacked_charges.T, 0), - axis=1), - axis=0) - return intersection, comm1, comm2 - - else: - self_unique = self.unique() - other_unique = other.unique() - concatenated = self_unique.concatenate(other_unique) - tmp_unique, counts = concatenated.unique(return_counts=True) - return tmp_unique[counts == 2] - - -def fuse_charges( - charges: List[Union[BaseCharge, ChargeCollection]], - flows: List[Union[bool, int]]) -> Union[BaseCharge, ChargeCollection]: - """ - Fuse all `charges` into a new charge. - Charges are fused from "right to left", - in accordance with row-major order. - - Args: - charges: A list of charges to be fused. 
- flows: A list of flows, one for each element in `charges`. - Returns: - ChargeCollection: The result of fusing `charges`. - """ - if len(charges) != len(flows): - raise ValueError( - "`charges` and `flows` are of unequal lengths {} != {}".format( - len(charges), len(flows))) - fused_charges = charges[0] * flows[0] - for n in range(1, len(charges)): - fused_charges = fused_charges + charges[n] * flows[n] - return fused_charges - - -def fuse_degeneracies(degen1: Union[List, np.ndarray], - degen2: Union[List, np.ndarray]) -> np.ndarray: - """ - Fuse degeneracies `degen1` and `degen2` of two leg-charges - by simple kronecker product. `degen1` and `degen2` typically belong to two - consecutive legs of `BlockSparseTensor`. - Given `degen1 = [1, 2, 3]` and `degen2 = [10, 100]`, this returns - `[10, 100, 20, 200, 30, 300]`. - When using row-major ordering of indices in `BlockSparseTensor`, - the position of `degen1` should be "to the left" of the position of `degen2`. - Args: - degen1: Iterable of integers - degen2: Iterable of integers - Returns: - np.ndarray: The result of fusing `dege1` with `degen2`. - """ - return np.reshape(degen1[:, None] * degen2[None, :], - len(degen1) * len(degen2)) diff --git a/tensornetwork/block_tensor/index_old.py b/tensornetwork/block_tensor/index_old.py deleted file mode 100644 index 378760e1c..000000000 --- a/tensornetwork/block_tensor/index_old.py +++ /dev/null @@ -1,294 +0,0 @@ -# Copyright 2019 The TensorNetwork Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -import numpy as np -from tensornetwork.network_components import Node, contract, contract_between -# pylint: disable=line-too-long -from tensornetwork.backends import backend_factory -import copy -from typing import List, Union, Any, Optional, Tuple, Text - - -def fuse_charge_pair(q1: Union[List, np.ndarray], flow1: int, - q2: Union[List, np.ndarray], flow2: int) -> np.ndarray: - """ - Fuse charges `q1` with charges `q2` by simple addition (valid - for U(1) charges). `q1` and `q2` typically belong to two consecutive - legs of `BlockSparseTensor`. - Given `q1 = [0,1,2]` and `q2 = [10,100]`, this returns - `[10, 100, 11, 101, 12, 102]`. - When using row-major ordering of indices in `BlockSparseTensor`, - the position of q1 should be "to the left" of the position of q2. - - Args: - q1: Iterable of integers - flow1: Flow direction of charge `q1`. - q2: Iterable of integers - flow2: Flow direction of charge `q2`. - Returns: - np.ndarray: The result of fusing `q1` with `q2`. - """ - return np.reshape( - flow1 * np.asarray(q1)[:, None] + flow2 * np.asarray(q2)[None, :], - len(q1) * len(q2)) - - -def fuse_charges(charges: List[Union[List, np.ndarray]], - flows: List[int]) -> np.ndarray: - """ - Fuse all `charges` by simple addition (valid - for U(1) charges). Charges are fused from "right to left", - in accordance with row-major order (see `fuse_charges_pair`). - - Args: - chargs: A list of charges to be fused. - flows: A list of flows, one for each element in `charges`. - Returns: - np.ndarray: The result of fusing `charges`. 
- """ - if len(charges) == 1: - #nothing to do - return flows[0] * charges[0] - fused_charges = charges[0] * flows[0] - for n in range(1, len(charges)): - fused_charges = fuse_charge_pair( - q1=fused_charges, flow1=1, q2=charges[n], flow2=flows[n]) - return fused_charges - - -def fuse_degeneracies(degen1: Union[List, np.ndarray], - degen2: Union[List, np.ndarray]) -> np.ndarray: - """ - Fuse degeneracies `degen1` and `degen2` of two leg-charges - by simple kronecker product. `degen1` and `degen2` typically belong to two - consecutive legs of `BlockSparseTensor`. - Given `degen1 = [1, 2, 3]` and `degen2 = [10, 100]`, this returns - `[10, 100, 20, 200, 30, 300]`. - When using row-major ordering of indices in `BlockSparseTensor`, - the position of `degen1` should be "to the left" of the position of `degen2`. - Args: - degen1: Iterable of integers - degen2: Iterable of integers - Returns: - np.ndarray: The result of fusing `dege1` with `degen2`. - """ - return np.reshape(degen1[:, None] * degen2[None, :], - len(degen1) * len(degen2)) - - -def unfuse(fused_indices: np.ndarray, len_left: int, - len_right: int) -> Tuple[np.ndarray, np.ndarray]: - """ - Given an np.ndarray `fused_indices` of integers denoting - index-positions of elements within a 1d array, `unfuse` - obtains the index-positions of the elements in the left and - right np.ndarrays `left`, `right` which, upon fusion, - are placed at the index-positions given by - `fused_indices` in the fused np.ndarray. - An example will help to illuminate this: - Given np.ndarrays `left`, `right` and the result - of their fusion (`fused`): - - ``` - left = [0,1,0,2] - right = [-1,3,-2] - fused = fuse_charges([left, right], flows=[1,1]) - print(fused) #[-1 3 -2 0 4 -1 -1 3 -2 1 5 0] - ``` - - we want to find which elements in `left` and `right` - fuse to a value of 0. 
In the above case, there are two - 0 in `fused`: one is obtained from fusing `left[1]` and - `right[0]`, the second one from fusing `left[3]` and `right[2]` - `unfuse` returns the index-positions of these values within - `left` and `right`, that is - - ``` - left_index_values, right_index_values = unfuse(np.nonzero(fused==0)[0], len(left), len(right)) - print(left_index_values) # [1,3] - print(right_index_values) # [0,2] - ``` - - Args: - fused_indices: A 1d np.ndarray of integers. - len_left: The length of the left np.ndarray. - len_right: The length of the right np.ndarray. - Returns: - (np.ndarry, np.ndarray) - """ - right = np.mod(fused_indices, len_right) - left = np.floor_divide(fused_indices - right, len_right) - return left, right - - -class Index: - """ - An index class to store indices of a symmetric tensor. - An index keeps track of all its childs by storing references - to them (i.e. it is a binary tree). - """ - - def __init__(self, - charges: Union[List, np.ndarray], - flow: int, - name: Optional[Text] = "index", - left_child: Optional["Index"] = None, - right_child: Optional["Index"] = None): - self._charges = np.asarray(charges) - self.flow = flow - self.left_child = left_child - self.right_child = right_child - self._name = name - - def __repr__(self): - return str(self.dimension) - - @property - def is_leave(self): - return (self.left_child is None) and (self.right_child is None) - - @property - def dimension(self): - return np.prod([len(i.charges) for i in self.get_elementary_indices()]) - - def _copy_helper(self, index: "Index", copied_index: "Index") -> None: - """ - Helper function for copy - """ - if index.left_child != None: - left_copy = Index( - charges=copy.copy(index.left_child.charges), - flow=copy.copy(index.left_child.flow), - name=copy.copy(index.left_child.name)) - - copied_index.left_child = left_copy - self._copy_helper(index.left_child, left_copy) - if index.right_child != None: - right_copy = Index( - 
charges=copy.copy(index.right_child.charges), - flow=copy.copy(index.right_child.flow), - name=copy.copy(index.right_child.name)) - copied_index.right_child = right_copy - self._copy_helper(index.right_child, right_copy) - - def copy(self): - """ - Returns: - Index: A deep copy of `Index`. Note that all children of - `Index` are copied as well. - """ - index_copy = Index( - charges=self._charges.copy(), flow=copy.copy(self.flow), name=self.name) - - self._copy_helper(self, index_copy) - return index_copy - - def _leave_helper(self, index: "Index", leave_list: List) -> None: - if index.left_child: - self._leave_helper(index.left_child, leave_list) - if index.right_child: - self._leave_helper(index.right_child, leave_list) - if (index.left_child is None) and (index.right_child is None): - leave_list.append(index) - - def get_elementary_indices(self) -> List: - """ - Returns: - List: A list containing the elementary indices (the leaves) - of `Index`. - """ - leave_list = [] - self._leave_helper(self, leave_list) - return leave_list - - def __mul__(self, index: "Index") -> "Index": - """ - Merge `index` and self into a single larger index. - The flow of the resulting index is set to 1. - Flows of `self` and `index` are multiplied into - the charges upon fusing.n - """ - return fuse_index_pair(self, index) - - @property - def charges(self): - if self.is_leave: - return self._charges - fused_charges = fuse_charge_pair( - self.left_child.charges, self.left_child.flow, self.right_child.charges, - self.right_child.flow) - - return fused_charges - - @property - def name(self): - if self._name: - return self._name - if self.is_leave: - return self.name - return self.left_child.name + ' & ' + self.right_child.name - - -def fuse_index_pair(left_index: Index, - right_index: Index, - flow: Optional[int] = 1) -> Index: - """ - Fuse two consecutive indices (legs) of a symmetric tensor. - Args: - left_index: A tensor Index. - right_index: A tensor Index. 
- flow: An optional flow of the resulting `Index` object. - Returns: - Index: The result of fusing `index1` and `index2`. - """ - #Fuse the charges of the two indices - if left_index is right_index: - raise ValueError( - "index1 and index2 are the same object. Can only fuse distinct objects") - - return Index( - charges=None, flow=flow, left_child=left_index, right_child=right_index) - - -def fuse_indices(indices: List[Index], flow: Optional[int] = 1) -> Index: - """ - Fuse a list of indices (legs) of a symmetric tensor. - Args: - indices: A list of tensor Index objects - flow: An optional flow of the resulting `Index` object. - Returns: - Index: The result of fusing `indices`. - """ - - index = indices[0] - for n in range(1, len(indices)): - index = fuse_index_pair(index, indices[n], flow=flow) - return index - - -def split_index(index: Index) -> Tuple[Index, Index]: - """ - Split an index (leg) of a symmetric tensor into two legs. - Args: - index: A tensor Index. - Returns: - Tuple[Index, Index]: The result of splitting `index`. 
- """ - if index.is_leave: - raise ValueError("cannot split an elementary index") - - return index.left_child, index.right_child diff --git a/tensornetwork/block_tensor/index_old_test.py b/tensornetwork/block_tensor/index_old_test.py deleted file mode 100644 index 293b37bd8..000000000 --- a/tensornetwork/block_tensor/index_old_test.py +++ /dev/null @@ -1,171 +0,0 @@ -import numpy as np -# pylint: disable=line-too-long -from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index, fuse_charges, fuse_degeneracies, fuse_charge_pair, fuse_indices, unfuse - - -def test_index_fusion_mul(): - D = 10 - B = 4 - dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 1 - q2 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 2 - i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 - i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 - - i12 = i1 * i2 - assert i12.left_child is i1 - assert i12.right_child is i2 - assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) - - -def test_fuse_index_pair(): - D = 10 - B = 4 - dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 1 - q2 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 2 - i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 - i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 - - i12 = fuse_index_pair(i1, i2) - assert i12.left_child is i1 - assert i12.right_child is i2 - assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) - - -def test_fuse_indices(): - D = 10 - B = 4 - dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 1 - q2 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 2 - i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 - i2 = Index(charges=q2, flow=1, name='index2') 
#index on leg 2 - - i12 = fuse_indices([i1, i2]) - assert i12.left_child is i1 - assert i12.right_child is i2 - assert np.all(i12.charges == fuse_charge_pair(q1, 1, q2, 1)) - - -def test_split_index(): - D = 10 - B = 4 - dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 1 - q2 = np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype) #quantum numbers on leg 2 - i1 = Index(charges=q1, flow=1, name='index1') #index on leg 1 - i2 = Index(charges=q2, flow=1, name='index2') #index on leg 2 - - i12 = i1 * i2 - i1_, i2_ = split_index(i12) - assert i1 is i1_ - assert i2 is i2_ - np.testing.assert_allclose(q1, i1.charges) - np.testing.assert_allclose(q2, i2.charges) - np.testing.assert_allclose(q1, i1_.charges) - np.testing.assert_allclose(q2, i2_.charges) - assert i1_.name == 'index1' - assert i2_.name == 'index2' - assert i1_.flow == i1.flow - assert i2_.flow == i2.flow - - -def test_elementary_indices(): - D = 10 - B = 4 - dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - q3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - q4 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - i1 = Index(charges=q1, flow=1, name='index1') - i2 = Index(charges=q2, flow=1, name='index2') - i3 = Index(charges=q3, flow=1, name='index3') - i4 = Index(charges=q4, flow=1, name='index4') - - i12 = i1 * i2 - i34 = i3 * i4 - elmt12 = i12.get_elementary_indices() - assert elmt12[0] is i1 - assert elmt12[1] is i2 - - i1234 = i12 * i34 - elmt1234 = i1234.get_elementary_indices() - assert elmt1234[0] is i1 - assert elmt1234[1] is i2 - assert elmt1234[2] is i3 - assert elmt1234[3] is i4 - assert elmt1234[0].name == 'index1' - assert elmt1234[1].name == 'index2' - assert elmt1234[2].name == 'index3' - assert elmt1234[3].name == 'index4' - assert elmt1234[0].flow == i1.flow - assert elmt1234[1].flow == i2.flow - assert elmt1234[2].flow 
== i3.flow - assert elmt1234[3].flow == i4.flow - - np.testing.assert_allclose(q1, i1.charges) - np.testing.assert_allclose(q2, i2.charges) - np.testing.assert_allclose(q3, i3.charges) - np.testing.assert_allclose(q4, i4.charges) - - -def test_leave(): - D = 10 - B = 4 - dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - i1 = Index(charges=q1, flow=1, name='index1') - i2 = Index(charges=q2, flow=1, name='index2') - assert i1.is_leave - assert i2.is_leave - - i12 = i1 * i2 - assert not i12.is_leave - - -def test_copy(): - D = 10 - B = 4 - dtype = np.int16 - q1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - q2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) - i1 = Index(charges=q1, flow=1, name='index1') - i2 = Index(charges=q2, flow=1, name='index2') - i3 = Index(charges=q1, flow=-1, name='index3') - i4 = Index(charges=q2, flow=-1, name='index4') - - i12 = i1 * i2 - i34 = i3 * i4 - i1234 = i12 * i34 - i1234_copy = i1234.copy() - - elmt1234 = i1234_copy.get_elementary_indices() - assert elmt1234[0] is not i1 - assert elmt1234[1] is not i2 - assert elmt1234[2] is not i3 - assert elmt1234[3] is not i4 - - -def test_unfuse(): - q1 = np.random.randint(-4, 5, 10).astype(np.int16) - q2 = np.random.randint(-4, 5, 4).astype(np.int16) - q3 = np.random.randint(-4, 5, 4).astype(np.int16) - q12 = fuse_charges([q1, q2], [1, 1]) - q123 = fuse_charges([q12, q3], [1, 1]) - nz = np.nonzero(q123 == 0)[0] - q12_inds, q3_inds = unfuse(nz, len(q12), len(q3)) - - q1_inds, q2_inds = unfuse(q12_inds, len(q1), len(q2)) - np.testing.assert_allclose(q1[q1_inds] + q2[q2_inds] + q3[q3_inds], - np.zeros(len(q1_inds), dtype=np.int16)) From d51e94b30f68b163188c9e45979621723c3c6baf Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 29 Jan 2020 15:10:15 -0500 Subject: [PATCH 189/212] fix bug --- tensornetwork/block_tensor/block_tensor.py | 82 +++++++++++----------- 1 file changed, 42 
insertions(+), 40 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 1ad33b5b2..72076f8f0 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -142,9 +142,9 @@ def _find_best_partition(dims: Iterable[int]) -> int: return min_ind + 1 -def compute_fused_charge_degeneracies( - charges: List[BaseCharge], - flows: List[bool]) -> Tuple[BaseCharge, np.ndarray]: +def compute_fused_charge_degeneracies(charges: List[BaseCharge], + flows: List[bool] + ) -> Tuple[BaseCharge, np.ndarray]: """ For a list of charges, compute all possible fused charges resulting from fusing `charges`, together with their respective degeneracies @@ -166,8 +166,9 @@ def compute_fused_charge_degeneracies( # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". - accumulated_charges, accumulated_degeneracies = ( - charges[0] * flows[0]).unique(return_counts=True) + accumulated_charges, accumulated_degeneracies = (charges[0] * + flows[0]).unique( + return_counts=True) for n in range(1, len(charges)): leg_charges, leg_degeneracies = charges[n].unique(return_counts=True) fused_charges = accumulated_charges + leg_charges * flows[n] @@ -184,9 +185,9 @@ def compute_fused_charge_degeneracies( return accumulated_charges, accumulated_degeneracies -def compute_unique_fused_charges( - charges: List[BaseCharge], - flows: List[Union[bool, int]]) -> Tuple[BaseCharge, np.ndarray]: +def compute_unique_fused_charges(charges: List[BaseCharge], + flows: List[Union[bool, int]] + ) -> Tuple[BaseCharge, np.ndarray]: """ For a list of charges, compute all possible fused charges resulting from fusing `charges`. 
@@ -241,9 +242,9 @@ def compute_num_nonzero(charges: List[BaseCharge], flows: List[bool]) -> int: return np.squeeze(accumulated_degeneracies[nz_inds][0]) -def _find_diagonal_sparse_blocks( - charges: List[BaseCharge], flows: np.ndarray, - partition: int) -> (np.ndarray, np.ndarray, np.ndarray): +def _find_diagonal_sparse_blocks(charges: List[BaseCharge], flows: np.ndarray, + partition: int + ) -> (np.ndarray, np.ndarray, np.ndarray): """ Find the location of all non-trivial symmetry blocks from the data vector of of SymTensor (when viewed as a matrix across some prescribed index @@ -309,9 +310,9 @@ def _find_diagonal_sparse_blocks( # calculate mappings for the position in datavector of each block if num_blocks < 15: # faster method for small number of blocks - row_locs = np.concatenate( - [(row_ind.charge_labels == n) for n in range(num_blocks)]).reshape( - num_blocks, row_ind.dim) + row_locs = np.concatenate([ + (row_ind.charge_labels == n) for n in range(num_blocks) + ]).reshape(num_blocks, row_ind.dim) else: # faster method for large number of blocks row_locs = np.zeros([num_blocks, row_ind.dim], dtype=bool) @@ -324,8 +325,9 @@ def _find_diagonal_sparse_blocks( [[row_degen[row_to_block[n]], col_degen[col_to_block[n]]] for n in range(num_blocks)], dtype=np.uint32).T - block_maps = [(cumulate_num_nz[row_locs[n, :]][:, None] + np.arange( - block_dims[1, n])[None, :]).ravel() for n in range(num_blocks)] + block_maps = [(cumulate_num_nz[row_locs[n, :]][:, None] + + np.arange(block_dims[1, n])[None, :]).ravel() + for n in range(num_blocks)] obj = charges[0].__new__(type(charges[0])) obj.__init__(block_qnums, np.arange(block_qnums.shape[1], dtype=np.int16), charges[0].charge_types) @@ -816,7 +818,7 @@ def reshape(tensor: BlockSparseTensor, BlockSparseTensor: A new tensor reshaped into `shape` """ - return self.reshape(shape) + return tensor.reshape(shape) def transpose(tensor: BlockSparseTensor, @@ -835,10 +837,10 @@ def transpose(tensor: BlockSparseTensor, return 
result -def _compute_transposed_sparse_blocks( - indices: BlockSparseTensor, - order: Union[List[int], np.ndarray], - transposed_partition: Optional[int] = None) -> Tuple[BaseCharge, Dict, int]: +def _compute_transposed_sparse_blocks(indices: BlockSparseTensor, + order: Union[List[int], np.ndarray], + transposed_partition: Optional[int] = None + ) -> Tuple[BaseCharge, Dict, int]: """ Args: indices: A symmetric tensor. @@ -867,11 +869,11 @@ def _compute_transposed_sparse_blocks( order=flat_order) -def tensordot( - tensor1: BlockSparseTensor, - tensor2: BlockSparseTensor, - axes: Sequence[Sequence[int]], - final_order: Optional[Union[List, np.ndarray]] = None) -> BlockSparseTensor: +def tensordot(tensor1: BlockSparseTensor, + tensor2: BlockSparseTensor, + axes: Sequence[Sequence[int]], + final_order: Optional[Union[List, np.ndarray]] = None + ) -> BlockSparseTensor: """ Contract two `BlockSparseTensor`s along `axes`. Args: @@ -1462,12 +1464,12 @@ def _get_stride_arrays(dims): return [np.arange(dims[n]) * strides[n] for n in range(len(dims))] -def reduce_charges( - charges: List[BaseCharge], - flows: Iterable[bool], - target_charges: np.ndarray, - return_locations: Optional[bool] = False, - strides: Optional[np.ndarray] = None) -> Tuple[BaseCharge, np.ndarray]: +def reduce_charges(charges: List[BaseCharge], + flows: Iterable[bool], + target_charges: np.ndarray, + return_locations: Optional[bool] = False, + strides: Optional[np.ndarray] = None + ) -> Tuple[BaseCharge, np.ndarray]: """ Add quantum numbers arising from combining two or more charges into a single index, keeping only the quantum numbers that appear in 'target_charges'. 
@@ -1585,12 +1587,12 @@ def reduce_charges( return obj -def reduce_to_target_charges( - charges: List[BaseCharge], - flows: List[Union[int, bool]], - target_charges: BaseCharge, - strides: Optional[np.ndarray] = None, - return_positions: Optional[bool] = False) -> np.ndarray: +def reduce_to_target_charges(charges: List[BaseCharge], + flows: List[Union[int, bool]], + target_charges: BaseCharge, + strides: Optional[np.ndarray] = None, + return_positions: Optional[bool] = False + ) -> np.ndarray: """ Find the dense locations of elements (i.e. the index-values within the DENSE tensor) in the vector of `fused_charges` resulting from fusing all elements of `charges` @@ -1861,8 +1863,8 @@ def find_sparse_positions_new(charges: List[BaseCharge], charge_labels = np.concatenate( [final_relevant_labels[n] for n in relevant_left_inverse]) inds = np.concatenate([ - start_positions[n] + np.arange( - total_degen[relevant_left_inverse[n]], dtype=np.uint32) + start_positions[n] + + np.arange(total_degen[relevant_left_inverse[n]], dtype=np.uint32) for n in range(len(relevant_left_inverse)) ]) From 2f55188d1b8b8eb8a316cb00be3b1aeed777ff00 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 29 Jan 2020 15:21:42 -0500 Subject: [PATCH 190/212] renaming and shortening --- tensornetwork/block_tensor/block_tensor.py | 52 ++++------------------ 1 file changed, 8 insertions(+), 44 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 72076f8f0..786058c72 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -30,27 +30,11 @@ Tensor = Any -def combine_index_strides(index_dims: np.ndarray, - strides: np.ndarray) -> np.ndarray: - """ - Combine multiple indices of some dimensions and strides into a single index, - based on row-major order. Used when transposing SymTensors. - Args: - index_dims (np.ndarray): list of dim of each index. 
- strides (np.ndarray): list of strides of each index. - Returns: - np.ndarray: strides of combined index. - """ - num_ind = len(index_dims) - comb_ind_locs = np.arange( - 0, strides[0] * index_dims[0], strides[0], dtype=np.uint32) - for n in range(1, num_ind): - comb_ind_locs = np.add.outer( - comb_ind_locs, - np.arange(0, strides[n] * index_dims[n], strides[n], - dtype=np.uint32)).ravel() - - return comb_ind_locs +def fuse_stride_arrays(dims: np.ndarray, strides: np.ndarray) -> np.ndarray: + return fuse_ndarrays([ + np.arange(0, strides[n] * dims[n], strides[n], dtype=np.uint32) + for n in range(len(dims)) + ]) def compute_sparse_lookup(charges: List[BaseCharge], flows: Iterable[bool], @@ -83,24 +67,6 @@ def _get_strides(dims): return np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) -def fuse_ndarray_pair(array1: Union[List, np.ndarray], - array2: Union[List, np.ndarray]) -> np.ndarray: - """ - Fuse ndarrays `array1` and `array2` by kronecker-addition. - Given `array1 = [0,1,2]` and `array2 = [10,100]`, this returns - `[10, 100, 11, 101, 12, 102]`. - - Args: - array1: np.ndarray - array2: np.ndarray - Returns: - np.ndarray: The result of adding `array1` and `array2` - """ - return np.reshape( - np.asarray(array1)[:, None] + np.asarray(array2)[None, :], - len(array1) * len(array2)) - - def fuse_ndarrays(arrays: List[Union[List, np.ndarray]]) -> np.ndarray: """ Fuse all `arrays` by simple kronecker addition. 
@@ -114,7 +80,7 @@ def fuse_ndarrays(arrays: List[Union[List, np.ndarray]]) -> np.ndarray: return arrays[0] fused_arrays = arrays[0] for n in range(1, len(arrays)): - fused_arrays = fuse_ndarray_pair(array1=fused_arrays, array2=arrays[n]) + fused_arrays = np.ravel(np.add.outer(fused_arrays, arrays[n])) return fused_arrays @@ -1527,10 +1493,8 @@ def reduce_charges(charges: List[BaseCharge], if return_locations: if strides is not None: # computed locations based on non-trivial strides - row_pos = combine_index_strides(tensor_dims[:partition], - strides[:partition]) - col_pos = combine_index_strides(tensor_dims[partition:], - strides[partition:]) + row_pos = fuse_stride_arrays(tensor_dims[:partition], strides[:partition]) + col_pos = fuse_stride_arrays(tensor_dims[partition:], strides[partition:]) # reduce combined qnums to include only those in target_charges reduced_rows = [0] * left_ind.num_unique From 2c65f8b81bd9fc831e9effc0c6b9ed74b7f97fe8 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 29 Jan 2020 15:39:52 -0500 Subject: [PATCH 191/212] cleaning up code --- tensornetwork/block_tensor/block_tensor.py | 1343 ++------------------ 1 file changed, 121 insertions(+), 1222 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 786058c72..9bddb7e81 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -208,6 +208,127 @@ def compute_num_nonzero(charges: List[BaseCharge], flows: List[bool]) -> int: return np.squeeze(accumulated_degeneracies[nz_inds][0]) +def reduce_charges(charges: List[BaseCharge], + flows: Iterable[bool], + target_charges: np.ndarray, + return_locations: Optional[bool] = False, + strides: Optional[np.ndarray] = None + ) -> Tuple[BaseCharge, np.ndarray]: + """ + Add quantum numbers arising from combining two or more charges into a + single index, keeping only the quantum numbers that appear in 'target_charges'. 
+ Equilvalent to using "combine_charges" followed by "reduce", but is + generally much more efficient. + Args: + charges (List[SymIndex]): list of SymIndex. + flows (np.ndarray): vector of bools describing index orientations. + target_charges (np.ndarray): n-by-m array describing qauntum numbers of the + qnums which should be kept with 'n' the number of symmetries. + return_locations (bool, optional): if True then return the location of the kept + values of the fused charges + strides (np.ndarray, optional): index strides with which to compute the + return_locations of the kept elements. Defaults to trivial strides (based on + row major order) if ommitted. + Returns: + SymIndex: the fused index after reduction. + np.ndarray: locations of the fused SymIndex qnums that were kept. + """ + + num_inds = len(charges) + tensor_dims = [len(c) for c in charges] + + if len(charges) == 1: + # reduce single index + if strides is None: + strides = np.array([1], dtype=np.uint32) + return charges[0].dual(flows[0]).reduce( + target_charges, return_locations=return_locations, strides=strides[0]) + + else: + # find size-balanced partition of charges + partition = _find_best_partition(tensor_dims) + + # compute quantum numbers for each partition + left_ind = fuse_charges(charges[:partition], flows[:partition]) + right_ind = fuse_charges(charges[partition:], flows[partition:]) + + # compute combined qnums + comb_qnums = fuse_ndarray_charges(left_ind.unique_charges, + right_ind.unique_charges, + charges[0].charge_types) + [unique_comb_qnums, comb_labels] = np.unique( + comb_qnums, return_inverse=True, axis=1) + num_unique = unique_comb_qnums.shape[1] + + # intersect combined qnums and target_charges + reduced_qnums, label_to_unique, label_to_kept = intersect( + unique_comb_qnums, target_charges, axis=1, return_indices=True) + map_to_kept = -np.ones(num_unique, dtype=np.int16) + for n in range(len(label_to_unique)): + map_to_kept[label_to_unique[n]] = n + new_comb_labels = 
map_to_kept[comb_labels].reshape( + [left_ind.num_unique, right_ind.num_unique]) + if return_locations: + if strides is not None: + # computed locations based on non-trivial strides + row_pos = fuse_stride_arrays(tensor_dims[:partition], strides[:partition]) + col_pos = fuse_stride_arrays(tensor_dims[partition:], strides[partition:]) + + # reduce combined qnums to include only those in target_charges + reduced_rows = [0] * left_ind.num_unique + row_locs = [0] * left_ind.num_unique + for n in range(left_ind.num_unique): + temp_label = new_comb_labels[n, right_ind.charge_labels] + temp_keep = temp_label >= 0 + reduced_rows[n] = temp_label[temp_keep] + row_locs[n] = col_pos[temp_keep] + + reduced_labels = np.concatenate( + [reduced_rows[n] for n in left_ind.charge_labels]) + reduced_locs = np.concatenate([ + row_pos[n] + row_locs[left_ind.charge_labels[n]] + for n in range(left_ind.dim) + ]) + obj = charges[0].__new__(type(charges[0])) + obj.__init__(reduced_qnums, reduced_labels, charges[0].charge_types) + return obj, reduced_locs + + else: # trivial strides + # reduce combined qnums to include only those in target_charges + reduced_rows = [0] * left_ind.num_unique + row_locs = [0] * left_ind.num_unique + for n in range(left_ind.num_unique): + temp_label = new_comb_labels[n, right_ind.charge_labels] + temp_keep = temp_label >= 0 + reduced_rows[n] = temp_label[temp_keep] + row_locs[n] = np.where(temp_keep)[0] + + reduced_labels = np.concatenate( + [reduced_rows[n] for n in left_ind.charge_labels]) + reduced_locs = np.concatenate([ + n * right_ind.dim + row_locs[left_ind.charge_labels[n]] + for n in range(left_ind.dim) + ]) + obj = charges[0].__new__(type(charges[0])) + obj.__init__(reduced_qnums, reduced_labels, charges[0].charge_types) + + return obj, reduced_locs + + else: + # reduce combined qnums to include only those in target_charges + reduced_rows = [0] * left_ind.num_unique + for n in range(left_ind.num_unique): + temp_label = new_comb_labels[n, 
right_ind.charge_labels] + reduced_rows[n] = temp_label[temp_label >= 0] + + reduced_labels = np.concatenate( + [reduced_rows[n] for n in left_ind.charge_labels]) + obj = charges[0].__new__(type(charges[0])) + obj.__init__(reduced_qnums, reduced_labels, charges[0].charge_types) + + return obj + + def _find_diagonal_sparse_blocks(charges: List[BaseCharge], flows: np.ndarray, partition: int ) -> (np.ndarray, np.ndarray, np.ndarray): @@ -345,16 +466,11 @@ def _find_transposed_diagonal_sparse_blocks( new_row_flows = flows[order[:tr_partition]] new_col_flows = flows[order[tr_partition:]] - # compute qnums of row/cols in transposed tensor unique_row_qnums, new_row_degen = compute_fused_charge_degeneracies( new_row_charges, new_row_flows) - # unique_row_qnums, new_row_degen = compute_qnum_degen( - # new_row_charges, new_row_flows) unique_col_qnums, new_col_degen = compute_fused_charge_degeneracies( new_col_charges, np.logical_not(new_col_flows)) - # unique_col_qnums, new_col_degen = compute_qnum_degen( - # new_col_charges, np.logical_not(new_col_flows)) block_qnums, new_row_map, new_col_map = intersect( unique_row_qnums.unique_charges, unique_col_qnums.unique_charges, @@ -377,7 +493,6 @@ def _find_transposed_diagonal_sparse_blocks( block_qnums, return_locations=True, strides=new_strides[tr_partition:]) - # compute qnums of row/cols in original tensor orig_partition = _find_best_partition(tensor_dims) orig_width = np.prod(tensor_dims[orig_partition:]) @@ -397,7 +512,6 @@ def _find_transposed_diagonal_sparse_blocks( orig_col_ind = fuse_charges(charges[orig_partition:], np.logical_not(flows[orig_partition:])) - # compute row degens (i.e. 
number of non-zero elements per row) inv_row_map = -np.ones( orig_unique_row_qnums.unique_charges.shape[1], dtype=np.int16) for n in range(len(row_map)): @@ -407,7 +521,6 @@ def _find_transposed_diagonal_sparse_blocks( 0)[inv_row_map[orig_row_ind.charge_labels]] all_cumul_degens = np.cumsum(np.insert(all_degens[:-1], 0, 0)).astype(np.uint32) - # generate vector which translates from dense row position to sparse row position dense_to_sparse = np.zeros(orig_width, dtype=np.uint32) for n in range(orig_num_blocks): dense_to_sparse[orig_col_ind.charge_labels == col_map[n]] = np.arange( @@ -803,38 +916,6 @@ def transpose(tensor: BlockSparseTensor, return result -def _compute_transposed_sparse_blocks(indices: BlockSparseTensor, - order: Union[List[int], np.ndarray], - transposed_partition: Optional[int] = None - ) -> Tuple[BaseCharge, Dict, int]: - """ - Args: - indices: A symmetric tensor. - order: The new order of indices. - permutation: An np.ndarray of int for reshuffling the data, - typically the output of a prior call to `transpose`. Passing `permutation` - can greatly speed up the transposition. - return_permutation: If `True`, return the the permutation data. 
- Returns: - - """ - if len(order) != len(indices): - raise ValueError( - "`len(order)={}` is different form `len(indices)={}`".format( - len(order), len(indices))) - flat_indices, flat_charges, flat_flows, flat_strides, flat_order, transposed_partition = flatten_meta_data( - indices, order, transposed_partition) - if transposed_partition is None: - transposed_partition = _find_best_partition( - [len(flat_charges[n]) for n in flat_order]) - - return _find_transposed_diagonal_sparse_blocks( - flat_charges, - flat_flows, - tr_partition=transposed_partition, - order=flat_order) - - def tensordot(tensor1: BlockSparseTensor, tensor2: BlockSparseTensor, axes: Sequence[Sequence[int]], @@ -1010,1185 +1091,3 @@ def flatten_meta_data(indices, order, partition): [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) return flat_elementary_indices, flat_charges, flat_flows, flat_strides, flat_order, new_partition - - -##################################################### DEPRECATED ROUTINES ############################ - - -def _find_transposed_diagonal_sparse_blocks_old( - charges: List[BaseCharge], flows: List[Union[bool, int]], order: np.ndarray, - tr_partition: int) -> Tuple[BaseCharge, List[np.ndarray]]: - """ - Given the meta data and underlying data of a symmetric matrix, compute the - dense positions of all diagonal blocks and return them in a dict. - `row_charges` and `column_charges` are lists of np.ndarray. The tensor - is viewed as a matrix with rows given by fusing `row_charges` and - columns given by fusing `column_charges`. - - Args: - data: An np.ndarray of the data. The number of elements in `data` - has to match the number of non-zero elements defined by `charges` - and `flows` - row_charges: List of np.ndarray, one for each leg of the row-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - column_charges: List of np.ndarray, one for each leg of the column-indices. 
- Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - row_flows: A list of integers, one for each entry in `row_charges`. - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - column_flows: A list of integers, one for each entry in `column_charges`. - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - row_strides: An optional np.ndarray denoting the strides of `row_charges`. - If `None`, natural stride ordering is assumed. - column_strides: An optional np.ndarray denoting the strides of - `column_charges`. If `None`, natural stride ordering is assumed. - - Returns: - List[Union[BaseCharge, ChargeCollection]]: A list of unique charges, one per block. - List[List]: A list containing the blocks information. - For each element `e` in the list `e[0]` is an `np.ndarray` of ints - denoting the dense positions of the non-zero elements and `e[1]` - is a tuple corresponding to the blocks' matrix shape - """ - _check_flows(flows) - if len(flows) != len(charges): - raise ValueError("`len(flows)` is different from `len(charges) ") - if np.all(order == np.arange(len(order))): - return _find_diagonal_sparse_blocks(charges, flows, tr_partition) - - strides = _get_strides([len(c) for c in charges]) - - tr_row_charges = [charges[n] for n in order[:tr_partition]] - tr_row_flows = [flows[n] for n in order[:tr_partition]] - tr_row_strides = [strides[n] for n in order[:tr_partition]] - - tr_column_charges = [charges[n] for n in order[tr_partition:]] - tr_column_flows = [flows[n] for n in order[tr_partition:]] - tr_column_strides = [strides[n] for n in order[tr_partition:]] - - unique_tr_column_charges, tr_column_dims = compute_fused_charge_degeneracies( - tr_column_charges, tr_column_flows) - unique_tr_row_charges = compute_unique_fused_charges(tr_row_charges, - 
tr_row_flows) - - fused = unique_tr_row_charges + unique_tr_column_charges - tr_li, tr_ri = np.divmod( - np.nonzero(fused == unique_tr_column_charges.identity_charges)[0], - len(unique_tr_column_charges)) - - row_ind, row_locations = reduce_charges( - charges=tr_row_charges, - flows=tr_row_flows, - target_charges=unique_tr_row_charges.charges[:, tr_li], - return_locations=True, - strides=tr_row_strides) - - col_ind, column_locations = reduce_charges( - charges=tr_column_charges, - flows=tr_column_flows, - target_charges=unique_tr_column_charges.charges[:, tr_ri], - return_locations=True, - strides=tr_column_strides) - - partition = _find_best_partition([len(c) for c in charges]) - fused_row_charges = fuse_charges(charges[:partition], flows[:partition]) - fused_column_charges = fuse_charges(charges[partition:], flows[partition:]) - - unique_fused_row, row_inverse = fused_row_charges.unique(return_inverse=True) - unique_fused_column, column_inverse = fused_column_charges.unique( - return_inverse=True) - - unique_column_charges, column_dims = compute_fused_charge_degeneracies( - charges[partition:], flows[partition:]) - unique_row_charges = compute_unique_fused_charges(charges[:partition], - flows[:partition]) - fused = unique_row_charges + unique_column_charges - li, ri = np.divmod( - np.nonzero(fused == unique_column_charges.identity_charges)[0], - len(unique_column_charges)) - - common_charges, label_to_row, label_to_column = unique_row_charges.intersect( - unique_column_charges * True, return_indices=True) - num_blocks = len(label_to_row) - tmp = -np.ones(len(unique_row_charges), dtype=np.int16) - for n in range(len(label_to_row)): - tmp[label_to_row[n]] = n - - degeneracy_vector = np.append(column_dims[label_to_column], - 0)[tmp[row_inverse]] - start_positions = np.cumsum(np.insert(degeneracy_vector[:-1], 0, - 0)).astype(np.uint32) - - column_dimension = np.prod([len(c) for c in charges[partition:]]) - - column_lookup = compute_sparse_lookup(charges[partition:], 
flows[partition:], - common_charges) - - blocks = [] - for n in range(num_blocks): - rlocs = row_locations[row_ind.charge_labels == n] - clocs = column_locations[col_ind.charge_labels == n] - orig_row_posL, orig_col_posL = np.divmod(rlocs, np.uint32(column_dimension)) - orig_row_posR, orig_col_posR = np.divmod(clocs, np.uint32(column_dimension)) - inds = (start_positions[np.add.outer(orig_row_posL, orig_row_posR)] + - column_lookup[np.add.outer(orig_col_posL, orig_col_posR)]).ravel() - - blocks.append([inds, (len(rlocs), len(clocs))]) - charges_out = unique_tr_row_charges[tr_li] - return charges_out, blocks - - -def _find_diagonal_dense_blocks( - row_charges: List[BaseCharge], - column_charges: List[BaseCharge], - row_flows: List[Union[bool, int]], - column_flows: List[Union[bool, int]], - row_strides: Optional[np.ndarray] = None, - column_strides: Optional[np.ndarray] = None, -) -> Tuple[BaseCharge, List[np.ndarray]]: - """ - - Deprecated - Given the meta data and underlying data of a symmetric matrix, compute the - dense positions of all diagonal blocks and return them in a dict. - `row_charges` and `column_charges` are lists of np.ndarray. The tensor - is viewed as a matrix with rows given by fusing `row_charges` and - columns given by fusing `column_charges`. - - Args: - data: An np.ndarray of the data. The number of elements in `data` - has to match the number of non-zero elements defined by `charges` - and `flows` - row_charges: List of np.ndarray, one for each leg of the row-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - column_charges: List of np.ndarray, one for each leg of the column-indices. - Each np.ndarray `row_charges[leg]` is of shape `(D[leg],)`. - The bond dimension `D[leg]` can vary on each leg. - row_flows: A list of integers, one for each entry in `row_charges`. - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. 
`1` is inflowing, `-1` is outflowing - charge. - column_flows: A list of integers, one for each entry in `column_charges`. - with values `1` or `-1`, denoting the flow direction - of the charges on each leg. `1` is inflowing, `-1` is outflowing - charge. - row_strides: An optional np.ndarray denoting the strides of `row_charges`. - If `None`, natural stride ordering is assumed. - column_strides: An optional np.ndarray denoting the strides of - `column_charges`. If `None`, natural stride ordering is assumed. - - Returns: - List[Union[BaseCharge, ChargeCollection]]: A list of unique charges, one per block. - List[List]: A list containing the blocks information. - For each element `e` in the list `e[0]` is an `np.ndarray` of ints - denoting the dense positions of the non-zero elements and `e[1]` - is a tuple corresponding to the blocks' matrix shape - - """ - flows = list(row_flows).copy() - flows.extend(column_flows) - _check_flows(flows) - if len(flows) != (len(row_charges) + len(column_charges)): - raise ValueError( - "`len(flows)` is different from `len(row_charges) + len(column_charges)`" - ) - #get the unique column-charges - #we only care about their degeneracies, not their order; that's much faster - #to compute since we don't have to fuse all charges explicitly - #`compute_fused_charge_degeneracies` multiplies flows into the column_charges - unique_column_charges = compute_unique_fused_charges(column_charges, - column_flows) - - unique_row_charges = compute_unique_fused_charges(row_charges, row_flows) - #get the charges common to rows and columns (only those matter) - fused = unique_row_charges + unique_column_charges - li, ri = np.divmod( - np.nonzero(fused == unique_column_charges.identity_charges)[0], - len(unique_column_charges)) - if ((row_strides is None) and - (column_strides is not None)) or ((row_strides is not None) and - (column_strides is None)): - raise ValueError("`row_strides` and `column_strides` " - "have to be passed simultaneously." 
- " Found `row_strides={}` and " - "`column_strides={}`".format(row_strides, column_strides)) - if row_strides is not None: - row_locations = find_dense_positions( - charges=row_charges, - flows=row_flows, - target_charges=unique_row_charges[li], - strides=row_strides) - - else: - column_dim = np.prod([len(c) for c in column_charges]) - row_locations = find_dense_positions( - charges=row_charges, - flows=row_flows, - target_charges=unique_row_charges[li]) - for v in row_locations.values(): - v *= column_dim - if column_strides is not None: - column_locations = find_dense_positions( - charges=column_charges, - flows=column_flows, - target_charges=unique_column_charges[ri], - strides=column_strides, - store_dual=True) - - else: - column_locations = find_dense_positions( - charges=column_charges, - flows=column_flows, - target_charges=unique_column_charges[ri], - store_dual=True) - blocks = [] - for c in unique_row_charges[li]: - #numpy broadcasting is substantially faster than kron! - rlocs = np.expand_dims(row_locations[c], 1) - clocs = np.expand_dims(column_locations[c], 0) - inds = np.reshape(rlocs + clocs, rlocs.shape[0] * clocs.shape[1]) - blocks.append([inds, (rlocs.shape[0], clocs.shape[1])]) - return unique_row_charges[li], blocks - - -# def find_sparse_positions_2( -# charges: List[Union[BaseCharge, ChargeCollection]], -# flows: List[Union[int, bool]], -# target_charges: Union[BaseCharge, ChargeCollection]) -> Dict: -# """ -# Find the sparse locations of elements (i.e. the index-values within -# the SPARSE tensor) in the vector `fused_charges` (resulting from -# fusing `left_charges` and `right_charges`) -# that have a value of `target_charges`, assuming that all elements -# different from `target_charges` are `0`. 
-# For example, given -# ``` -# left_charges = [-2,0,1,0,0] -# right_charges = [-1,0,2,1] -# target_charges = [0,1] -# fused_charges = fuse_charges([left_charges, right_charges],[1,1]) -# print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] -# ``` 0 1 2 3 4 5 6 7 8 -# we want to find the all different blocks -# that fuse to `target_charges=[0,1]`, i.e. where `fused_charges==0` or `1`, -# together with their corresponding sparse index-values of the data in the sparse array, -# assuming that all elements in `fused_charges` different from `target_charges` are 0. - -# `find_sparse_blocks` returns a dict mapping integers `target_charge` -# to an array of integers denoting the sparse locations of elements within -# `fused_charges`. -# For the above example, we get: -# * `target_charge=0`: [0,1,3,5,7] -# * `target_charge=1`: [2,4,6,8] -# Args: -# left_charges: An np.ndarray of integer charges. -# left_flow: The flow direction of the left charges. -# right_charges: An np.ndarray of integer charges. -# right_flow: The flow direction of the right charges. -# target_charge: The target charge. -# Returns: -# dict: Mapping integers to np.ndarray of integers. 
-# """ -# #FIXME: this is probably still not optimal - -# _check_flows(flows) -# if len(charges) == 1: -# fused_charges = charges[0] * flows[0] -# unique_charges = fused_charges.unique() -# target_charges = target_charges.unique() -# relevant_target_charges = unique_charges.intersect(target_charges) -# relevant_fused_charges = fused_charges[fused_charges.isin( -# relevant_target_charges)] -# return { -# c: np.nonzero(relevant_fused_charges == c)[0] -# for c in relevant_target_charges -# } - -# left_charges, right_charges, partition = _find_best_partition(charges, flows) - -# unique_target_charges, inds = target_charges.unique(return_index=True) -# target_charges = target_charges[np.sort(inds)] - -# unique_left = left_charges.unique() -# unique_right = right_charges.unique() -# fused = unique_left + unique_right - -# #compute all unique charges that can add up to -# #target_charges -# left_inds, right_inds = [], [] -# for target_charge in target_charges: -# li, ri = np.divmod(np.nonzero(fused == target_charge)[0], len(unique_right)) -# left_inds.append(li) -# right_inds.append(ri) - -# #now compute the relevant unique left and right charges -# unique_left_charges = unique_left[np.unique(np.concatenate(left_inds))] -# unique_right_charges = unique_right[np.unique(np.concatenate(right_inds))] - -# #only keep those charges that are relevant -# relevant_left_charges = left_charges[left_charges.isin(unique_left_charges)] -# relevant_right_charges = right_charges[right_charges.isin( -# unique_right_charges)] - -# unique_right_charges, right_dims = relevant_right_charges.unique( -# return_counts=True) -# right_degeneracies = dict(zip(unique_right_charges, right_dims)) -# #generate a degeneracy vector which for each value r in relevant_right_charges -# #holds the corresponding number of non-zero elements `relevant_right_charges` -# #that can add up to `target_charges`. 
-# degeneracy_vector = np.empty(len(relevant_left_charges), dtype=np.int64) -# right_indices = {} - -# for n in range(len(unique_left_charges)): -# left_charge = unique_left_charges[n] -# total_charge = left_charge + unique_right_charges -# total_degeneracy = np.sum(right_dims[total_charge.isin(target_charges)]) -# tmp_relevant_right_charges = relevant_right_charges[ -# relevant_right_charges.isin((target_charges + left_charge * (-1)))] - -# for n in range(len(target_charges)): -# target_charge = target_charges[n] -# right_indices[(left_charge.get_item(0), -# target_charge.get_item(0))] = np.nonzero( -# tmp_relevant_right_charges == ( -# target_charge + left_charge * (-1)))[0] - -# degeneracy_vector[relevant_left_charges == left_charge] = total_degeneracy - -# start_positions = np.cumsum(degeneracy_vector) - degeneracy_vector -# blocks = {t: [] for t in target_charges} -# # iterator returns tuple of `int` for ChargeCollection objects -# # and `int` for Ba seCharge objects (both hashable) -# for left_charge in unique_left_charges: -# a = np.expand_dims(start_positions[relevant_left_charges == left_charge], 0) -# for target_charge in target_charges: -# ri = right_indices[(left_charge, target_charge)] -# if len(ri) != 0: -# b = np.expand_dims(ri, 1) -# tmp = a + b -# blocks[target_charge].append(np.reshape(tmp, np.prod(tmp.shape))) -# out = {} -# for target_charge in target_charges: -# out[target_charge] = np.concatenate(blocks[target_charge]) -# return out - - -def _compute_sparse_lookups(row_charges: BaseCharge, row_flows, column_charges, - column_flows): - """ - Compute lookup tables for looking up how dense index positions map - to sparse index positions for the diagonal blocks a symmetric matrix. 
- Args: - row_charges: - - """ - column_flows = list(-np.asarray(column_flows)) - fused_column_charges = fuse_charges(column_charges, column_flows) - fused_row_charges = fuse_charges(row_charges, row_flows) - unique_column_charges, column_inverse = fused_column_charges.unique( - return_inverse=True) - unique_row_charges, row_inverse = fused_row_charges.unique( - return_inverse=True) - common_charges, comm_row, comm_col = unique_row_charges.intersect( - unique_column_charges, return_indices=True) - - col_ind_sort = np.argsort(column_inverse, kind='stable') - row_ind_sort = np.argsort(row_inverse, kind='stable') - _, col_charge_degeneracies = compute_fused_charge_degeneracies( - column_charges, column_flows) - _, row_charge_degeneracies = compute_fused_charge_degeneracies( - row_charges, row_flows) - # labelsorted_indices = column_inverse[col_ind_sort] - # tmp = np.nonzero( - # np.append(labelsorted_indices, unique_column_charges.charges.shape[0] + 1) - - # np.append(labelsorted_indices[0], labelsorted_indices))[0] - #charge_degeneracies = tmp - np.append(0, tmp[0:-1]) - - col_start_positions = np.cumsum(np.append(0, col_charge_degeneracies)) - row_start_positions = np.cumsum(np.append(0, row_charge_degeneracies)) - column_lookup = np.empty(len(fused_column_charges), dtype=np.uint32) - row_lookup = np.zeros(len(fused_row_charges), dtype=np.uint32) - for n in range(len(common_charges)): - column_lookup[col_ind_sort[col_start_positions[ - comm_col[n]]:col_start_positions[comm_col[n] + 1]]] = np.arange( - col_charge_degeneracies[comm_col[n]]) - row_lookup[ - row_ind_sort[row_start_positions[comm_row[n]]:row_start_positions[ - comm_row[n] + 1]]] = col_charge_degeneracies[comm_col[n]] - - return np.append(0, np.cumsum(row_lookup[0:-1])), column_lookup - - -def _get_stride_arrays(dims): - strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) - return [np.arange(dims[n]) * strides[n] for n in range(len(dims))] - - -def reduce_charges(charges: List[BaseCharge], - 
flows: Iterable[bool], - target_charges: np.ndarray, - return_locations: Optional[bool] = False, - strides: Optional[np.ndarray] = None - ) -> Tuple[BaseCharge, np.ndarray]: - """ - Add quantum numbers arising from combining two or more charges into a - single index, keeping only the quantum numbers that appear in 'target_charges'. - Equilvalent to using "combine_charges" followed by "reduce", but is - generally much more efficient. - Args: - charges (List[SymIndex]): list of SymIndex. - flows (np.ndarray): vector of bools describing index orientations. - target_charges (np.ndarray): n-by-m array describing qauntum numbers of the - qnums which should be kept with 'n' the number of symmetries. - return_locations (bool, optional): if True then return the location of the kept - values of the fused charges - strides (np.ndarray, optional): index strides with which to compute the - return_locations of the kept elements. Defaults to trivial strides (based on - row major order) if ommitted. - Returns: - SymIndex: the fused index after reduction. - np.ndarray: locations of the fused SymIndex qnums that were kept. 
- """ - - num_inds = len(charges) - tensor_dims = [len(c) for c in charges] - - if len(charges) == 1: - # reduce single index - if strides is None: - strides = np.array([1], dtype=np.uint32) - return charges[0].dual(flows[0]).reduce( - target_charges, return_locations=return_locations, strides=strides[0]) - - else: - # find size-balanced partition of charges - partition = _find_best_partition(tensor_dims) - - # compute quantum numbers for each partition - left_ind = fuse_charges(charges[:partition], flows[:partition]) - right_ind = fuse_charges(charges[partition:], flows[partition:]) - - # compute combined qnums - comb_qnums = fuse_ndarray_charges(left_ind.unique_charges, - right_ind.unique_charges, - charges[0].charge_types) - [unique_comb_qnums, comb_labels] = np.unique( - comb_qnums, return_inverse=True, axis=1) - num_unique = unique_comb_qnums.shape[1] - - # intersect combined qnums and target_charges - reduced_qnums, label_to_unique, label_to_kept = intersect( - unique_comb_qnums, target_charges, axis=1, return_indices=True) - map_to_kept = -np.ones(num_unique, dtype=np.int16) - for n in range(len(label_to_unique)): - map_to_kept[label_to_unique[n]] = n - new_comb_labels = map_to_kept[comb_labels].reshape( - [left_ind.num_unique, right_ind.num_unique]) - if return_locations: - if strides is not None: - # computed locations based on non-trivial strides - row_pos = fuse_stride_arrays(tensor_dims[:partition], strides[:partition]) - col_pos = fuse_stride_arrays(tensor_dims[partition:], strides[partition:]) - - # reduce combined qnums to include only those in target_charges - reduced_rows = [0] * left_ind.num_unique - row_locs = [0] * left_ind.num_unique - for n in range(left_ind.num_unique): - temp_label = new_comb_labels[n, right_ind.charge_labels] - temp_keep = temp_label >= 0 - reduced_rows[n] = temp_label[temp_keep] - row_locs[n] = col_pos[temp_keep] - - reduced_labels = np.concatenate( - [reduced_rows[n] for n in left_ind.charge_labels]) - reduced_locs = 
np.concatenate([ - row_pos[n] + row_locs[left_ind.charge_labels[n]] - for n in range(left_ind.dim) - ]) - obj = charges[0].__new__(type(charges[0])) - obj.__init__(reduced_qnums, reduced_labels, charges[0].charge_types) - return obj, reduced_locs - - else: # trivial strides - # reduce combined qnums to include only those in target_charges - reduced_rows = [0] * left_ind.num_unique - row_locs = [0] * left_ind.num_unique - for n in range(left_ind.num_unique): - temp_label = new_comb_labels[n, right_ind.charge_labels] - temp_keep = temp_label >= 0 - reduced_rows[n] = temp_label[temp_keep] - row_locs[n] = np.where(temp_keep)[0] - - reduced_labels = np.concatenate( - [reduced_rows[n] for n in left_ind.charge_labels]) - reduced_locs = np.concatenate([ - n * right_ind.dim + row_locs[left_ind.charge_labels[n]] - for n in range(left_ind.dim) - ]) - obj = charges[0].__new__(type(charges[0])) - obj.__init__(reduced_qnums, reduced_labels, charges[0].charge_types) - - return obj, reduced_locs - - else: - # reduce combined qnums to include only those in target_charges - reduced_rows = [0] * left_ind.num_unique - for n in range(left_ind.num_unique): - temp_label = new_comb_labels[n, right_ind.charge_labels] - reduced_rows[n] = temp_label[temp_label >= 0] - - reduced_labels = np.concatenate( - [reduced_rows[n] for n in left_ind.charge_labels]) - obj = charges[0].__new__(type(charges[0])) - obj.__init__(reduced_qnums, reduced_labels, charges[0].charge_types) - - return obj - - -def reduce_to_target_charges(charges: List[BaseCharge], - flows: List[Union[int, bool]], - target_charges: BaseCharge, - strides: Optional[np.ndarray] = None, - return_positions: Optional[bool] = False - ) -> np.ndarray: - """ - Find the dense locations of elements (i.e. the index-values within the DENSE tensor) - in the vector of `fused_charges` resulting from fusing all elements of `charges` - that have a value of `target_charge`. 
- For example, given - ``` - charges = [[-2,0,1,0,0],[-1,0,2,1]] - target_charge = 0 - fused_charges = fuse_charges(charges,[1,1]) - print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] - ``` - we want to find the index-positions of charges - that fuse to `target_charge=0`, i.e. where `fused_charges==0`, - within the dense array. As one additional wrinkle, `charges` - is a subset of the permuted charges of a tensor with rank R > len(charges), - and `stride_arrays` are their corresponding range of strides, i.e. - - ``` - R=5 - D = [2,3,4,5,6] - tensor_flows = np.random.randint(-1,2,R) - tensor_charges = [np.random.randing(-5,5,D[n]) for n in range(R)] - order = np.arange(R) - np.random.shuffle(order) - tensor_strides = [360, 120, 30, 6, 1] - - charges = [tensor_charges[order[n]] for n in range(3)] - flows = [tensor_flows[order[n]] for n in range(len(3))] - strides = [tensor_stride[order[n]] for n in range(3)] - _ = _find_transposed_dense_positions(charges, flows, 0, strides) - - ``` - `_find_transposed_dense_blocks` returns an np.ndarray containing the - index-positions of these elements calculated using `stride_arrays`. - The result only makes sense in conjuction with the complementary - data computed from the complementary - elements in`tensor_charges`, - `tensor_strides` and `tensor_flows`. - This routine is mainly used in `_find_diagonal_dense_blocks`. - - Args: - charges: A list of BaseCharge or ChargeCollection. - flows: The flow directions of the `charges`. - target_charge: The target charge. - strides: The strides for the `charges` subset. - if `None`, natural stride ordering is assumed. - - Returns: - np.ndarray: The index-positions within the dense data array - of the elements fusing to `target_charge`. 
- """ - - _check_flows(flows) - if len(charges) == 1: - fused_charges = charges[0] * flows[0] - unique, inverse = fused_charges.unique(return_inverse=True) - common, label_to_unique, label_to_target = unique.intersect( - target_charges, return_indices=True) - inds = np.nonzero(np.isin(inverse, label_to_unique))[0] - if strides is not None: - permuted_inds = strides[0] * np.arange(len(charges[0])) - if return_positions: - return fused_charges[permuted_inds[inds]], inds - return fused_charges[permuted_inds[inds]] - - if return_positions: - return fused_charges[inds], inds - return fused_charges[inds] - - partition = _find_best_partition([len(c) for c in charges]) - left_charges = fuse_charges(charges[:partition], flows[:partition]) - right_charges = fuse_charges(charges[partition:], flows[partition:]) - - # unique_target_charges, inds = target_charges.unique(return_index=True) - # target_charges = target_charges[np.sort(inds)] - unique_left, left_inverse = left_charges.unique(return_inverse=True) - unique_right, right_inverse = right_charges.unique(return_inverse=True) - - fused = unique_left + unique_right - unique_fused, unique_fused_labels = fused.unique(return_inverse=True) - - relevant_charges, relevant_labels, _ = unique_fused.intersect( - target_charges, return_indices=True) - - tmp = np.full(len(unique_fused), fill_value=-1, dtype=np.int16) - tmp[relevant_labels] = np.arange(len(relevant_labels), dtype=np.int16) - lookup_target = tmp[unique_fused_labels].reshape( - [len(unique_left), len(unique_right)]) - - if return_positions: - if strides is not None: - stride_arrays = [ - np.arange(len(charges[n])) * strides[n] for n in range(len(charges)) - ] - permuted_left_inds = fuse_ndarrays(stride_arrays[0:partition]) - permuted_right_inds = fuse_ndarrays(stride_arrays[partition:]) - - row_locations = [None] * len(unique_left) - final_relevant_labels = [None] * len(unique_left) - for n in range(len(unique_left)): - labels = lookup_target[n, right_inverse] - lookup = 
labels >= 0 - row_locations[n] = permuted_right_inds[lookup] - final_relevant_labels[n] = labels[lookup] - - charge_labels = np.concatenate( - [final_relevant_labels[n] for n in left_inverse]) - tmp_inds = [ - permuted_left_inds[n] + row_locations[left_inverse[n]] - for n in range(len(left_charges)) - ] - try: - inds = np.concatenate(tmp_inds) - except ValueError: - inds = np.asarray(tmp_inds) - - else: - row_locations = [None] * len(unique_left) - final_relevant_labels = [None] * len(unique_left) - for n in range(len(unique_left)): - labels = lookup_target[n, right_inverse] - lookup = labels >= 0 - row_locations[n] = np.nonzero(lookup)[0] - final_relevant_labels[n] = labels[lookup] - charge_labels = np.concatenate( - [final_relevant_labels[n] for n in left_inverse]) - - inds = np.concatenate([ - n * len(right_charges) + row_locations[left_inverse[n]] - for n in range(len(left_charges)) - ]) - obj = charges[0].__new__(type(charges[0])) - obj.__init__(relevant_charges.unique_charges, charge_labels, - charges[0].charge_types) - return obj, inds - - else: - final_relevant_labels = [None] * len(unique_left) - for n in range(len(unique_left)): - labels = lookup_target[n, right_inverse] - lookup = labels >= 0 - final_relevant_labels[n] = labels[lookup] - charge_labels = np.concatenate( - [final_relevant_labels[n] for n in left_inverse]) - obj = charges[0].__new__(type(charges[0])) - obj.__init__(relevant_charges.unique_charges, charge_labels, - charges[0].charge_types) - return obj - - -def find_sparse_positions_new(charges: List[BaseCharge], - flows: List[Union[int, bool]], - target_charges: BaseCharge, - strides: Optional[np.ndarray] = None, - store_dual: Optional[bool] = False) -> np.ndarray: - """ - Find the dense locations of elements (i.e. the index-values within the DENSE tensor) - in the vector of `fused_charges` resulting from fusing all elements of `charges` - that have a value of `target_charge`. 
- For example, given - ``` - charges = [[-2,0,1,0,0],[-1,0,2,1]] - target_charge = 0 - fused_charges = fuse_charges(charges,[1,1]) - print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] - ``` - we want to find the index-positions of charges - that fuse to `target_charge=0`, i.e. where `fused_charges==0`, - within the dense array. As one additional wrinkle, `charges` - is a subset of the permuted charges of a tensor with rank R > len(charges), - and `stride_arrays` are their corresponding range of strides, i.e. - - ``` - R=5 - D = [2,3,4,5,6] - tensor_flows = np.random.randint(-1,2,R) - tensor_charges = [np.random.randing(-5,5,D[n]) for n in range(R)] - order = np.arange(R) - np.random.shuffle(order) - tensor_strides = [360, 120, 30, 6, 1] - - charges = [tensor_charges[order[n]] for n in range(3)] - flows = [tensor_flows[order[n]] for n in range(len(3))] - strides = [tensor_stride[order[n]] for n in range(3)] - _ = _find_transposed_dense_positions(charges, flows, 0, strides) - - ``` - `_find_transposed_dense_blocks` returns an np.ndarray containing the - index-positions of these elements calculated using `stride_arrays`. - The result only makes sense in conjuction with the complementary - data computed from the complementary - elements in`tensor_charges`, - `tensor_strides` and `tensor_flows`. - This routine is mainly used in `_find_diagonal_dense_blocks`. - - Args: - charges: A list of BaseCharge or ChargeCollection. - flows: The flow directions of the `charges`. - target_charge: The target charge. - strides: The strides for the `charges` subset. - if `None`, natural stride ordering is assumed. - - Returns: - np.ndarray: The index-positions within the dense data array - of the elements fusing to `target_charge`. 
- """ - - _check_flows(flows) - if len(charges) == 1: - fused_charges = charges[0] * flows[0] - unique, inverse = fused_charges.unique(return_inverse=True) - common, label_to_unique, label_to_target = unique.intersect( - target_charges, return_indices=True) - inds = np.nonzero(np.isin(inverse, label_to_unique))[0] - if strides is not None: - permuted_inds = strides[0] * np.arange(len(charges[0])) - return fused_charges[permuted_inds[inds]], inds - - return fused_charges[inds], inds - - partition = _find_best_partition([len(c) for c in charges]) - left_charges = fuse_charges(charges[:partition], flows[:partition]) - right_charges = fuse_charges(charges[partition:], flows[partition:]) - - # unique_target_charges, inds = target_charges.unique(return_index=True) - # target_charges = target_charges[np.sort(inds)] - unique_left, left_inverse = left_charges.unique(return_inverse=True) - unique_right, right_inverse, right_degens = right_charges.unique( - return_inverse=True, return_counts=True) - - fused = unique_left + unique_right - - unique_fused, labels_fused = fused.unique(return_inverse=True) - - relevant_charges, label_to_unique_fused, label_to_target = unique_fused.intersect( - target_charges, return_indices=True) - - relevant_fused_positions = np.nonzero( - np.isin(labels_fused, label_to_unique_fused))[0] - relevant_left_labels, relevant_right_labels = np.divmod( - relevant_fused_positions, len(unique_right)) - rel_l_labels = np.unique(relevant_left_labels) - total_degen = { - t: np.sum(right_degens[relevant_right_labels[relevant_left_labels == t]]) - for t in rel_l_labels - } - - relevant_left_inverse = left_inverse[np.isin(left_inverse, rel_l_labels)] - degeneracy_vector = np.empty(len(relevant_left_inverse), dtype=np.uint32) - row_locations = [None] * len(unique_left) - final_relevant_labels = [None] * len(unique_left) - for n in range(len(relevant_left_labels)): - degeneracy_vector[relevant_left_inverse == - relevant_left_labels[n]] = total_degen[ - 
relevant_left_labels[n]] - start_positions = np.cumsum(degeneracy_vector) - degeneracy_vector - tmp = np.full(len(unique_fused), fill_value=-1, dtype=np.int16) - tmp[label_to_unique_fused] = np.arange( - len(label_to_unique_fused), dtype=np.int16) - lookup_target = tmp[labels_fused].reshape( - [len(unique_left), len(unique_right)]) - - final_relevant_labels = [None] * len(unique_left) - for n in range(len(rel_l_labels)): - labels = lookup_target[rel_l_labels[n], right_inverse] - lookup = labels >= 0 - final_relevant_labels[rel_l_labels[n]] = labels[lookup] - charge_labels = np.concatenate( - [final_relevant_labels[n] for n in relevant_left_inverse]) - inds = np.concatenate([ - start_positions[n] + - np.arange(total_degen[relevant_left_inverse[n]], dtype=np.uint32) - for n in range(len(relevant_left_inverse)) - ]) - - return relevant_charges[charge_labels], inds - - -def find_sparse_positions(charges: List[BaseCharge], - flows: List[Union[int, bool]], - target_charges: BaseCharge) -> Dict: - """ - Find the sparse locations of elements (i.e. the index-values within - the SPARSE tensor) in the vector `fused_charges` (resulting from - fusing `left_charges` and `right_charges`) - that have a value of `target_charges`, assuming that all elements - different from `target_charges` are `0`. - For example, given - ``` - left_charges = [-2,0,1,0,0] - right_charges = [-1,0,2,1] - target_charges = [0,1] - fused_charges = fuse_charges([left_charges, right_charges],[1,1]) - print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] - ``` 0 1 2 3 4 5 6 7 8 - we want to find the all different blocks - that fuse to `target_charges=[0,1]`, i.e. where `fused_charges==0` or `1`, - together with their corresponding sparse index-values of the data in the sparse array, - assuming that all elements in `fused_charges` different from `target_charges` are 0. 
- - `find_sparse_blocks` returns a dict mapping integers `target_charge` - to an array of integers denoting the sparse locations of elements within - `fused_charges`. - For the above example, we get: - * `target_charge=0`: [0,1,3,5,7] - * `target_charge=1`: [2,4,6,8] - Args: - charges: An np.ndarray of integer charges. - flows: The flow direction of the left charges. - target_charges: The target charges. - Returns: - dict: Mapping integers to np.ndarray of integers. - """ - _check_flows(flows) - if len(charges) == 1: - fused_charges = charges[0] * flows[0] - unique_charges = fused_charges.unique() - target_charges = target_charges.unique() - relevant_target_charges = unique_charges.intersect(target_charges) - relevant_fused_charges = fused_charges[fused_charges.isin( - relevant_target_charges)] - return { - c: np.nonzero(relevant_fused_charges == c)[0] - for c in relevant_target_charges - } - partition = _find_best_partition([len(c) for c in charges]) - left_charges = fuse_charges(charges[:partition], flows[:partition]) - right_charges = fuse_charges(charges[partition:], flows[partition:]) - - # unique_target_charges, inds = target_charges.unique(return_index=True) - # target_charges = target_charges[np.sort(inds)] - unique_left, left_inverse = left_charges.unique(return_inverse=True) - unique_right, right_inverse, right_dims = right_charges.unique( - return_inverse=True, return_counts=True) - - fused_unique = unique_left + unique_right - unique_inds = np.nonzero(fused_unique == target_charges) - relevant_positions = unique_inds[0].astype(np.uint32) - tmp_inds_left, tmp_inds_right = np.divmod(relevant_positions, - len(unique_right)) - - relevant_unique_left_inds = np.unique(tmp_inds_left) - left_lookup = np.empty(np.max(relevant_unique_left_inds) + 1, dtype=np.uint32) - left_lookup[relevant_unique_left_inds] = np.arange( - len(relevant_unique_left_inds)) - relevant_unique_right_inds = np.unique(tmp_inds_right) - right_lookup = np.empty( - 
np.max(relevant_unique_right_inds) + 1, dtype=np.uint32) - right_lookup[relevant_unique_right_inds] = np.arange( - len(relevant_unique_right_inds)) - - left_charge_labels = np.nonzero( - np.expand_dims(left_inverse, 1) == np.expand_dims( - relevant_unique_left_inds, 0)) - relevant_left_inverse = np.arange(len(left_charge_labels[0])) - - right_charge_labels = np.expand_dims(right_inverse, 1) == np.expand_dims( - relevant_unique_right_inds, 0) - right_block_information = {} - for n in relevant_unique_left_inds: - ri = np.nonzero((unique_left[n] + unique_right).isin(target_charges))[0] - tmp_inds = np.nonzero(right_charge_labels[:, right_lookup[ri]]) - right_block_information[n] = [ri, np.arange(len(tmp_inds[0])), tmp_inds[1]] - - relevant_right_inverse = np.arange(len(right_charge_labels[0])) - - #generate a degeneracy vector which for each value r in relevant_right_charges - #holds the corresponding number of non-zero elements `relevant_right_charges` - #that can add up to `target_charges`. 
- degeneracy_vector = np.empty(len(left_charge_labels[0]), dtype=np.uint32) - for n in range(len(relevant_unique_left_inds)): - degeneracy_vector[relevant_left_inverse[ - left_charge_labels[1] == n]] = np.sum(right_dims[tmp_inds_right[ - tmp_inds_left == relevant_unique_left_inds[n]]]) - - start_positions = (np.cumsum(degeneracy_vector) - degeneracy_vector).astype( - np.uint32) - out = {} - for n in range(len(target_charges)): - block = [] - if len(unique_inds) > 1: - lis, ris = np.divmod(unique_inds[0][unique_inds[1] == n], - len(unique_right)) - else: - lis, ris = np.divmod(unique_inds[0], len(unique_right)) - - for m in range(len(lis)): - ri_tmp, arange, tmp_inds = right_block_information[lis[m]] - block.append( - np.add.outer( - start_positions[relevant_left_inverse[left_charge_labels[1] == - left_lookup[lis[m]]]], - arange[tmp_inds == np.nonzero( - ri_tmp == ris[m])[0]]).ravel().astype(np.uint32)) - out[target_charges[n]] = np.concatenate(block) - return out - - -# def find_dense_positions(charges: List[Union[BaseCharge, ChargeCollection]], -# flows: List[Union[int, bool]], -# target_charges: Union[BaseCharge, ChargeCollection], -# strides: Optional[np.ndarray] = None, -# store_dual: Optional[bool] = False) -> Dict: -# """ -# Find the dense locations of elements (i.e. the index-values within the DENSE tensor) -# in the vector of `fused_charges` resulting from fusing all elements of `charges` -# that have a value of `target_charge`. -# For example, given -# ``` -# charges = [[-2,0,1,0,0],[-1,0,2,1]] -# target_charge = 0 -# fused_charges = fuse_charges(charges,[1,1]) -# print(fused_charges) # [-3,-2,0,-1,-1,0,2,1,0,1,3,2,-1,0,2,1,-1,0,2,1] -# ``` -# we want to find the index-positions of charges -# that fuse to `target_charge=0`, i.e. where `fused_charges==0`, -# within the dense array. 
As one additional wrinkle, `charges` -# is a subset of the permuted charges of a tensor with rank R > len(charges), -# and `stride_arrays` are their corresponding range of strides, i.e. - -# ``` -# R=5 -# D = [2,3,4,5,6] -# tensor_flows = np.random.randint(-1,2,R) -# tensor_charges = [np.random.randing(-5,5,D[n]) for n in range(R)] -# order = np.arange(R) -# np.random.shuffle(order) -# tensor_strides = [360, 120, 30, 6, 1] - -# charges = [tensor_charges[order[n]] for n in range(3)] -# flows = [tensor_flows[order[n]] for n in range(len(3))] -# strides = [tensor_stride[order[n]] for n in range(3)] -# _ = _find_transposed_dense_positions(charges, flows, 0, strides) - -# ``` -# `_find_transposed_dense_blocks` returns an np.ndarray containing the -# index-positions of these elements calculated using `stride_arrays`. -# The result only makes sense in conjuction with the complementary -# data computed from the complementary -# elements in`tensor_charges`, -# `tensor_strides` and `tensor_flows`. -# This routine is mainly used in `_find_diagonal_dense_blocks`. - -# Args: -# charges: A list of BaseCharge or ChargeCollection. -# flows: The flow directions of the `charges`. -# target_charge: The target charge. -# strides: The strides for the `charges` subset. -# if `None`, natural stride ordering is assumed. 
- -# Returns: -# dict -# """ - -# _check_flows(flows) -# out = {} -# if store_dual: -# store_charges = target_charges * (-1) -# else: -# store_charges = target_charges - -# if len(charges) == 1: -# fused_charges = charges[0] * flows[0] -# inds = np.nonzero(fused_charges == target_charges) -# if len(target_charges) > 1: -# for n in range(len(target_charges)): -# i = inds[0][inds[1] == n] -# if len(i) == 0: -# continue -# if strides is not None: -# permuted_inds = strides[0] * np.arange(len(charges[0])) -# out[store_charges.get_item(n)] = permuted_inds[i] -# else: -# out[store_charges.get_item(n)] = i -# return out -# else: -# if strides is not None: -# permuted_inds = strides[0] * np.arange(len(charges[0])) -# out[store_charges.get_item(n)] = permuted_inds[inds[0]] -# else: -# out[store_charges.get_item(n)] = inds[0] -# return out - -# partition = _find_best_partition([len(c) for c in charges]) -# left_charges = fuse_charges(charges[:partition], flows[:partition]) -# right_charges = fuse_charges(charges[partition:], flows[partition:]) -# if strides is not None: -# stride_arrays = [ -# np.arange(len(charges[n])) * strides[n] for n in range(len(charges)) -# ] -# permuted_left_inds = fuse_ndarrays(stride_arrays[0:partition]) -# permuted_right_inds = fuse_ndarrays(stride_arrays[partition:]) - -# # unique_target_charges, inds = target_charges.unique(return_index=True) -# # target_charges = target_charges[np.sort(inds)] -# unique_left, left_inverse = left_charges.unique(return_inverse=True) -# unique_right, right_inverse = right_charges.unique(return_inverse=True) - -# fused_unique = unique_left + unique_right -# unique_inds = np.nonzero(fused_unique == target_charges) - -# relevant_positions = unique_inds[0] -# tmp_inds_left, tmp_inds_right = np.divmod(relevant_positions, -# len(unique_right)) - -# relevant_unique_left_inds = np.unique(tmp_inds_left) -# left_lookup = np.empty(np.max(relevant_unique_left_inds) + 1, dtype=np.uint32) -# 
left_lookup[relevant_unique_left_inds] = np.arange( -# len(relevant_unique_left_inds)) -# relevant_unique_right_inds = np.unique(tmp_inds_right) -# right_lookup = np.empty( -# np.max(relevant_unique_right_inds) + 1, dtype=np.uint32) -# right_lookup[relevant_unique_right_inds] = np.arange( -# len(relevant_unique_right_inds)) - -# left_charge_labels = np.nonzero( -# np.expand_dims(left_inverse, 1) == np.expand_dims( -# relevant_unique_left_inds, 0)) -# right_charge_labels = np.nonzero( -# np.expand_dims(right_inverse, 1) == np.expand_dims( -# relevant_unique_right_inds, 0)) - -# len_right = len(right_charges) - -# for n in range(len(target_charges)): -# if len(unique_inds) > 1: -# lis, ris = np.divmod(unique_inds[0][unique_inds[1] == n], -# len(unique_right)) -# else: -# lis, ris = np.divmod(unique_inds[0], len(unique_right)) -# dense_positions = [] -# left_positions = [] -# lookup = [] -# for m in range(len(lis)): -# li = lis[m] -# ri = ris[m] -# dense_left_positions = (left_charge_labels[0][ -# left_charge_labels[1] == left_lookup[li]]).astype(np.uint32) -# dense_right_positions = (right_charge_labels[0][ -# right_charge_labels[1] == right_lookup[ri]]).astype(np.uint32) -# if strides is None: -# positions = np.expand_dims(dense_left_positions * len_right, -# 1) + np.expand_dims(dense_right_positions, 0) -# else: -# positions = np.expand_dims( -# permuted_left_inds[dense_left_positions], 1) + np.expand_dims( -# permuted_right_inds[dense_right_positions], 0) - -# dense_positions.append(positions) -# left_positions.append(dense_left_positions) -# lookup.append( -# np.stack([ -# np.arange(len(dense_left_positions), dtype=np.uint32), -# np.full(len(dense_left_positions), fill_value=m, dtype=np.uint32) -# ], -# axis=1)) - -# if len(lookup) > 0: -# ind_sort = np.argsort(np.concatenate(left_positions)) -# it = np.concatenate(lookup, axis=0) -# table = it[ind_sort, :] -# out[store_charges.get_item(n)] = np.concatenate([ -# dense_positions[table[n, 1]][table[n, 0], 
:].astype(np.uint32) -# for n in range(table.shape[0]) -# ]) -# else: -# out[store_charges.get_item(n)] = np.array([]) - -# return out - -# def _find_diagonal_sparse_blocks_old(charges: List[BaseCharge], -# flows: List[Union[bool, int]], -# partition: int) -> Tuple[BaseCharge, List]: -# """ -# Given the meta data and underlying data of a symmetric matrix, compute -# all diagonal blocks and return them in a dict. -# `row_charges` and `column_charges` are lists of np.ndarray. The tensor -# is viewed as a matrix with rows given by fusing `row_charges` and -# columns given by fusing `column_charges`. - -# Args: -# charges: A list of charges. -# flows: A list of flows. -# partition: The location of the partition of `charges` into rows and colums. -# Returns: -# return common_charges, blocks, start_positions, row_locations, column_degeneracies -# List[Union[BaseCharge, ChargeCollection]]: A list of unique charges, one per block. -# List[np.ndarray]: A list containing the blocks. -# """ -# _check_flows(flows) -# if len(flows) != len(charges): -# raise ValueError("`len(flows)` is different from `len(charges)`") -# row_charges = charges[:partition] -# row_flows = flows[:partition] -# column_charges = charges[partition:] -# column_flows = flows[partition:] - -# #get the unique column-charges -# #we only care about their degeneracies, not their order; that's much faster -# #to compute since we don't have to fuse all charges explicitly -# #`compute_fused_charge_degeneracies` multiplies flows into the column_charges -# unique_column_charges, column_dims = compute_fused_charge_degeneracies( -# column_charges, column_flows) -# unique_row_charges = compute_unique_fused_charges(row_charges, row_flows) -# #get the charges common to rows and columns (only those matter) -# common_charges, label_to_row, label_to_column = unique_row_charges.intersect( -# unique_column_charges * True, return_indices=True) - -# #convenience container for storing the degeneracies of each -# #column charge 
-# #column_degeneracies = dict(zip(unique_column_charges, column_dims)) -# #column_degeneracies = dict(zip(unique_column_charges * True, column_dims)) -# print(common_charges) -# row_locations = find_sparse_positions( -# charges=row_charges, flows=row_flows, target_charges=common_charges) - -# degeneracy_vector = np.empty( -# np.sum([len(v) for v in row_locations.values()]), dtype=np.uint32) -# #for each charge `c` in `common_charges` we generate a boolean mask -# #for indexing the positions where `relevant_column_charges` has a value of `c`. -# for c in common_charges: -# degeneracy_vector[row_locations[c]] = column_degeneracies[c] - -# start_positions = (np.cumsum(degeneracy_vector) - degeneracy_vector).astype( -# np.uint32) -# blocks = [] - -# for c in common_charges: -# #numpy broadcasting is substantially faster than kron! -# rlocs = row_locations[c] -# rlocs.sort() #sort in place (we need it again later) -# cdegs = column_degeneracies[c] -# inds = np.ravel(np.add.outer(start_positions[rlocs], np.arange(cdegs))) -# blocks.append([inds, (len(rlocs), cdegs)]) -# return common_charges, blocks From c52e843ea0ecfa2af00b3acca72f41f49c9ba709 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 29 Jan 2020 15:40:27 -0500 Subject: [PATCH 192/212] cleaning up --- tensornetwork/block_tensor/block_tensor.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 9bddb7e81..add050877 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -16,12 +16,9 @@ from __future__ import division from __future__ import print_function import numpy as np -#from tensornetwork.block_tensor.lookup import lookup from tensornetwork.backends import backend_factory -# pylint: disable=line-too-long from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index -# pylint: disable=line-too-long -from 
tensornetwork.block_tensor.charge import fuse_degeneracies, fuse_charges, fuse_degeneracies, BaseCharge, BaseCharge, fuse_ndarray_charges, intersect +from tensornetwork.block_tensor.charge import fuse_degeneracies, fuse_charges, fuse_degeneracies, BaseCharge, fuse_ndarray_charges, intersect import numpy as np import scipy as sp import itertools From 6166ff71b93bac5a5b0475ed6c870098d307d771 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 29 Jan 2020 15:41:02 -0500 Subject: [PATCH 193/212] removed _check_flows --- tensornetwork/block_tensor/block_tensor.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index add050877..7200d99ae 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -81,10 +81,6 @@ def fuse_ndarrays(arrays: List[Union[List, np.ndarray]]) -> np.ndarray: return fused_arrays -def _check_flows(flows: List[int]) -> None: - return - - def _find_best_partition(dims: Iterable[int]) -> int: """ @@ -574,7 +570,6 @@ def __init__(self, data: np.ndarray, indices: List[Index]) -> None: indices: List of `Index` objecst, one for each leg. 
""" self.indices = indices - _check_flows(self.flows) num_non_zero_elements = compute_num_nonzero(self.charges, self.flows) if num_non_zero_elements != len(data.flat): From 41b1488a042522e80411cab6798ce498fd7a252b Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 29 Jan 2020 20:13:56 -0500 Subject: [PATCH 194/212] remove a print --- tensornetwork/block_tensor/charge.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 760453b46..227ffb547 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -99,9 +99,10 @@ def __add__(self, other: "BaseCharge") -> "BaseCharge": # find new labels using broadcasting charge_labels = charge_labels[( - self.charge_labels[:, None] + np.zeros([1, len(other)], dtype=np.int16) - ).ravel(), (other.charge_labels[None, :] + - np.zeros([len(self), 1], dtype=np.int16)).ravel()] + self.charge_labels[:, None] + + np.zeros([1, len(other)], dtype=np.int16)).ravel(), ( + other.charge_labels[None, :] + + np.zeros([len(self), 1], dtype=np.int16)).ravel()] obj = self.__new__(type(self)) obj.__init__(unique_charges, charge_labels, self.charge_types) @@ -299,13 +300,11 @@ def __eq__(self, target_charges.unique_charges[:, target_charges.charge_labels], axis=1) else: - print(isinstance(target_charges, type(self))) - print(type(target_charges), type(self)) targets = np.unique(target_charges, axis=1) inds = np.nonzero( np.logical_and.reduce( - np.expand_dims(self.unique_charges, 2) == np.expand_dims( - targets, 1), + np.expand_dims(self.unique_charges, + 2) == np.expand_dims(targets, 1), axis=0))[0] return np.expand_dims(self.charge_labels, 1) == np.expand_dims(inds, 0) From 56522fdea90803a50d49ddbd56b32d1bcbe9368c Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 29 Jan 2020 21:00:16 -0500 Subject: [PATCH 195/212] fix bug --- tensornetwork/block_tensor/charge.py | 2 ++ 1 file changed, 2 insertions(+) diff 
--git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 227ffb547..5dc5244be 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -300,6 +300,8 @@ def __eq__(self, target_charges.unique_charges[:, target_charges.charge_labels], axis=1) else: + if target_charges.ndim == 1: + target_charges = np.expand_dims(target_charges, 0) targets = np.unique(target_charges, axis=1) inds = np.nonzero( np.logical_and.reduce( From 830ad64db2d652f1028bae6a4b40ac3cb835deba Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 29 Jan 2020 21:00:54 -0500 Subject: [PATCH 196/212] nothing --- tensornetwork/block_tensor/charge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index 5dc5244be..60c503765 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -289,7 +289,7 @@ def __getitem__(self, n: Union[np.ndarray, int]) -> "BaseCharge": if isinstance(n, (np.integer, int)): n = np.asarray([n]) obj = self.__new__(type(self)) - obj.__init__(self.unique_charges, self.charge_labels[n], self.charge_types) + obj.__init__(self.unique_charges, self.charge_labels[n], self.charge_types return obj def __eq__(self, From d402758513361731de1a6cb29a2ec785033e8d69 Mon Sep 17 00:00:00 2001 From: mganahl Date: Wed, 29 Jan 2020 23:47:37 -0500 Subject: [PATCH 197/212] fix big in flatten_meta_data --- tensornetwork/block_tensor/block_tensor.py | 113 +++++++++++++-------- 1 file changed, 72 insertions(+), 41 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 7200d99ae..5c7d2ed93 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -101,9 +101,9 @@ def _find_best_partition(dims: Iterable[int]) -> int: return min_ind + 1 -def compute_fused_charge_degeneracies(charges: List[BaseCharge], 
- flows: List[bool] - ) -> Tuple[BaseCharge, np.ndarray]: +def compute_fused_charge_degeneracies( + charges: List[BaseCharge], + flows: List[bool]) -> Tuple[BaseCharge, np.ndarray]: """ For a list of charges, compute all possible fused charges resulting from fusing `charges`, together with their respective degeneracies @@ -125,9 +125,8 @@ def compute_fused_charge_degeneracies(charges: List[BaseCharge], # get unique charges and their degeneracies on the first leg. # We are fusing from "left" to "right". - accumulated_charges, accumulated_degeneracies = (charges[0] * - flows[0]).unique( - return_counts=True) + accumulated_charges, accumulated_degeneracies = ( + charges[0] * flows[0]).unique(return_counts=True) for n in range(1, len(charges)): leg_charges, leg_degeneracies = charges[n].unique(return_counts=True) fused_charges = accumulated_charges + leg_charges * flows[n] @@ -144,9 +143,9 @@ def compute_fused_charge_degeneracies(charges: List[BaseCharge], return accumulated_charges, accumulated_degeneracies -def compute_unique_fused_charges(charges: List[BaseCharge], - flows: List[Union[bool, int]] - ) -> Tuple[BaseCharge, np.ndarray]: +def compute_unique_fused_charges( + charges: List[BaseCharge], + flows: List[Union[bool, int]]) -> Tuple[BaseCharge, np.ndarray]: """ For a list of charges, compute all possible fused charges resulting from fusing `charges`. 
@@ -201,12 +200,12 @@ def compute_num_nonzero(charges: List[BaseCharge], flows: List[bool]) -> int: return np.squeeze(accumulated_degeneracies[nz_inds][0]) -def reduce_charges(charges: List[BaseCharge], - flows: Iterable[bool], - target_charges: np.ndarray, - return_locations: Optional[bool] = False, - strides: Optional[np.ndarray] = None - ) -> Tuple[BaseCharge, np.ndarray]: +def reduce_charges( + charges: List[BaseCharge], + flows: Iterable[bool], + target_charges: np.ndarray, + return_locations: Optional[bool] = False, + strides: Optional[np.ndarray] = None) -> Tuple[BaseCharge, np.ndarray]: """ Add quantum numbers arising from combining two or more charges into a single index, keeping only the quantum numbers that appear in 'target_charges'. @@ -322,9 +321,9 @@ def reduce_charges(charges: List[BaseCharge], return obj -def _find_diagonal_sparse_blocks(charges: List[BaseCharge], flows: np.ndarray, - partition: int - ) -> (np.ndarray, np.ndarray, np.ndarray): +def _find_diagonal_sparse_blocks( + charges: List[BaseCharge], flows: np.ndarray, + partition: int) -> (np.ndarray, np.ndarray, np.ndarray): """ Find the location of all non-trivial symmetry blocks from the data vector of of SymTensor (when viewed as a matrix across some prescribed index @@ -390,9 +389,9 @@ def _find_diagonal_sparse_blocks(charges: List[BaseCharge], flows: np.ndarray, # calculate mappings for the position in datavector of each block if num_blocks < 15: # faster method for small number of blocks - row_locs = np.concatenate([ - (row_ind.charge_labels == n) for n in range(num_blocks) - ]).reshape(num_blocks, row_ind.dim) + row_locs = np.concatenate( + [(row_ind.charge_labels == n) for n in range(num_blocks)]).reshape( + num_blocks, row_ind.dim) else: # faster method for large number of blocks row_locs = np.zeros([num_blocks, row_ind.dim], dtype=bool) @@ -405,9 +404,8 @@ def _find_diagonal_sparse_blocks(charges: List[BaseCharge], flows: np.ndarray, [[row_degen[row_to_block[n]], 
col_degen[col_to_block[n]]] for n in range(num_blocks)], dtype=np.uint32).T - block_maps = [(cumulate_num_nz[row_locs[n, :]][:, None] + - np.arange(block_dims[1, n])[None, :]).ravel() - for n in range(num_blocks)] + block_maps = [(cumulate_num_nz[row_locs[n, :]][:, None] + np.arange( + block_dims[1, n])[None, :]).ravel() for n in range(num_blocks)] obj = charges[0].__new__(type(charges[0])) obj.__init__(block_qnums, np.arange(block_qnums.shape[1], dtype=np.int16), charges[0].charge_types) @@ -727,8 +725,8 @@ def transpose( #check for trivial permutation if np.all(order == np.arange(len(order))): return self - flat_indices, flat_charges, flat_flows, _, flat_order, _ = flatten_meta_data( - self.indices, order, 0) + flat_indices, flat_charges, flat_flows, _, flat_order = flatten_meta_data( + self.indices, order) tr_partition = _find_best_partition( [len(flat_charges[n]) for n in flat_order]) @@ -908,11 +906,11 @@ def transpose(tensor: BlockSparseTensor, return result -def tensordot(tensor1: BlockSparseTensor, - tensor2: BlockSparseTensor, - axes: Sequence[Sequence[int]], - final_order: Optional[Union[List, np.ndarray]] = None - ) -> BlockSparseTensor: +def tensordot( + tensor1: BlockSparseTensor, + tensor2: BlockSparseTensor, + axes: Sequence[Sequence[int]], + final_order: Optional[Union[List, np.ndarray]] = None) -> BlockSparseTensor: """ Contract two `BlockSparseTensor`s along `axes`. 
Args: @@ -977,6 +975,13 @@ def tensordot(tensor1: BlockSparseTensor, new_order1 = free_axes1 + list(axes1) new_order2 = list(axes2) + free_axes2 + contr_flat_indices_1 = [] + for n in axes1: + contr_flat_indices_1.extend(tensor1.indices[n].get_elementary_indices()) + + contr_flat_indices_2 = [] + for n in axes2: + contr_flat_indices_2.extend(tensor2.indices[n].get_elementary_indices()) #get the flattened indices for the output tensor left_indices = [] right_indices = [] @@ -984,17 +989,30 @@ def tensordot(tensor1: BlockSparseTensor, left_indices.extend(tensor1.indices[n].get_elementary_indices()) for n in free_axes2: right_indices.extend(tensor2.indices[n].get_elementary_indices()) + indices = left_indices + right_indices - _, flat_charges1, flat_flows1, flat_strides1, flat_order1, tr_partition1 = flatten_meta_data( - tensor1.indices, new_order1, len(free_axes1)) + flat_charges1 = [i.charges for i in left_indices + ] + [i.charges for i in contr_flat_indices_1] + flat_flows1 = [i.flow for i in left_indices + ] + [i.flow for i in contr_flat_indices_1] + + flat_charges2 = [i.charges for i in contr_flat_indices_2 + ] + [i.charges for i in right_indices] + + flat_flows2 = [i.flow for i in contr_flat_indices_2 + ] + [i.flow for i in right_indices] + + flat_order1 = new_flat_order(tensor1.indices, new_order1) + flat_order2 = new_flat_order(tensor2.indices, new_order2) + + tr_partition1 = len(left_indices) + tr_partition2 = len(contr_flat_indices_2) tr_sparse_blocks_1, charges1, shapes_1 = _find_transposed_diagonal_sparse_blocks( flat_charges1, flat_flows1, tr_partition1, flat_order1) - _, flat_charges2, flat_flows2, flat_strides2, flat_order2, tr_partition2 = flatten_meta_data( - tensor2.indices, new_order2, len(axes2)) tr_sparse_blocks_2, charges2, shapes_2 = _find_transposed_diagonal_sparse_blocks( flat_charges2, flat_flows2, tr_partition2, flat_order2) - #common_charges = charges1.intersect(charges2) + common_charges, label_to_common_1, label_to_common_2 = intersect( 
charges1.unique_charges, charges2.unique_charges, @@ -1062,14 +1080,27 @@ def tensordot(tensor1: BlockSparseTensor, return BlockSparseTensor(data=data, indices=indices) -def flatten_meta_data(indices, order, partition): +def new_flat_order(indices, order): + elementary_indices = {} + flat_elementary_indices = [] + for n in range(len(indices)): + elementary_indices[n] = indices[n].get_elementary_indices() + flat_elementary_indices.extend(elementary_indices[n]) + flat_index_list = np.arange(len(flat_elementary_indices)) + cum_num_legs = np.append( + 0, np.cumsum([len(elementary_indices[n]) for n in range(len(indices))])) + + flat_order = np.concatenate( + [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) + + return flat_order + + +def flatten_meta_data(indices, order): elementary_indices = {} flat_elementary_indices = [] - new_partition = 0 for n in range(len(indices)): elementary_indices[n] = indices[n].get_elementary_indices() - if n < partition: - new_partition += len(elementary_indices[n]) flat_elementary_indices.extend(elementary_indices[n]) flat_index_list = np.arange(len(flat_elementary_indices)) cum_num_legs = np.append( @@ -1082,4 +1113,4 @@ def flatten_meta_data(indices, order, partition): flat_order = np.concatenate( [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) - return flat_elementary_indices, flat_charges, flat_flows, flat_strides, flat_order, new_partition + return flat_elementary_indices, flat_charges, flat_flows, flat_strides, flat_order From 9615d105d512685883cf4cd2af91c59037886acd Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 31 Jan 2020 09:43:54 -0500 Subject: [PATCH 198/212] added bunch of tests --- .../block_tensor/block_tensor_test.py | 372 ++++++------------ 1 file changed, 129 insertions(+), 243 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor_test.py b/tensornetwork/block_tensor/block_tensor_test.py index 2c8ada9c0..adc77fc6a 100644 --- 
a/tensornetwork/block_tensor/block_tensor_test.py +++ b/tensornetwork/block_tensor/block_tensor_test.py @@ -8,6 +8,52 @@ np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128] +def get_contractable_tensors(R1, R2, cont): + DsA = np.random.randint(5, 10, R1) + DsB = np.random.randint(5, 10, R2) + assert R1 >= cont + assert R2 >= cont + chargesA = [ + U1Charge(np.random.randint(-5, 5, DsA[n])) for n in range(R1 - cont) + ] + commoncharges = [ + U1Charge(np.random.randint(-5, 5, DsA[n + R1 - cont])) + for n in range(cont) + ] + chargesB = [ + U1Charge(np.random.randint(-5, 5, DsB[n])) for n in range(R2 - cont) + ] + #contracted indices + indsA = np.random.choice(np.arange(R1), cont, replace=False) + indsB = np.random.choice(np.arange(R2), cont, replace=False) + + flowsA = np.full(R1, False, dtype=np.bool) + flowsB = np.full(R2, False, dtype=np.bool) + flowsB[indsB] = True + + indicesA = [None for _ in range(R1)] + indicesB = [None for _ in range(R2)] + for n in range(len(indsA)): + indicesA[indsA[n]] = Index(commoncharges[n], flowsA[indsA[n]]) + indicesB[indsB[n]] = Index(commoncharges[n], flowsB[indsB[n]]) + compA = list(set(np.arange(R1)) - set(indsA)) + compB = list(set(np.arange(R2)) - set(indsB)) + + for n in range(len(compA)): + indicesA[compA[n]] = Index(chargesA[n], flowsA[compA[n]]) + for n in range(len(compB)): + indicesB[compB[n]] = Index(chargesB[n], flowsB[compB[n]]) + indices_final = [] + for n in sorted(compA): + indices_final.append(indicesA[n]) + for n in sorted(compB): + indices_final.append(indicesB[n]) + shapes = tuple([i.dim for i in indices_final]) + A = BlockSparseTensor.random(indices=indicesA) + B = BlockSparseTensor.random(indices=indicesB) + return A, B, indsA, indsB + + @pytest.mark.parametrize("dtype", np_dtypes) def test_block_sparse_init(dtype): D = 10 #bond dimension @@ -23,17 +69,16 @@ def test_block_sparse_init(dtype): Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) for n in range(rank) ] - 
num_elements = compute_num_nonzero([i.charges for i in indices], - [i.flow for i in indices]) + num_elements = compute_num_nonzero(charges, flows) A = BlockSparseTensor.random(indices=indices, dtype=dtype) assert A.dtype == dtype for r in range(rank): - assert A.indices[r].name == 'index{}'.format(r) + assert A.indices[r].name[0] == 'index{}'.format(r) assert A.dense_shape == tuple([D] * rank) assert len(A.data) == num_elements -def test_find_dense_positions(): +def test_reduce_charges(): left_charges = np.asarray([-2, 0, 1, 0, 0]).astype(np.int16) right_charges = np.asarray([-1, 0, 2, 1]).astype(np.int16) target_charge = np.zeros((1, 1), dtype=np.int16) @@ -49,7 +94,7 @@ def test_find_dense_positions(): def test_transpose(): R = 4 - Ds = [20, 3, 4, 5] + Ds = np.random.randint(10, 20, R) final_order = np.arange(R) np.random.shuffle(final_order) charges = [U1Charge(np.random.randint(-5, 5, Ds[n])) for n in range(R)] @@ -58,253 +103,94 @@ def test_transpose(): A = BlockSparseTensor.random(indices=indices) Adense = A.todense() dense_res = np.transpose(Adense, final_order) - A.transpose(final_order) - np.testing.assert_allclose(dense_res, A.todense()) + B = A.transpose(final_order) + np.testing.assert_allclose(dense_res, B.todense()) -def test_tensordot(): +def test_reshape(): R = 4 - DsA = [10, 12, 14, 16] - DsB = [14, 16, 18, 20] - chargesA = [U1Charge(np.random.randint(-5, 5, DsA[n])) for n in range(R // 2)] - commoncharges = [ - U1Charge(np.random.randint(-5, 5, DsA[n + R // 2])) for n in range(R // 2) - ] - chargesB = [ - U1Charge(np.random.randint(-5, 5, DsB[n + R // 2])) for n in range(R // 2) - ] - indsA = np.random.choice(np.arange(R), R // 2, replace=False) - indsB = np.random.choice(np.arange(R), R // 2, replace=False) - flowsA = np.full(R, False, dtype=np.bool) - flowsB = np.full(R, False, dtype=np.bool) + Ds = [3, 4, 5, 6] + charges = [U1Charge(np.random.randint(-5, 5, Ds[n])) for n in range(R)] + flows = np.full(R, fill_value=False, dtype=np.bool) + 
indices = [Index(charges[n], flows[n]) for n in range(R)] + A = BlockSparseTensor.random(indices=indices) + B = A.reshape([Ds[0] * Ds[1], Ds[2], Ds[3]]) + Adense = A.todense() + Bdense = Adense.reshape([Ds[0] * Ds[1], Ds[2], Ds[3]]) + np.testing.assert_allclose(Bdense, B.todense()) - flowsB[indsB] = True - indicesA = [None for _ in range(R)] - indicesB = [None for _ in range(R)] - for n in range(len(indsA)): - indicesA[indsA[n]] = Index(commoncharges[n], flowsA[indsA[n]]) - indicesB[indsB[n]] = Index(commoncharges[n], flowsB[indsB[n]]) - compA = list(set(np.arange(R)) - set(indsA)) - compB = list(set(np.arange(R)) - set(indsB)) - for n in range(len(compA)): - indicesA[compA[n]] = Index(chargesA[n], flowsA[compA[n]]) - indicesB[compB[n]] = Index(chargesB[n], flowsB[compB[n]]) - indices_final = [] - for n in sorted(compA): - indices_final.append(indicesA[n]) - for n in sorted(compB): - indices_final.append(indicesB[n]) - shapes = tuple([i.dimension for i in indices_final]) - A = BlockSparseTensor.random(indices=indicesA) - B = BlockSparseTensor.random(indices=indicesB) +def test_reshape_transpose(): + R = 4 + Ds = [3, 4, 5, 6] + charges = [U1Charge(np.random.randint(-5, 5, Ds[n])) for n in range(R)] + flows = np.full(R, fill_value=False, dtype=np.bool) + indices = [Index(charges[n], flows[n]) for n in range(R)] + A = BlockSparseTensor.random(indices=indices) + B = A.reshape([Ds[0] * Ds[1], Ds[2], Ds[3]]).transpose([2, 0, 1]) + dense = A.todense().reshape([Ds[0] * Ds[1], Ds[2], + Ds[3]]).transpose([2, 0, 1]) + np.testing.assert_allclose(dense, B.todense()) - final_order = np.arange(R) + +@pytest.mark.parametrize("R1, R2, cont", [(4, 4, 2), (4, 3, 3), (3, 4, 3)]) +def test_tensordot(R1, R2, cont): + A, B, indsA, indsB = get_contractable_tensors(R1, R2, cont) + res = tensordot(A, B, (indsA, indsB)) + dense_res = np.tensordot(A.todense(), B.todense(), (indsA, indsB)) + np.testing.assert_allclose(dense_res, res.todense()) + + +def test_tensordot_reshape(): + R1 = 4 + R2 = 
4 + + q = np.random.randint(-5, 5, 10, dtype=np.int16) + charges1 = [U1Charge(q) for n in range(R1)] + charges2 = [U1Charge(q) for n in range(R2)] + flowsA = np.asarray([False] * R1) + flowsB = np.asarray([True] * R2) + A = BlockSparseTensor.random(indices=[ + Index(charges1[n], flowsA[n], name='a{}'.format(n)) for n in range(R1) + ]) + B = BlockSparseTensor.random(indices=[ + Index(charges2[n], flowsB[n], name='b{}'.format(n)) for n in range(R2) + ]) + + Adense = A.todense().reshape((10, 10 * 10, 10)) + Bdense = B.todense().reshape((10 * 10, 10, 10)) + + A = A.reshape((10, 10 * 10, 10)) + B = B.reshape((10 * 10, 10, 10)) + + res = tensordot(A, B, ([0, 1], [2, 0])) + dense = np.tensordot(Adense, Bdense, ([0, 1], [2, 0])) + np.testing.assert_allclose(dense, res.todense()) + + +@pytest.mark.parametrize("R1, R2, cont", [(4, 4, 2), (4, 3, 3), (3, 4, 3)]) +def test_tensordot_final_order(R1, R2, cont): + A, B, indsA, indsB = get_contractable_tensors(R1, R2, cont) + final_order = np.arange(R1 + R2 - 2 * cont) np.random.shuffle(final_order) - Adense = A.todense() - Bdense = B.todense() + res = tensordot(A, B, (indsA, indsB), final_order=final_order) dense_res = np.transpose( - np.tensordot(Adense, Bdense, (indsA, indsB)), final_order) + np.tensordot(A.todense(), B.todense(), (indsA, indsB)), final_order) + np.testing.assert_allclose(dense_res, res.todense()) - res = tensordot(A, B, (indsA, indsB), final_order=final_order) + +@pytest.mark.parametrize("R1, R2", [(2, 2), (3, 3), (4, 4), (1, 1)]) +def test_tensordot_inner(R1, R2): + + A, B, indsA, indsB = get_contractable_tensors(R1, R2, 0) + res = tensordot(A, B, (indsA, indsB)) + dense_res = np.tensordot(A.todense(), B.todense(), (indsA, indsB)) np.testing.assert_allclose(dense_res, res.todense()) -# def test_find_dense_positions_2(): -# D = 40 #bond dimension -# B = 4 #number of blocks -# dtype = np.int16 #the dtype of the quantum numbers -# rank = 4 -# flows = np.asarray([1 for _ in range(rank)]) -# flows[-2::] = -1 -# 
charges = [ -# np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) -# for _ in range(rank) -# ] -# indices = [ -# Index( -# charges=U1Charge(charges[n]), flow=flows[n], name='index{}'.format(n)) -# for n in range(rank) -# ] -# n1 = compute_num_nonzero([i.charges for i in indices], -# [i.flow for i in indices]) - -# i01 = indices[0] * indices[1] -# i23 = indices[2] * indices[3] -# positions = find_dense_positions([i01.charges, i23.charges], [1, 1], -# U1Charge(np.asarray([0]))) -# assert len(positions[0]) == n1 - -# def test_find_sparse_positions(): -# D = 40 #bond dimension -# B = 4 #number of blocks -# dtype = np.int16 #the dtype of the quantum numbers -# rank = 4 -# flows = np.asarray([1 for _ in range(rank)]) -# flows[-2::] = -1 -# charges = [ -# np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) -# for _ in range(rank) -# ] -# indices = [ -# Index( -# charges=U1Charge(charges[n]), flow=flows[n], name='index{}'.format(n)) -# for n in range(rank) -# ] -# n1 = compute_num_nonzero([i.charges for i in indices], -# [i.flow for i in indices]) -# i01 = indices[0] * indices[1] -# i23 = indices[2] * indices[3] -# unique_row_charges = np.unique(i01.charges.charges) -# unique_column_charges = np.unique(i23.charges.charges) -# common_charges = np.intersect1d( -# unique_row_charges, -unique_column_charges, assume_unique=True) -# blocks = find_sparse_positions([i01.charges, i23.charges], [1, 1], -# target_charges=U1Charge(np.asarray([0]))) -# assert sum([len(v) for v in blocks.values()]) == n1 -# np.testing.assert_allclose(np.sort(blocks[0]), np.arange(n1)) - -# def test_find_sparse_positions_2(): -# D = 1000 #bond dimension -# B = 4 #number of blocks -# dtype = np.int16 #the dtype of the quantum numbers -# charges = np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) -# index = Index(charges=U1Charge(charges), flow=1, name='index0') -# targets = np.asarray([-1, 0, 1]) -# blocks = find_sparse_positions([index.charges], [index.flow], -# 
target_charges=U1Charge(targets)) - -# inds = np.isin(charges, targets) -# relevant_charges = charges[inds] -# blocks_ = {t: np.nonzero(relevant_charges == t)[0] for t in targets} -# assert np.all( -# np.asarray(list(blocks.keys())) == np.asarray(list(blocks_.keys()))) -# for k in blocks.keys(): -# assert np.all(blocks[k] == blocks_[k]) - -# def test_find_sparse_positions_3(): -# D = 40 #bond dimension -# B = 4 #number of blocks -# dtype = np.int16 #the dtype of the quantum numbers -# flows = [1, -1] - -# rank = len(flows) -# charges = [ -# np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype) -# for _ in range(rank) -# ] -# indices = [ -# Index( -# charges=U1Charge(charges[n]), flow=flows[n], name='index{}'.format(n)) -# for n in range(rank) -# ] -# i1, i2 = indices -# common_charges = np.intersect1d(i1.charges.charges, i2.charges.charges) -# row_locations = find_sparse_positions( -# charges=[i1.charges, i2.charges], -# flows=flows, -# target_charges=U1Charge(common_charges)) -# fused = (i1 * i2).charges -# relevant = fused.charges[np.isin(fused.charges, common_charges)] -# for k, v in row_locations.items(): -# np.testing.assert_allclose(np.nonzero(relevant == k)[0], np.sort(v)) - -# # def test_dense_transpose(): -# # Ds = [10, 11, 12] #bond dimension -# # rank = len(Ds) -# # flows = np.asarray([1 for _ in range(rank)]) -# # flows[-2::] = -1 -# # charges = [U1Charge(np.zeros(Ds[n], dtype=np.int16)) for n in range(rank)] -# # indices = [ -# # Index(charges=charges[n], flow=flows[n], name='index{}'.format(n)) -# # for n in range(rank) -# # ] -# # A = BlockSparseTensor.random(indices=indices, dtype=np.float64) -# # B = np.transpose(np.reshape(A.data.copy(), Ds), (1, 0, 2)) -# # A.transpose((1, 0, 2)) -# # np.testing.assert_allclose(A.data, B.flat) - -# # B = np.transpose(np.reshape(A.data.copy(), [11, 10, 12]), (1, 0, 2)) -# # A.transpose((1, 0, 2)) - -# # np.testing.assert_allclose(A.data, B.flat) - -# @pytest.mark.parametrize("R", [1, 2]) -# def 
test_find_diagonal_dense_blocks(R): -# rs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] -# cs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] -# charges = rs + cs - -# left_fused = fuse_charges(charges[0:R], [1] * R) -# right_fused = fuse_charges(charges[R:], [1] * R) -# left_unique = left_fused.unique() -# right_unique = right_fused.unique() -# zero = left_unique.zero_charge -# blocks = {} -# rdim = len(right_fused) -# for lu in left_unique: -# linds = np.nonzero(left_fused == lu)[0] -# rinds = np.nonzero(right_fused == lu * (-1))[0] -# if (len(linds) > 0) and (len(rinds) > 0): -# blocks[lu] = fuse_ndarrays([linds * rdim, rinds]) -# comm, blocks_ = _find_diagonal_dense_blocks(rs, cs, [1] * R, [1] * R) -# for n in range(len(comm)): -# assert np.all(blocks[comm.charges[n]] == blocks_[n][0]) - -# # #@pytest.mark.parametrize("dtype", np_dtypes) -# # def test_find_diagonal_dense_blocks_2(): -# # R = 1 -# # rs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] -# # cs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] -# # charges = rs + cs - -# # left_fused = fuse_charges(charges[0:R], [1] * R) -# # right_fused = fuse_charges(charges[R:], [1] * R) -# # left_unique = left_fused.unique() -# # right_unique = right_fused.unique() -# # zero = left_unique.zero_charge -# # blocks = {} -# # rdim = len(right_fused) -# # for lu in left_unique: -# # linds = np.nonzero(left_fused == lu)[0] -# # rinds = np.nonzero(right_fused == lu * (-1))[0] -# # if (len(linds) > 0) and (len(rinds) > 0): -# # blocks[lu] = fuse_ndarrays([linds * rdim, rinds]) -# # comm, blocks_ = _find_diagonal_dense_blocks(rs, cs, [1] * R, [1] * R) -# # for n in range(len(comm)): -# # assert np.all(blocks[comm.charges[n]] == blocks_[n][0]) - -# @pytest.mark.parametrize("R", [1, 2]) -# def test_find_diagonal_dense_blocks_transposed(R): -# order = np.arange(2 * R) -# np.random.shuffle(order) -# rs = [U1Charge(np.random.randint(-4, 4, 50)) for _ in range(R)] -# cs = 
[U1Charge(np.random.randint(-4, 4, 40)) for _ in range(R)] -# charges = rs + cs -# dims = np.asarray([len(c) for c in charges]) -# strides = np.flip(np.append(1, np.cumprod(np.flip(dims[1::])))) -# stride_arrays = [np.arange(dims[n]) * strides[n] for n in range(2 * R)] - -# left_fused = fuse_charges([charges[n] for n in order[0:R]], [1] * R) -# right_fused = fuse_charges([charges[n] for n in order[R:]], [1] * R) -# lstrides = fuse_ndarrays([stride_arrays[n] for n in order[0:R]]) -# rstrides = fuse_ndarrays([stride_arrays[n] for n in order[R:]]) - -# left_unique = left_fused.unique() -# right_unique = right_fused.unique() -# blocks = {} -# rdim = len(right_fused) -# for lu in left_unique: -# linds = np.nonzero(left_fused == lu)[0] -# rinds = np.nonzero(right_fused == lu * (-1))[0] -# if (len(linds) > 0) and (len(rinds) > 0): -# tmp = fuse_ndarrays([linds * rdim, rinds]) -# blocks[lu] = _find_values_in_fused(tmp, lstrides, rstrides) - -# comm, blocks_ = _find_diagonal_dense_blocks([charges[n] for n in order[0:R]], -# [charges[n] for n in order[R:]], -# [1] * R, [1] * R, -# row_strides=strides[order[0:R]], -# column_strides=strides[order[R:]]) -# for n in range(len(comm)): -# assert np.all(blocks[comm.charges[n]] == blocks_[n][0]) +@pytest.mark.parametrize("R1, R2", [(2, 2), (2, 1), (1, 2), (1, 1)]) +def test_tensordot_outer(R1, R2): + A, B, indsA, indsB = get_contractable_tensors(R1, R2, 0) + res = tensordot(A, B, axes=0) + dense_res = np.tensordot(A.todense(), B.todense(), axes=0) + np.testing.assert_allclose(dense_res, res.todense()) From 925a696f56276553128ba45b3ae215cdd5c04e0d Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 31 Jan 2020 09:44:05 -0500 Subject: [PATCH 199/212] added inner and outer product --- tensornetwork/block_tensor/block_tensor.py | 531 ++++++++++++--------- 1 file changed, 296 insertions(+), 235 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 5c7d2ed93..4f0520b34 100644 
--- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -27,6 +27,25 @@ Tensor = Any +def get_flat_order(indices, order): + flat_charges, _ = get_flat_meta_data(indices) + flat_labels = np.arange(len(flat_charges)) + cum_num_legs = np.append(0, np.cumsum([len(i.flat_charges) for i in indices])) + flat_order = np.concatenate( + [flat_labels[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) + + return flat_order + + +def get_flat_meta_data(indices): + charges = [] + flows = [] + for i in indices: + flows.extend(i.flat_flows) + charges.extend(i.flat_charges) + return charges, flows + + def fuse_stride_arrays(dims: np.ndarray, strides: np.ndarray) -> np.ndarray: return fuse_ndarrays([ np.arange(0, strides[n] * dims[n], strides[n], dtype=np.uint32) @@ -193,11 +212,9 @@ def compute_num_nonzero(charges: List[BaseCharge], flows: List[bool]) -> int: charges, flows) res = accumulated_charges == accumulated_charges.identity_charges nz_inds = np.nonzero(res)[0] - if len(nz_inds) == 0: - raise ValueError( - "given leg-charges `charges` and flows `flows` are incompatible " - "with a symmetric tensor") - return np.squeeze(accumulated_degeneracies[nz_inds][0]) + if len(nz_inds) > 0: + return np.squeeze(accumulated_degeneracies[nz_inds][0]) + return 0 def reduce_charges( @@ -326,7 +343,7 @@ def _find_diagonal_sparse_blocks( partition: int) -> (np.ndarray, np.ndarray, np.ndarray): """ Find the location of all non-trivial symmetry blocks from the data vector of - of SymTensor (when viewed as a matrix across some prescribed index + of BlockSparseTensor (when viewed as a matrix across some prescribed index bi-partition). Args: charges (List[SymIndex]): list of SymIndex. 
@@ -355,7 +372,11 @@ def _find_diagonal_sparse_blocks( if partition == len(flows): block_dims = np.flipud(block_dims) - return block_maps, block_qnums, block_dims + obj = charges[0].__new__(type(charges[0])) + obj.__init__(block_qnums, np.arange(block_qnums.shape[1], dtype=np.int16), + charges[0].charge_types) + + return block_maps, obj, block_dims else: unique_row_qnums, row_degen = compute_fused_charge_degeneracies( @@ -419,12 +440,9 @@ def _find_transposed_diagonal_sparse_blocks( tr_partition: int, order: np.ndarray = None) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ - Find the location of all non-trivial symmetry blocks from the data vector of - of SymTensor after transposition (and then viewed as a matrix across some - prescribed index bi-tr_partition). Produces and equivalent result to - retrieve_blocks acting on a transposed SymTensor, but is much faster. + Args: - charges (List[SymIndex]): list of SymIndex. + charges (List[BaseCharge]): List of charges. flows (np.ndarray): vector of bools describing index orientations. tr_partition (int): location of tensor partition (i.e. 
such that the tensor is viewed as a matrix between first partition charges and @@ -444,18 +462,115 @@ def _find_transposed_diagonal_sparse_blocks( # no transpose order return _find_diagonal_sparse_blocks(charges, flows, tr_partition) + # general case: non-trivial transposition is required + num_inds = len(charges) + tensor_dims = np.array([charges[n].dim for n in range(num_inds)], dtype=int) + strides = np.append(np.flip(np.cumprod(np.flip(tensor_dims[1:]))), 1) + + # compute qnums of row/cols in original tensor + orig_partition = _find_best_partition(tensor_dims) + orig_width = np.prod(tensor_dims[orig_partition:]) + + orig_unique_row_qnums = compute_unique_fused_charges(charges[:orig_partition], + flows[:orig_partition]) + orig_unique_col_qnums, orig_col_degen = compute_fused_charge_degeneracies( + charges[orig_partition:], np.logical_not(flows[orig_partition:])) + + orig_block_qnums, row_map, col_map = intersect( + orig_unique_row_qnums.unique_charges, + orig_unique_col_qnums.unique_charges, + axis=1, + return_indices=True) + orig_num_blocks = orig_block_qnums.shape[1] + if orig_num_blocks == 0: + # special case: trivial number of non-zero elements + return [], np.array([], dtype=np.uint32), np.array([], dtype=np.uint32) + + orig_row_ind = fuse_charges(charges[:orig_partition], flows[:orig_partition]) + orig_col_ind = fuse_charges(charges[orig_partition:], + np.logical_not(flows[orig_partition:])) + + inv_row_map = -np.ones( + orig_unique_row_qnums.unique_charges.shape[1], dtype=np.int16) + for n in range(len(row_map)): + inv_row_map[row_map[n]] = n + + all_degens = np.append(orig_col_degen[col_map], + 0)[inv_row_map[orig_row_ind.charge_labels]] + all_cumul_degens = np.cumsum(np.insert(all_degens[:-1], 0, + 0)).astype(np.uint32) + dense_to_sparse = np.zeros(orig_width, dtype=np.uint32) + for n in range(orig_num_blocks): + dense_to_sparse[orig_col_ind.charge_labels == col_map[n]] = np.arange( + orig_col_degen[col_map[n]], dtype=np.uint32) + + # define properties 
of new tensor resulting from transposition + new_strides = strides[order] + new_row_charges = [charges[n] for n in order[:tr_partition]] + new_col_charges = [charges[n] for n in order[tr_partition:]] + new_row_flows = flows[order[:tr_partition]] + new_col_flows = flows[order[tr_partition:]] + + if (tr_partition == 0): + # special case: reshape into row vector + + # compute qnums of row/cols in transposed tensor + unique_col_qnums, new_col_degen = compute_fused_charge_degeneracies( + new_col_charges, np.logical_not(new_col_flows)) + identity_charges = charges[0].identity_charges + block_qnums, new_row_map, new_col_map = intersect( + identity_charges.unique_charges, + unique_col_qnums.unique_charges, + axis=1, + return_indices=True) + block_dims = np.array([[1], new_col_degen[new_col_map]], dtype=np.uint32) + num_blocks = 1 + col_ind, col_locs = reduce_charges( + new_col_charges, + np.logical_not(new_col_flows), + block_qnums, + return_locations=True, + strides=new_strides[tr_partition:]) + + # find location of blocks in transposed tensor (w.r.t positions in original) + orig_row_posR, orig_col_posR = np.divmod( + col_locs[col_ind.charge_labels == 0], orig_width) + block_maps = [(all_cumul_degens[orig_row_posR] + + dense_to_sparse[orig_col_posR]).ravel()] + obj = charges[0].__new__(type(charges[0])) + obj.__init__(block_qnums, np.arange(block_qnums.shape[1], dtype=np.int16), + charges[0].charge_types) + + elif (tr_partition == len(charges)): + # special case: reshape into col vector + + # compute qnums of row/cols in transposed tensor + unique_row_qnums, new_row_degen = compute_fused_charge_degeneracies( + new_row_charges, new_row_flows) + identity_charges = charges[0].identity_charges + block_qnums, new_row_map, new_col_map = intersect( + unique_row_qnums.unique_charges, + identity_charges.unique_charges, + axis=1, + return_indices=True) + block_dims = np.array([new_row_degen[new_row_map], [1]], dtype=np.uint32) + num_blocks = 1 + row_ind, row_locs = reduce_charges( 
+ new_row_charges, + new_row_flows, + block_qnums, + return_locations=True, + strides=new_strides[:tr_partition]) + + # find location of blocks in transposed tensor (w.r.t positions in original) + orig_row_posL, orig_col_posL = np.divmod( + row_locs[row_ind.charge_labels == 0], orig_width) + block_maps = [(all_cumul_degens[orig_row_posL] + + dense_to_sparse[orig_col_posL]).ravel()] + obj = charges[0].__new__(type(charges[0])) + obj.__init__(block_qnums, np.arange(block_qnums.shape[1], dtype=np.int16), + charges[0].charge_types) else: - # non-trivial transposition is required - num_inds = len(charges) - tensor_dims = np.array([charges[n].dim for n in range(num_inds)], dtype=int) - strides = np.append(np.flip(np.cumprod(np.flip(tensor_dims[1:]))), 1) - - # define properties of new tensor resulting from transposition - new_strides = strides[order] - new_row_charges = [charges[n] for n in order[:tr_partition]] - new_col_charges = [charges[n] for n in order[tr_partition:]] - new_row_flows = flows[order[:tr_partition]] - new_col_flows = flows[order[tr_partition:]] unique_row_qnums, new_row_degen = compute_fused_charge_degeneracies( new_row_charges, new_row_flows) @@ -471,6 +586,7 @@ def _find_transposed_diagonal_sparse_blocks( [new_row_degen[new_row_map], new_col_degen[new_col_map]], dtype=np.uint32) num_blocks = len(new_row_map) + row_ind, row_locs = reduce_charges( new_row_charges, new_row_flows, @@ -484,38 +600,6 @@ def _find_transposed_diagonal_sparse_blocks( block_qnums, return_locations=True, strides=new_strides[tr_partition:]) - orig_partition = _find_best_partition(tensor_dims) - orig_width = np.prod(tensor_dims[orig_partition:]) - - orig_unique_row_qnums = compute_unique_fused_charges( - charges[:orig_partition], flows[:orig_partition]) - orig_unique_col_qnums, orig_col_degen = compute_fused_charge_degeneracies( - charges[orig_partition:], np.logical_not(flows[orig_partition:])) - orig_block_qnums, row_map, col_map = intersect( - 
orig_unique_row_qnums.unique_charges, - orig_unique_col_qnums.unique_charges, - axis=1, - return_indices=True) - orig_num_blocks = orig_block_qnums.shape[1] - - orig_row_ind = fuse_charges(charges[:orig_partition], - flows[:orig_partition]) - orig_col_ind = fuse_charges(charges[orig_partition:], - np.logical_not(flows[orig_partition:])) - - inv_row_map = -np.ones( - orig_unique_row_qnums.unique_charges.shape[1], dtype=np.int16) - for n in range(len(row_map)): - inv_row_map[row_map[n]] = n - - all_degens = np.append(orig_col_degen[col_map], - 0)[inv_row_map[orig_row_ind.charge_labels]] - all_cumul_degens = np.cumsum(np.insert(all_degens[:-1], 0, - 0)).astype(np.uint32) - dense_to_sparse = np.zeros(orig_width, dtype=np.uint32) - for n in range(orig_num_blocks): - dense_to_sparse[orig_col_ind.charge_labels == col_map[n]] = np.arange( - orig_col_degen[col_map[n]], dtype=np.uint32) block_maps = [0] * num_blocks for n in range(num_blocks): @@ -530,7 +614,7 @@ def _find_transposed_diagonal_sparse_blocks( obj.__init__(block_qnums, np.arange(block_qnums.shape[1], dtype=np.int16), charges[0].charge_types) - return block_maps, obj, block_dims + return block_maps, obj, block_dims class BlockSparseTensor: @@ -540,25 +624,9 @@ class BlockSparseTensor: The class currently only supports a single U(1) symmetry and only numpy.ndarray. - Attributes: - * self.data: A 1d np.ndarray storing the underlying - data of the tensor - * self.charges: A list of `np.ndarray` of shape - (D,), where D is the bond dimension. Once we go beyond - a single U(1) symmetry, this has to be updated. - - * self.flows: A list of integers of length `k`. - `self.flows` determines the flows direction of charges - on each leg of the tensor. A value of `-1` denotes - outflowing charge, a value of `1` denotes inflowing - charge. - The tensor data is stored in self.data, a 1d np.ndarray. 
""" - def copy(self): - return BlockSparseTensor(self.data.copy(), [i.copy() for i in self.indices]) - def __init__(self, data: np.ndarray, indices: List[Index]) -> None: """ Args: @@ -568,7 +636,8 @@ def __init__(self, data: np.ndarray, indices: List[Index]) -> None: indices: List of `Index` objecst, one for each leg. """ self.indices = indices - num_non_zero_elements = compute_num_nonzero(self.charges, self.flows) + num_non_zero_elements = compute_num_nonzero(self.flat_charges, + self.flat_flows) if num_non_zero_elements != len(data.flat): raise ValueError("number of tensor elements {} defined " @@ -578,18 +647,25 @@ def __init__(self, data: np.ndarray, indices: List[Index]) -> None: self.data = np.asarray(data.flat) #do not copy data + def copy(self): + return BlockSparseTensor(self.data.copy(), [i.copy() for i in self.indices]) + def todense(self) -> np.ndarray: """ Map the sparse tensor to dense storage. """ out = np.asarray(np.zeros(self.dense_shape, dtype=self.dtype).flat) - charges = self.charges + charges = self.flat_charges out[np.nonzero( - fuse_charges(charges, self.flows) == charges[0].identity_charges) + fuse_charges(charges, self.flat_flows) == charges[0].identity_charges) [0]] = self.data return np.reshape(out, self.dense_shape) + @property + def ndim(self): + return len(self.indices) + @classmethod def randn(cls, indices: List[Index], dtype: Optional[Type[np.number]] = None) -> "BlockSparseTensor": @@ -601,8 +677,7 @@ def randn(cls, indices: List[Index], Returns: BlockSparseTensor """ - charges = [i.charges for i in indices] - flows = [i.flow for i in indices] + charges, flows = get_flat_meta_data(indices) num_non_zero_elements = compute_num_nonzero(charges, flows) backend = backend_factory.get_backend('numpy') data = backend.randn((num_non_zero_elements,), dtype=dtype) @@ -619,8 +694,7 @@ def ones(cls, indices: List[Index], Returns: BlockSparseTensor """ - charges = [i.charges for i in indices] - flows = [i.flow for i in indices] + charges, flows 
= get_flat_meta_data(indices) num_non_zero_elements = compute_num_nonzero(charges, flows) backend = backend_factory.get_backend('numpy') data = backend.ones((num_non_zero_elements,), dtype=dtype) @@ -637,8 +711,7 @@ def zeros(cls, indices: List[Index], Returns: BlockSparseTensor """ - charges = [i.charges for i in indices] - flows = [i.flow for i in indices] + charges, flows = get_flat_meta_data(indices) num_non_zero_elements = compute_num_nonzero(charges, flows) backend = backend_factory.get_backend('numpy') data = backend.zeros((num_non_zero_elements,), dtype=dtype) @@ -655,9 +728,7 @@ def random(cls, indices: List[Index], Returns: BlockSparseTensor """ - charges = [i.charges for i in indices] - flows = [i.flow for i in indices] - + charges, flows = get_flat_meta_data(indices) num_non_zero_elements = compute_num_nonzero(charges, flows) dtype = dtype if dtype is not None else np.float64 @@ -683,7 +754,7 @@ def dense_shape(self) -> Tuple: Returns: Tuple: A tuple of `int`. """ - return tuple([i.dimension for i in self.indices]) + return tuple([i.dim for i in self.indices]) @property def shape(self) -> Tuple: @@ -706,6 +777,20 @@ def flows(self): def charges(self): return [i.charges for i in self.indices] + @property + def flat_charges(self): + flat = [] + for i in self.indices: + flat.extend(i.flat_charges) + return flat + + @property + def flat_flows(self): + flat = [] + for i in self.indices: + flat.extend(i.flat_flows) + return flat + def transpose( self, order: Union[List[int], np.ndarray], @@ -724,9 +809,10 @@ def transpose( #check for trivial permutation if np.all(order == np.arange(len(order))): - return self - flat_indices, flat_charges, flat_flows, _, flat_order = flatten_meta_data( - self.indices, order) + return BlockSparseTensor(self.data, self.indices) + flat_charges, flat_flows = get_flat_meta_data(self.indices) + flat_order = get_flat_order(self.indices, order) + print(flat_order) tr_partition = _find_best_partition( [len(flat_charges[n]) for n in 
flat_order]) @@ -742,25 +828,8 @@ def transpose( ind = np.nonzero(tr_charges == charges[n])[0][0] permutation = tr_sparse_blocks[ind] data[sparse_block] = self.data[permutation] - self.indices = [self.indices[o] for o in order] - self.data = data - return self - - def reset_shape(self) -> None: - """ - Bring the tensor back into its elementary shape. - """ - self.indices = self.get_elementary_indices() - def get_elementary_indices(self) -> List: - """ - Compute the elementary indices of the array. - """ - elementary_indices = [] - for i in self.indices: - elementary_indices.extend(i.get_elementary_indices()) - - return elementary_indices + return BlockSparseTensor(data, [self.indices[o] for o in order]) def reshape(self, shape: Union[Iterable[Index], Iterable[int]]) -> None: """ @@ -801,52 +870,38 @@ def reshape(self, shape: Union[Iterable[Index], Iterable[int]]) -> None: new_shape = [] for s in shape: if isinstance(s, Index): - new_shape.append(s.dimension) + new_shape.append(s.dim) else: new_shape.append(s) + # a few simple checks if np.prod(new_shape) != np.prod(self.dense_shape): raise ValueError("A tensor with {} elements cannot be " "reshaped into a tensor with {} elements".format( - np.prod(self.shape), np.prod(self.dense_shape))) - - #keep a copy of the old indices for the case where reshaping fails - #FIXME: this is pretty hacky! - indices = [i.copy() for i in self.indices] - flat_indices = [] - for i in indices: - flat_indices.extend(i.get_elementary_indices()) - - def raise_error(): - #if this error is raised then `shape` is incompatible - #with the elementary indices. We then reset the shape - #to what is was before the call to `reshape`. 
- # self.indices = index_copy - raise ValueError("The shape {} is incompatible with the " - "elementary shape {} of the tensor.".format( - new_shape, - tuple([e.dimension for e in flat_indices]))) + np.prod(self.shape), np.prod(new_shape))) + flat_charges, flat_flows = get_flat_meta_data(self.indices) + flat_dims = [f.dim for f in flat_charges] + + partitions = [0] for n in range(len(new_shape)): - if new_shape[n] > flat_indices[n].dimension: - while new_shape[n] > flat_indices[n].dimension: - #fuse flat_indices - i1, i2 = flat_indices.pop(n), flat_indices.pop(n) - #note: the resulting flow is set to one since the flow - #is multiplied into the charges. As a result the tensor - #will then be invariant in any case. - flat_indices.insert(n, fuse_index_pair(i1, i2)) - if flat_indices[n].dimension > new_shape[n]: - raise_error() - elif new_shape[n] < flat_indices[n].dimension: - raise_error() - #at this point the first len(new_shape) flat_indices of the tensor - #match the `new_shape`. - while len(new_shape) < len(flat_indices): - i2, i1 = flat_indices.pop(), flat_indices.pop() - flat_indices.append(fuse_index_pair(i1, i2)) - - result = BlockSparseTensor(data=self.data, indices=flat_indices) + tmp = np.nonzero(np.cumprod(flat_dims) == new_shape[n])[0] + if len(tmp) == 0: + raise ValueError("The shape {} is incompatible with the " + "elementary shape {} of the tensor.".format( + new_shape, tuple([e.dim for e in flat_charges]))) + + partitions.append(tmp[0] + 1) + flat_dims = flat_dims[partitions[-1]:] + partitions = np.cumsum(partitions) + new_flat_charges = [] + new_flat_flows = [] + for n in range(1, len(partitions)): + new_flat_charges.append(flat_charges[partitions[n - 1]:partitions[n]]) + new_flat_flows.append(flat_flows[partitions[n - 1]:partitions[n]]) + + indices = [Index(c, f) for c, f in zip(new_flat_charges, new_flat_flows)] + result = BlockSparseTensor(data=self.data, indices=indices) return result @@ -901,15 +956,40 @@ def transpose(tensor: 
BlockSparseTensor, Returns: BlockSparseTensor: The transposed tensor. """ - result = tensor.copy() - result.transpose(order) - return result + return tensor.transpose() + + +def outerproduct(tensor1: BlockSparseTensor, + tensor2: BlockSparseTensor) -> BlockSparseTensor: + """ + Compute the outer product of two BlockSparseTensor. + Args: + tensor1: A tensor. + tensor2: A tensor. + Returns: + BlockSparseTensor: The result of taking the outer product. + """ + + final_charges = tensor1.flat_charges + tensor2.flat_charges + final_flows = tensor1.flat_flows + tensor2.flat_flows + data = np.zeros( + compute_num_nonzero(final_charges, final_flows), dtype=tensor1.dtype) + if ((len(tensor1.data) > 0) and (len(tensor2.data) > 0)) and (len(data) > 0): + # find the location of the zero block in the output + final_block_maps, final_block_charges, final_block_dims = _find_diagonal_sparse_blocks( + final_charges, final_flows, len(tensor1.flat_charges)) + index = np.nonzero( + final_block_charges == final_block_charges.identity_charges)[0][0] + data[final_block_maps[index].ravel()] = np.outer(tensor1.data, + tensor2.data).ravel() + + return BlockSparseTensor(data, tensor1.indices + tensor2.indices) def tensordot( tensor1: BlockSparseTensor, tensor2: BlockSparseTensor, - axes: Sequence[Sequence[int]], + axes: Optional[Union[Sequence[Sequence[int]], int]] = 2, final_order: Optional[Union[List, np.ndarray]] = None) -> BlockSparseTensor: """ Contract two `BlockSparseTensor`s along `axes`. @@ -922,6 +1002,14 @@ def tensordot( BlockSparseTensor: The result of the tensor contraction. 
""" + + if isinstance(axes, (np.integer, int)): + axes = [ + np.arange(tensor1.ndim - axes, tensor1.ndim, dtype=np.int16), + np.arange(0, axes, dtype=np.int16) + ] + elif isinstance(axes[0], (np.integer, int)): + axes = [np.array(axes, dtype=np.int16), np.array(axes, dtype=np.int16)] axes1 = axes[0] axes2 = axes[1] if not np.all(np.unique(axes1) == np.sort(axes1)): @@ -931,6 +1019,22 @@ def tensordot( raise ValueError( "Some values in axes[1] = {} appear more than once!".format(axes2n)) + if len(axes1) == 0: + res = outerproduct(tensor1, tensor2) + if final_order is not None: + return res.transpose(final_order) + return res + + if (len(axes1) == tensor1.ndim) and (len(axes2) == tensor2.ndim): + isort = np.argsort(axes1) + data = np.dot(tensor1.data, + tensor2.transpose(np.asarray(axes2)[isort]).data) + if len(tensor1.indices[0].flat_charges) > 0: + identity_charges = tensor1.indices[0].flat_charges[0].identity_charges + + return BlockSparseTensor( + data=data, indices=[Index(identity_charges, flow=False)]) + if max(axes1) >= len(tensor1.shape): raise ValueError( "rank of `tensor1` is smaller than `max(axes1) = {}.`".format( @@ -940,22 +1044,27 @@ def tensordot( raise ValueError( "rank of `tensor2` is smaller than `max(axes2) = {}`".format( max(axes1))) - elementary_1, elementary_2 = [], [] + + contr_flows_1 = [] + contr_flows_2 = [] + contr_charges_1 = [] + contr_charges_2 = [] for a in axes1: - elementary_1.extend(tensor1.indices[a].get_elementary_indices()) + contr_flows_1.extend(tensor1.indices[a].flat_flows) + contr_charges_1.extend(tensor1.indices[a].flat_charges) for a in axes2: - elementary_2.extend(tensor2.indices[a].get_elementary_indices()) + contr_flows_2.extend(tensor2.indices[a].flat_flows) + contr_charges_2.extend(tensor2.indices[a].flat_charges) - if len(elementary_2) != len(elementary_1): - raise ValueError("axes1 and axes2 have incompatible elementary" - " shapes {} and {}".format(elementary_1, elementary_2)) + if len(contr_charges_2) != 
len(contr_charges_1): + raise ValueError( + "axes1 and axes2 have incompatible elementary" + " shapes {} and {}".format([e.dim for e in contr_charges_1], + [e.dim for e in contr_charges_2])) if not np.all( - np.array([i.flow for i in elementary_1]) == np.array( - [not i.flow for i in elementary_2])): + np.asarray(contr_flows_1) == np.logical_not(np.asarray(contr_flows_2))): raise ValueError("axes1 and axes2 have incompatible elementary" - " flows {} and {}".format( - np.array([i.flow for i in elementary_1]), - np.array([i.flow for i in elementary_2]))) + " flows {} and {}".format(contr_flows_1, contr_flows_2)) free_axes1 = sorted(set(np.arange(len(tensor1.shape))) - set(axes1)) free_axes2 = sorted(set(np.arange(len(tensor2.shape))) - set(axes2)) @@ -975,43 +1084,31 @@ def tensordot( new_order1 = free_axes1 + list(axes1) new_order2 = list(axes2) + free_axes2 - contr_flat_indices_1 = [] - for n in axes1: - contr_flat_indices_1.extend(tensor1.indices[n].get_elementary_indices()) + flat_charges_1, flat_flows_1 = get_flat_meta_data(tensor1.indices) + flat_charges_2, flat_flows_2 = get_flat_meta_data(tensor2.indices) + + flat_order_1 = get_flat_order(tensor1.indices, new_order1) + flat_order_2 = get_flat_order(tensor2.indices, new_order2) - contr_flat_indices_2 = [] - for n in axes2: - contr_flat_indices_2.extend(tensor2.indices[n].get_elementary_indices()) - #get the flattened indices for the output tensor - left_indices = [] - right_indices = [] + left_charges = [] + right_charges = [] + left_flows = [] + right_flows = [] + free_indices = [] for n in free_axes1: - left_indices.extend(tensor1.indices[n].get_elementary_indices()) + free_indices.append(tensor1.indices[n]) + left_charges.extend(tensor1.indices[n].flat_charges) + left_flows.extend(tensor1.indices[n].flat_flows) for n in free_axes2: - right_indices.extend(tensor2.indices[n].get_elementary_indices()) - - indices = left_indices + right_indices - - flat_charges1 = [i.charges for i in left_indices - ] + 
[i.charges for i in contr_flat_indices_1] - flat_flows1 = [i.flow for i in left_indices - ] + [i.flow for i in contr_flat_indices_1] - - flat_charges2 = [i.charges for i in contr_flat_indices_2 - ] + [i.charges for i in right_indices] + free_indices.append(tensor2.indices[n]) + right_charges.extend(tensor2.indices[n].flat_charges) + right_flows.extend(tensor2.indices[n].flat_flows) - flat_flows2 = [i.flow for i in contr_flat_indices_2 - ] + [i.flow for i in right_indices] - - flat_order1 = new_flat_order(tensor1.indices, new_order1) - flat_order2 = new_flat_order(tensor2.indices, new_order2) - - tr_partition1 = len(left_indices) - tr_partition2 = len(contr_flat_indices_2) tr_sparse_blocks_1, charges1, shapes_1 = _find_transposed_diagonal_sparse_blocks( - flat_charges1, flat_flows1, tr_partition1, flat_order1) + flat_charges_1, flat_flows_1, len(left_charges), flat_order_1) + tr_sparse_blocks_2, charges2, shapes_2 = _find_transposed_diagonal_sparse_blocks( - flat_charges2, flat_flows2, tr_partition2, flat_order2) + flat_charges_2, flat_flows_2, len(contr_charges_2), flat_order_2) common_charges, label_to_common_1, label_to_common_2 = intersect( charges1.unique_charges, @@ -1023,14 +1120,14 @@ def tensordot( if final_order is not None: #in this case we view the result of the diagonal multiplication #as a transposition of the final tensor - final_indices = [indices[n] for n in final_order] + final_indices = [free_indices[n] for n in final_order] _, reverse_order = np.unique(final_order, return_index=True) - - flat_final_indices, flat_final_charges, flat_final_flows, flat_final_strides, flat_final_order, tr_partition = flatten_meta_data( - final_indices, reverse_order, len(free_axes1)) + flat_reversed_order = get_flat_order(final_indices, reverse_order) + flat_final_charges, flat_final_flows = get_flat_meta_data(final_indices) sparse_blocks_final, charges_final, shapes_final = _find_transposed_diagonal_sparse_blocks( - flat_final_charges, flat_final_flows, 
tr_partition, flat_final_order) + flat_final_charges, flat_final_flows, len(left_charges), + flat_reversed_order) num_nonzero_elements = np.sum([len(v) for v in sparse_blocks_final]) data = np.zeros( @@ -1055,10 +1152,10 @@ def tensordot( return BlockSparseTensor(data=data, indices=final_indices) else: #Note: `cs` may contain charges that are not present in `common_charges` - charges = [i.charges for i in indices] - flows = [i.flow for i in indices] + charges = left_charges + right_charges + flows = left_flows + right_flows sparse_blocks, cs, shapes = _find_diagonal_sparse_blocks( - charges, flows, len(left_indices)) + charges, flows, len(left_charges)) num_nonzero_elements = np.sum([len(v) for v in sparse_blocks]) #Note that empty is not a viable choice here. data = np.zeros( @@ -1077,40 +1174,4 @@ def tensordot( tensor1.data[tr_sparse_blocks_1[n1].reshape( shapes_1[:, n1])] @ tensor2.data[tr_sparse_blocks_2[n2].reshape( shapes_2[:, n2])]).ravel() - return BlockSparseTensor(data=data, indices=indices) - - -def new_flat_order(indices, order): - elementary_indices = {} - flat_elementary_indices = [] - for n in range(len(indices)): - elementary_indices[n] = indices[n].get_elementary_indices() - flat_elementary_indices.extend(elementary_indices[n]) - flat_index_list = np.arange(len(flat_elementary_indices)) - cum_num_legs = np.append( - 0, np.cumsum([len(elementary_indices[n]) for n in range(len(indices))])) - - flat_order = np.concatenate( - [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) - - return flat_order - - -def flatten_meta_data(indices, order): - elementary_indices = {} - flat_elementary_indices = [] - for n in range(len(indices)): - elementary_indices[n] = indices[n].get_elementary_indices() - flat_elementary_indices.extend(elementary_indices[n]) - flat_index_list = np.arange(len(flat_elementary_indices)) - cum_num_legs = np.append( - 0, np.cumsum([len(elementary_indices[n]) for n in range(len(indices))])) - - flat_charges = 
[i.charges for i in flat_elementary_indices] - flat_flows = [i.flow for i in flat_elementary_indices] - flat_dims = [len(c) for c in flat_charges] - flat_strides = _get_strides(flat_dims) - flat_order = np.concatenate( - [flat_index_list[cum_num_legs[n]:cum_num_legs[n + 1]] for n in order]) - - return flat_elementary_indices, flat_charges, flat_flows, flat_strides, flat_order + return BlockSparseTensor(data=data, indices=free_indices) From a7acb5ca3eba3171d20a2d36b03a706fdb0e6fad Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 31 Jan 2020 09:44:26 -0500 Subject: [PATCH 200/212] removed binary tree, switched to a list of charges --- tensornetwork/block_tensor/index.py | 197 ++++++++++++++++++++-------- 1 file changed, 142 insertions(+), 55 deletions(-) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index fc2d033d9..111924b4d 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -16,7 +16,7 @@ from __future__ import division from __future__ import print_function import numpy as np -from tensornetwork.block_tensor.charge import BaseCharge +from tensornetwork.block_tensor.charge import BaseCharge, fuse_charges import copy from typing import List, Union, Any, Optional, Tuple, Text @@ -29,47 +29,27 @@ class Index: """ def __init__(self, - charges: BaseCharge, - flow: int, - name: Optional[Text] = None, - left_child: Optional["Index"] = None, - right_child: Optional["Index"] = None): - self._charges = charges #ChargeCollection([charges]) + charges: Union[List[BaseCharge], BaseCharge], + flow: Union[List[int], int], + name: Optional[Union[List[Text], Text]] = None) -> None: + if isinstance(charges, BaseCharge): + charges = [charges] + self._charges = charges + if isinstance(flow, (np.bool_, bool, np.bool)): + flow = [flow] + if not all([isinstance(f, (np.bool_, np.bool, bool)) for f in flow]): + raise TypeError("flows have to be boolean") self.flow = flow - self.left_child = left_child - 
self.right_child = right_child + if isinstance(name, str): + name = [name] self.name = name def __repr__(self): - return str(self.dimension) + return str(self.dim) @property - def is_leave(self): - return (self.left_child is None) and (self.right_child is None) - - @property - def dimension(self): - return np.prod([len(i.charges) for i in self.get_elementary_indices()]) - - def _copy_helper(self, index: "Index", copied_index: "Index") -> None: - """ - Helper function for copy - """ - if index.left_child != None: - left_copy = Index( - charges=copy.deepcopy(index.left_child.charges), - flow=copy.deepcopy(index.left_child.flow), - name=copy.deepcopy(index.left_child.name)) - - copied_index.left_child = left_copy - self._copy_helper(index.left_child, left_copy) - if index.right_child != None: - right_copy = Index( - charges=copy.deepcopy(index.right_child.charges), - flow=copy.deepcopy(index.right_child.flow), - name=copy.deepcopy(index.right_child.name)) - copied_index.right_child = right_copy - self._copy_helper(index.right_child, right_copy) + def dim(self): + return np.prod([i.dim for i in self._charges]) def copy(self): """ @@ -78,30 +58,28 @@ def copy(self): `Index` are copied as well. """ index_copy = Index( - charges=copy.deepcopy(self._charges), + charges=[c.copy() for c in self._charges], flow=copy.deepcopy(self.flow), - name=self.name) - - self._copy_helper(self, index_copy) + name=copy.deepcopy(self.name)) return index_copy - def _leave_helper(self, index: "Index", leave_list: List) -> None: - if index.left_child: - self._leave_helper(index.left_child, leave_list) - if index.right_child: - self._leave_helper(index.right_child, leave_list) - if (index.left_child is None) and (index.right_child is None): - leave_list.append(index) + @property + def flat_charges(self) -> List: + """ + Returns: + List: A list containing the elementary indices (the leaves) + of `Index`. 
+ """ + return self._charges - def get_elementary_indices(self) -> List: + @property + def flat_flows(self) -> List: """ Returns: List: A list containing the elementary indices (the leaves) of `Index`. """ - leave_list = [] - self._leave_helper(self, leave_list) - return leave_list + return self.flow def __mul__(self, index: "Index") -> "Index": """ @@ -114,9 +92,117 @@ def __mul__(self, index: "Index") -> "Index": @property def charges(self): - if self.is_leave: - return self._charges - return self.left_child.charges * self.left_child.flow + self.right_child.charges * self.right_child.flow + return fuse_charges(self.flat_charges, self.flat_flows) + + """ + An index class to store indices of a symmetric tensor. + An index keeps track of all its childs by storing references + to them (i.e. it is a binary tree). + """ + + +# class Index: +# """ +# An index class to store indices of a symmetric tensor. +# An index keeps track of all its childs by storing references +# to them (i.e. it is a binary tree). 
+# """ + +# def __init__(self, +# charges: BaseCharge, +# flow: int, +# name: Optional[Text] = None, +# left_child: Optional["Index"] = None, +# right_child: Optional["Index"] = None): +# self._charges = charges #ChargeCollection([charges]) +# self.flow = flow +# self.left_child = left_child +# self.right_child = right_child +# self.name = name + +# def __repr__(self): +# return str(self.dimension) + +# @property +# def is_leave(self): +# return (self.left_child is None) and (self.right_child is None) + +# @property +# def dimension(self): +# return np.prod([len(i.charges) for i in self.get_elementary_indices()]) + +# def _copy_helper(self, index: "Index", copied_index: "Index") -> None: +# """ +# Helper function for copy +# """ +# if index.left_child != None: +# left_copy = Index( +# charges=copy.deepcopy(index.left_child.charges), +# flow=copy.deepcopy(index.left_child.flow), +# name=copy.deepcopy(index.left_child.name)) + +# copied_index.left_child = left_copy +# self._copy_helper(index.left_child, left_copy) +# if index.right_child != None: +# right_copy = Index( +# charges=copy.deepcopy(index.right_child.charges), +# flow=copy.deepcopy(index.right_child.flow), +# name=copy.deepcopy(index.right_child.name)) +# copied_index.right_child = right_copy +# self._copy_helper(index.right_child, right_copy) + +# def copy(self): +# """ +# Returns: +# Index: A deep copy of `Index`. Note that all children of +# `Index` are copied as well. 
+# """ +# index_copy = Index( +# charges=copy.deepcopy(self._charges), +# flow=copy.deepcopy(self.flow), +# name=self.name) + +# self._copy_helper(self, index_copy) +# return index_copy + +# def _leave_helper(self, index: "Index", leave_list: List) -> None: +# if index.left_child: +# self._leave_helper(index.left_child, leave_list) +# if index.right_child: +# self._leave_helper(index.right_child, leave_list) +# if (index.left_child is None) and (index.right_child is None): +# leave_list.append(index) + +# def get_elementary_indices(self) -> List: +# """ +# Returns: +# List: A list containing the elementary indices (the leaves) +# of `Index`. +# """ +# leave_list = [] +# self._leave_helper(self, leave_list) +# return leave_list + +# def __mul__(self, index: "Index") -> "Index": +# """ +# Merge `index` and self into a single larger index. +# The flow of the resulting index is set to 1. +# Flows of `self` and `index` are multiplied into +# the charges upon fusing.n +# """ +# return fuse_index_pair(self, index) + +# @property +# def charges(self): +# if self.is_leave: +# return self._charges +# return self.left_child.charges * self.left_child.flow + self.right_child.charges * self.right_child.flow + +# """ +# An index class to store indices of a symmetric tensor. +# An index keeps track of all its childs by storing references +# to them (i.e. it is a binary tree). +# """ def fuse_index_pair(left_index: Index, @@ -137,7 +223,8 @@ def fuse_index_pair(left_index: Index, "index1 and index2 are the same object. 
Can only fuse distinct objects") return Index( - charges=None, flow=flow, left_child=left_index, right_child=right_index) + charges=left_index.flat_charges + right_index.flat_charges, + flow=left_index.flat_flows + right_index.flat_flows) def fuse_indices(indices: List[Index], flow: Optional[int] = False) -> Index: From 919867e46bdf91d88663d0137f988b5964015be3 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 31 Jan 2020 09:45:14 -0500 Subject: [PATCH 201/212] added copy() --- tensornetwork/block_tensor/charge.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tensornetwork/block_tensor/charge.py b/tensornetwork/block_tensor/charge.py index e5cf1d094..3aa223492 100644 --- a/tensornetwork/block_tensor/charge.py +++ b/tensornetwork/block_tensor/charge.py @@ -121,6 +121,13 @@ def dual(self, take_dual: Optional[bool] = False) -> np.ndarray: return obj return self + def copy(self): + obj = self.__new__(type(self)) + obj.__init__( + charges=self.unique_charges.copy(), + charge_labels=self.charge_labels.copy(), + charge_types=self.charge_types) + @property def charges(self): return self.unique_charges[:, self.charge_labels] From f9b26af0c86214ee70089439ab7f7681619f29e3 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 31 Jan 2020 10:06:03 -0500 Subject: [PATCH 202/212] bugfix --- tensornetwork/block_tensor/block_tensor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 8ab2f3898..2318434bf 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -957,7 +957,7 @@ def transpose(tensor: BlockSparseTensor, Returns: BlockSparseTensor: The transposed tensor. 
""" - return tensor.transpose() + return tensor.transpose(order) def outerproduct(tensor1: BlockSparseTensor, From 4105650b1482ebab38f591b6f3e5d90070082e9e Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 31 Jan 2020 13:12:28 -0500 Subject: [PATCH 203/212] added __add__ __sub__ __mul__ __rmul__ --- tensornetwork/block_tensor/block_tensor.py | 42 +++++++++++++++++----- 1 file changed, 34 insertions(+), 8 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 2318434bf..9abb7eee1 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -744,6 +744,40 @@ def init_random(): return cls(data=init_random(), indices=indices) + def __sub__(self, other: "BlockSparseTensor"): + if self.dense_shape != other.dense_shape: + raise ValueError("cannot subtract tensors with shapes {}and {}".format( + self.dense_shape, other.dense_shape)) + if len(self.indices) != len(other.indices): + raise ValueError( + "cannot subtract tensors with different index-lengths {} and {}" + .format(len(self.indices), len(other.indices))) + + if not np.all( + self.indices[n] == other.indices[n] for n in range(len(self.indices))): + raise ValueError("cannot subtract tensors non-matching indices") + return BlockSparseTensor(data=self.data - other.data, indices=self.indices) + + def __add__(self, other: "BlockSparseTensor"): + if self.dense_shape != other.dense_shape: + raise ValueError("cannot add tensors with shapes {}and {}".format( + self.dense_shape, other.dense_shape)) + if len(self.indices) != len(other.indices): + raise ValueError( + "cannot add tensors with different index-lengths {} and {}".format( + len(self.indices), len(other.indices))) + + if not np.all( + self.indices[n] == other.indices[n] for n in range(len(self.indices))): + raise ValueError("cannot add tensors non-matching indices") + return BlockSparseTensor(data=self.data + other.data, indices=self.indices) + + def 
__mul__(self, number: np.number): + return BlockSparseTensor(data=self.data * number, indices=self.indices) + + def __rmul__(self, number: np.number): + return BlockSparseTensor(data=self.data * number, indices=self.indices) + @property def rank(self): return len(self.indices) @@ -770,14 +804,6 @@ def shape(self) -> Tuple: def dtype(self) -> Type[np.number]: return self.data.dtype - @property - def flows(self): - return [i.flow for i in self.indices] - - @property - def charges(self): - return [i.charges for i in self.indices] - @property def flat_charges(self): flat = [] From d70ce28748e0e74c5842975d8f227b2579cb86da Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 31 Jan 2020 13:12:51 -0500 Subject: [PATCH 204/212] added __eq__ --- tensornetwork/block_tensor/index.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index 111924b4d..12d55da42 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -51,6 +51,18 @@ def __repr__(self): def dim(self): return np.prod([i.dim for i in self._charges]) + def __eq__(self, other): + if len(other._charges) != len(self._charges): + return False + for n in range(len(self._charges)): + if not np.all( + self._charges[n].unique_charges == other._charges[n].unique_charges): + return False + if not np.all( + self._charges[n].charge_labels == other._charges[n].charge_labels): + return False + return True + def copy(self): """ Returns: From 6ad61a52d788ef7b83ac4af46afcdf2ad577cab5 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 31 Jan 2020 14:28:59 -0500 Subject: [PATCH 205/212] change dosctring --- tensornetwork/block_tensor/index.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index 12d55da42..a7445a892 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -24,8 +24,6 @@ class Index: """ 
An index class to store indices of a symmetric tensor. - An index keeps track of all its childs by storing references - to them (i.e. it is a binary tree). """ def __init__(self, From 4c6c083e621bea45efbd5555ab59de8ffba78bd2 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 31 Jan 2020 21:35:08 -0500 Subject: [PATCH 206/212] added svd --- tensornetwork/block_tensor/block_tensor.py | 74 ++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 9abb7eee1..63a39da5a 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -1202,3 +1202,77 @@ def tensordot( shapes_1[:, n1])] @ tensor2.data[tr_sparse_blocks_2[n2].reshape( shapes_2[:, n2])]).ravel() return BlockSparseTensor(data=data, indices=free_indices) + + +def svd(tensor: BlockSparseTensor, + full_matrices: Optional[bool] = True, + compute_uv: Optional[bool] = True, + hermitian: Optional[bool] = False): + if tensor.rank != 2: + raise NotImplementedError("SVD currently supports only rank-2 tensors.") + + flat_charges = tensor.indices[0]._charges + tensor.indices[1]._charges + flat_flows = tensor.flat_flows + partition = len(tensor.indices[0].flat_charges) + blocks, charges, shapes = _find_diagonal_sparse_blocks( + flat_charges, flat_flows, partition) + + u_blocks = [] + singvals = [] + v_blocks = [] + for n in range(len(blocks)): + u, s, v = np.linalg.svd( + np.reshape(tensor.data[blocks[n]], shapes[:, n]), full_matrices, + compute_uv, hermitian) + u_blocks.append(u) + v_blocks.append(v) + singvals.append(np.diag(s)) + + #define the new charges on the two central bonds + new_left_charge = charges.__new__(type(charges)) + new_right_charge = charges.__new__(type(charges)) + left_charge_labels = np.concatenate([ + np.full(u_blocks[n].shape[1], fill_value=n, dtype=np.int16) + for n in range(len(u_blocks)) + ]) + right_charge_labels = np.concatenate([ + 
np.full(v_blocks[n].shape[0], fill_value=n, dtype=np.int16) + for n in range(len(v_blocks)) + ]) + left_singval_charge = charges.__new__(type(charges)) + right_singval_charge = charges.__new__(type(charges)) + left_singval_charge_labels = np.concatenate([ + np.full(singvals[n].shape[0], fill_value=n, dtype=np.int16) + for n in range(len(singvals)) + ]) + right_singval_charge_labels = np.concatenate([ + np.full(singvals[n].shape[1], fill_value=n, dtype=np.int16) + for n in range(len(singvals)) + ]) + + new_left_charge.__init__(charges.unique_charges, left_charge_labels, + charges.charge_types) + new_right_charge.__init__(charges.unique_charges, right_charge_labels, + charges.charge_types) + left_singval_charge.__init__(charges.unique_charges, + left_singval_charge_labels, charges.charge_types) + right_singval_charge.__init__( + charges.unique_charges, right_singval_charge_labels, charges.charge_types) + + #get the indices of the new tensors U,S and V + indices_u = [Index(new_left_charge, True), tensor.indices[0]] + indices_v = [Index(new_right_charge, False), tensor.indices[1]] + indices_s = [ + Index(left_singval_charge, False), + Index(right_singval_charge, True) + ] + #We fill in data into the transposed U + #TODO: reuse data from _find_diagonal_sparse_blocks above + #to avoid the transpose + return BlockSparseTensor( + np.concatenate([np.ravel(u.T) for u in u_blocks]), indices_u).transpose( + (1, 0)), BlockSparseTensor( + np.concatenate([np.ravel(s) for s in singvals]), + indices_s), BlockSparseTensor( + np.concatenate([np.ravel(v) for v in v_blocks]), + indices_v), u_blocks, v_blocks, charges From d325569aac3af890f282acb3240762f211736aa5 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 31 Jan 2020 21:35:52 -0500 Subject: [PATCH 207/212] better check --- tensornetwork/block_tensor/index.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index a7445a892..209ef477d 
100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -30,13 +30,16 @@ def __init__(self, charges: Union[List[BaseCharge], BaseCharge], flow: Union[List[int], int], name: Optional[Union[List[Text], Text]] = None) -> None: + """ + Initialize an `Index` object. + """ if isinstance(charges, BaseCharge): charges = [charges] self._charges = charges - if isinstance(flow, (np.bool_, bool, np.bool)): + if np.isscalar(flow): flow = [flow] if not all([isinstance(f, (np.bool_, np.bool, bool)) for f in flow]): - raise TypeError("flows have to be boolean") + raise TypeError("flows have to be boolean. Found flow = {}".format(flow)) self.flow = flow if isinstance(name, str): name = [name] From 4686273b2a061b363b1ec67c4d9d471a7d937f78 Mon Sep 17 00:00:00 2001 From: mganahl Date: Fri, 31 Jan 2020 22:01:28 -0500 Subject: [PATCH 208/212] add proper compute_uv flag , remove artifact return values --- tensornetwork/block_tensor/block_tensor.py | 100 ++++++++++++--------- 1 file changed, 59 insertions(+), 41 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 298806f74..1a57d79c5 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -1204,16 +1204,30 @@ def tensordot( return BlockSparseTensor(data=data, indices=free_indices) -def svd(tensor: BlockSparseTensor, +def svd(matrix: BlockSparseTensor, full_matrices: Optional[bool] = True, compute_uv: Optional[bool] = True, hermitian: Optional[bool] = False): - if tensor.rank != 2: + """ + Compute the singular value decomposition of `matrix`. + The matrix if factorized into `u * s * vh`, with + `u` and `vh` the left and right eigenvectors of `matrix`, + and `s` its singular values. + Args: + matrix: A matrix (i.e. a rank-2 tensor) of type `BlockSparseTensor` + full_matrices: If `True`, exand `u` and `v` to square matrices + If `False` return the "economic" svd, i.e. 
`u.shape[1]=s.shape[0]` + and `v.shape[0]=s.shape[1]` + compute_yv: If `True`, return `u` and `v`. + hermitian: If `True`, assume hermiticity of `matrix`. + """ + + if matrix.rank != 2: raise NotImplementedError("SVD currently supports only rank-2 tensors.") - flat_charges = tensor.indices[0]._charges + tensor.indices[1]._charges - flat_flows = tensor.flat_flows - partition = len(tensor.indices[0].flat_charges) + flat_charges = matrix.indices[0]._charges + matrix.indices[1]._charges + flat_flows = matrix.flat_flows + partition = len(matrix.indices[0].flat_charges) blocks, charges, shapes = _find_diagonal_sparse_blocks( flat_charges, flat_flows, partition) @@ -1221,24 +1235,17 @@ def svd(tensor: BlockSparseTensor, singvals = [] v_blocks = [] for n in range(len(blocks)): - u, s, v = np.linalg.svd( - np.reshape(tensor.data[blocks[n]], shapes[:, n]), full_matrices, + out = np.linalg.svd( + np.reshape(matrix.data[blocks[n]], shapes[:, n]), full_matrices, compute_uv, hermitian) - u_blocks.append(u) - v_blocks.append(v) - singvals.append(np.diag(s)) - - #define the new charges on the two central bonds - new_left_charge = charges.__new__(type(charges)) - new_right_charge = charges.__new__(type(charges)) - left_charge_labels = np.concatenate([ - np.full(u_blocks[n].shape[1], fill_value=n, dtype=np.int16) - for n in range(len(u_blocks)) - ]) - right_charge_labels = np.concatenate([ - np.full(v_blocks[n].shape[0], fill_value=n, dtype=np.int16) - for n in range(len(v_blocks)) - ]) + if compute_uv: + u_blocks.append(out[0]) + singvals.append(np.diag(out[1])) + v_blocks.append(out[2]) + + else: + singvals.append(np.diag(out)) + left_singval_charge = charges.__new__(type(charges)) right_singval_charge = charges.__new__(type(charges)) left_singval_charge_labels = np.concatenate([ @@ -1250,30 +1257,41 @@ def svd(tensor: BlockSparseTensor, for n in range(len(singvals)) ]) - new_left_charge.__init__(charges.unique_charges, left_charge_labels, - charges.charge_types) - 
new_right_charge.__init__(charges.unique_charges, right_charge_labels, - charges.charge_types) left_singval_charge.__init__(charges.unique_charges, left_singval_charge_labels, charges.charge_types) right_singval_charge.__init__( charges.unique_charges, right_singval_charge_labels, charges.charge_types) - - #get the indices of the new tensors U,S and V - indices_u = [Index(new_left_charge, True), tensor.indices[0]] - indices_v = [Index(new_right_charge, False), tensor.indices[1]] indices_s = [ Index(left_singval_charge, False), Index(right_singval_charge, True) ] - - #We fill in data into the transposed U - #TODO: reuse data from _find_diagonal_sparse_blocks above - #to avoid the transpose - return BlockSparseTensor( - np.concatenate([np.ravel(u.T) for u in u_blocks]), indices_u).transpose( - (1, 0)), BlockSparseTensor( - np.concatenate([np.ravel(s) for s in singvals]), - indices_s), BlockSparseTensor( - np.concatenate([np.ravel(v) for v in v_blocks]), - indices_v), u_blocks, v_blocks, charges + S = BlockSparseTensor( + np.concatenate([np.ravel(s) for s in singvals]), indices_s) + if compute_uv: + #define the new charges on the two central bonds + new_left_charge = charges.__new__(type(charges)) + new_right_charge = charges.__new__(type(charges)) + left_charge_labels = np.concatenate([ + np.full(u_blocks[n].shape[1], fill_value=n, dtype=np.int16) + for n in range(len(u_blocks)) + ]) + right_charge_labels = np.concatenate([ + np.full(v_blocks[n].shape[0], fill_value=n, dtype=np.int16) + for n in range(len(v_blocks)) + ]) + new_left_charge.__init__(charges.unique_charges, left_charge_labels, + charges.charge_types) + new_right_charge.__init__(charges.unique_charges, right_charge_labels, + charges.charge_types) + + #get the indices of the new tensors U,S and V + indices_u = [Index(new_left_charge, True), matrix.indices[0]] + indices_v = [Index(new_right_charge, False), matrix.indices[1]] + #We fill in data into the transposed U + #TODO: reuse data from 
_find_diagonal_sparse_blocks above + #to avoid the transpose + return BlockSparseTensor( + np.concatenate([np.ravel(u.T) for u in u_blocks]), indices_u).transpose( + (1, 0)), S, BlockSparseTensor( + np.concatenate([np.ravel(v) for v in v_blocks]), indices_v) + return S From 9d477c629be2f1ba5922a1693afb5847513d6350 Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 1 Feb 2020 22:01:33 -0500 Subject: [PATCH 209/212] added qr, eigh, eig --- tensornetwork/block_tensor/block_tensor.py | 260 +++++++++++++++++++-- 1 file changed, 244 insertions(+), 16 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index af786e561..83f874916 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -23,11 +23,22 @@ import scipy as sp import itertools import time -from typing import List, Union, Any, Tuple, Type, Optional, Dict, Iterable, Sequence +from typing import List, Union, Any, Tuple, Type, Optional, Dict, Iterable, Sequence, Text Tensor = Any -def get_flat_order(indices, order): +def get_flat_order(indices: List[Index], + order: Union[List[int], np.ndarray]) -> np.ndarray: + """ + Compute the flat order of the + flattened `indices` corresponding to `order`. + Args: + indices: A list of `Index` objects. + order: An order. + Returns: + The flat order of the flat indices correspondint + to the `order` of `indices`. + """ flat_charges, _ = get_flat_meta_data(indices) flat_labels = np.arange(len(flat_charges)) cum_num_legs = np.append(0, np.cumsum([len(i.flat_charges) for i in indices])) @@ -38,6 +49,12 @@ def get_flat_order(indices, order): def get_flat_meta_data(indices): + """ + Return charges and flows of flattened `indices`. + Args: + indices: A list of `Index` objects. 
+ + """ charges = [] flows = [] for i in indices: @@ -818,6 +835,29 @@ def flat_flows(self): flat.extend(i.flat_flows) return flat + def __matmul__(self, other): + + if self.rank != 2: + raise ValueError('__matmul__ only implemented for matrices') + + if other.rank != 2: + raise ValueError('__matmul__ only implemented for matrices') + return tensordot(self, other, ([1], [0])) + + def conj(self): + """ + Transpose the tensor in place into the new order `order`. + Args: + order: The new order of indices. + Returns: + BlockSparseTensor: The transposed tensor. + """ + indices = [ + Index(i.flat_charges, list(np.logical_not(i.flat_flows)), i.name) + for i in self.indices + ] + return BlockSparseTensor(np.conj(self.data), indices) + def transpose( self, order: Union[List[int], np.ndarray], @@ -839,7 +879,6 @@ def transpose( return BlockSparseTensor(self.data, self.indices) flat_charges, flat_flows = get_flat_meta_data(self.indices) flat_order = get_flat_order(self.indices, order) - print(flat_order) tr_partition = _find_best_partition( [len(flat_charges[n]) for n in flat_order]) @@ -938,8 +977,9 @@ def reshape(tensor: BlockSparseTensor, Reshape `tensor` into `shape`. `reshape` works essentially the same as the dense version, with the notable exception that the tensor can only be reshaped into a form - compatible with its elementary indices. The elementary indices are - the indices at the leaves of the `Index` objects `tensors.indices`. + compatible with its elementary shape. The elementary shape is + the shape determined by the flattened charges of all `Index` objects + in `tensors.indices`. 
For example, while the following reshaping is possible for regular dense numpy tensor, ``` @@ -948,14 +988,14 @@ def reshape(tensor: BlockSparseTensor, ``` the same code for BlockSparseTensor ``` - q1 = np.random.randint(0,10,6) - q2 = np.random.randint(0,10,6) - q3 = np.random.randint(0,10,6) - i1 = Index(charges=q1,flow=1) - i2 = Index(charges=q2,flow=-1) - i3 = Index(charges=q3,flow=1) + q1 = U1Charge(np.random.randint(0,10,6)) + q2 = U1Charge(np.random.randint(0,10,6)) + q3 = U1Charge(np.random.randint(0,10,6)) + i1 = Index(charges=q1,flow=False) + i2 = Index(charges=q2,flow=True) + i3 = Index(charges=q3,flow=False) A=BlockSparseTensor.randn(indices=[i1,i2,i3]) - print(nA.shape) #prints (6,6,6) + print(A.shape) #prints (6,6,6) reshape(A, (2,3,6,6)) #raises ValueError ``` raises a `ValueError` since (2,3,6,6) @@ -975,8 +1015,8 @@ def reshape(tensor: BlockSparseTensor, def transpose(tensor: BlockSparseTensor, order: Union[List[int], np.ndarray]) -> "BlockSparseTensor": """ - Transpose `tensor` into the new order `order`. This routine currently shuffles - data. + Transpose `tensor` into the new order `order`. + This routine currently shuffles data. Args: tensor: The tensor to be transposed. order: The new order of indices. @@ -1207,7 +1247,8 @@ def tensordot( def svd(matrix: BlockSparseTensor, full_matrices: Optional[bool] = True, compute_uv: Optional[bool] = True, - hermitian: Optional[bool] = False): + hermitian: Optional[bool] = False + ) -> Tuple[BlockSparseTensor, BlockSparseTensor, BlockSparseTensor]: """ Compute the singular value decomposition of `matrix`. The matrix if factorized into `u * s * vh`, with @@ -1220,10 +1261,14 @@ def svd(matrix: BlockSparseTensor, and `v.shape[0]=s.shape[1]` compute_yv: If `True`, return `u` and `v`. hermitian: If `True`, assume hermiticity of `matrix`. + Returns: + If `compute_uv` is `True`: Three BlockSparseTensors `U,S,V`. + If `compute_uv` is `False`: A BlockSparseTensors `S` containing the + singular values. 
""" if matrix.rank != 2: - raise NotImplementedError("SVD currently supports only rank-2 tensors.") + raise NotImplementedError("svd currently supports only rank-2 tensors.") flat_charges = matrix.indices[0]._charges + matrix.indices[1]._charges flat_flows = matrix.flat_flows @@ -1296,3 +1341,186 @@ def svd(matrix: BlockSparseTensor, np.concatenate([np.ravel(v) for v in v_blocks]), indices_v) return S + + +def qr(matrix: BlockSparseTensor, mode: Optional[Text] = 'reduced' + ) -> [BlockSparseTensor, BlockSparseTensor]: + """ + Compute the qr decomposition of an `M` by `N` matrix `matrix`. + The matrix is factorized into `q*r`, with + `q` an orthogonal matrix and `r` an upper triangular matrix. + Args: + matrix: A matrix (i.e. a rank-2 tensor) of type `BlockSparseTensor` + mode : Can take values {'reduced', 'complete', 'r', 'raw'}. + If K = min(M, N), then + + * 'reduced' : returns q, r with dimensions (M, K), (K, N) (default) + * 'complete' : returns q, r with dimensions (M, M), (M, N) + * 'r' : returns r only with dimensions (K, N) + + Returns: + (BlockSparseTensor,BlockSparseTensor): If mode = `reduced` or `complete` + BlockSparseTensor: If mode = `r`. 
+ """ + if mode == 'raw': + raise NotImplementedError('mode `raw` currenntly not supported') + if matrix.rank != 2: + raise NotImplementedError("qr currently supports only rank-2 tensors.") + + flat_charges = matrix.indices[0]._charges + matrix.indices[1]._charges + flat_flows = matrix.flat_flows + partition = len(matrix.indices[0].flat_charges) + blocks, charges, shapes = _find_diagonal_sparse_blocks( + flat_charges, flat_flows, partition) + + q_blocks = [] + r_blocks = [] + for n in range(len(blocks)): + out = np.linalg.qr(np.reshape(matrix.data[blocks[n]], shapes[:, n]), mode) + if mode in ('reduced', 'complete'): + q_blocks.append(out[0]) + r_blocks.append(out[1]) + elif mode == 'r': + r_blocks.append(out) + else: + raise ValueError('unknown value {} for input `mode`'.format(mode)) + + left_r_charge = charges.__new__(type(charges)) + left_r_charge_labels = np.concatenate([ + np.full(r_blocks[n].shape[0], fill_value=n, dtype=np.int16) + for n in range(len(r_blocks)) + ]) + + left_r_charge.__init__(charges.unique_charges, left_r_charge_labels, + charges.charge_types) + indices_r = [Index(left_r_charge, False), matrix.indices[1]] + + R = BlockSparseTensor( + np.concatenate([np.ravel(r) for r in r_blocks]), indices_r) + if mode in ('reduced', 'complete'): + right_q_charge = charges.__new__(type(charges)) + right_q_charge_labels = np.concatenate([ + np.full(q_blocks[n].shape[1], fill_value=n, dtype=np.int16) + for n in range(len(q_blocks)) + ]) + right_q_charge.__init__(charges.unique_charges, right_q_charge_labels, + charges.charge_types) + + indices_q = [Index(right_q_charge, True), matrix.indices[0]] + #TODO: reuse data from _find_diagonal_sparse_blocks above + #to avoid the transpose + return BlockSparseTensor( + np.concatenate([np.ravel(q.T) for q in q_blocks]), indices_q).transpose( + (1, 0)), R + + return R + + +def eigh(matrix: BlockSparseTensor, + UPLO: Optional[Text] = 'L') -> [BlockSparseTensor, BlockSparseTensor]: + """ + Compute the eigen decomposition 
of a hermitian `M` by `M` matrix `matrix`. + Args: + matrix: A matrix (i.e. a rank-2 tensor) of type `BlockSparseTensor` + + Returns: + (BlockSparseTensor,BlockSparseTensor): The eigenvalues and eigenvectors + + """ + if matrix.rank != 2: + raise NotImplementedError("qr currently supports only rank-2 tensors.") + + flat_charges = matrix.indices[0]._charges + matrix.indices[1]._charges + flat_flows = matrix.flat_flows + partition = len(matrix.indices[0].flat_charges) + blocks, charges, shapes = _find_diagonal_sparse_blocks( + flat_charges, flat_flows, partition) + + eigvals = [] + v_blocks = [] + for n in range(len(blocks)): + e, v = np.linalg.eigh( + np.reshape(matrix.data[blocks[n]], shapes[:, n]), UPLO) + eigvals.append(np.diag(e)) + v_blocks.append(v) + + left_v_charge = charges.__new__(type(charges)) + left_v_charge_labels = np.concatenate([ + np.full(v_blocks[n].shape[0], fill_value=n, dtype=np.int16) + for n in range(len(v_blocks)) + ]) + + left_v_charge.__init__(charges.unique_charges, left_v_charge_labels, + charges.charge_types) + indices_v = [Index(left_v_charge, False), matrix.indices[1]] + + V = BlockSparseTensor( + np.concatenate([np.ravel(v) for v in v_blocks]), indices_v) + eigvalscharge = charges.__new__(type(charges)) + eigvalscharge_labels = np.concatenate([ + np.full(eigvals[n].shape[1], fill_value=n, dtype=np.int16) + for n in range(len(eigvals)) + ]) + eigvalscharge.__init__(charges.unique_charges, eigvalscharge_labels, + charges.charge_types) + + indices_q = [Index(eigvalscharge, True), matrix.indices[0]] + #TODO: reuse data from _find_diagonal_sparse_blocks above + #to avoid the transpose + return BlockSparseTensor( + np.concatenate([np.ravel(q.T) for q in eigvals]), indices_q).transpose( + (1, 0)), V + + +def eig(matrix: BlockSparseTensor) -> [BlockSparseTensor, BlockSparseTensor]: + """ + Compute the eigen decomposition of an `M` by `M` matrix `matrix`. + Args: + matrix: A matrix (i.e. 
a rank-2 tensor) of type `BlockSparseTensor` + + Returns: + (BlockSparseTensor,BlockSparseTensor): The eigenvalues and eigenvectors + + """ + if matrix.rank != 2: + raise NotImplementedError("qr currently supports only rank-2 tensors.") + + flat_charges = matrix.indices[0]._charges + matrix.indices[1]._charges + flat_flows = matrix.flat_flows + partition = len(matrix.indices[0].flat_charges) + blocks, charges, shapes = _find_diagonal_sparse_blocks( + flat_charges, flat_flows, partition) + + eigvals = [] + v_blocks = [] + for n in range(len(blocks)): + e, v = np.linalg.eig(np.reshape(matrix.data[blocks[n]], shapes[:, n])) + eigvals.append(np.diag(e)) + v_blocks.append(v) + + left_v_charge = charges.__new__(type(charges)) + left_v_charge_labels = np.concatenate([ + np.full(v_blocks[n].shape[0], fill_value=n, dtype=np.int16) + for n in range(len(v_blocks)) + ]) + + left_v_charge.__init__(charges.unique_charges, left_v_charge_labels, + charges.charge_types) + indices_v = [Index(left_v_charge, False), matrix.indices[1]] + + V = BlockSparseTensor( + np.concatenate([np.ravel(v) for v in v_blocks]), indices_v) + eigvalscharge = charges.__new__(type(charges)) + eigvalscharge_labels = np.concatenate([ + np.full(eigvals[n].shape[1], fill_value=n, dtype=np.int16) + for n in range(len(eigvals)) + ]) + eigvalscharge.__init__(charges.unique_charges, eigvalscharge_labels, + charges.charge_types) + + indices_q = [Index(eigvalscharge, True), matrix.indices[0]] + #TODO: reuse data from _find_diagonal_sparse_blocks above + #to avoid the transpose + return BlockSparseTensor( + np.concatenate([np.ravel(q.T) for q in eigvals]), indices_q).transpose( + (1, 0)), V From d0419806b13324cd5d090cb7e370e73a8ba179fb Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 1 Feb 2020 22:01:57 -0500 Subject: [PATCH 210/212] added tests --- .../block_tensor/block_tensor_test.py | 72 +++++++++++++++---- 1 file changed, 60 insertions(+), 12 deletions(-) diff --git 
a/tensornetwork/block_tensor/block_tensor_test.py b/tensornetwork/block_tensor/block_tensor_test.py index adc77fc6a..3baf0cd8d 100644 --- a/tensornetwork/block_tensor/block_tensor_test.py +++ b/tensornetwork/block_tensor/block_tensor_test.py @@ -3,12 +3,13 @@ from tensornetwork.block_tensor.charge import U1Charge, fuse_charges from tensornetwork.block_tensor.index import Index -from tensornetwork.block_tensor.block_tensor import compute_num_nonzero, reduce_charges, BlockSparseTensor, fuse_ndarrays, tensordot +from tensornetwork.block_tensor.block_tensor import compute_num_nonzero, reduce_charges, BlockSparseTensor, fuse_ndarrays, tensordot, svd, qr np_dtypes = [np.float32, np.float16, np.float64, np.complex64, np.complex128] +np_tensordot_dtypes = [np.float16, np.float64, np.complex128] -def get_contractable_tensors(R1, R2, cont): +def get_contractable_tensors(R1, R2, cont, dtype): DsA = np.random.randint(5, 10, R1) DsB = np.random.randint(5, 10, R2) assert R1 >= cont @@ -49,8 +50,8 @@ def get_contractable_tensors(R1, R2, cont): for n in sorted(compB): indices_final.append(indicesB[n]) shapes = tuple([i.dim for i in indices_final]) - A = BlockSparseTensor.random(indices=indicesA) - B = BlockSparseTensor.random(indices=indicesB) + A = BlockSparseTensor.random(indices=indicesA, dtype=dtype) + B = BlockSparseTensor.random(indices=indicesB, dtype=dtype) return A, B, indsA, indsB @@ -133,9 +134,10 @@ def test_reshape_transpose(): np.testing.assert_allclose(dense, B.todense()) +@pytest.mark.parametrize("dtype", np_tensordot_dtypes) @pytest.mark.parametrize("R1, R2, cont", [(4, 4, 2), (4, 3, 3), (3, 4, 3)]) -def test_tensordot(R1, R2, cont): - A, B, indsA, indsB = get_contractable_tensors(R1, R2, cont) +def test_tensordot(R1, R2, cont, dtype): + A, B, indsA, indsB = get_contractable_tensors(R1, R2, cont, dtype) res = tensordot(A, B, (indsA, indsB)) dense_res = np.tensordot(A.todense(), B.todense(), (indsA, indsB)) np.testing.assert_allclose(dense_res, res.todense()) @@ 
@pytest.mark.parametrize("dtype", np_dtypes)
@pytest.mark.parametrize("R, R1, R2", [(2, 1, 1), (3, 2, 1), (3, 1, 2)])
def test_svd_prod(dtype, R, R1, R2):
  """U @ S @ V reconstructs A for the block-sparse svd."""
  D = 30
  charges = [U1Charge.random(-5, 5, D) for n in range(R)]
  flows = [True] * R
  # Pass `dtype` through; otherwise the dtype parametrization is a no-op
  # and every case runs at the default dtype.
  A = BlockSparseTensor.random(
      [Index(charges[n], flows[n]) for n in range(R)], dtype=dtype)
  A = A.reshape([D**R1, D**R2])
  U, S, V = svd(A, full_matrices=False)
  A_ = U @ S @ V
  np.testing.assert_allclose(A.data, A_.data)


@pytest.mark.parametrize("dtype", np_dtypes)
@pytest.mark.parametrize("R, R1, R2", [(2, 1, 1), (3, 2, 1), (3, 1, 2)])
def test_svd_singvals(dtype, R, R1, R2):
  """Singular values agree between compute_uv=True/False and the dense svd."""
  D = 30
  charges = [U1Charge.random(-5, 5, D) for n in range(R)]
  flows = [True] * R
  # Pass `dtype` through; otherwise the dtype parametrization is a no-op.
  A = BlockSparseTensor.random(
      [Index(charges[n], flows[n]) for n in range(R)], dtype=dtype)
  A = A.reshape([D**R1, D**R2])
  U1, S1, V1 = svd(A, full_matrices=False)
  S2 = svd(A, full_matrices=False, compute_uv=False)
  np.testing.assert_allclose(S1.data, S2.data)
  Sdense = np.linalg.svd(A.todense(), compute_uv=False)
  # Compare only the nonzero singular values: the dense matrix carries
  # symmetry-forbidden (identically zero) blocks absent from sparse data.
  np.testing.assert_allclose(
      np.sort(Sdense[Sdense > 1E-15]), np.sort(S2.data[S2.data > 0.0]))


@pytest.mark.parametrize("mode", ['complete', 'reduced'])
@pytest.mark.parametrize("dtype", np_dtypes)
@pytest.mark.parametrize("R, R1, R2", [(2, 1, 1), (3, 2, 1), (3, 1, 2)])
def test_qr_prod(dtype, R, R1, R2, mode):
  """Q @ R reconstructs A for the block-sparse qr."""
  D = 30
  charges = [U1Charge.random(-5, 5, D) for n in range(R)]
  flows = [True] * R
  # Pass `dtype` through; otherwise the dtype parametrization is a no-op.
  A = BlockSparseTensor.random(
      [Index(charges[n], flows[n]) for n in range(R)], dtype=dtype)
  A = A.reshape([D**R1, D**R2])
  # Name the triangular factor `R_` to avoid shadowing the rank param `R`.
  Q, R_ = qr(A, mode=mode)
  A_ = Q @ R_
  np.testing.assert_allclose(A.data, A_.data)
tensornetwork.block_tensor.index import Index, fuse_index_pair, fuse_indices from tensornetwork.block_tensor.charge import U1Charge, BaseCharge @@ -17,8 +17,6 @@ def test_index_fusion_mul(): i2 = Index(charges=q2, flow=False, name='index2') #index on leg 2 i12 = i1 * i2 - assert i12.left_child is i1 - assert i12.right_child is i2 for n in range(len(i12.charges.charges)): assert np.all(i12.charges.charges == (q1 + q2).charges) @@ -35,96 +33,10 @@ def test_fuse_indices(): i2 = Index(charges=q2, flow=False, name='index2') #index on leg 2 i12 = fuse_indices([i1, i2]) - assert i12.left_child is i1 - assert i12.right_child is i2 for n in range(len(i12.charges.charges)): assert np.all(i12.charges.charges == (q1 + q2).charges) -def test_split_index(): - D = 10 - B = 4 - dtype = np.int16 - q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)) #quantum numbers on leg 1 - q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)) #quantum numbers on leg 1 - - i1 = Index(charges=q1, flow=False, name='index1') #index on leg 1 - i2 = Index(charges=q2, flow=False, name='index2') #index on leg 2 - - i12 = i1 * i2 - i1_, i2_ = split_index(i12) - assert i1 is i1_ - assert i2 is i2_ - np.testing.assert_allclose(q1.charges, i1.charges.charges) - np.testing.assert_allclose(q2.charges, i2.charges.charges) - np.testing.assert_allclose(q1.charges, i1_.charges.charges) - np.testing.assert_allclose(q2.charges, i2_.charges.charges) - assert i1_.name == 'index1' - assert i2_.name == 'index2' - assert i1_.flow == i1.flow - assert i2_.flow == i2.flow - - -def test_elementary_indices(): - D = 10 - B = 4 - dtype = np.int16 - q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) - q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) - q3 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) - q4 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype)) - i1 = Index(charges=q1, flow=False, name='index1') - 
i2 = Index(charges=q2, flow=False, name='index2') - i3 = Index(charges=q3, flow=False, name='index3') - i4 = Index(charges=q4, flow=False, name='index4') - - i12 = i1 * i2 - i34 = i3 * i4 - elmt12 = i12.get_elementary_indices() - assert elmt12[0] is i1 - assert elmt12[1] is i2 - - i1234 = i12 * i34 - elmt1234 = i1234.get_elementary_indices() - assert elmt1234[0] is i1 - assert elmt1234[1] is i2 - assert elmt1234[2] is i3 - assert elmt1234[3] is i4 - assert elmt1234[0].name == 'index1' - assert elmt1234[1].name == 'index2' - assert elmt1234[2].name == 'index3' - assert elmt1234[3].name == 'index4' - assert elmt1234[0].flow == i1.flow - assert elmt1234[1].flow == i2.flow - assert elmt1234[2].flow == i3.flow - assert elmt1234[3].flow == i4.flow - - np.testing.assert_allclose(q1.charges, i1.charges.charges) - np.testing.assert_allclose(q2.charges, i2.charges.charges) - np.testing.assert_allclose(q3.charges, i3.charges.charges) - np.testing.assert_allclose(q4.charges, i4.charges.charges) - - -def test_leave(): - D = 10 - B = 4 - dtype = np.int16 - q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)) #quantum numbers on leg 1 - q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, - D).astype(dtype)) #quantum numbers on leg 1 - - i1 = Index(charges=q1, flow=False, name='index1') - i2 = Index(charges=q2, flow=False, name='index2') - assert i1.is_leave - assert i2.is_leave - - i12 = i1 * i2 - assert not i12.is_leave - - def test_copy(): D = 10 B = 4 @@ -144,8 +56,8 @@ def test_copy(): i1234 = i12 * i34 i1234_copy = i1234.copy() - elmt1234 = i1234_copy.get_elementary_indices() - assert elmt1234[0] is not i1 - assert elmt1234[1] is not i2 - assert elmt1234[2] is not i3 - assert elmt1234[3] is not i4 + flat1234 = i1234_copy.flat_charges + assert flat1234[0] is not i1.flat_charges[0] + assert flat1234[1] is not i2.flat_charges[0] + assert flat1234[2] is not i3.flat_charges[0] + assert flat1234[3] is not i4.flat_charges[0] From 
f67aa20f008c8b94332f58ce578bb631047c2528 Mon Sep 17 00:00:00 2001 From: mganahl Date: Sat, 1 Feb 2020 22:20:35 -0500 Subject: [PATCH 212/212] fix index bugs --- tensornetwork/block_tensor/block_tensor.py | 2 +- tensornetwork/block_tensor/index.py | 14 -------------- 2 files changed, 1 insertion(+), 15 deletions(-) diff --git a/tensornetwork/block_tensor/block_tensor.py b/tensornetwork/block_tensor/block_tensor.py index 83f874916..abe71be77 100644 --- a/tensornetwork/block_tensor/block_tensor.py +++ b/tensornetwork/block_tensor/block_tensor.py @@ -17,7 +17,7 @@ from __future__ import print_function import numpy as np from tensornetwork.backends import backend_factory -from tensornetwork.block_tensor.index import Index, fuse_index_pair, split_index +from tensornetwork.block_tensor.index import Index, fuse_index_pair from tensornetwork.block_tensor.charge import fuse_degeneracies, fuse_charges, fuse_degeneracies, BaseCharge, fuse_ndarray_charges, intersect import numpy as np import scipy as sp diff --git a/tensornetwork/block_tensor/index.py b/tensornetwork/block_tensor/index.py index 209ef477d..422e5d0c0 100644 --- a/tensornetwork/block_tensor/index.py +++ b/tensornetwork/block_tensor/index.py @@ -254,17 +254,3 @@ def fuse_indices(indices: List[Index], flow: Optional[int] = False) -> Index: for n in range(1, len(indices)): index = fuse_index_pair(index, indices[n], flow=flow) return index - - -def split_index(index: Index) -> Tuple[Index, Index]: - """ - Split an index (leg) of a symmetric tensor into two legs. - Args: - index: A tensor Index. - Returns: - Tuple[Index, Index]: The result of splitting `index`. - """ - if index.is_leave: - raise ValueError("cannot split an elementary index") - - return index.left_child, index.right_child