diff --git a/tensornetwork/backends/base_backend.py b/tensornetwork/backends/base_backend.py index 0368e6d6b..9e718998f 100644 --- a/tensornetwork/backends/base_backend.py +++ b/tensornetwork/backends/base_backend.py @@ -391,6 +391,32 @@ def eigsh_lanczos(self, raise NotImplementedError( "Backend '{}' has not implemented eighs_lanczos.".format(self.name)) + def addition(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + """ + Return the default addition of `tensor`. + A backend can override such implementation. + Args: + tensor1: A tensor. + tensor2: A tensor. + Returns: + Tensor + """ + raise NotImplementedError( + "Backend '{}' has not implemented addition.".format(self.name)) + + def subtraction(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + """ + Return the default subtraction of `tensor`. + A backend can override such implementation. + Args: + tensor1: A tensor. + tensor2: A tensor. + Returns: + Tensor + """ + raise NotImplementedError( + "Backend '{}' has not implemented subtraction.".format(self.name)) + def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: """ Return the default multiplication of `tensor`. @@ -404,6 +430,19 @@ def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: raise NotImplementedError( "Backend '{}' has not implemented multiply.".format(self.name)) + def divide(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + """ + Return the default divide of `tensor`. + A backend can override such implementation. + Args: + tensor1: A tensor. + tensor2: A tensor. 
+ Returns: + Tensor + """ + raise NotImplementedError( + "Backend '{}' has not implemented divide.".format(self.name)) + def index_update(self, tensor: Tensor, mask: Tensor, assignee: Tensor) -> Tensor: """ diff --git a/tensornetwork/backends/numpy/numpy_backend.py b/tensornetwork/backends/numpy/numpy_backend.py index 41a0061c7..7a1606f9d 100644 --- a/tensornetwork/backends/numpy/numpy_backend.py +++ b/tensornetwork/backends/numpy/numpy_backend.py @@ -371,9 +371,18 @@ def eigsh_lanczos(self, eigenvectors.append(state / self.np.linalg.norm(state)) return eigvals[0:numeig], eigenvectors + def addition(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + return tensor1 + tensor2 + + def subtraction(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + return tensor1 - tensor2 + def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: return tensor1 * tensor2 + def divide(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + return tensor1 / tensor2 + def index_update(self, tensor: Tensor, mask: Tensor, assignee: Tensor) -> Tensor: t = self.np.copy(tensor) diff --git a/tensornetwork/backends/numpy/numpy_backend_test.py b/tensornetwork/backends/numpy/numpy_backend_test.py index e8688ce62..fe73271e8 100644 --- a/tensornetwork/backends/numpy/numpy_backend_test.py +++ b/tensornetwork/backends/numpy/numpy_backend_test.py @@ -321,15 +321,67 @@ def test_eigsh_lanczos_raises(): @pytest.mark.parametrize("a, b, expected", [ + pytest.param(1, 1, 2), + pytest.param(1., np.ones((1, 2, 3)), 2*np.ones((1, 2, 3))), + pytest.param(2.*np.ones(()), 1., 3.*np.ones((1, 2, 3))), + pytest.param(2.*np.ones(()), 1.*np.ones((1, 2, 3)), 3.*np.ones((1, 2, 3))), +]) +def test_addition(a, b, expected): + backend = numpy_backend.NumPyBackend() + tensor1 = backend.convert_to_tensor(a) + tensor2 = backend.convert_to_tensor(b) + result = backend.addition(tensor1, tensor2) + + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype + + 
+@pytest.mark.parametrize("a, b, expected", [ + pytest.param(1, 1, 0), + pytest.param(2., 1.*np.ones((1, 2, 3)), 1.*np.ones((1, 2, 3))), + pytest.param(np.ones((1, 2, 3)), 1., np.zeros((1, 2, 3))), + pytest.param(np.ones((1, 2, 3)), np.ones((1, 2, 3)), np.zeros((1, 2, 3))), +]) +def test_subtraction(a, b, expected): + backend = numpy_backend.NumPyBackend() + tensor1 = backend.convert_to_tensor(a) + tensor2 = backend.convert_to_tensor(b) + result = backend.subtraction(tensor1, tensor2) + + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype + + +@pytest.mark.parametrize("a, b, expected", [ + pytest.param(1, 1, 1), + pytest.param(2., 1.*np.ones((1, 2, 3)), 2.*np.ones((1, 2, 3))), + pytest.param(np.ones((1, 2, 3)), 1., np.ones((1, 2, 3))), pytest.param(np.ones((1, 2, 3)), np.ones((1, 2, 3)), np.ones((1, 2, 3))), - pytest.param(2. * np.ones(()), np.ones((1, 2, 3)), 2. * np.ones((1, 2, 3))), ]) def test_multiply(a, b, expected): backend = numpy_backend.NumPyBackend() tensor1 = backend.convert_to_tensor(a) tensor2 = backend.convert_to_tensor(b) + result = backend.multiply(tensor1, tensor2) + + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype + + +@pytest.mark.parametrize("a, b, expected", [ + pytest.param(2., 2., 1.), + pytest.param(2., 0.5*np.ones((1, 2, 3)), 4.*np.ones((1, 2, 3))), + pytest.param(np.ones(()), 2., 0.5*np.ones((1, 2, 3))), + pytest.param(np.ones(()), 2.*np.ones((1, 2, 3)), 0.5*np.ones((1, 2, 3))), +]) +def test_divide(a, b, expected): + backend = numpy_backend.NumPyBackend() + tensor1 = backend.convert_to_tensor(a) + tensor2 = backend.convert_to_tensor(b) + result = backend.divide(tensor1, tensor2) - np.testing.assert_allclose(backend.multiply(tensor1, tensor2), expected) + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype def find(which, vector): diff --git 
a/tensornetwork/backends/pytorch/pytorch_backend.py b/tensornetwork/backends/pytorch/pytorch_backend.py index b8e9a1ed7..788d4258b 100644 --- a/tensornetwork/backends/pytorch/pytorch_backend.py +++ b/tensornetwork/backends/pytorch/pytorch_backend.py @@ -275,9 +275,18 @@ def eigsh_lanczos(self, eigenvectors.append(state / self.torch.norm(state)) return eigvals[0:numeig], eigenvectors + def addition(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + return tensor1 + tensor2 + + def subtraction(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + return tensor1 - tensor2 + def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: return tensor1 * tensor2 + def divide(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + return tensor1 / tensor2 + def index_update(self, tensor: Tensor, mask: Tensor, assignee: Tensor) -> Tensor: #make a copy diff --git a/tensornetwork/backends/pytorch/pytorch_backend_test.py b/tensornetwork/backends/pytorch/pytorch_backend_test.py index e55d71b4b..e25542ac8 100644 --- a/tensornetwork/backends/pytorch/pytorch_backend_test.py +++ b/tensornetwork/backends/pytorch/pytorch_backend_test.py @@ -293,15 +293,59 @@ def test_eigsh_lanczos_raises(): @pytest.mark.parametrize("a, b, expected", [ + pytest.param(1, 1, 2), + pytest.param(np.ones((1, 2, 3)), np.ones((1, 2, 3)), 2.*np.ones((1, 2, 3))), +]) +def test_addition(a, b, expected): + backend = pytorch_backend.PyTorchBackend() + tensor1 = backend.convert_to_tensor(a) + tensor2 = backend.convert_to_tensor(b) + result = backend.addition(tensor1, tensor2) + + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype + + +@pytest.mark.parametrize("a, b, expected", [ + pytest.param(1, 1, 0), + pytest.param(np.ones((1, 2, 3)), np.ones((1, 2, 3)), np.zeros((1, 2, 3))), +]) +def test_subtraction(a, b, expected): + backend = pytorch_backend.PyTorchBackend() + tensor1 = backend.convert_to_tensor(a) + tensor2 = backend.convert_to_tensor(b) + result = 
backend.subtraction(tensor1, tensor2) + + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype + + +@pytest.mark.parametrize("a, b, expected", [ + pytest.param(1, 1, 1), pytest.param(np.ones((1, 2, 3)), np.ones((1, 2, 3)), np.ones((1, 2, 3))), - pytest.param(2. * np.ones(()), np.ones((1, 2, 3)), 2. * np.ones((1, 2, 3))), ]) def test_multiply(a, b, expected): backend = pytorch_backend.PyTorchBackend() tensor1 = backend.convert_to_tensor(a) tensor2 = backend.convert_to_tensor(b) + result = backend.multiply(tensor1, tensor2) + + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype + + +@pytest.mark.parametrize("a, b, expected", [ + pytest.param(2., 2., 1.), + pytest.param(np.ones(()), 2.*np.ones((1, 2, 3)), 0.5*np.ones((1, 2, 3))), +]) +def test_divide(a, b, expected): + backend = pytorch_backend.PyTorchBackend() + tensor1 = backend.convert_to_tensor(a) + tensor2 = backend.convert_to_tensor(b) + result = backend.divide(tensor1, tensor2) - np.testing.assert_allclose(backend.multiply(tensor1, tensor2), expected) + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype def test_eigh(): diff --git a/tensornetwork/backends/shell/shell_backend.py b/tensornetwork/backends/shell/shell_backend.py index 33b30a99c..6cc6d9ae2 100644 --- a/tensornetwork/backends/shell/shell_backend.py +++ b/tensornetwork/backends/shell/shell_backend.py @@ -290,11 +290,20 @@ def eigsh_lanczos(self, raise ValueError( '`A` has no attribut shape adn no `initial_state` is given.') + def addition(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + raise NotImplementedError("Shell tensor has not implemented addition( + )") + + def subtraction(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + raise NotImplementedError("Shell tensor has not implemented subtraction( - )") + def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: a = np.ones(tensor1.shape) 
b = np.ones(tensor2.shape) return ShellTensor((a * b).shape) + def divide(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + raise NotImplementedError("Shell tensor has not implemented divide( / )") + def index_update(self, tensor: Tensor, mask: Tensor, assignee: Tensor) -> Tensor: return ShellTensor(tensor.shape) diff --git a/tensornetwork/backends/tensorflow/tensorflow_backend.py b/tensornetwork/backends/tensorflow/tensorflow_backend.py index c87d464c5..2d9c0ed4d 100644 --- a/tensornetwork/backends/tensorflow/tensorflow_backend.py +++ b/tensornetwork/backends/tensorflow/tensorflow_backend.py @@ -182,9 +182,18 @@ def eigsh_lanczos(self, raise NotImplementedError( "Backend '{}' has not implemented eighs_lanczos.".format(self.name)) + def addition(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + return tensor1 + tensor2 + + def subtraction(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + return tensor1 - tensor2 + def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: return tensor1 * tensor2 + def divide(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: + return tensor1 / tensor2 + def index_update(self, tensor: Tensor, mask: Tensor, assignee: Tensor) -> Tensor: #returns a copy (unfortunately) diff --git a/tensornetwork/backends/tensorflow/tensorflow_backend_test.py b/tensornetwork/backends/tensorflow/tensorflow_backend_test.py index 838771598..8073a61e2 100644 --- a/tensornetwork/backends/tensorflow/tensorflow_backend_test.py +++ b/tensornetwork/backends/tensorflow/tensorflow_backend_test.py @@ -247,15 +247,59 @@ def test_conj(): @pytest.mark.parametrize("a, b, expected", [ + pytest.param(1, 1, 2), + pytest.param(2.*np.ones(()), 1.*np.ones((1, 2, 3)), 3.*np.ones((1, 2, 3))), +]) +def test_addition(a, b, expected): + backend = tensorflow_backend.TensorFlowBackend() + tensor1 = backend.convert_to_tensor(a) + tensor2 = backend.convert_to_tensor(b) + result = backend.addition(tensor1, tensor2) + + np.testing.assert_allclose(result, expected) + assert 
tensor1.dtype == tensor2.dtype == result.dtype + + +@pytest.mark.parametrize("a, b, expected", [ + pytest.param(1, 1, 0), + pytest.param(np.ones((1, 2, 3)), np.ones((1, 2, 3)), np.zeros((1, 2, 3))), +]) +def test_subtraction(a, b, expected): + backend = tensorflow_backend.TensorFlowBackend() + tensor1 = backend.convert_to_tensor(a) + tensor2 = backend.convert_to_tensor(b) + result = backend.subtraction(tensor1, tensor2) + + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype + + +@pytest.mark.parametrize("a, b, expected", [ + pytest.param(1, 1, 1), pytest.param(np.ones((1, 2, 3)), np.ones((1, 2, 3)), np.ones((1, 2, 3))), - pytest.param(2. * np.ones(()), np.ones((1, 2, 3)), 2. * np.ones((1, 2, 3))), ]) def test_multiply(a, b, expected): backend = tensorflow_backend.TensorFlowBackend() tensor1 = backend.convert_to_tensor(a) tensor2 = backend.convert_to_tensor(b) + result = backend.multiply(tensor1, tensor2) + + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype + + +@pytest.mark.parametrize("a, b, expected", [ + pytest.param(2., 2., 1.), + pytest.param(np.ones(()), 2.*np.ones((1, 2, 3)), 0.5*np.ones((1, 2, 3))), +]) +def test_divide(a, b, expected): + backend = tensorflow_backend.TensorFlowBackend() + tensor1 = backend.convert_to_tensor(a) + tensor2 = backend.convert_to_tensor(b) + result = backend.divide(tensor1, tensor2) - np.testing.assert_allclose(backend.multiply(tensor1, tensor2), expected) + np.testing.assert_allclose(result, expected) + assert tensor1.dtype == tensor2.dtype == result.dtype @pytest.mark.parametrize("dtype", [tf.float64, tf.complex128]) diff --git a/tensornetwork/network_components.py b/tensornetwork/network_components.py index 8d564df5c..0da188416 100644 --- a/tensornetwork/network_components.py +++ b/tensornetwork/network_components.py @@ -105,6 +105,18 @@ def __init__(self, super().__init__() + def __add__(self, other: Union[int, float, 
"BaseNode"]) -> "BaseNode": + raise NotImplementedError("BaseNode has not implemented addition ( + )") + + def __sub__(self, other: Union[int, float, "BaseNode"]) -> "BaseNode": + raise NotImplementedError("BaseNode has not implemented subtraction ( - )") + + def __mul__(self, other: Union[int, float, "BaseNode"]) -> "BaseNode": + raise NotImplementedError("BaseNode has not implemented multiply ( * )") + + def __truediv__(self, other: Union[int, float, "BaseNode"]) -> "BaseNode": + raise NotImplementedError("BaseNode has not implemented divide ( / )") + @property def dtype(self): #any derived instance of BaseNode always has to have a tensor @@ -555,6 +567,71 @@ def __init__(self, backend=backend_obj, shape=backend_obj.shape_tuple(self._tensor)) + def op_protection(self, other: Union[int, float, "Node"]) -> "Node": + if not isinstance(other, (int, float, Node)): + raise TypeError("Operand should be one of int, float, Node type") + if not hasattr(self, '_tensor'): + raise AttributeError("Please provide a valid tensor for this Node.") + if isinstance(other, Node): + if not self.backend.name == other.backend.name: + raise TypeError("Operands backend must match.\noperand 1 backend: {}\ + \noperand 2 backend: {}".format(self.backend.name, + other.backend.name)) + if not hasattr(other, '_tensor'): + raise AttributeError("Please provide a valid tensor for this Node.") + else: + other_tensor = self.backend.convert_to_tensor(other) + other = Node(tensor=other_tensor, backend=self.backend.name) + return other + + def __add__(self, other: Union[int, float, "Node"]) -> "Node": + other = self.op_protection(other) + new_tensor = self.backend.addition(self.tensor, other.tensor) + if len(self.axis_names) > len(other.axis_names): + axis_names = self.axis_names + else: + axis_names = other.axis_names + return Node(tensor=new_tensor, + name=self.name, + axis_names=axis_names, + backend=self.backend.name) + + def __sub__(self, other: Union[int, float, "Node"]) -> "Node": + other = 
self.op_protection(other) + new_tensor = self.backend.subtraction(self.tensor, other.tensor) + if len(self.axis_names) > len(other.axis_names): + axis_names = self.axis_names + else: + axis_names = other.axis_names + return Node(tensor=new_tensor, + name=self.name, + axis_names=axis_names, + backend=self.backend.name) + + def __mul__(self, other: Union[int, float, "Node"]) -> "Node": + other = self.op_protection(other) + new_tensor = self.backend.multiply(self.tensor, other.tensor) + if len(self.axis_names) > len(other.axis_names): + axis_names = self.axis_names + else: + axis_names = other.axis_names + return Node(tensor=new_tensor, + name=self.name, + axis_names=axis_names, + backend=self.backend.name) + + def __truediv__(self, other: Union[int, float, "Node"]) -> "Node": + other = self.op_protection(other) + new_tensor = self.backend.divide(self.tensor, other.tensor) + if len(self.axis_names) > len(other.axis_names): + axis_names = self.axis_names + else: + axis_names = other.axis_names + return Node(tensor=new_tensor, + name=self.name, + axis_names=axis_names, + backend=self.backend.name) + def get_tensor(self) -> Tensor: return self.tensor @@ -653,6 +730,18 @@ def __init__(self, backend=backend_obj, shape=(dimension,) * rank) + def __add__(self, other: Union[int, float, "BaseNode"]) -> "BaseNode": + raise NotImplementedError("BaseNode has not implemented addition ( + )") + + def __sub__(self, other: Union[int, float, "BaseNode"]) -> "BaseNode": + raise NotImplementedError("BaseNode has not implemented subtraction ( - )") + + def __mul__(self, other: Union[int, float, "BaseNode"]) -> "BaseNode": + raise NotImplementedError("BaseNode has not implemented multiply ( * )") + + def __truediv__(self, other: Union[int, float, "BaseNode"]) -> "BaseNode": + raise NotImplementedError("BaseNode has not implemented divide ( / )") + @property def dtype(self): # Override so we don't construct the dense tensor when asked for the dtype! 
diff --git a/tensornetwork/tests/network_components_free_test.py b/tensornetwork/tests/network_components_free_test.py index a25993c4b..5a1e27ba0 100644 --- a/tensornetwork/tests/network_components_free_test.py +++ b/tensornetwork/tests/network_components_free_test.py @@ -1,5 +1,6 @@ import numpy as np import tensorflow as tf +import torch import pytest from unittest.mock import patch from collections import namedtuple @@ -25,6 +26,18 @@ def get_tensor(self): #pylint: disable=useless-super-delegation def set_tensor(self, tensor): #pylint: disable=useless-super-delegation return super().set_tensor(tensor) + def __add__(self, other): #pylint: disable=useless-super-delegation + return super().__add__(other) + + def __sub__(self, other): #pylint: disable=useless-super-delegation + return super().__sub__(other) + + def __mul__(self, other): #pylint: disable=useless-super-delegation + return super().__mul__(other) + + def __truediv__(self, other): #pylint: disable=useless-super-delegation + return super().__truediv__(other) + @property def shape(self): return super().shape @@ -383,6 +396,320 @@ def test_node_magic_matmul(backend): np.testing.assert_allclose(actual.tensor, expected) +def test_between_node_add_op(backend): + node1 = Node(tensor=np.array([[1, 2], [3, 4]]), backend=backend) + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend=backend) + node3 = Node(tensor=np.array([[1., 2.], [3., 4.]]), backend=backend) + int_node = Node(tensor=np.array(2, dtype=np.int64), backend=backend) + float_node = Node(tensor=np.array(2.5, dtype=np.float64), backend=backend) + + expected = np.array([[11, 12], [13, 14]]) + result = (node1 + node2).tensor + np.testing.assert_almost_equal(result, expected) + assert node1.tensor.dtype == node2.tensor.dtype == result.dtype + + expected = np.array([[3, 4], [5, 6]]) + result = (node1 + int_node).tensor + np.testing.assert_almost_equal(result, expected) + assert node1.tensor.dtype == int_node.tensor.dtype == result.dtype + expected 
= np.array([[3, 4], [5, 6]]) + result = (int_node + node1).tensor + np.testing.assert_almost_equal(result, expected) + assert node1.tensor.dtype == int_node.tensor.dtype == result.dtype + + expected = np.array([[3.5, 4.5], [5.5, 6.5]]) + result = (node3 + float_node).tensor + np.testing.assert_almost_equal(result, expected) + assert node3.dtype == float_node.dtype == result.dtype + expected = np.array([[3.5, 4.5], [5.5, 6.5]]) + result = (float_node + node3).tensor + np.testing.assert_almost_equal(result, expected) + assert node3.dtype == float_node.dtype == result.dtype + + +def test_node_and_scalar_add_op(backend): + node = Node(tensor=np.array([[1, 2], [3, 4]], dtype=np.int32), backend=backend) + expected = np.array([[3, 4], [5, 6]]) + result = (node + 2).tensor + np.testing.assert_almost_equal(result, expected) + if backend == 'jax': + assert result.dtype == 'int64' + else: + assert node.tensor.dtype == result.dtype + + node = Node(tensor=np.array([[1, 2], [3, 4]], dtype=np.float32), backend=backend) + expected = np.array([[3.5, 4.5], [5.5, 6.5]]) + result = (node + 2.5).tensor + np.testing.assert_almost_equal(result, expected) + if backend == 'jax': + assert result.dtype == 'float64' + else: + assert node.tensor.dtype == result.dtype + + +def test_between_node_sub_op(backend): + node1 = Node(tensor=np.array([[1, 2], [3, 4]]), backend=backend) + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend=backend) + node3 = Node(tensor=np.array([[1., 2.], [3., 4.]]), backend=backend) + int_node = Node(tensor=np.array(2, dtype=np.int64), backend=backend) + float_node = Node(tensor=np.array(2.5, dtype=np.float64), backend=backend) + + expected = np.array([[-9, -8], [-7, -6]]) + result = (node1 - node2).tensor + np.testing.assert_almost_equal(result, expected) + assert node1.tensor.dtype == node2.tensor.dtype == result.dtype + + expected = np.array([[-1, 0], [1, 2]]) + result = (node1 - int_node).tensor + np.testing.assert_almost_equal(result, expected) + assert 
node1.tensor.dtype == int_node.tensor.dtype == result.dtype + expected = np.array([[1, 0], [-1, -2]]) + result = (int_node - node1).tensor + np.testing.assert_almost_equal(result, expected) + assert node1.tensor.dtype == int_node.tensor.dtype == result.dtype + + expected = np.array([[-1.5, -0.5], [0.5, 1.5]]) + result = (node3 - float_node).tensor + np.testing.assert_almost_equal(result, expected) + assert node3.dtype == float_node.dtype == result.dtype + expected = np.array([[1.5, 0.5], [-0.5, -1.5]]) + result = (float_node - node3).tensor + np.testing.assert_almost_equal(result, expected) + assert node3.dtype == float_node.dtype == result.dtype + + +def test_node_and_scalar_sub_op(backend): + node = Node(tensor=np.array([[1, 2], [3, 4]], dtype=np.int32), backend=backend) + expected = np.array([[-1, 0], [1, 2]]) + result = (node - 2).tensor + np.testing.assert_almost_equal(result, expected) + if backend == 'jax': + assert result.dtype == 'int64' + else: + assert node.tensor.dtype == result.dtype + + node = Node(tensor=np.array([[1, 2], [3, 4]], dtype=np.float32), backend=backend) + expected = np.array([[-1.5, -0.5], [0.5, 1.5]]) + result = (node - 2.5).tensor + np.testing.assert_almost_equal(result, expected) + if backend == 'jax': + assert result.dtype == 'float64' + else: + assert node.tensor.dtype == result.dtype + + +def test_between_node_mul_op(backend): + node1 = Node(tensor=np.array([[1, 2], [3, 4]]), backend=backend) + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend=backend) + node3 = Node(tensor=np.array([[1., 2.], [3., 4.]]), backend=backend) + int_node = Node(tensor=np.array(2, dtype=np.int64), backend=backend) + float_node = Node(tensor=np.array(2.5, dtype=np.float64), backend=backend) + + expected = np.array([[10, 20], [30, 40]]) + result = (node1 * node2).tensor + np.testing.assert_almost_equal(result, expected) + assert node1.tensor.dtype == node2.tensor.dtype == result.dtype + + expected = np.array([[2, 4], [6, 8]]) + result = (node1 * 
int_node).tensor + np.testing.assert_almost_equal(result, expected) + assert node1.tensor.dtype == int_node.tensor.dtype == result.dtype + result = (int_node * node1).tensor + np.testing.assert_almost_equal(result, expected) + + expected = np.array([[2.5, 5], [7.5, 10]]) + result = (node3 * float_node).tensor + np.testing.assert_almost_equal(result, expected) + assert node3.dtype == float_node.dtype == result.dtype + result = (float_node * node3).tensor + np.testing.assert_almost_equal(result, expected) + assert node3.dtype == float_node.dtype == result.dtype + + +def test_node_and_scalar_mul_op(backend): + node = Node(tensor=np.array([[1, 2], [3, 4]], dtype=np.int32), backend=backend) + expected = np.array([[2, 4], [6, 8]]) + result = (node * 2).tensor + np.testing.assert_almost_equal(result, expected) + if backend == 'jax': + assert result.dtype == 'int64' + else: + assert node.tensor.dtype == result.dtype + + node = Node(tensor=np.array([[1, 2], [3, 4]], dtype=np.float32), backend=backend) + expected = np.array([[2.5, 5], [7.5, 10]]) + result = (node * 2.5).tensor + np.testing.assert_almost_equal(result, expected) + if backend == 'jax': + assert result.dtype == 'float64' + else: + assert node.tensor.dtype == result.dtype + + +def test_between_node_div_op(backend): + node1 = Node(tensor=np.array([[1., 2.], [3., 4.]]), backend=backend) + node2 = Node(tensor=np.array([[10., 10.], [10., 10.]]), backend=backend) + node3 = Node(tensor=np.array([[1, 2], [3, 4]]), backend=backend) + int_node = Node(tensor=np.array(2, dtype=np.int64), backend=backend) + float_node = Node(tensor=np.array(2.5, dtype=np.float64), backend=backend) + + expected = np.array([[0.1, 0.2], [0.3, 0.4]]) + result = (node1 / node2).tensor + np.testing.assert_almost_equal(result, expected) + assert node1.tensor.dtype == node2.tensor.dtype == result.dtype + + expected = np.array([[0.5, 1.], [1.5, 2.]]) + expected_pytorch = np.array([[0, 1], [1, 2]]) + result = (node3 / int_node).tensor + if backend == 
'pytorch': + np.testing.assert_almost_equal(result, expected_pytorch) + assert node3.tensor.dtype == result.dtype == torch.int64 + else: + np.testing.assert_almost_equal(result, expected) + assert node3.tensor.dtype == 'int64' + assert result.dtype == 'float64' + + expected = np.array([[2., 1.], [2/3, 0.5]]) + expected_pytorch = np.array([[2, 1], [0, 0]]) + result = (int_node / node3).tensor + if backend == 'pytorch': + np.testing.assert_almost_equal(result, expected_pytorch) + assert node3.tensor.dtype == result.dtype == torch.int64 + else: + np.testing.assert_almost_equal(result, expected) + assert node3.tensor.dtype == 'int64' + assert result.dtype == 'float64' + + expected = np.array([[4., 4.], [4., 4.]]) + result = (node2 / float_node).tensor + np.testing.assert_almost_equal(result, expected) + assert node2.dtype == float_node.dtype == result.dtype + expected = np.array([[0.25, 0.25], [0.25, 0.25]]) + result = (float_node / node2).tensor + np.testing.assert_almost_equal(result, expected) + assert node2.dtype == float_node.dtype == result.dtype + + +def test_node_and_scalar_div_op(backend): + node = Node(tensor=np.array([[5, 10], [15, 20]], dtype=np.int32), backend=backend) + expected = np.array([[0.5, 1.], [1.5, 2.]]) + expected_pytorch = np.array([[0, 1], [1, 2]]) + result = (node / 10).tensor + if backend == 'pytorch': + np.testing.assert_almost_equal(result, expected_pytorch) + assert node.tensor.dtype == result.dtype == torch.int32 + else: + np.testing.assert_almost_equal(result, expected) + assert result.dtype == 'float64' + assert node.tensor.dtype == 'int32' + + node = Node(tensor=np.array([[5., 10.], [15., 20.]], dtype=np.float32), backend=backend) + expected = np.array([[2., 4.], [6., 8.]]) + result = (node / 2.5).tensor + np.testing.assert_almost_equal(result, expected) + if backend == 'jax': + assert result.dtype == 'float64' + else: + assert node.tensor.dtype == result.dtype + + +def test_node_add_input_error(): + #pylint: disable=unused-variable + 
#pytype: disable=unsupported-operands + node1 = Node(tensor=2, backend='numpy') + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend='numpy') + + del node1._tensor + with pytest.raises(AttributeError): + result = node1 + node2 + result = node2 + node1 + + node1.tensor = 1 + node2 = 'str' + copynode = tn.CopyNode(rank=4, dimension=3) + with pytest.raises(TypeError): + result = node1 + node2 + result = node1 + copynode + + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend='pytorch') + with pytest.raises(TypeError): + result = node1 + node2 + #pytype: enable=unsupported-operands + + +def test_node_sub_input_error(): + #pylint: disable=unused-variable + #pytype: disable=unsupported-operands + node1 = Node(tensor=2, backend='numpy') + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend='numpy') + + del node1._tensor + with pytest.raises(AttributeError): + result = node1 - node2 + result = node2 - node1 + + node1.tensor = 1 + node2 = 'str' + copynode = tn.CopyNode(rank=4, dimension=3) + with pytest.raises(TypeError): + result = node1 - node2 + result = node1 - copynode + + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend='pytorch') + with pytest.raises(TypeError): + result = node1 - node2 + #pytype: enable=unsupported-operands + + +def test_node_mul_input_error(): + #pylint: disable=unused-variable + #pytype: disable=unsupported-operands + node1 = Node(tensor=2, backend='numpy') + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend='numpy') + + del node1._tensor + with pytest.raises(AttributeError): + result = node1 * node2 + result = node2 * node1 + + node1.tensor = 1 + node2 = 'str' + copynode = tn.CopyNode(rank=4, dimension=3) + with pytest.raises(TypeError): + result = node1 * node2 + result = node1 * copynode + + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend='pytorch') + with pytest.raises(TypeError): + result = node1 * node2 + #pytype: enable=unsupported-operands + + +def test_node_div_input_error(): + 
#pylint: disable=unused-variable + #pytype: disable=unsupported-operands + node1 = Node(tensor=2, backend='numpy') + node1 = Node(tensor=2, backend='numpy') + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend='numpy') + + del node1._tensor + with pytest.raises(AttributeError): + result = node1 / node2 + result = node2 / node1 + + node1.tensor = 1 + node2 = 'str' + copynode = tn.CopyNode(rank=4, dimension=3) + with pytest.raises(TypeError): + result = node1 / node2 + result = node1 / copynode + + node2 = Node(tensor=np.array([[10, 10], [10, 10]]), backend='pytorch') + with pytest.raises(TypeError): + result = node1 / node2 + #pytype: enable=unsupported-operands + + def test_node_save_structure(tmp_path, single_node_edge): node = single_node_edge.node with h5py.File(tmp_path / 'nodes', 'w') as node_file: @@ -1265,4 +1592,4 @@ def test_remove_edges_trace_raises_value_error(single_node_edge): node = single_node_edge.node edge = tn.connect(node[1], node[2]) with pytest.raises(ValueError): - _remove_edges(edge, node, node, node) # pytype: disable=wrong-arg-types \ No newline at end of file + _remove_edges(edge, node, node, node) # pytype: disable=wrong-arg-types