From 6dc39f9e98ecbe4d22f4610c9b9feca86cc0e61f Mon Sep 17 00:00:00 2001 From: Vahid Tavanashad <120411540+vtavana@users.noreply.github.com> Date: Sun, 8 Dec 2024 01:25:28 -0600 Subject: [PATCH] updating tests - Part1 (#2210) This is part 1 of a series of PRs in which the tests are refactored. In this PR, `test_linalg.py`, `test_product.py`, `test_statistics.py`, `test_fft.py`, and `test_sort.py` are updated. --- dpnp/dpnp_iface_linearalgebra.py | 63 +- dpnp/dpnp_iface_sorting.py | 17 +- dpnp/dpnp_utils/dpnp_utils_linearalgebra.py | 73 +- dpnp/tests/test_fft.py | 70 +- dpnp/tests/test_linalg.py | 1397 +++++++++---------- dpnp/tests/test_mathematical.py | 68 +- dpnp/tests/test_product.py | 222 ++- dpnp/tests/test_sort.py | 190 +-- dpnp/tests/test_statistics.py | 291 ++-- 9 files changed, 1111 insertions(+), 1280 deletions(-) diff --git a/dpnp/dpnp_iface_linearalgebra.py b/dpnp/dpnp_iface_linearalgebra.py index 49e1e3df892..bb89f0769d9 100644 --- a/dpnp/dpnp_iface_linearalgebra.py +++ b/dpnp/dpnp_iface_linearalgebra.py @@ -37,9 +37,7 @@ """ - import numpy -from dpctl.tensor._numpy_helper import normalize_axis_tuple import dpnp @@ -48,6 +46,7 @@ dpnp_dot, dpnp_kron, dpnp_matmul, + dpnp_tensordot, dpnp_vecdot, ) @@ -1047,65 +1046,7 @@ def tensordot(a, b, axes=2): # TODO: use specific scalar-vector kernel return dpnp.multiply(a, b) - try: - iter(axes) - except Exception as e: # pylint: disable=broad-exception-caught - if not isinstance(axes, int): - raise TypeError("Axes must be an integer.") from e - if axes < 0: - raise ValueError("Axes must be a non-negative integer.") from e - axes_a = tuple(range(-axes, 0)) - axes_b = tuple(range(0, axes)) - else: - if len(axes) != 2: - raise ValueError("Axes must consist of two sequences.") - - axes_a, axes_b = axes - axes_a = (axes_a,) if dpnp.isscalar(axes_a) else axes_a - axes_b = (axes_b,) if dpnp.isscalar(axes_b) else axes_b - - if len(axes_a) != len(axes_b): - raise ValueError("Axes length mismatch.") - - # Make the axes non-negative - a_ndim = a.ndim - b_ndim = b.ndim - axes_a = normalize_axis_tuple(axes_a, a_ndim, "axis_a") - axes_b = normalize_axis_tuple(axes_b, b_ndim, "axis_b") - - if a.ndim == 0 or b.ndim == 0: - # TODO: use specific scalar-vector kernel - return dpnp.multiply(a, b) - - a_shape = a.shape - b_shape = b.shape - for axis_a, axis_b in zip(axes_a, axes_b): - if a_shape[axis_a] != b_shape[axis_b]: - raise ValueError( - "shape of input arrays is not similar at requested axes." 
- ) - - # Move the axes to sum over, to the end of "a" - not_in = tuple(k for k in range(a_ndim) if k not in axes_a) - newaxes_a = not_in + axes_a - n1 = int(numpy.prod([a_shape[ax] for ax in not_in])) - n2 = int(numpy.prod([a_shape[ax] for ax in axes_a])) - newshape_a = (n1, n2) - olda = [a_shape[axis] for axis in not_in] - - # Move the axes to sum over, to the front of "b" - not_in = tuple(k for k in range(b_ndim) if k not in axes_b) - newaxes_b = tuple(axes_b + not_in) - n1 = int(numpy.prod([b_shape[ax] for ax in axes_b])) - n2 = int(numpy.prod([b_shape[ax] for ax in not_in])) - newshape_b = (n1, n2) - oldb = [b_shape[axis] for axis in not_in] - - at = dpnp.transpose(a, newaxes_a).reshape(newshape_a) - bt = dpnp.transpose(b, newaxes_b).reshape(newshape_b) - res = dpnp.matmul(at, bt) - - return res.reshape(olda + oldb) + return dpnp_tensordot(a, b, axes=axes) def vdot(a, b): diff --git a/dpnp/dpnp_iface_sorting.py b/dpnp/dpnp_iface_sorting.py index 640f17da55b..db6c36268dd 100644 --- a/dpnp/dpnp_iface_sorting.py +++ b/dpnp/dpnp_iface_sorting.py @@ -64,13 +64,18 @@ def _wrap_sort_argsort( if order is not None: raise NotImplementedError( - "order keyword argument is only supported with its default value." - ) - if kind is not None and stable is not None: - raise ValueError( - "`kind` and `stable` parameters can't be provided at the same time." - " Use only one of them." + "`order` keyword argument is only supported with its default value." ) + if stable is not None: + if stable not in [True, False]: + raise ValueError( + "`stable` parameter should be None, True, or False." + ) + if kind is not None: + raise ValueError( + "`kind` and `stable` parameters can't be provided at" + " the same time. Use only one of them." + ) usm_a = dpnp.get_usm_ndarray(a) if axis is None: diff --git a/dpnp/dpnp_utils/dpnp_utils_linearalgebra.py b/dpnp/dpnp_utils/dpnp_utils_linearalgebra.py index eb4ea10d803..90832f99f8c 100644 --- a/dpnp/dpnp_utils/dpnp_utils_linearalgebra.py +++ b/dpnp/dpnp_utils/dpnp_utils_linearalgebra.py @@ -36,7 +36,14 @@ from dpnp.dpnp_array import dpnp_array from dpnp.dpnp_utils import get_usm_allocations -__all__ = ["dpnp_cross", "dpnp_dot", "dpnp_kron", "dpnp_matmul", "dpnp_vecdot"] +__all__ = [ + "dpnp_cross", + "dpnp_dot", + "dpnp_kron", + "dpnp_matmul", + "dpnp_tensordot", + "dpnp_vecdot", +] def _compute_res_dtype(*arrays, sycl_queue, dtype=None, casting="no"): @@ -974,6 +981,70 @@ def dpnp_matmul( return result +def dpnp_tensordot(a, b, axes=2): + """Tensor dot product of two arrays.""" + + try: + iter(axes) + except Exception as e: # pylint: disable=broad-exception-caught + if not isinstance(axes, int): + raise TypeError("Axes must be an integer.") from e + if axes < 0: + raise ValueError("Axes must be a non-negative integer.") from e + axes_a = tuple(range(-axes, 0)) + axes_b = tuple(range(0, axes)) + else: + if len(axes) != 2: + raise ValueError("Axes must consist of two sequences.") + + axes_a, axes_b = axes + axes_a = (axes_a,) if dpnp.isscalar(axes_a) else axes_a + axes_b = (axes_b,) if dpnp.isscalar(axes_b) else axes_b + + if len(axes_a) != len(axes_b): + raise ValueError("Axes length mismatch.") + + # Make the axes non-negative + a_ndim = a.ndim + b_ndim = b.ndim + axes_a = normalize_axis_tuple(axes_a, a_ndim, "axis_a") + axes_b = normalize_axis_tuple(axes_b, b_ndim, "axis_b") + + if a.ndim == 0 or b.ndim == 0: + # TODO: use specific scalar-vector kernel + return dpnp.multiply(a, b) + + a_shape = a.shape + b_shape = b.shape + for axis_a, axis_b in zip(axes_a, axes_b): + if 
a_shape[axis_a] != b_shape[axis_b]: + raise ValueError( + "shape of input arrays is not similar at requested axes." + ) + + # Move the axes to sum over, to the end of "a" + not_in = tuple(k for k in range(a_ndim) if k not in axes_a) + newaxes_a = not_in + axes_a + n1 = int(numpy.prod([a_shape[ax] for ax in not_in])) + n2 = int(numpy.prod([a_shape[ax] for ax in axes_a])) + newshape_a = (n1, n2) + olda = [a_shape[axis] for axis in not_in] + + # Move the axes to sum over, to the front of "b" + not_in = tuple(k for k in range(b_ndim) if k not in axes_b) + newaxes_b = tuple(axes_b + not_in) + n1 = int(numpy.prod([b_shape[ax] for ax in axes_b])) + n2 = int(numpy.prod([b_shape[ax] for ax in not_in])) + newshape_b = (n1, n2) + oldb = [b_shape[axis] for axis in not_in] + + at = dpnp.transpose(a, newaxes_a).reshape(newshape_a) + bt = dpnp.transpose(b, newaxes_b).reshape(newshape_b) + res = dpnp.matmul(at, bt) + + return res.reshape(olda + oldb) + + def dpnp_vecdot( x1, x2, diff --git a/dpnp/tests/test_fft.py b/dpnp/tests/test_fft.py index d2e730692ff..196b7a57345 100644 --- a/dpnp/tests/test_fft.py +++ b/dpnp/tests/test_fft.py @@ -736,7 +736,7 @@ def setup_method(self): ) @pytest.mark.parametrize("n", [None, 5, 20]) @pytest.mark.parametrize("norm", [None, "forward", "ortho"]) - def test_fft_1D(self, dtype, n, norm): + def test_irfft_1D(self, dtype, n, norm): x = dpnp.linspace(-1, 1, 11, dtype=dtype) a = dpnp.sin(x) a_np = dpnp.asnumpy(a) @@ -750,7 +750,7 @@ def test_fft_1D(self, dtype, n, norm): @pytest.mark.parametrize("dtype", get_complex_dtypes()) @pytest.mark.parametrize("n", [None, 5, 18]) @pytest.mark.parametrize("norm", ["forward", "backward", "ortho"]) - def test_fft_1D_complex(self, dtype, n, norm): + def test_irfft_1D_complex(self, dtype, n, norm): x = dpnp.linspace(-1, 1, 11) a = dpnp.sin(x) + 1j * dpnp.cos(x) a = _make_array_Hermitian(a, n) @@ -766,7 +766,7 @@ def test_fft_1D_complex(self, dtype, n, norm): @pytest.mark.parametrize("axis", [-1, 1, 0]) @pytest.mark.parametrize("norm", [None, "forward", "ortho"]) @pytest.mark.parametrize("order", ["C", "F"]) - def test_fft_1D_on_2D_array(self, dtype, n, axis, norm, order): + def test_irfft_1D_on_2D_array(self, dtype, n, axis, norm, order): a_np = numpy.arange(12, dtype=dtype).reshape(3, 4, order=order) a = dpnp.asarray(a_np) @@ -779,7 +779,7 @@ def test_fft_1D_on_2D_array(self, dtype, n, axis, norm, order): @pytest.mark.parametrize("axis", [0, 1, 2]) @pytest.mark.parametrize("norm", ["forward", "backward", "ortho"]) @pytest.mark.parametrize("order", ["C", "F"]) - def test_fft_1D_on_3D_array(self, dtype, n, axis, norm, order): + def test_irfft_1D_on_3D_array(self, dtype, n, axis, norm, order): x1 = numpy.random.uniform(-10, 10, 120) x2 = numpy.random.uniform(-10, 10, 120) a_np = numpy.array(x1 + 1j * x2, dtype=dtype).reshape( @@ -814,7 +814,7 @@ def test_fft_1D_on_3D_array(self, dtype, n, axis, norm, order): ) @pytest.mark.parametrize("n", [None, 5, 18]) - def test_fft_usm_ndarray(self, n): + def test_irfft_usm_ndarray(self, n): x = dpnp.linspace(-1, 1, 11) a = dpnp.sin(x) + 1j * dpnp.cos(x) a = _make_array_Hermitian(a, n) @@ -831,7 +831,7 @@ def test_fft_usm_ndarray(self, n): @pytest.mark.parametrize("dtype", get_complex_dtypes()) @pytest.mark.parametrize("n", [None, 5, 18]) @pytest.mark.parametrize("norm", ["forward", "backward", "ortho"]) - def test_fft_1D_out(self, dtype, n, norm): + def test_irfft_1D_out(self, dtype, n, norm): x = dpnp.linspace(-1, 1, 11) a = dpnp.sin(x) + 1j * dpnp.cos(x) a = _make_array_Hermitian(a, n) @@ -851,7 
+851,7 @@ def test_fft_1D_out(self, dtype, n, norm): @pytest.mark.parametrize("axis", [-1, 0]) @pytest.mark.parametrize("norm", [None, "forward", "ortho"]) @pytest.mark.parametrize("order", ["C", "F"]) - def test_fft_1D_on_2D_array_out(self, dtype, n, axis, norm, order): + def test_irfft_1D_on_2D_array_out(self, dtype, n, axis, norm, order): a_np = numpy.arange(12, dtype=dtype).reshape(3, 4, order=order) a = dpnp.asarray(a_np) @@ -865,7 +865,7 @@ def test_fft_1D_on_2D_array_out(self, dtype, n, axis, norm, order): expected = numpy.fft.irfft(a_np, n=n, axis=axis, norm=norm) assert_dtype_allclose(result, expected, check_only_type_kind=True) - def test_fft_validate_out(self): + def test_irfft_validate_out(self): # Invalid dtype for c2r FFT a = dpnp.ones((10,), dtype=dpnp.complex64) out = dpnp.empty((18,), dtype=dpnp.complex64) @@ -882,7 +882,7 @@ def setup_method(self): @pytest.mark.parametrize( "shape", [(64,), (8, 8), (4, 16), (4, 4, 4), (2, 4, 4, 2)] ) - def test_fft_rfft(self, dtype, shape): + def test_rfft(self, dtype, shape): np_data = numpy.arange(64, dtype=dtype).reshape(shape) dpnp_data = dpnp.arange(64, dtype=dtype).reshape(shape) @@ -896,7 +896,7 @@ def test_fft_rfft(self, dtype, shape): ) @pytest.mark.parametrize("n", [None, 5, 20]) @pytest.mark.parametrize("norm", [None, "forward", "ortho"]) - def test_fft_1D(self, dtype, n, norm): + def test_rfft_1D(self, dtype, n, norm): x = dpnp.linspace(-1, 1, 11, dtype=dtype) a = dpnp.sin(x) a_np = dpnp.asnumpy(a) @@ -907,7 +907,7 @@ def test_fft_1D(self, dtype, n, norm): @pytest.mark.parametrize("n", [None, 5, 20]) @pytest.mark.parametrize("norm", [None, "forward", "ortho"]) - def test_fft_bool(self, n, norm): + def test_rfft_bool(self, n, norm): a = dpnp.ones(11, dtype=dpnp.bool) a_np = dpnp.asnumpy(a) @@ -920,7 +920,7 @@ def test_fft_bool(self, n, norm): @pytest.mark.parametrize("axis", [-1, 1, 0]) @pytest.mark.parametrize("norm", [None, "forward", "ortho"]) @pytest.mark.parametrize("order", ["C", "F"]) - def test_fft_1D_on_2D_array(self, dtype, n, axis, norm, order): + def test_rfft_1D_on_2D_array(self, dtype, n, axis, norm, order): a_np = numpy.arange(12, dtype=dtype).reshape(3, 4, order=order) a = dpnp.asarray(a_np) @@ -933,7 +933,7 @@ def test_fft_1D_on_2D_array(self, dtype, n, axis, norm, order): @pytest.mark.parametrize("axis", [0, 1, 2]) @pytest.mark.parametrize("norm", ["forward", "backward", "ortho"]) @pytest.mark.parametrize("order", ["C", "F"]) - def test_fft_1D_on_3D_array(self, dtype, n, axis, norm, order): + def test_rfft_1D_on_3D_array(self, dtype, n, axis, norm, order): a_np = numpy.arange(24, dtype=dtype).reshape(2, 3, 4, order=order) a = dpnp.asarray(a_np) @@ -942,7 +942,7 @@ def test_fft_1D_on_3D_array(self, dtype, n, axis, norm, order): assert_dtype_allclose(result, expected, check_only_type_kind=True) @pytest.mark.parametrize("n", [None, 5, 20]) - def test_fft_usm_ndarray(self, n): + def test_rfft_usm_ndarray(self, n): x = dpt.linspace(-1, 1, 11) a_usm = dpt.asarray(dpt.sin(x)) a_np = dpt.asnumpy(a_usm) @@ -958,7 +958,7 @@ def test_fft_usm_ndarray(self, n): @pytest.mark.parametrize("dtype", get_float_dtypes()) @pytest.mark.parametrize("n", [None, 5, 20]) @pytest.mark.parametrize("norm", ["forward", "backward", "ortho"]) - def test_fft_1D_out(self, dtype, n, norm): + def test_rfft_1D_out(self, dtype, n, norm): x = dpnp.linspace(-1, 1, 11) a = dpnp.sin(x) + 1j * dpnp.cos(x) a = dpnp.asarray(a, dtype=dtype) @@ -978,7 +978,7 @@ def test_fft_1D_out(self, dtype, n, norm): @pytest.mark.parametrize("axis", [-1, 0]) 
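A minimal sketch of the r2c/c2r round trip that the renamed test_rfft_* / test_irfft_* cases above exercise; it assumes only the dpnp.fft.rfft and dpnp.fft.irfft calls already used in these tests, and the tolerance value is illustrative:

    import numpy
    import dpnp

    x = dpnp.linspace(-1, 1, 11)
    a = dpnp.sin(x)                          # real-valued input signal
    spec = dpnp.fft.rfft(a)                  # r2c FFT: n//2 + 1 complex coefficients
    back = dpnp.fft.irfft(spec, n=a.size)    # c2r FFT: reconstructs the real signal
    numpy.testing.assert_allclose(dpnp.asnumpy(back), dpnp.asnumpy(a), atol=1e-6)
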
@pytest.mark.parametrize("norm", [None, "forward", "ortho"]) @pytest.mark.parametrize("order", ["C", "F"]) - def test_fft_1D_on_2D_array_out(self, dtype, n, axis, norm, order): + def test_rfft_1D_on_2D_array_out(self, dtype, n, axis, norm, order): a_np = numpy.arange(12, dtype=dtype).reshape(3, 4, order=order) a = dpnp.asarray(a_np) @@ -994,7 +994,7 @@ def test_fft_1D_on_2D_array_out(self, dtype, n, axis, norm, order): assert_dtype_allclose(result, expected, check_only_type_kind=True) @pytest.mark.parametrize("xp", [numpy, dpnp]) - def test_fft_error(self, xp): + def test_rfft_error(self, xp): a = xp.ones((4, 3), dtype=xp.complex64) # invalid dtype of input array for r2c FFT if xp == dpnp: @@ -1021,8 +1021,8 @@ def setup_method(self): @pytest.mark.parametrize("norm", ["forward", "backward", "ortho"]) @pytest.mark.parametrize("order", ["C", "F"]) def test_rfft2(self, dtype, axes, norm, order): - x1 = numpy.random.uniform(-10, 10, 24) - a_np = numpy.array(x1, dtype=dtype).reshape(2, 3, 4, order=order) + x = numpy.random.uniform(-10, 10, 24) + a_np = numpy.array(x, dtype=dtype).reshape(2, 3, 4, order=order) a = dpnp.asarray(a_np) result = dpnp.fft.rfft2(a, axes=axes, norm=norm) @@ -1036,9 +1036,9 @@ def test_rfft2(self, dtype, axes, norm, order): @pytest.mark.parametrize("dtype", get_all_dtypes(no_none=True)) def test_irfft2(self, dtype): - # x1 is Hermitian symmetric - x1 = numpy.array([[0, 1, 2], [5, 4, 6], [5, 7, 6]]) - a_np = numpy.array(x1, dtype=dtype) + # x is Hermitian symmetric + x = numpy.array([[0, 1, 2], [5, 4, 6], [5, 7, 6]]) + a_np = numpy.array(x, dtype=dtype) a = dpnp.asarray(a_np) result = dpnp.fft.irfft2(a) @@ -1047,8 +1047,8 @@ def test_irfft2(self, dtype): @pytest.mark.parametrize("s", [None, (3, 3), (10, 10), (3, 10)]) def test_rfft2_s(self, s): - x1 = numpy.random.uniform(-10, 10, 48) - a_np = numpy.array(x1, dtype=numpy.float32).reshape(6, 8) + x = numpy.random.uniform(-10, 10, 48) + a_np = numpy.array(x, dtype=numpy.float32).reshape(6, 8) a = dpnp.asarray(a_np) result = dpnp.fft.rfft2(a, s=s) @@ -1060,7 +1060,7 @@ def test_rfft2_s(self, s): assert_dtype_allclose(result, expected, check_only_type_kind=True) @pytest.mark.parametrize("xp", [numpy, dpnp]) - def test_fft_error(self, xp): + def test_rfft2_error(self, xp): a = xp.ones((2, 3)) # empty axes assert_raises(IndexError, xp.fft.rfft2, a, axes=()) @@ -1085,8 +1085,8 @@ def setup_method(self): @pytest.mark.parametrize("norm", ["forward", "backward", "ortho"]) @pytest.mark.parametrize("order", ["C", "F"]) def test_rfftn(self, dtype, axes, norm, order): - x1 = numpy.random.uniform(-10, 10, 120) - a_np = numpy.array(x1, dtype=dtype).reshape(2, 3, 4, 5, order=order) + x = numpy.random.uniform(-10, 10, 120) + a_np = numpy.array(x, dtype=dtype).reshape(2, 3, 4, 5, order=order) a = dpnp.asarray(a_np) result = dpnp.fft.rfftn(a, axes=axes, norm=norm) @@ -1104,8 +1104,8 @@ def test_rfftn(self, dtype, axes, norm, order): "axes", [(2, 0, 2, 0), (0, 1, 1), (2, 0, 1, 3, 2, 1)] ) def test_rfftn_repeated_axes(self, axes): - x1 = numpy.random.uniform(-10, 10, 120) - a_np = numpy.array(x1, dtype=numpy.float32).reshape(2, 3, 4, 5) + x = numpy.random.uniform(-10, 10, 120) + a_np = numpy.array(x, dtype=numpy.float32).reshape(2, 3, 4, 5) a = dpnp.asarray(a_np) result = dpnp.fft.rfftn(a, axes=axes) @@ -1130,8 +1130,8 @@ def test_rfftn_repeated_axes(self, axes): @pytest.mark.parametrize("axes", [(2, 3, 3, 2), (0, 0, 3, 3)]) @pytest.mark.parametrize("s", [(5, 4, 3, 3), (7, 8, 10, 9)]) def test_rfftn_repeated_axes_with_s(self, axes, s): - x1 = 
numpy.random.uniform(-10, 10, 120) - a_np = numpy.array(x1, dtype=numpy.float32).reshape(2, 3, 4, 5) + x = numpy.random.uniform(-10, 10, 120) + a_np = numpy.array(x, dtype=numpy.float32).reshape(2, 3, 4, 5) a = dpnp.asarray(a_np) result = dpnp.fft.rfftn(a, s=s, axes=axes) @@ -1151,8 +1151,8 @@ def test_rfftn_repeated_axes_with_s(self, axes, s): @pytest.mark.parametrize("axes", [(0, 1, 2, 3), (1, 2, 1, 2), (2, 2, 2, 3)]) @pytest.mark.parametrize("s", [(2, 3, 4, 5), (5, 6, 7, 9), (2, 5, 1, 2)]) def test_rfftn_out(self, axes, s): - x1 = numpy.random.uniform(-10, 10, 120) - a_np = numpy.array(x1, dtype=numpy.float32).reshape(2, 3, 4, 5) + x = numpy.random.uniform(-10, 10, 120) + a_np = numpy.array(x, dtype=numpy.float32).reshape(2, 3, 4, 5) a = dpnp.asarray(a_np) out_shape = list(a.shape) @@ -1185,8 +1185,8 @@ def test_rfftn_out(self, axes, s): assert_dtype_allclose(iresult, iexpected, check_only_type_kind=True) def test_rfftn_1d_array(self): - x1 = numpy.random.uniform(-10, 10, 20) - a_np = numpy.array(x1, dtype=numpy.float32) + x = numpy.random.uniform(-10, 10, 20) + a_np = numpy.array(x, dtype=numpy.float32) a = dpnp.asarray(a_np) result = dpnp.fft.rfftn(a) diff --git a/dpnp/tests/test_linalg.py b/dpnp/tests/test_linalg.py index a0ce71b8208..e6b065d5e1f 100644 --- a/dpnp/tests/test_linalg.py +++ b/dpnp/tests/test_linalg.py @@ -13,7 +13,7 @@ suppress_warnings, ) -import dpnp as inp +import dpnp from .helper import ( assert_dtype_allclose, @@ -112,13 +112,13 @@ def test_usm_ndarray_linalg_batch(func, gen_kwargs, func_kwargs): dpt.asarray(generate_random_numpy_array(shape, **gen_kwargs)) ] - result = getattr(inp.linalg, func)(*dpt_args, **func_kwargs) + result = getattr(dpnp.linalg, func)(*dpt_args, **func_kwargs) if isinstance(result, tuple): for res in result: - assert isinstance(res, inp.ndarray) + assert isinstance(res, dpnp.ndarray) else: - assert isinstance(result, inp.ndarray) + assert isinstance(result, dpnp.ndarray) # check linear algebra functions from dpnp @@ -134,9 +134,9 @@ def test_usm_ndarray_linearalgebra_batch(func): for _ in range(2) ] - result = getattr(inp, func)(*dpt_args) + result = getattr(dpnp, func)(*dpt_args) - assert isinstance(result, inp.ndarray) + assert isinstance(result, dpnp.ndarray) class TestCholesky: @@ -150,17 +150,13 @@ class TestCholesky: [[[7, 2], [2, 7]], [[8, 3], [3, 8]]], ], ], - ids=[ - "2D_array", - "3D_array", - "4D_array", - ], + ids=["2D_array", "3D_array", "4D_array"], ) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) def test_cholesky(self, array, dtype): a = numpy.array(array, dtype=dtype) - ia = inp.array(a) - result = inp.linalg.cholesky(ia) + ia = dpnp.array(a) + result = dpnp.linalg.cholesky(ia) expected = numpy.linalg.cholesky(a) assert_dtype_allclose(result, expected) @@ -174,16 +170,12 @@ def test_cholesky(self, array, dtype): [[[7, 2], [2, 7]], [[8, 3], [3, 8]]], ], ], - ids=[ - "2D_array", - "3D_array", - "4D_array", - ], + ids=["2D_array", "3D_array", "4D_array"], ) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) def test_cholesky_upper(self, array, dtype): - ia = inp.array(array, dtype=dtype) - result = inp.linalg.cholesky(ia, upper=True) + ia = dpnp.array(array, dtype=dtype) + result = dpnp.linalg.cholesky(ia, upper=True) if ia.ndim > 2: n = ia.shape[-1] @@ -192,7 +184,7 @@ def test_cholesky_upper(self, array, dtype): batch_size = ia_reshaped.shape[0] for idx in range(batch_size): # Reconstruct the matrix using the Cholesky decomposition result - if inp.issubdtype(dtype, inp.complexfloating): + if 
dpnp.issubdtype(dtype, dpnp.complexfloating): reconstructed = ( res_reshaped[idx].T.conj() @ res_reshaped[idx] ) @@ -203,7 +195,7 @@ def test_cholesky_upper(self, array, dtype): ) else: # Reconstruct the matrix using the Cholesky decomposition result - if inp.issubdtype(dtype, inp.complexfloating): + if dpnp.issubdtype(dtype, dpnp.complexfloating): reconstructed = result.T.conj() @ result else: reconstructed = result.T @ result @@ -221,17 +213,13 @@ def test_cholesky_upper(self, array, dtype): [[[7, 2], [2, 7]], [[8, 3], [3, 8]]], ], ], - ids=[ - "2D_array", - "3D_array", - "4D_array", - ], + ids=["2D_array", "3D_array", "4D_array"], ) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) def test_cholesky_upper_numpy(self, array, dtype): a = numpy.array(array, dtype=dtype) - ia = inp.array(a) - result = inp.linalg.cholesky(ia, upper=True) + ia = dpnp.array(a) + result = dpnp.linalg.cholesky(ia, upper=True) expected = numpy.linalg.cholesky(a, upper=True) assert_dtype_allclose(result, expected) @@ -246,52 +234,46 @@ def test_cholesky_strides(self): ] ) - a_dp = inp.array(a_np) + a_dp = dpnp.array(a_np) # positive strides expected = numpy.linalg.cholesky(a_np[::2, ::2]) - result = inp.linalg.cholesky(a_dp[::2, ::2]) + result = dpnp.linalg.cholesky(a_dp[::2, ::2]) assert_allclose(expected, result, rtol=1e-3, atol=1e-4) # negative strides expected = numpy.linalg.cholesky(a_np[::-2, ::-2]) - result = inp.linalg.cholesky(a_dp[::-2, ::-2]) + result = dpnp.linalg.cholesky(a_dp[::-2, ::-2]) assert_allclose(expected, result, rtol=1e-3, atol=1e-4) @pytest.mark.parametrize( "shape", - [ - (0, 0), - (3, 0, 0), - (0, 2, 2), - ], - ids=[ - "(0, 0)", - "(3, 0, 0)", - "(0, 2, 2)", - ], + [(0, 0), (3, 0, 0), (0, 2, 2)], + ids=["(0, 0)", "(3, 0, 0)", "(0, 2, 2)"], ) def test_cholesky_empty(self, shape): a = numpy.empty(shape) - ia = inp.array(a) - result = inp.linalg.cholesky(ia) + ia = dpnp.array(a) + result = dpnp.linalg.cholesky(ia) expected = numpy.linalg.cholesky(a) assert_array_equal(expected, result) def test_cholesky_errors(self): - a_dp = inp.array([[1, 2], [2, 5]], dtype="float32") + a_dp = dpnp.array([[1, 2], [2, 5]], dtype="float32") # unsupported type - a_np = inp.asnumpy(a_dp) - assert_raises(TypeError, inp.linalg.cholesky, a_np) + a_np = dpnp.asnumpy(a_dp) + assert_raises(TypeError, dpnp.linalg.cholesky, a_np) # a.ndim < 2 a_dp_ndim_1 = a_dp.flatten() - assert_raises(inp.linalg.LinAlgError, inp.linalg.cholesky, a_dp_ndim_1) + assert_raises( + dpnp.linalg.LinAlgError, dpnp.linalg.cholesky, a_dp_ndim_1 + ) # a is not square - a_dp = inp.ones((2, 3)) - assert_raises(inp.linalg.LinAlgError, inp.linalg.cholesky, a_dp) + a_dp = dpnp.ones((2, 3)) + assert_raises(dpnp.linalg.LinAlgError, dpnp.linalg.cholesky, a_dp) class TestCond: @@ -302,15 +284,13 @@ def setup_method(self): "shape", [(0, 4, 4), (4, 0, 3, 3)], ids=["(0, 5, 3)", "(4, 0, 2, 3)"] ) @pytest.mark.parametrize( - "p", - [None, -inp.inf, -2, -1, 1, 2, inp.inf, "fro"], - ids=["None", "-dpnp.inf", "-2", "-1", "1", "2", "dpnp.inf", "fro"], + "p", [None, -dpnp.inf, -2, -1, 1, 2, dpnp.inf, "fro"] ) - def test_cond_empty(self, shape, p): + def test_empty(self, shape, p): a = numpy.empty(shape) - ia = inp.array(a) + ia = dpnp.array(a) - result = inp.linalg.cond(ia, p=p) + result = dpnp.linalg.cond(ia, p=p) expected = numpy.linalg.cond(a, p=p) assert_dtype_allclose(result, expected) @@ -321,30 +301,15 @@ def test_cond_empty(self, shape, p): "shape", [(4, 4), (2, 4, 3, 3)], ids=["(4, 4)", "(2, 4, 3, 3)"] ) @pytest.mark.parametrize( - "p", - 
[None, -inp.inf, -2, -1, 1, 2, inp.inf, "fro"], - ids=["None", "-dpnp.inf", "-2", "-1", "1", "2", "dpnp.inf", "fro"], + "p", [None, -dpnp.inf, -2, -1, 1, 2, dpnp.inf, "fro"] ) - def test_cond(self, dtype, shape, p): + def test_basic(self, dtype, shape, p): a = numpy.array( numpy.random.uniform(-5, 5, numpy.prod(shape)), dtype=dtype ).reshape(shape) - ia = inp.array(a) - - result = inp.linalg.cond(ia, p=p) - expected = numpy.linalg.cond(a, p=p) - assert_dtype_allclose(result, expected) - - @pytest.mark.parametrize( - "p", - [None, -inp.inf, -2, -1, 1, 2, inp.inf, "fro"], - ids=["None", "-dpnp.inf", "-2", "-1", "1", "2", "dpnp.inf", "fro"], - ) - def test_cond_bool(self, p): - a = numpy.array([[True, True], [True, False]]) - ia = inp.array(a) + ia = dpnp.array(a) - result = inp.linalg.cond(ia, p=p) + result = dpnp.linalg.cond(ia, p=p) expected = numpy.linalg.cond(a, p=p) assert_dtype_allclose(result, expected) @@ -353,53 +318,54 @@ def test_cond_bool(self, p): "shape", [(4, 4), (2, 4, 3, 3)], ids=["(4, 4)", "(2, 4, 3, 3)"] ) @pytest.mark.parametrize( - "p", - [None, -inp.inf, -2, -1, 1, 2, inp.inf, "fro"], - ids=["None", "-dpnp.inf", "-2", "-1", "1", "2", "dpnp.inf", "fro"], + "p", [None, -dpnp.inf, -2, -1, 1, 2, dpnp.inf, "fro"] ) - def test_cond_complex(self, dtype, shape, p): + def test_complex(self, dtype, shape, p): x1 = numpy.random.uniform(-5, 5, numpy.prod(shape)) x2 = numpy.random.uniform(-5, 5, numpy.prod(shape)) a = numpy.array(x1 + 1j * x2, dtype=dtype).reshape(shape) - ia = inp.array(a) + ia = dpnp.array(a) - result = inp.linalg.cond(ia, p=p) + result = dpnp.linalg.cond(ia, p=p) expected = numpy.linalg.cond(a, p=p) assert_dtype_allclose(result, expected) @pytest.mark.parametrize( - "p", - [-inp.inf, -1, 1, inp.inf, "fro"], - ids=["-dpnp.inf", "-1", "1", "dpnp.inf", "fro"], + "p", [None, -dpnp.inf, -2, -1, 1, 2, dpnp.inf, "fro"] ) - def test_cond_nan_input(self, p): - a = numpy.array(numpy.random.uniform(-10, 10, 9)).reshape(3, 3) + def test_bool(self, p): + a = numpy.array([[True, True], [True, False]]) + ia = dpnp.array(a) + + result = dpnp.linalg.cond(ia, p=p) + expected = numpy.linalg.cond(a, p=p) + assert_dtype_allclose(result, expected) + + @pytest.mark.parametrize("p", [-dpnp.inf, -1, 1, dpnp.inf, "fro"]) + def test_nan_input(self, p): + a = generate_random_numpy_array((3, 3)) a[1, 1] = numpy.nan - ia = inp.array(a) + ia = dpnp.array(a) - result = inp.linalg.cond(ia, p=p) + result = dpnp.linalg.cond(ia, p=p) expected = numpy.linalg.cond(a, p=p) assert_dtype_allclose(result, expected) @pytest.mark.parametrize( - "p", - [None, -inp.inf, -2, -1, 1, 2, inp.inf, "fro"], - ids=["None", "-dpnp.inf", "-2", "-1", "1", "2", "dpnp.inf", "fro"], + "p", [None, -dpnp.inf, -2, -1, 1, 2, dpnp.inf, "fro"] ) - def test_cond_nan(self, p): + def test_nan(self, p): a = numpy.array(numpy.random.uniform(-5, 5, 16)).reshape(2, 2, 2, 2) a[0, 0] = 0 a[1, 1] = 0 - ia = inp.array(a) + ia = dpnp.array(a) - result = inp.linalg.cond(ia, p=p) + result = dpnp.linalg.cond(ia, p=p) expected = numpy.linalg.cond(a, p=p) assert_dtype_allclose(result, expected) @pytest.mark.parametrize( - "p", - [None, -inp.inf, -2, -1, 1, 2, inp.inf, "fro"], - ids=["None", "-dpnp.inf", "-2", "-1", "1", "2", "dpnp.inf", "fro"], + "p", [None, -dpnp.inf, -2, -1, 1, 2, dpnp.inf, "fro"] ) @pytest.mark.parametrize( "stride", @@ -411,22 +377,22 @@ def test_cond_nan(self, p): "(-1, 3, 3, -3)", ], ) - def test_cond_strided(self, p, stride): + def test_strided(self, p, stride): A = numpy.random.rand(6, 8, 10, 10) - B = inp.asarray(A) + 
B = dpnp.asarray(A) slices = tuple(slice(None, None, stride[i]) for i in range(A.ndim)) a = A[slices] b = B[slices] - result = inp.linalg.cond(b, p=p) + result = dpnp.linalg.cond(b, p=p) expected = numpy.linalg.cond(a, p=p) assert_dtype_allclose(result, expected, factor=24) - def test_cond_error(self): + def test_error(self): # cond is not defined on empty arrays - ia = inp.empty((2, 0)) + ia = dpnp.empty((2, 0)) with pytest.raises(ValueError): - inp.linalg.cond(ia, p=1) + dpnp.linalg.cond(ia, p=1) class TestDet: @@ -440,17 +406,13 @@ class TestDet: [[[1, 3], [3, 1]], [[0, 1], [1, 3]]], ], ], - ids=[ - "2D_array", - "3D_array", - "4D_array", - ], + ids=["2D_array", "3D_array", "4D_array"], ) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) def test_det(self, array, dtype): a = numpy.array(array, dtype=dtype) - ia = inp.array(a) - result = inp.linalg.det(ia) + ia = dpnp.array(a) + result = dpnp.linalg.det(ia) expected = numpy.linalg.det(a) assert_allclose(expected, result) @@ -465,24 +427,24 @@ def test_det_strides(self): ] ) - a_dp = inp.array(a_np) + a_dp = dpnp.array(a_np) # positive strides expected = numpy.linalg.det(a_np[::2, ::2]) - result = inp.linalg.det(a_dp[::2, ::2]) + result = dpnp.linalg.det(a_dp[::2, ::2]) assert_allclose(expected, result, rtol=1e-3, atol=1e-4) # negative strides expected = numpy.linalg.det(a_np[::-2, ::-2]) - result = inp.linalg.det(a_dp[::-2, ::-2]) + result = dpnp.linalg.det(a_dp[::-2, ::-2]) assert_allclose(expected, result, rtol=1e-3, atol=1e-4) def test_det_empty(self): a = numpy.empty((0, 0, 2, 2), dtype=numpy.float32) - ia = inp.array(a) + ia = dpnp.array(a) np_det = numpy.linalg.det(a) - dpnp_det = inp.linalg.det(ia) + dpnp_det = dpnp.linalg.det(ia) assert dpnp_det.dtype == np_det.dtype assert dpnp_det.shape == np_det.shape @@ -510,10 +472,10 @@ def test_det_empty(self): ) def test_det_singular_matrix(self, matrix): a_np = numpy.array(matrix, dtype="float32") - a_dp = inp.array(a_np) + a_dp = dpnp.array(a_np) expected = numpy.linalg.det(a_np) - result = inp.linalg.det(a_dp) + result = dpnp.linalg.det(a_dp) assert_allclose(expected, result, rtol=1e-3, atol=1e-4) @@ -525,19 +487,19 @@ def test_det_singular_matrix_3D(self): a_np = numpy.array( [[[1, 2], [3, 4]], [[1, 2], [1, 2]], [[1, 3], [3, 1]]] ) - a_dp = inp.array(a_np) + a_dp = dpnp.array(a_np) expected = numpy.linalg.det(a_np) - result = inp.linalg.det(a_dp) + result = dpnp.linalg.det(a_dp) assert_allclose(expected, result, rtol=1e-3, atol=1e-4) def test_det_errors(self): - a_dp = inp.array([[1, 2], [3, 5]], dtype="float32") + a_dp = dpnp.array([[1, 2], [3, 5]], dtype="float32") # unsupported type - a_np = inp.asnumpy(a_dp) - assert_raises(TypeError, inp.linalg.det, a_np) + a_np = dpnp.asnumpy(a_dp) + assert_raises(TypeError, dpnp.linalg.det, a_np) class TestEigenvalue: @@ -547,7 +509,7 @@ class TestEigenvalue: def assert_eigen_decomposition(self, a, w, v, rtol=1e-5, atol=1e-5): a_ndim = a.ndim if a_ndim == 2: - assert_allclose(a @ v, v @ inp.diag(w), rtol=rtol, atol=atol) + assert_allclose(a @ v, v @ dpnp.diag(w), rtol=rtol, atol=atol) else: # a_ndim > 2 if a_ndim > 3: a = a.reshape(-1, *a.shape[-2:]) @@ -558,28 +520,14 @@ def assert_eigen_decomposition(self, a, w, v, rtol=1e-5, atol=1e-5): a[i].dot(v[i]), w[i] * v[i], rtol=rtol, atol=atol ) - @pytest.mark.parametrize( - "func", - [ - "eig", - "eigvals", - "eigh", - "eigvalsh", - ], - ) + @pytest.mark.parametrize("func", ["eig", "eigvals", "eigh", "eigvalsh"]) @pytest.mark.parametrize( "shape", [(2, 2), (2, 3, 3), (2, 2, 3, 3)], - 
ids=["(2,2)", "(2,3,3)", "(2,2,3,3)"], + ids=["(2, 2)", "(2, 3, 3)", "(2, 2, 3, 3)"], ) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) - @pytest.mark.parametrize( - "order", - [ - "C", - "F", - ], - ) + @pytest.mark.parametrize("order", ["C", "F"]) def test_eigenvalues(self, func, shape, dtype, order): # Set a `hermitian` flag for generate_random_numpy_array() to # get a symmetric array for eigh() and eigvalsh() or @@ -589,7 +537,7 @@ def test_eigenvalues(self, func, shape, dtype, order): shape, dtype, hermitian=is_hermitian, seed_value=81 ) a_order = numpy.array(a, order=order) - a_dp = inp.array(a, order=order) + a_dp = dpnp.array(a, order=order) # NumPy with OneMKL and with rocSOLVER sorts in ascending order, # so w's should be directly comparable. @@ -598,70 +546,56 @@ def test_eigenvalues(self, func, shape, dtype, order): # we verify them through the eigen equation A*v=w*v. if func in ("eig", "eigh"): w, _ = getattr(numpy.linalg, func)(a_order) - w_dp, v_dp = getattr(inp.linalg, func)(a_dp) + w_dp, v_dp = getattr(dpnp.linalg, func)(a_dp) self.assert_eigen_decomposition(a_dp, w_dp, v_dp) else: # eighvals or eigvalsh w = getattr(numpy.linalg, func)(a_order) - w_dp = getattr(inp.linalg, func)(a_dp) + w_dp = getattr(dpnp.linalg, func)(a_dp) assert_dtype_allclose(w_dp, w) # eigh() and eigvalsh() are tested in cupy tests - @pytest.mark.parametrize( - "func", - [ - "eig", - "eigvals", - ], - ) + @pytest.mark.parametrize("func", ["eig", "eigvals"]) @pytest.mark.parametrize( "shape", [(0, 0), (2, 0, 0), (0, 3, 3)], - ids=["(0,0)", "(2,0,0)", "(0,3,3)"], + ids=["(0, 0)", "(2, 0, 0)", "(0, 3, 3)"], ) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) def test_eigenvalue_empty(self, func, shape, dtype): a_np = numpy.empty(shape, dtype=dtype) - a_dp = inp.array(a_np) + a_dp = dpnp.array(a_np) if func == "eig": w, v = getattr(numpy.linalg, func)(a_np) - w_dp, v_dp = getattr(inp.linalg, func)(a_dp) + w_dp, v_dp = getattr(dpnp.linalg, func)(a_dp) assert_dtype_allclose(v_dp, v) else: # eigvals w = getattr(numpy.linalg, func)(a_np) - w_dp = getattr(inp.linalg, func)(a_dp) + w_dp = getattr(dpnp.linalg, func)(a_dp) assert_dtype_allclose(w_dp, w) - @pytest.mark.parametrize( - "func", - [ - "eig", - "eigvals", - "eigh", - "eigvalsh", - ], - ) + @pytest.mark.parametrize("func", ["eig", "eigvals", "eigh", "eigvalsh"]) def test_eigenvalue_errors(self, func): - a_dp = inp.array([[1, 3], [3, 2]], dtype="float32") + a_dp = dpnp.array([[1, 3], [3, 2]], dtype="float32") # unsupported type - a_np = inp.asnumpy(a_dp) - dpnp_func = getattr(inp.linalg, func) + a_np = dpnp.asnumpy(a_dp) + dpnp_func = getattr(dpnp.linalg, func) assert_raises(TypeError, dpnp_func, a_np) # a.ndim < 2 a_dp_ndim_1 = a_dp.flatten() - assert_raises(inp.linalg.LinAlgError, dpnp_func, a_dp_ndim_1) + assert_raises(dpnp.linalg.LinAlgError, dpnp_func, a_dp_ndim_1) # a is not square - a_dp_not_scquare = inp.ones((2, 3)) - assert_raises(inp.linalg.LinAlgError, dpnp_func, a_dp_not_scquare) + a_dp_not_scquare = dpnp.ones((2, 3)) + assert_raises(dpnp.linalg.LinAlgError, dpnp_func, a_dp_not_scquare) # invalid UPLO if func in ("eigh", "eigvalsh"): @@ -670,61 +604,61 @@ def test_eigenvalue_errors(self, func): class TestEinsum: def test_einsum_trivial_cases(self): - a = inp.arange(25).reshape(5, 5) - b = inp.arange(5) + a = dpnp.arange(25).reshape(5, 5) + b = dpnp.arange(5) a_np = a.asnumpy() b_np = b.asnumpy() # one input, no optimization is needed - result = inp.einsum("i", b, optimize="greedy") + result = dpnp.einsum("i", 
b, optimize="greedy") expected = numpy.einsum("i", b_np, optimize="greedy") assert_dtype_allclose(result, expected) # two inputs, no optimization is needed - result = inp.einsum("ij,jk", a, a, optimize="greedy") + result = dpnp.einsum("ij,jk", a, a, optimize="greedy") expected = numpy.einsum("ij,jk", a_np, a_np, optimize="greedy") assert_dtype_allclose(result, expected) # no optimization in optimal mode - result = inp.einsum("ij,jk", a, a, optimize=["optimal", 1]) + result = dpnp.einsum("ij,jk", a, a, optimize=["optimal", 1]) expected = numpy.einsum("ij,jk", a_np, a_np, optimize=["optimal", 1]) assert_dtype_allclose(result, expected) # naive cost equal or smaller than optimized cost - result = inp.einsum("i,i,i", b, b, b, optimize="greedy") + result = dpnp.einsum("i,i,i", b, b, b, optimize="greedy") expected = numpy.einsum("i,i,i", b_np, b_np, b_np, optimize="greedy") assert_dtype_allclose(result, expected) def test_einsum_out(self): - a = inp.ones((5, 5)) - out = inp.empty((5,)) - result = inp.einsum("ii->i", a, out=out) + a = dpnp.ones((5, 5)) + out = dpnp.empty((5,)) + result = dpnp.einsum("ii->i", a, out=out) assert result is out expected = numpy.einsum("ii->i", a.asnumpy()) assert_dtype_allclose(result, expected) def test_einsum_error1(self): - a = inp.ones((5, 5)) - out = inp.empty((5,), sycl_queue=dpctl.SyclQueue()) + a = dpnp.ones((5, 5)) + out = dpnp.empty((5,), sycl_queue=dpctl.SyclQueue()) # inconsistent sycl_queue - assert_raises(ExecutionPlacementError, inp.einsum, "ii->i", a, out=out) + assert_raises(ExecutionPlacementError, dpnp.einsum, "ii->i", a, out=out) # unknown value for optimize keyword - assert_raises(TypeError, inp.einsum, "ii->i", a, optimize="blah") + assert_raises(TypeError, dpnp.einsum, "ii->i", a, optimize="blah") # repeated scripts in output - assert_raises(ValueError, inp.einsum, "ij,jk->ii", a, a) + assert_raises(ValueError, dpnp.einsum, "ij,jk->ii", a, a) - a = inp.ones((5, 4)) + a = dpnp.ones((5, 4)) # different size for same label 5 != 4 - assert_raises(ValueError, inp.einsum, "ii", a) + assert_raises(ValueError, dpnp.einsum, "ii", a) - a = inp.arange(25).reshape(5, 5) + a = dpnp.arange(25).reshape(5, 5) # subscript is not within the valid range [0, 52) - assert_raises(ValueError, inp.einsum, a, [53, 53]) + assert_raises(ValueError, dpnp.einsum, a, [53, 53]) @pytest.mark.parametrize("do_opt", [True, False]) - @pytest.mark.parametrize("xp", [numpy, inp]) + @pytest.mark.parametrize("xp", [numpy, dpnp]) def test_einsum_error2(self, do_opt, xp): a = xp.asarray(0) b = xp.asarray([0]) @@ -783,7 +717,7 @@ def test_einsum_error2(self, do_opt, xp): xp.einsum("aabcb,abc", a, b) @pytest.mark.parametrize("do_opt", [True, False]) - @pytest.mark.parametrize("xp", [numpy, inp]) + @pytest.mark.parametrize("xp", [numpy, dpnp]) def test_einsum_specific_errors(self, do_opt, xp): a = xp.asarray(0) # out parameter must be an array @@ -817,23 +751,23 @@ def check_einsum_sums(self, dtype, do_opt=False): # sum(a, axis=-1) for n in range(1, 17): a = numpy.arange(n, dtype=dtype) - a_dp = inp.array(a) + a_dp = dpnp.array(a) expected = numpy.einsum("i->", a, optimize=do_opt) assert_dtype_allclose( - inp.einsum("i->", a_dp, optimize=do_opt), expected + dpnp.einsum("i->", a_dp, optimize=do_opt), expected ) assert_dtype_allclose( - inp.einsum(a_dp, [0], [], optimize=do_opt), expected + dpnp.einsum(a_dp, [0], [], optimize=do_opt), expected ) for n in range(1, 17): a = numpy.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) - a_dp = inp.array(a) + a_dp = dpnp.array(a) expected = 
numpy.einsum("...i->...", a, optimize=do_opt) - result = inp.einsum("...i->...", a_dp, optimize=do_opt) + result = dpnp.einsum("...i->...", a_dp, optimize=do_opt) assert_dtype_allclose(result, expected) - result = inp.einsum( + result = dpnp.einsum( a_dp, [Ellipsis, 0], [Ellipsis], optimize=do_opt ) assert_dtype_allclose(result, expected) @@ -841,24 +775,24 @@ def check_einsum_sums(self, dtype, do_opt=False): # sum(a, axis=0) for n in range(1, 17): a = numpy.arange(2 * n, dtype=dtype).reshape(2, n) - a_dp = inp.array(a) + a_dp = dpnp.array(a) expected = numpy.einsum("i...->...", a, optimize=do_opt) - result = inp.einsum("i...->...", a_dp, optimize=do_opt) + result = dpnp.einsum("i...->...", a_dp, optimize=do_opt) assert_dtype_allclose(result, expected) - result = inp.einsum( + result = dpnp.einsum( a_dp, [0, Ellipsis], [Ellipsis], optimize=do_opt ) assert_dtype_allclose(result, expected) for n in range(1, 17): a = numpy.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) - a_dp = inp.array(a) + a_dp = dpnp.array(a) expected = numpy.einsum("i...->...", a, optimize=do_opt) - result = inp.einsum("i...->...", a_dp, optimize=do_opt) + result = dpnp.einsum("i...->...", a_dp, optimize=do_opt) assert_dtype_allclose(result, expected) - result = inp.einsum( + result = dpnp.einsum( a_dp, [0, Ellipsis], [Ellipsis], optimize=do_opt ) assert_dtype_allclose(result, expected) @@ -866,34 +800,34 @@ def check_einsum_sums(self, dtype, do_opt=False): # trace(a) for n in range(1, 17): a = numpy.arange(n * n, dtype=dtype).reshape(n, n) - a_dp = inp.array(a) + a_dp = dpnp.array(a) expected = numpy.einsum("ii", a, optimize=do_opt) - result = inp.einsum("ii", a_dp, optimize=do_opt) + result = dpnp.einsum("ii", a_dp, optimize=do_opt) assert_dtype_allclose(result, expected) - result = inp.einsum(a_dp, [0, 0], optimize=do_opt) + result = dpnp.einsum(a_dp, [0, 0], optimize=do_opt) assert_dtype_allclose(result, expected) # should accept dpnp array in subscript list - dp_array = inp.asarray([0, 0]) + dp_array = dpnp.asarray([0, 0]) assert_dtype_allclose( - inp.einsum(a_dp, dp_array, optimize=do_opt), expected + dpnp.einsum(a_dp, dp_array, optimize=do_opt), expected ) assert_dtype_allclose( - inp.einsum(a_dp, list(dp_array), optimize=do_opt), expected + dpnp.einsum(a_dp, list(dp_array), optimize=do_opt), expected ) # multiply(a, b) for n in range(1, 17): a = numpy.arange(3 * n, dtype=dtype).reshape(3, n) b = numpy.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) expected = numpy.einsum("..., ...", a, b, optimize=do_opt) - result = inp.einsum("..., ...", a_dp, b_dp, optimize=do_opt) + result = dpnp.einsum("..., ...", a_dp, b_dp, optimize=do_opt) assert_dtype_allclose(result, expected) - result = inp.einsum( + result = dpnp.einsum( a_dp, [Ellipsis], b_dp, [Ellipsis], optimize=do_opt ) assert_dtype_allclose(result, expected) @@ -902,13 +836,13 @@ def check_einsum_sums(self, dtype, do_opt=False): for n in range(1, 17): a = numpy.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) b = numpy.arange(n, dtype=dtype) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) expected = numpy.einsum("...i, ...i", a, b, optimize=do_opt) - result = inp.einsum("...i, ...i", a_dp, b_dp, optimize=do_opt) + result = dpnp.einsum("...i, ...i", a_dp, b_dp, optimize=do_opt) assert_dtype_allclose(result, expected) - result = inp.einsum( + result = dpnp.einsum( a_dp, [Ellipsis, 0], b_dp, [Ellipsis, 0], optimize=do_opt ) 
assert_dtype_allclose(result, expected) @@ -916,13 +850,13 @@ def check_einsum_sums(self, dtype, do_opt=False): for n in range(1, 11): a = numpy.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2) b = numpy.arange(n, dtype=dtype) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) expected = numpy.einsum("i..., i...", a, b, optimize=do_opt) - result = inp.einsum("i..., i...", a_dp, b_dp, optimize=do_opt) + result = dpnp.einsum("i..., i...", a_dp, b_dp, optimize=do_opt) assert_dtype_allclose(result, expected) - result = inp.einsum( + result = dpnp.einsum( a_dp, [0, Ellipsis], b_dp, [0, Ellipsis], optimize=do_opt ) assert_dtype_allclose(result, expected) @@ -931,14 +865,14 @@ def check_einsum_sums(self, dtype, do_opt=False): for n in range(1, 17): a = numpy.arange(3, dtype=dtype) + 1 b = numpy.arange(n, dtype=dtype) + 1 - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) expected = numpy.einsum("i,j", a, b, optimize=do_opt) assert_dtype_allclose( - inp.einsum("i,j", a_dp, b_dp, optimize=do_opt), expected + dpnp.einsum("i,j", a_dp, b_dp, optimize=do_opt), expected ) assert_dtype_allclose( - inp.einsum(a_dp, [0], b_dp, [1], optimize=do_opt), expected + dpnp.einsum(a_dp, [0], b_dp, [1], optimize=do_opt), expected ) # Suppress the complex warnings for the 'as f8' tests @@ -949,18 +883,18 @@ def check_einsum_sums(self, dtype, do_opt=False): for n in range(1, 17): a = numpy.arange(4 * n, dtype=dtype).reshape(4, n) b = numpy.arange(n, dtype=dtype) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) expected = numpy.einsum("ij, j", a, b, optimize=do_opt) - result = inp.einsum("ij, j", a_dp, b_dp, optimize=do_opt) + result = dpnp.einsum("ij, j", a_dp, b_dp, optimize=do_opt) assert_dtype_allclose(result, expected) - result = inp.einsum(a_dp, [0, 1], b_dp, [1], optimize=do_opt) + result = dpnp.einsum(a_dp, [0, 1], b_dp, [1], optimize=do_opt) assert_dtype_allclose(result, expected) - c = inp.arange(4, dtype=a_dp.dtype) + c = dpnp.arange(4, dtype=a_dp.dtype) args = ["ij, j", a_dp, b_dp] - result = inp.einsum( + result = dpnp.einsum( *args, out=c, dtype="f4", casting="unsafe", optimize=do_opt ) assert result is c @@ -968,7 +902,7 @@ def check_einsum_sums(self, dtype, do_opt=False): c[...] = 0 args = [a_dp, [0, 1], b_dp, [1]] - result = inp.einsum( + result = dpnp.einsum( *args, out=c, dtype="f4", casting="unsafe", optimize=do_opt ) assert result is c @@ -977,20 +911,20 @@ def check_einsum_sums(self, dtype, do_opt=False): for n in range(1, 17): a = numpy.arange(4 * n, dtype=dtype).reshape(4, n) b = numpy.arange(n, dtype=dtype) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) expected = numpy.einsum("ji,j", a.T, b.T, optimize=do_opt) - result = inp.einsum("ji,j", a_dp.T, b_dp.T, optimize=do_opt) + result = dpnp.einsum("ji,j", a_dp.T, b_dp.T, optimize=do_opt) assert_dtype_allclose(result, expected) - result = inp.einsum( + result = dpnp.einsum( a_dp.T, [1, 0], b_dp.T, [1], optimize=do_opt ) assert_dtype_allclose(result, expected) - c = inp.arange(4, dtype=a_dp.dtype) + c = dpnp.arange(4, dtype=a_dp.dtype) args = ["ji,j", a_dp.T, b_dp.T] - result = inp.einsum( + result = dpnp.einsum( *args, out=c, dtype="f4", casting="unsafe", optimize=do_opt ) assert result is c @@ -998,7 +932,7 @@ def check_einsum_sums(self, dtype, do_opt=False): c[...] 
= 0 args = [a_dp.T, [1, 0], b_dp.T, [1]] - result = inp.einsum( + result = dpnp.einsum( *args, out=c, dtype="f4", casting="unsafe", optimize=do_opt ) assert result is c @@ -1008,28 +942,30 @@ def check_einsum_sums(self, dtype, do_opt=False): for n in range(1, 17): a = numpy.arange(4 * n, dtype=dtype).reshape(4, n) b = numpy.arange(n * 6, dtype=dtype).reshape(n, 6) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) expected = numpy.einsum("ij, jk", a, b, optimize=do_opt) - result = inp.einsum("ij, jk", a_dp, b_dp, optimize=do_opt) + result = dpnp.einsum("ij, jk", a_dp, b_dp, optimize=do_opt) assert_dtype_allclose(result, expected) - result = inp.einsum(a_dp, [0, 1], b_dp, [1, 2], optimize=do_opt) + result = dpnp.einsum( + a_dp, [0, 1], b_dp, [1, 2], optimize=do_opt + ) assert_dtype_allclose(result, expected) for n in range(1, 17): a = numpy.arange(4 * n, dtype=dtype).reshape(4, n) b = numpy.arange(n * 6, dtype=dtype).reshape(n, 6) c = numpy.arange(24, dtype=dtype).reshape(4, 6) - a_dp = inp.array(a) - b_dp = inp.array(b) - d = inp.array(c) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) + d = dpnp.array(c) args = ["ij, jk", a, b] expected = numpy.einsum( *args, out=c, dtype="f4", casting="unsafe", optimize=do_opt ) args = ["ij, jk", a_dp, b_dp] - result = inp.einsum( + result = dpnp.einsum( *args, out=d, dtype="f4", casting="unsafe", optimize=do_opt ) assert result is d @@ -1037,7 +973,7 @@ def check_einsum_sums(self, dtype, do_opt=False): d[...] = 0 args = [a_dp, [0, 1], b_dp, [1, 2]] - result = inp.einsum( + result = dpnp.einsum( *args, out=d, dtype="f4", casting="unsafe", optimize=do_opt ) assert result is d @@ -1047,9 +983,9 @@ def check_einsum_sums(self, dtype, do_opt=False): a = numpy.arange(12, dtype=dtype).reshape(3, 4) b = numpy.arange(20, dtype=dtype).reshape(4, 5) c = numpy.arange(30, dtype=dtype).reshape(5, 6) - a_dp = inp.array(a) - b_dp = inp.array(b) - c_dp = inp.array(c) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) + c_dp = dpnp.array(c) # equivalent of a.dot(b).dot(c) # if optimize is True, NumPy does not respect the given dtype args = ["ij,jk,kl", a, b, c] @@ -1057,25 +993,25 @@ def check_einsum_sums(self, dtype, do_opt=False): *args, dtype="f4", casting="unsafe", optimize=False ) args = ["ij,jk,kl", a_dp, b_dp, c_dp] - result = inp.einsum( + result = dpnp.einsum( *args, dtype="f4", casting="unsafe", optimize=do_opt ) assert_dtype_allclose(result, expected) args = a_dp, [0, 1], b_dp, [1, 2], c_dp, [2, 3] - result = inp.einsum( + result = dpnp.einsum( *args, dtype="f4", casting="unsafe", optimize=do_opt ) assert_dtype_allclose(result, expected) d = numpy.arange(18, dtype=dtype).reshape(3, 6) - d_dp = inp.array(d) + d_dp = dpnp.array(d) args = ["ij,jk,kl", a, b, c] expected = numpy.einsum( *args, out=d, dtype="f4", casting="unsafe", optimize=do_opt ) args = ["ij,jk,kl", a_dp, b_dp, c_dp] - result = inp.einsum( + result = dpnp.einsum( *args, out=d_dp, dtype="f4", casting="unsafe", optimize=do_opt ) assert result is d_dp @@ -1083,7 +1019,7 @@ def check_einsum_sums(self, dtype, do_opt=False): d_dp[...] 
= 0 args = [a_dp, [0, 1], b_dp, [1, 2], c_dp, [2, 3]] - result = inp.einsum( + result = dpnp.einsum( *args, out=d_dp, dtype="f4", casting="unsafe", optimize=do_opt ) assert result is d_dp @@ -1092,21 +1028,21 @@ def check_einsum_sums(self, dtype, do_opt=False): # tensordot(a, b) a = numpy.arange(60, dtype=dtype).reshape(3, 4, 5) b = numpy.arange(24, dtype=dtype).reshape(4, 3, 2) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) # equivalent of numpy.tensordot(a, b, axes=([1, 0], [0, 1])) expected = numpy.einsum("ijk, jil -> kl", a, b, optimize=do_opt) - result = inp.einsum("ijk, jil -> kl", a_dp, b_dp, optimize=do_opt) + result = dpnp.einsum("ijk, jil -> kl", a_dp, b_dp, optimize=do_opt) assert_dtype_allclose(result, expected) - result = inp.einsum( + result = dpnp.einsum( a_dp, [0, 1, 2], b_dp, [1, 0, 3], optimize=do_opt ) assert_dtype_allclose(result, expected) - c = inp.arange(10, dtype=a_dp.dtype).reshape(5, 2) + c = dpnp.arange(10, dtype=a_dp.dtype).reshape(5, 2) args = ["ijk, jil -> kl", a_dp, b_dp] - result = inp.einsum( + result = dpnp.einsum( *args, out=c, dtype="f4", casting="unsafe", optimize=do_opt ) assert result is c @@ -1114,7 +1050,7 @@ def check_einsum_sums(self, dtype, do_opt=False): c[...] = 0 args = [a_dp, [0, 1, 2], b_dp, [1, 0, 3]] - result = inp.einsum( + result = dpnp.einsum( *args, out=c, dtype="f4", casting="unsafe", optimize=do_opt ) assert result is c @@ -1125,102 +1061,106 @@ def check_einsum_sums(self, dtype, do_opt=False): a = numpy.array([1, 3, neg_val, 0, 12, 13, 0, 1], dtype=dtype) b = numpy.array([0, 3.5, 0, neg_val, 0, 1, 3, 12], dtype=dtype) c = numpy.array([True, True, False, True, True, False, True, True]) - a_dp = inp.array(a) - b_dp = inp.array(b) - c_dp = inp.array(c) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) + c_dp = dpnp.array(c) expected = numpy.einsum( "i,i,i->i", a, b, c, dtype="?", casting="unsafe", optimize=do_opt ) args = ["i,i,i->i", a_dp, b_dp, c_dp] - result = inp.einsum(*args, dtype="?", casting="unsafe", optimize=do_opt) + result = dpnp.einsum( + *args, dtype="?", casting="unsafe", optimize=do_opt + ) assert_dtype_allclose(result, expected) args = [a_dp, [0], b_dp, [0], c_dp, [0], [0]] - result = inp.einsum(*args, dtype="?", casting="unsafe", optimize=do_opt) + result = dpnp.einsum( + *args, dtype="?", casting="unsafe", optimize=do_opt + ) assert_dtype_allclose(result, expected) - # NumPy >= 2.0 follows NEP-50 to determine the output dtype when one of - # the inputs is a scalar while NumPy < 2.0 does not + # NumPy >= 2.0 follows NEP-50 to determine the output dtype when one + # of the inputs is a scalar while NumPy < 2.0 does not if numpy.lib.NumpyVersion(numpy.__version__) < "2.0.0": check_type = False else: check_type = True a = numpy.arange(9, dtype=dtype) - a_dp = inp.array(a) + a_dp = dpnp.array(a) expected = numpy.einsum(",i->", 3, a) assert_dtype_allclose( - inp.einsum(",i->", 3, a_dp), expected, check_type=check_type + dpnp.einsum(",i->", 3, a_dp), expected, check_type=check_type ) assert_dtype_allclose( - inp.einsum(3, [], a_dp, [0], []), expected, check_type=check_type + dpnp.einsum(3, [], a_dp, [0], []), expected, check_type=check_type ) expected = numpy.einsum("i,->", a, 3) assert_dtype_allclose( - inp.einsum("i,->", a_dp, 3), expected, check_type=check_type + dpnp.einsum("i,->", a_dp, 3), expected, check_type=check_type ) assert_dtype_allclose( - inp.einsum(a_dp, [0], 3, [], []), expected, check_type=check_type + dpnp.einsum(a_dp, [0], 3, [], []), expected, check_type=check_type 
) # Various stride0, contiguous, and SSE aligned variants for n in range(1, 25): a = numpy.arange(n, dtype=dtype) - a_dp = inp.array(a) + a_dp = dpnp.array(a) assert_dtype_allclose( - inp.einsum("...,...", a_dp, a_dp, optimize=do_opt), + dpnp.einsum("...,...", a_dp, a_dp, optimize=do_opt), numpy.einsum("...,...", a, a, optimize=do_opt), ) assert_dtype_allclose( - inp.einsum("i,i", a_dp, a_dp, optimize=do_opt), + dpnp.einsum("i,i", a_dp, a_dp, optimize=do_opt), numpy.einsum("i,i", a, a, optimize=do_opt), ) assert_dtype_allclose( - inp.einsum("i,->i", a_dp, 2, optimize=do_opt), + dpnp.einsum("i,->i", a_dp, 2, optimize=do_opt), numpy.einsum("i,->i", a, 2, optimize=do_opt), check_type=check_type, ) assert_dtype_allclose( - inp.einsum(",i->i", 2, a_dp, optimize=do_opt), + dpnp.einsum(",i->i", 2, a_dp, optimize=do_opt), numpy.einsum(",i->i", 2, a, optimize=do_opt), check_type=check_type, ) assert_dtype_allclose( - inp.einsum("i,->", a_dp, 2, optimize=do_opt), + dpnp.einsum("i,->", a_dp, 2, optimize=do_opt), numpy.einsum("i,->", a, 2, optimize=do_opt), check_type=check_type, ) assert_dtype_allclose( - inp.einsum(",i->", 2, a_dp, optimize=do_opt), + dpnp.einsum(",i->", 2, a_dp, optimize=do_opt), numpy.einsum(",i->", 2, a, optimize=do_opt), check_type=check_type, ) assert_dtype_allclose( - inp.einsum("...,...", a_dp[1:], a_dp[:-1], optimize=do_opt), + dpnp.einsum("...,...", a_dp[1:], a_dp[:-1], optimize=do_opt), numpy.einsum("...,...", a[1:], a[:-1], optimize=do_opt), ) assert_dtype_allclose( - inp.einsum("i,i", a_dp[1:], a_dp[:-1], optimize=do_opt), + dpnp.einsum("i,i", a_dp[1:], a_dp[:-1], optimize=do_opt), numpy.einsum("i,i", a[1:], a[:-1], optimize=do_opt), ) assert_dtype_allclose( - inp.einsum("i,->i", a_dp[1:], 2, optimize=do_opt), + dpnp.einsum("i,->i", a_dp[1:], 2, optimize=do_opt), numpy.einsum("i,->i", a[1:], 2, optimize=do_opt), check_type=check_type, ) assert_dtype_allclose( - inp.einsum(",i->i", 2, a_dp[1:], optimize=do_opt), + dpnp.einsum(",i->i", 2, a_dp[1:], optimize=do_opt), numpy.einsum(",i->i", 2, a[1:], optimize=do_opt), check_type=check_type, ) assert_dtype_allclose( - inp.einsum("i,->", a_dp[1:], 2, optimize=do_opt), + dpnp.einsum("i,->", a_dp[1:], 2, optimize=do_opt), numpy.einsum("i,->", a[1:], 2, optimize=do_opt), check_type=check_type, ) assert_dtype_allclose( - inp.einsum(",i->", 2, a_dp[1:], optimize=do_opt), + dpnp.einsum(",i->", 2, a_dp[1:], optimize=do_opt), numpy.einsum(",i->", 2, a[1:], optimize=do_opt), check_type=check_type, ) @@ -1229,63 +1169,63 @@ def check_einsum_sums(self, dtype, do_opt=False): a = numpy.arange(2) + 1 b = numpy.arange(4).reshape(2, 2) + 3 c = numpy.arange(4).reshape(2, 2) + 7 - a_dp = inp.array(a) - b_dp = inp.array(b) - c_dp = inp.array(c) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) + c_dp = dpnp.array(c) assert_dtype_allclose( - inp.einsum("z,mz,zm->", a_dp, b_dp, c_dp, optimize=do_opt), + dpnp.einsum("z,mz,zm->", a_dp, b_dp, c_dp, optimize=do_opt), numpy.einsum("z,mz,zm->", a, b, c, optimize=do_opt), ) # singleton dimensions broadcast a = numpy.ones((10, 2)) b = numpy.ones((1, 2)) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) assert_dtype_allclose( - inp.einsum("ij,ij->j", a_dp, b_dp, optimize=do_opt), + dpnp.einsum("ij,ij->j", a_dp, b_dp, optimize=do_opt), numpy.einsum("ij,ij->j", a, b, optimize=do_opt), ) # a blas-compatible contraction broadcasting case a = numpy.array([2.0, 3.0]) b = numpy.array([4.0]) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) 
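A compact illustration of the size-1 broadcasting contraction checked just below, where the singleton operand is stretched to the length of the other before the sum-product; it assumes dpnp.einsum matches the numpy.einsum result, as the test asserts:

    import numpy
    import dpnp

    a = dpnp.array([2.0, 3.0])
    b = dpnp.array([4.0])              # size-1 operand, broadcast against `a`
    res = dpnp.einsum("i,i", a, b)     # 2*4 + 3*4 = 20
    numpy.testing.assert_allclose(
        dpnp.asnumpy(res), numpy.einsum("i,i", a.asnumpy(), b.asnumpy())
    )
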
assert_dtype_allclose( - inp.einsum("i, i", a_dp, b_dp, optimize=do_opt), + dpnp.einsum("i, i", a_dp, b_dp, optimize=do_opt), numpy.einsum("i, i", a, b, optimize=do_opt), ) # all-ones array a = numpy.ones((1, 5)) / 2 b = numpy.ones((5, 5)) / 2 - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) assert_dtype_allclose( - inp.einsum("...ij,...jk->...ik", a_dp, a_dp, optimize=do_opt), + dpnp.einsum("...ij,...jk->...ik", a_dp, a_dp, optimize=do_opt), numpy.einsum("...ij,...jk->...ik", a, a, optimize=do_opt), ) assert_dtype_allclose( - inp.einsum("...ij,...jk->...ik", a_dp, b_dp, optimize=do_opt), + dpnp.einsum("...ij,...jk->...ik", a_dp, b_dp, optimize=do_opt), numpy.einsum("...ij,...jk->...ik", a, b, optimize=do_opt), ) # special case a = numpy.eye(2, dtype=dtype) b = numpy.ones(2, dtype=dtype) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) assert_dtype_allclose( # contig_contig_outstride0_two - inp.einsum("ji,i->", a_dp, b_dp, optimize=do_opt), + dpnp.einsum("ji,i->", a_dp, b_dp, optimize=do_opt), numpy.einsum("ji,i->", a, b, optimize=do_opt), ) assert_dtype_allclose( # stride0_contig_outstride0_two - inp.einsum("i,ij->", b_dp, a_dp, optimize=do_opt), + dpnp.einsum("i,ij->", b_dp, a_dp, optimize=do_opt), numpy.einsum("i,ij->", b, a, optimize=do_opt), ) assert_dtype_allclose( # contig_stride0_outstride0_two - inp.einsum("ij,i->", a_dp, b_dp, optimize=do_opt), + dpnp.einsum("ij,i->", a_dp, b_dp, optimize=do_opt), numpy.einsum("ij,i->", a, b, optimize=do_opt), ) @@ -1321,172 +1261,173 @@ def test_einsum_misc(self): for opt in [True, False]: a = numpy.ones((1, 2)) b = numpy.ones((2, 2, 1)) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) expected = numpy.einsum("ij...,j...->i...", a, b, optimize=opt) - result = inp.einsum("ij...,j...->i...", a_dp, b_dp, optimize=opt) + result = dpnp.einsum("ij...,j...->i...", a_dp, b_dp, optimize=opt) assert_dtype_allclose(result, expected) a = numpy.array([1, 2, 3]) b = numpy.array([2, 3, 4]) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) expected = numpy.einsum("...i,...i", a, b, optimize=True) - result = inp.einsum("...i,...i", a_dp, b_dp, optimize=True) + result = dpnp.einsum("...i,...i", a_dp, b_dp, optimize=True) assert_dtype_allclose(result, expected) a = numpy.ones((5, 12, 4, 2, 3), numpy.int64) b = numpy.ones((5, 12, 11), numpy.int64) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) expected = numpy.einsum("ijklm,ijn,ijn->", a, b, b, optimize=opt) - result1 = inp.einsum( + result1 = dpnp.einsum( "ijklm,ijn,ijn->", a_dp, b_dp, b_dp, optimize=opt ) assert_dtype_allclose(result1, expected) - result2 = inp.einsum("ijklm,ijn->", a_dp, b_dp, optimize=opt) + result2 = dpnp.einsum("ijklm,ijn->", a_dp, b_dp, optimize=opt) assert_dtype_allclose(result2, expected) a = numpy.arange(1, 3) b = numpy.arange(1, 5).reshape(2, 2) c = numpy.arange(1, 9).reshape(4, 2) - a_dp = inp.array(a) - b_dp = inp.array(b) - c_dp = inp.array(c) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) + c_dp = dpnp.array(c) expected = numpy.einsum("x,yx,zx->xzy", a, b, c, optimize=opt) - result = inp.einsum("x,yx,zx->xzy", a_dp, b_dp, c_dp, optimize=opt) + result = dpnp.einsum("x,yx,zx->xzy", a_dp, b_dp, c_dp, optimize=opt) assert_dtype_allclose(result, expected) # Ensure explicitly setting out=None does not cause an error a = numpy.array([1]) b = numpy.array([2]) - a_dp = inp.array(a) - b_dp = 
inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) expected = numpy.einsum("i,j", a, b, out=None) - result = inp.einsum("i,j", a_dp, b_dp, out=None) + result = dpnp.einsum("i,j", a_dp, b_dp, out=None) assert_dtype_allclose(result, expected) def test_subscript_range(self): # make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used # when creating a subscript from arrays - a = inp.ones((2, 3)) - b = inp.ones((3, 4)) - inp.einsum(a, [0, 20], b, [20, 2], [0, 2]) - inp.einsum(a, [0, 27], b, [27, 2], [0, 2]) - inp.einsum(a, [0, 51], b, [51, 2], [0, 2]) - assert_raises(ValueError, inp.einsum, a, [0, 52], b, [52, 2], [0, 2]) - assert_raises(ValueError, inp.einsum, a, [-1, 5], b, [5, 2], [-1, 2]) + a = dpnp.ones((2, 3)) + b = dpnp.ones((3, 4)) + dpnp.einsum(a, [0, 20], b, [20, 2], [0, 2]) + dpnp.einsum(a, [0, 27], b, [27, 2], [0, 2]) + dpnp.einsum(a, [0, 51], b, [51, 2], [0, 2]) + assert_raises(ValueError, dpnp.einsum, a, [0, 52], b, [52, 2], [0, 2]) + assert_raises(ValueError, dpnp.einsum, a, [-1, 5], b, [5, 2], [-1, 2]) def test_einsum_broadcast(self): a = numpy.arange(2 * 3 * 4).reshape(2, 3, 4) b = numpy.arange(3) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) expected = numpy.einsum("ijk,j->ijk", a, b, optimize=False) - result = inp.einsum("ijk,j->ijk", a_dp, b_dp, optimize=False) + result = dpnp.einsum("ijk,j->ijk", a_dp, b_dp, optimize=False) assert_dtype_allclose(result, expected) for opt in [True, False]: assert_dtype_allclose( - inp.einsum("ij...,j...->ij...", a_dp, b_dp, optimize=opt), + dpnp.einsum("ij...,j...->ij...", a_dp, b_dp, optimize=opt), expected, ) assert_dtype_allclose( - inp.einsum("ij...,...j->ij...", a_dp, b_dp, optimize=opt), + dpnp.einsum("ij...,...j->ij...", a_dp, b_dp, optimize=opt), expected, ) assert_dtype_allclose( - inp.einsum("ij...,j->ij...", a_dp, b_dp, optimize=opt), expected + dpnp.einsum("ij...,j->ij...", a_dp, b_dp, optimize=opt), + expected, ) a = numpy.arange(12).reshape((4, 3)) b = numpy.arange(6).reshape((3, 2)) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) expected = numpy.einsum("ik,kj->ij", a, b, optimize=False) - result = inp.einsum("ik,kj->ij", a_dp, b_dp, optimize=False) + result = dpnp.einsum("ik,kj->ij", a_dp, b_dp, optimize=False) assert_dtype_allclose(result, expected) for opt in [True, False]: assert_dtype_allclose( - inp.einsum("ik...,k...->i...", a_dp, b_dp, optimize=opt), + dpnp.einsum("ik...,k...->i...", a_dp, b_dp, optimize=opt), expected, ) assert_dtype_allclose( - inp.einsum("ik...,...kj->i...j", a_dp, b_dp, optimize=opt), + dpnp.einsum("ik...,...kj->i...j", a_dp, b_dp, optimize=opt), expected, ) assert_dtype_allclose( - inp.einsum("...k,kj", a_dp, b_dp, optimize=opt), expected + dpnp.einsum("...k,kj", a_dp, b_dp, optimize=opt), expected ) assert_dtype_allclose( - inp.einsum("ik,k...->i...", a_dp, b_dp, optimize=opt), expected + dpnp.einsum("ik,k...->i...", a_dp, b_dp, optimize=opt), expected ) dims = [2, 3, 4, 5] a = numpy.arange(numpy.prod(dims)).reshape(dims) v = numpy.arange(4) - a_dp = inp.array(a) - v_dp = inp.array(v) + a_dp = dpnp.array(a) + v_dp = dpnp.array(v) expected = numpy.einsum("ijkl,k->ijl", a, v, optimize=False) - result = inp.einsum("ijkl,k->ijl", a_dp, v_dp, optimize=False) + result = dpnp.einsum("ijkl,k->ijl", a_dp, v_dp, optimize=False) assert_dtype_allclose(result, expected) for opt in [True, False]: assert_dtype_allclose( - inp.einsum("ijkl,k", a_dp, v_dp, optimize=opt), expected + 
dpnp.einsum("ijkl,k", a_dp, v_dp, optimize=opt), expected ) assert_dtype_allclose( - inp.einsum("...kl,k", a_dp, v_dp, optimize=opt), expected + dpnp.einsum("...kl,k", a_dp, v_dp, optimize=opt), expected ) assert_dtype_allclose( - inp.einsum("...kl,k...", a_dp, v_dp, optimize=opt), expected + dpnp.einsum("...kl,k...", a_dp, v_dp, optimize=opt), expected ) J, K, M = 8, 8, 6 a = numpy.arange(J * K * M).reshape(1, 1, 1, J, K, M) b = numpy.arange(J * K * M * 3).reshape(J, K, M, 3) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) expected = numpy.einsum("...lmn,...lmno->...o", a, b, optimize=False) - result = inp.einsum("...lmn,...lmno->...o", a_dp, b_dp, optimize=False) + result = dpnp.einsum("...lmn,...lmno->...o", a_dp, b_dp, optimize=False) assert_dtype_allclose(result, expected) for opt in [True, False]: assert_dtype_allclose( - inp.einsum("...lmn,lmno->...o", a_dp, b_dp, optimize=opt), + dpnp.einsum("...lmn,lmno->...o", a_dp, b_dp, optimize=opt), expected, ) def test_einsum_stride(self): a = numpy.arange(2 * 3).reshape(2, 3).astype(numpy.float32) b = numpy.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(numpy.int16) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) expected = numpy.einsum("cl, cpx->lpx", a, b) - result = inp.einsum("cl, cpx->lpx", a_dp, b_dp) + result = dpnp.einsum("cl, cpx->lpx", a_dp, b_dp) assert_dtype_allclose(result, expected) a = numpy.arange(3 * 3).reshape(3, 3).astype(numpy.float64) b = numpy.arange(3 * 3 * 64 * 64) b = b.reshape(3, 3, 64, 64).astype(numpy.float32) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) expected = numpy.einsum("cl, cpxy->lpxy", a, b) - result = inp.einsum("cl, cpxy->lpxy", a_dp, b_dp) + result = dpnp.einsum("cl, cpxy->lpxy", a_dp, b_dp) assert_dtype_allclose(result, expected) def test_einsum_collapsing(self): x = numpy.random.normal(0, 1, (5, 5, 5, 5)) y = numpy.zeros((5, 5)) expected = numpy.einsum("aabb->ab", x, out=y) - x_dp = inp.array(x) - y_dp = inp.array(y) - result = inp.einsum("aabb->ab", x_dp, out=y_dp) + x_dp = dpnp.array(x) + y_dp = dpnp.array(y) + result = dpnp.einsum("aabb->ab", x_dp, out=y_dp) assert result is y_dp assert_dtype_allclose(result, expected) def test_einsum_tensor(self): tensor = numpy.random.random_sample((10, 10, 10, 10)) - tensor_dp = inp.array(tensor) + tensor_dp = dpnp.array(tensor) expected = numpy.einsum("ijij->", tensor) - result = inp.einsum("ijij->", tensor_dp) + result = dpnp.einsum("ijij->", tensor_dp) assert_dtype_allclose(result, expected) @pytest.mark.parametrize( @@ -1499,80 +1440,80 @@ def test_different_paths(self, dtype): a = (numpy.arange(7) + 0.5).astype(dtype) s = numpy.array(2, dtype=dtype) - a_dp = inp.asarray(a) - s_dp = inp.asarray(s) + a_dp = dpnp.asarray(a) + s_dp = dpnp.asarray(s) # contig -> scalar: expected = numpy.einsum("i->", a) - result = inp.einsum("i->", a_dp) + result = dpnp.einsum("i->", a_dp) assert_dtype_allclose(result, expected) # contig, contig -> contig: expected = numpy.einsum("i,i->i", a, a) - result = inp.einsum("i,i->i", a_dp, a_dp) + result = dpnp.einsum("i,i->i", a_dp, a_dp) assert_dtype_allclose(result, expected) # noncontig, noncontig -> contig: expected = numpy.einsum("i,i->i", a.repeat(2)[::2], a.repeat(2)[::2]) - result = inp.einsum("i,i->i", a_dp.repeat(2)[::2], a_dp.repeat(2)[::2]) + result = dpnp.einsum("i,i->i", a_dp.repeat(2)[::2], a_dp.repeat(2)[::2]) assert_dtype_allclose(result, expected) # contig + contig -> scalar expected = 
numpy.einsum("i,i->", a, a) - result = inp.einsum("i,i->", a_dp, a_dp) + result = dpnp.einsum("i,i->", a_dp, a_dp) assert_dtype_allclose(result, expected) # contig + scalar -> contig (with out) - out_dp = inp.ones(7, dtype=dtype) + out_dp = dpnp.ones(7, dtype=dtype) expected = numpy.einsum("i,->i", a, s) - result = inp.einsum("i,->i", a_dp, s_dp, out=out_dp) + result = dpnp.einsum("i,->i", a_dp, s_dp, out=out_dp) assert result is out_dp assert_dtype_allclose(result, expected) # scalar + contig -> contig (with out) expected = numpy.einsum(",i->i", s, a) - result = inp.einsum(",i->i", s_dp, a_dp) + result = dpnp.einsum(",i->i", s_dp, a_dp) assert_dtype_allclose(result, expected) # scalar + contig -> scalar # Use einsum to compare to not have difference due to sum round-offs: - result1 = inp.einsum(",i->", s_dp, a_dp) - result2 = inp.einsum("i->", s_dp * a_dp) + result1 = dpnp.einsum(",i->", s_dp, a_dp) + result2 = dpnp.einsum("i->", s_dp * a_dp) assert_array_equal(result1.asnumpy(), result2.asnumpy()) # contig + scalar -> scalar # Use einsum to compare to not have difference due to sum round-offs: - result3 = inp.einsum("i,->", a_dp, s_dp) + result3 = dpnp.einsum("i,->", a_dp, s_dp) assert_array_equal(result2.asnumpy(), result3.asnumpy()) # contig + contig + contig -> scalar a = numpy.array([0.5, 0.5, 0.25, 4.5, 3.0], dtype=dtype) - a_dp = inp.array(a) + a_dp = dpnp.array(a) expected = numpy.einsum("i,i,i->", a, a, a) - result = inp.einsum("i,i,i->", a_dp, a_dp, a_dp) + result = dpnp.einsum("i,i,i->", a_dp, a_dp, a_dp) assert_dtype_allclose(result, expected) # four arrays: expected = numpy.einsum("i,i,i,i->", a, a, a, a) - result = inp.einsum("i,i,i,i->", a_dp, a_dp, a_dp, a_dp) + result = dpnp.einsum("i,i,i,i->", a_dp, a_dp, a_dp, a_dp) assert_dtype_allclose(result, expected) def test_small_boolean_arrays(self): # Use array of True embedded in False. - a = numpy.zeros((16, 1, 1), dtype=inp.bool)[:2] + a = numpy.zeros((16, 1, 1), dtype=dpnp.bool)[:2] a[...] 
= True - a_dp = inp.array(a) - out_dp = inp.zeros((16, 1, 1), dtype=inp.bool)[:2] + a_dp = dpnp.array(a) + out_dp = dpnp.zeros((16, 1, 1), dtype=dpnp.bool)[:2] expected = numpy.einsum("...ij,...jk->...ik", a, a) - result = inp.einsum("...ij,...jk->...ik", a_dp, a_dp, out=out_dp) + result = dpnp.einsum("...ij,...jk->...ik", a_dp, a_dp, out=out_dp) assert result is out_dp assert_dtype_allclose(result, expected) def test_out_is_res(self): a = numpy.arange(9).reshape(3, 3) - a_dp = inp.array(a) + a_dp = dpnp.array(a) expected = numpy.einsum("...ij,...jk->...ik", a, a) - result = inp.einsum("...ij,...jk->...ik", a_dp, a_dp, out=a_dp) + result = dpnp.einsum("...ij,...jk->...ik", a_dp, a_dp, out=a_dp) assert result is a_dp assert_dtype_allclose(result, expected) @@ -1595,17 +1536,17 @@ def optimize_compare(self, subscripts, operands=None): dpnp_args = [args[0]] for arr in args[1:]: - dpnp_args.append(inp.asarray(arr)) + dpnp_args.append(dpnp.asarray(arr)) expected = numpy.einsum(*args) # no optimization - result = inp.einsum(*dpnp_args, optimize=False) + result = dpnp.einsum(*dpnp_args, optimize=False) assert_dtype_allclose(result, expected, factor=16) - result = inp.einsum(*dpnp_args, optimize="greedy") + result = dpnp.einsum(*dpnp_args, optimize="greedy") assert_dtype_allclose(result, expected, factor=16) - result = inp.einsum(*dpnp_args, optimize="optimal") + result = dpnp.einsum(*dpnp_args, optimize="optimal") assert_dtype_allclose(result, expected, factor=16) def test_hadamard_like_products(self): @@ -1686,9 +1627,9 @@ def test_random_cases(self): self.optimize_compare("aef,fbc,dca->bde") def test_combined_views_mapping(self): - a = inp.arange(9).reshape(1, 1, 3, 1, 3) + a = dpnp.arange(9).reshape(1, 1, 3, 1, 3) expected = numpy.einsum("bbcdc->d", a.asnumpy()) - result = inp.einsum("bbcdc->d", a) + result = dpnp.einsum("bbcdc->d", a) assert_dtype_allclose(result, expected) def test_broadcasting_dot_cases(self): @@ -1711,45 +1652,45 @@ def test_broadcasting_dot_cases(self): def test_output_order(self): # Ensure output order is respected for optimize cases, the below # contraction should yield a reshaped tensor view - a = inp.ones((2, 3, 5), order="F") - b = inp.ones((4, 3), order="F") + a = dpnp.ones((2, 3, 5), order="F") + b = dpnp.ones((4, 3), order="F") for opt in [True, False]: - tmp = inp.einsum("...ft,mf->...mt", a, b, order="a", optimize=opt) + tmp = dpnp.einsum("...ft,mf->...mt", a, b, order="a", optimize=opt) assert tmp.flags.f_contiguous - tmp = inp.einsum("...ft,mf->...mt", a, b, order="f", optimize=opt) + tmp = dpnp.einsum("...ft,mf->...mt", a, b, order="f", optimize=opt) assert tmp.flags.f_contiguous - tmp = inp.einsum("...ft,mf->...mt", a, b, order="c", optimize=opt) + tmp = dpnp.einsum("...ft,mf->...mt", a, b, order="c", optimize=opt) assert tmp.flags.c_contiguous - tmp = inp.einsum("...ft,mf->...mt", a, b, order="k", optimize=opt) + tmp = dpnp.einsum("...ft,mf->...mt", a, b, order="k", optimize=opt) assert tmp.flags.c_contiguous is False assert tmp.flags.f_contiguous is False - tmp = inp.einsum("...ft,mf->...mt", a, b, optimize=opt) + tmp = dpnp.einsum("...ft,mf->...mt", a, b, optimize=opt) assert tmp.flags.c_contiguous is False assert tmp.flags.f_contiguous is False - c = inp.ones((4, 3), order="C") + c = dpnp.ones((4, 3), order="C") for opt in [True, False]: - tmp = inp.einsum("...ft,mf->...mt", a, c, order="a", optimize=opt) + tmp = dpnp.einsum("...ft,mf->...mt", a, c, order="a", optimize=opt) assert tmp.flags.c_contiguous - d = inp.ones((2, 3, 5), order="C") + d = 
dpnp.ones((2, 3, 5), order="C") for opt in [True, False]: - tmp = inp.einsum("...ft,mf->...mt", d, c, order="a", optimize=opt) + tmp = dpnp.einsum("...ft,mf->...mt", d, c, order="a", optimize=opt) assert tmp.flags.c_contiguous def test_einsum_path(self): # Test einsum path for covergae a = numpy.random.rand(1, 2, 3, 4) b = numpy.random.rand(4, 3, 2, 1) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) expected = numpy.einsum_path("ijkl,dcba->dcba", a, b) - result = inp.einsum_path("ijkl,dcba->dcba", a_dp, b_dp) + result = dpnp.einsum_path("ijkl,dcba->dcba", a_dp, b_dp) assert expected[0] == result[0] assert expected[1] == result[1] @@ -1765,17 +1706,13 @@ class TestInv: [[[1, 3], [3, 1]], [[0, 1], [1, 3]]], ], ], - ids=[ - "2D_array", - "3D_array", - "4D_array", - ], + ids=["2D_array", "3D_array", "4D_array"], ) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) def test_inv(self, array, dtype): a = numpy.array(array, dtype=dtype) - ia = inp.array(a) - result = inp.linalg.inv(ia) + ia = dpnp.array(a) + result = dpnp.linalg.inv(ia) expected = numpy.linalg.inv(a) assert_dtype_allclose(result, expected) @@ -1790,35 +1727,27 @@ def test_inv_strides(self): ] ) - a_dp = inp.array(a_np) + a_dp = dpnp.array(a_np) # positive strides expected = numpy.linalg.inv(a_np[::2, ::2]) - result = inp.linalg.inv(a_dp[::2, ::2]) + result = dpnp.linalg.inv(a_dp[::2, ::2]) assert_allclose(expected, result, rtol=1e-3, atol=1e-4) # negative strides expected = numpy.linalg.inv(a_np[::-2, ::-2]) - result = inp.linalg.inv(a_dp[::-2, ::-2]) + result = dpnp.linalg.inv(a_dp[::-2, ::-2]) assert_allclose(expected, result, rtol=1e-3, atol=1e-4) @pytest.mark.parametrize( "shape", - [ - (0, 0), - (3, 0, 0), - (0, 2, 2), - ], - ids=[ - "(0, 0)", - "(3, 0, 0)", - "(0, 2, 2)", - ], + [(0, 0), (3, 0, 0), (0, 2, 2)], + ids=["(0, 0)", "(3, 0, 0)", "(0, 2, 2)"], ) def test_inv_empty(self, shape): a = numpy.empty(shape) - ia = inp.array(a) - result = inp.linalg.inv(ia) + ia = dpnp.array(a) + result = dpnp.linalg.inv(ia) expected = numpy.linalg.inv(a) assert_dtype_allclose(result, expected) @@ -1843,10 +1772,10 @@ def test_inv_empty(self, shape): ) def test_inv_singular_matrix(self, matrix): a_np = numpy.array(matrix, dtype="float32") - a_dp = inp.array(a_np) + a_dp = dpnp.array(a_np) assert_raises(numpy.linalg.LinAlgError, numpy.linalg.inv, a_np) - assert_raises(inp.linalg.LinAlgError, inp.linalg.inv, a_dp) + assert_raises(dpnp.linalg.LinAlgError, dpnp.linalg.inv, a_dp) # TODO: remove skip when MKLD-13852 is resolved # _getrf_batch does not raise an error with singular matrices. 
@@ -1855,25 +1784,25 @@ def test_inv_singular_matrix_3D(self): a_np = numpy.array( [[[1, 2], [3, 4]], [[1, 2], [1, 2]], [[1, 3], [3, 1]]] ) - a_dp = inp.array(a_np) + a_dp = dpnp.array(a_np) assert_raises(numpy.linalg.LinAlgError, numpy.linalg.inv, a_np) - assert_raises(inp.linalg.LinAlgError, inp.linalg.inv, a_dp) + assert_raises(dpnp.linalg.LinAlgError, dpnp.linalg.inv, a_dp) def test_inv_errors(self): - a_dp = inp.array([[1, 2], [2, 5]], dtype="float32") + a_dp = dpnp.array([[1, 2], [2, 5]], dtype="float32") # unsupported type - a_np = inp.asnumpy(a_dp) - assert_raises(TypeError, inp.linalg.inv, a_np) + a_np = dpnp.asnumpy(a_dp) + assert_raises(TypeError, dpnp.linalg.inv, a_np) # a.ndim < 2 a_dp_ndim_1 = a_dp.flatten() - assert_raises(inp.linalg.LinAlgError, inp.linalg.inv, a_dp_ndim_1) + assert_raises(dpnp.linalg.LinAlgError, dpnp.linalg.inv, a_dp_ndim_1) # a is not square - a_dp = inp.ones((2, 3)) - assert_raises(inp.linalg.LinAlgError, inp.linalg.inv, a_dp) + a_dp = dpnp.ones((2, 3)) + assert_raises(dpnp.linalg.LinAlgError, dpnp.linalg.inv, a_dp) class TestLstsq: @@ -1896,10 +1825,10 @@ def test_lstsq(self, a_shape, b_shape, dtype): a_np = numpy.random.rand(*a_shape).astype(dtype) b_np = numpy.random.rand(*b_shape).astype(dtype) - a_dp = inp.array(a_np) - b_dp = inp.array(b_np) + a_dp = dpnp.array(a_np) + b_dp = dpnp.array(b_np) - result = inp.linalg.lstsq(a_dp, b_dp) + result = dpnp.linalg.lstsq(a_dp, b_dp) # if rcond is not set, FutureWarning is given. # By default Numpy uses None for calculations expected = numpy.linalg.lstsq(a_np, b_np, rcond=None) @@ -1912,14 +1841,13 @@ def test_lstsq(self, a_shape, b_shape, dtype): def test_lstsq_diff_type(self, a_dtype, b_dtype): a_np = numpy.array([[1, 2], [3, -5]], dtype=a_dtype) b_np = numpy.array([4, 1], dtype=b_dtype) - - a_dp = inp.array(a_np) - b_dp = inp.array(b_np) + a_dp = dpnp.array(a_np) + b_dp = dpnp.array(b_np) # if rcond is not set, FutureWarning is given. # By default Numpy uses None for calculations expected = numpy.linalg.lstsq(a_np, b_np, rcond=None) - result = inp.linalg.lstsq(a_dp, b_dp) + result = dpnp.linalg.lstsq(a_dp, b_dp) for param_dp, param_np in zip(result, expected): assert_dtype_allclose(param_dp, param_np) @@ -1933,10 +1861,10 @@ def test_lstsq_empty(self, m, n, nrhs, dtype): a_np = numpy.arange(m * n).reshape(m, n).astype(dtype) b_np = numpy.ones((m, nrhs)).astype(dtype) - a_dp = inp.array(a_np) - b_dp = inp.array(b_np) + a_dp = dpnp.array(a_np) + b_dp = dpnp.array(b_np) - result = inp.linalg.lstsq(a_dp, b_dp) + result = dpnp.linalg.lstsq(a_dp, b_dp) # if rcond is not set, FutureWarning is given. 
# By default Numpy uses None for calculations expected = numpy.linalg.lstsq(a_np, b_np, rcond=None) @@ -1945,24 +1873,24 @@ def test_lstsq_empty(self, m, n, nrhs, dtype): assert_dtype_allclose(param_dp, param_np) def test_lstsq_errors(self): - a_dp = inp.array([[1, 0.5], [0.5, 1]], dtype="float32") - b_dp = inp.array(a_dp, dtype="float32") + a_dp = dpnp.array([[1, 0.5], [0.5, 1]], dtype="float32") + b_dp = dpnp.array(a_dp, dtype="float32") # diffetent queue a_queue = dpctl.SyclQueue() b_queue = dpctl.SyclQueue() - a_dp_q = inp.array(a_dp, sycl_queue=a_queue) - b_dp_q = inp.array(b_dp, sycl_queue=b_queue) - assert_raises(ValueError, inp.linalg.lstsq, a_dp_q, b_dp_q) + a_dp_q = dpnp.array(a_dp, sycl_queue=a_queue) + b_dp_q = dpnp.array(b_dp, sycl_queue=b_queue) + assert_raises(ValueError, dpnp.linalg.lstsq, a_dp_q, b_dp_q) # unsupported type `a` and `b` - a_np = inp.asnumpy(a_dp) - b_np = inp.asnumpy(b_dp) - assert_raises(TypeError, inp.linalg.lstsq, a_np, b_dp) - assert_raises(TypeError, inp.linalg.lstsq, a_dp, b_np) + a_np = dpnp.asnumpy(a_dp) + b_np = dpnp.asnumpy(b_dp) + assert_raises(TypeError, dpnp.linalg.lstsq, a_np, b_dp) + assert_raises(TypeError, dpnp.linalg.lstsq, a_dp, b_np) # unsupported type `rcond` - assert_raises(TypeError, inp.linalg.lstsq, a_dp, b_dp, [-1]) + assert_raises(TypeError, dpnp.linalg.lstsq, a_dp, b_dp, [-1]) class TestMatrixPower: @@ -1988,28 +1916,28 @@ class TestMatrixPower: ) def test_matrix_power(self, data, power, dtype): a = data.astype(dtype) - a_dp = inp.array(a) + a_dp = dpnp.array(a) - result = inp.linalg.matrix_power(a_dp, power) + result = dpnp.linalg.matrix_power(a_dp, power) expected = numpy.linalg.matrix_power(a, power) assert_dtype_allclose(result, expected) def test_matrix_power_errors(self): - a_dp = inp.eye(4, dtype="float32") + a_dp = dpnp.eye(4, dtype="float32") # unsupported type `a` - a_np = inp.asnumpy(a_dp) - assert_raises(TypeError, inp.linalg.matrix_power, a_np, 2) + a_np = dpnp.asnumpy(a_dp) + assert_raises(TypeError, dpnp.linalg.matrix_power, a_np, 2) # unsupported type `power` - assert_raises(TypeError, inp.linalg.matrix_power, a_dp, 1.5) - assert_raises(TypeError, inp.linalg.matrix_power, a_dp, [2]) + assert_raises(TypeError, dpnp.linalg.matrix_power, a_dp, 1.5) + assert_raises(TypeError, dpnp.linalg.matrix_power, a_dp, [2]) # not invertible - noninv = inp.array([[1, 0], [0, 0]]) + noninv = dpnp.array([[1, 0], [0, 0]]) assert_raises( - inp.linalg.LinAlgError, inp.linalg.matrix_power, noninv, -1 + dpnp.linalg.LinAlgError, dpnp.linalg.matrix_power, noninv, -1 ) @@ -2028,10 +1956,10 @@ class TestMatrixRank: ) def test_matrix_rank(self, data, dtype): a = data.astype(dtype) - a_dp = inp.array(a) + a_dp = dpnp.array(a) np_rank = numpy.linalg.matrix_rank(a) - dp_rank = inp.linalg.matrix_rank(a_dp) + dp_rank = dpnp.linalg.matrix_rank(a_dp) assert np_rank == dp_rank @pytest.mark.parametrize("dtype", get_all_dtypes()) @@ -2046,10 +1974,10 @@ def test_matrix_rank(self, data, dtype): ) def test_matrix_rank_hermitian(self, data, dtype): a = data.astype(dtype) - a_dp = inp.array(a) + a_dp = dpnp.array(a) np_rank = numpy.linalg.matrix_rank(a, hermitian=True) - dp_rank = inp.linalg.matrix_rank(a_dp, hermitian=True) + dp_rank = dpnp.linalg.matrix_rank(a_dp, hermitian=True) assert np_rank == dp_rank @pytest.mark.parametrize( @@ -2059,22 +1987,18 @@ def test_matrix_rank_hermitian(self, data, dtype): (numpy.array(0.99e-6), numpy.array(1.01e-6)), (numpy.array([0.99e-6]), numpy.array([1.01e-6])), ], - ids=[ - "float", - "0-D array", - "1-D array", - ], 
+ ids=["float", "0-D array", "1-D array"], ) def test_matrix_rank_tolerance(self, high_tol, low_tol): a = numpy.eye(4) a[-1, -1] = 1e-6 - a_dp = inp.array(a) + a_dp = dpnp.array(a) if isinstance(high_tol, numpy.ndarray): - dp_high_tol = inp.array( + dp_high_tol = dpnp.array( high_tol, usm_type=a_dp.usm_type, sycl_queue=a_dp.sycl_queue ) - dp_low_tol = inp.array( + dp_low_tol = dpnp.array( low_tol, usm_type=a_dp.usm_type, sycl_queue=a_dp.sycl_queue ) else: @@ -2084,7 +2008,7 @@ def test_matrix_rank_tolerance(self, high_tol, low_tol): np_rank_high_tol = numpy.linalg.matrix_rank( a, hermitian=True, tol=high_tol ) - dp_rank_high_tol = inp.linalg.matrix_rank( + dp_rank_high_tol = dpnp.linalg.matrix_rank( a_dp, hermitian=True, tol=dp_high_tol ) assert np_rank_high_tol == dp_rank_high_tol @@ -2092,7 +2016,7 @@ def test_matrix_rank_tolerance(self, high_tol, low_tol): np_rank_low_tol = numpy.linalg.matrix_rank( a, hermitian=True, tol=low_tol ) - dp_rank_low_tol = inp.linalg.matrix_rank( + dp_rank_low_tol = dpnp.linalg.matrix_rank( a_dp, hermitian=True, tol=dp_low_tol ) assert np_rank_low_tol == dp_rank_low_tol @@ -2106,50 +2030,50 @@ def test_matrix_rank_tolerance(self, high_tol, low_tol): ) def test_matrix_rank_tol(self, tol): a = numpy.zeros((4, 3, 2)) - a_dp = inp.array(a) + a_dp = dpnp.array(a) if isinstance(tol, numpy.ndarray): - dp_tol = inp.array( + dp_tol = dpnp.array( tol, usm_type=a_dp.usm_type, sycl_queue=a_dp.sycl_queue ) else: dp_tol = tol expected = numpy.linalg.matrix_rank(a, rtol=tol) - result = inp.linalg.matrix_rank(a_dp, rtol=dp_tol) + result = dpnp.linalg.matrix_rank(a_dp, rtol=dp_tol) assert_dtype_allclose(result, expected) expected = numpy.linalg.matrix_rank(a, tol=tol) - result = inp.linalg.matrix_rank(a_dp, tol=dp_tol) + result = dpnp.linalg.matrix_rank(a_dp, tol=dp_tol) assert_dtype_allclose(result, expected) def test_matrix_rank_errors(self): - a_dp = inp.array([[1, 2], [3, 4]], dtype="float32") + a_dp = dpnp.array([[1, 2], [3, 4]], dtype="float32") # unsupported type `a` - a_np = inp.asnumpy(a_dp) - assert_raises(TypeError, inp.linalg.matrix_rank, a_np) + a_np = dpnp.asnumpy(a_dp) + assert_raises(TypeError, dpnp.linalg.matrix_rank, a_np) # unsupported type `tol` tol = numpy.array(0.5, dtype="float32") - assert_raises(TypeError, inp.linalg.matrix_rank, a_dp, tol) - assert_raises(TypeError, inp.linalg.matrix_rank, a_dp, [0.5]) + assert_raises(TypeError, dpnp.linalg.matrix_rank, a_dp, tol) + assert_raises(TypeError, dpnp.linalg.matrix_rank, a_dp, [0.5]) # diffetent queue a_queue = dpctl.SyclQueue() tol_queue = dpctl.SyclQueue() - a_dp_q = inp.array(a_dp, sycl_queue=a_queue) - tol_dp_q = inp.array([0.5], dtype="float32", sycl_queue=tol_queue) + a_dp_q = dpnp.array(a_dp, sycl_queue=a_queue) + tol_dp_q = dpnp.array([0.5], dtype="float32", sycl_queue=tol_queue) assert_raises( ExecutionPlacementError, - inp.linalg.matrix_rank, + dpnp.linalg.matrix_rank, a_dp_q, tol_dp_q, ) # both tol and rtol are given assert_raises( - ValueError, inp.linalg.matrix_rank, a_dp, tol=1e-06, rtol=1e-04 + ValueError, dpnp.linalg.matrix_rank, a_dp, tol=1e-06, rtol=1e-04 ) @@ -2159,17 +2083,17 @@ def test_matrix_rank_errors(self): # 1 test to increase code coverage def test_matrix_transpose(): a = numpy.arange(6).reshape((2, 3)) - a_dp = inp.array(a) + a_dp = dpnp.array(a) expected = numpy.linalg.matrix_transpose(a) - result = inp.linalg.matrix_transpose(a_dp) + result = dpnp.linalg.matrix_transpose(a_dp) assert_allclose(expected, result) with assert_raises_regex( ValueError, "array must be at least 
2-dimensional" ): - inp.linalg.matrix_transpose(a_dp[:, 0]) + dpnp.linalg.matrix_transpose(a_dp[:, 0]) class TestNorm: @@ -2183,18 +2107,18 @@ def setup_method(self): @pytest.mark.parametrize("ord", [None, -2, -1, 0, 1, 2, 3]) @pytest.mark.parametrize("axis", [0, None]) @pytest.mark.parametrize("keepdims", [True, False]) - def test_norm_empty(self, shape, ord, axis, keepdims): + def test_empty(self, shape, ord, axis, keepdims): a = numpy.empty(shape) - ia = inp.array(a) + ia = dpnp.array(a) if axis is None and a.ndim > 1 and ord in [0, 3]: # Invalid norm order for matrices (a.ndim == 2) or # Improper number of dimensions to norm (a.ndim>2) with pytest.raises(ValueError): - inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) elif axis is None and a.ndim > 2 and ord is not None: # Improper number of dimensions to norm with pytest.raises(ValueError): - inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) elif ( axis is None and ord is not None @@ -2203,87 +2127,85 @@ def test_norm_empty(self, shape, ord, axis, keepdims): ): # reduction cannot be performed over zero-size axes with pytest.raises(ValueError): - inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) else: - result = inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + result = dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) expected = numpy.linalg.norm( a, ord=ord, axis=axis, keepdims=keepdims ) assert_dtype_allclose(result, expected) @pytest.mark.parametrize( - "ord", [None, -inp.inf, -2, -1, 0, 1, 2, 3, inp.inf] + "ord", [None, -dpnp.inf, -2, -1, 0, 1, 2, 3, dpnp.inf] ) @pytest.mark.parametrize("axis", [0, None]) - def test_norm_0D(self, ord, axis): + def test_0D(self, ord, axis): a = numpy.array(2) - ia = inp.array(a) + ia = dpnp.array(a) if axis is None and ord is not None: # Improper number of dimensions to norm with pytest.raises(ValueError): - inp.linalg.norm(ia, ord=ord, axis=axis) + dpnp.linalg.norm(ia, ord=ord, axis=axis) elif axis is not None: with pytest.raises(AxisError): - inp.linalg.norm(ia, ord=ord, axis=axis) + dpnp.linalg.norm(ia, ord=ord, axis=axis) else: - result = inp.linalg.norm(ia, ord=ord, axis=axis) + result = dpnp.linalg.norm(ia, ord=ord, axis=axis) expected = numpy.linalg.norm(a, ord=ord, axis=axis) assert_dtype_allclose(result, expected) @pytest.mark.usefixtures("suppress_divide_numpy_warnings") @pytest.mark.parametrize("dtype", get_all_dtypes(no_complex=True)) @pytest.mark.parametrize( - "ord", [None, -inp.inf, -2, -1, 0, 1, 2, 3.5, inp.inf] + "ord", [None, -dpnp.inf, -2, -1, 0, 1, 2, 3.5, dpnp.inf] ) @pytest.mark.parametrize("axis", [0, None]) @pytest.mark.parametrize("keepdims", [True, False]) - def test_norm_1D(self, dtype, ord, axis, keepdims): + def test_1D(self, dtype, ord, axis, keepdims): a = numpy.array(numpy.random.uniform(-5, 5, 10), dtype=dtype) - ia = inp.array(a) + ia = dpnp.array(a) - result = inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + result = dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) expected = numpy.linalg.norm(a, ord=ord, axis=axis, keepdims=keepdims) assert_dtype_allclose(result, expected) @pytest.mark.usefixtures("suppress_divide_numpy_warnings") @pytest.mark.parametrize("dtype", get_complex_dtypes()) @pytest.mark.parametrize( - "ord", [None, -inp.inf, -2, -1, 0, 1, 2, 3.5, inp.inf] + "ord", [None, -dpnp.inf, -2, -1, 0, 1, 2, 
3.5, dpnp.inf] ) @pytest.mark.parametrize("axis", [0, None]) @pytest.mark.parametrize("keepdims", [True, False]) - def test_norm_1D_complex(self, dtype, ord, axis, keepdims): + def test_1D_complex(self, dtype, ord, axis, keepdims): x1 = numpy.random.uniform(-5, 5, 10) x2 = numpy.random.uniform(-5, 5, 10) a = numpy.array(x1 + 1j * x2, dtype=dtype) - ia = inp.array(a) + ia = dpnp.array(a) - result = inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + result = dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) expected = numpy.linalg.norm(a, ord=ord, axis=axis, keepdims=keepdims) assert_dtype_allclose(result, expected) @pytest.mark.usefixtures("suppress_divide_numpy_warnings") @pytest.mark.parametrize("dtype", get_all_dtypes(no_complex=True)) @pytest.mark.parametrize( - "ord", [None, -inp.inf, -2, -1, 1, 2, 3, inp.inf, "fro", "nuc"] + "ord", [None, -dpnp.inf, -2, -1, 1, 2, 3, dpnp.inf, "fro", "nuc"] ) @pytest.mark.parametrize( "axis", [0, 1, (1, 0), None], ids=["0", "1", "(1, 0)", "None"] ) @pytest.mark.parametrize("keepdims", [True, False]) - def test_norm_2D(self, dtype, ord, axis, keepdims): - a = numpy.array(numpy.random.uniform(-5, 5, 15), dtype=dtype).reshape( - 3, 5 - ) - ia = inp.array(a) + def test_2D(self, dtype, ord, axis, keepdims): + a = generate_random_numpy_array((3, 5), dtype) + ia = dpnp.array(a) if (axis in [-1, 0, 1] and ord in ["nuc", "fro"]) or ( (isinstance(axis, tuple) or axis is None) and ord == 3 ): # Invalid norm order for vectors with pytest.raises(ValueError): - inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) else: - result = inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + result = dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) expected = numpy.linalg.norm( a, ord=ord, axis=axis, keepdims=keepdims ) @@ -2292,25 +2214,25 @@ def test_norm_2D(self, dtype, ord, axis, keepdims): @pytest.mark.usefixtures("suppress_divide_numpy_warnings") @pytest.mark.parametrize("dtype", get_complex_dtypes()) @pytest.mark.parametrize( - "ord", [None, -inp.inf, -2, -1, 1, 2, 3, inp.inf, "fro", "nuc"] + "ord", [None, -dpnp.inf, -2, -1, 1, 2, 3, dpnp.inf, "fro", "nuc"] ) @pytest.mark.parametrize( "axis", [0, 1, (1, 0), None], ids=["0", "1", "(1, 0)", "None"] ) @pytest.mark.parametrize("keepdims", [True, False]) - def test_norm_2D_complex(self, dtype, ord, axis, keepdims): + def test_2D_complex(self, dtype, ord, axis, keepdims): x1 = numpy.random.uniform(-5, 5, 15) x2 = numpy.random.uniform(-5, 5, 15) a = numpy.array(x1 + 1j * x2, dtype=dtype).reshape(3, 5) - ia = inp.array(a) + ia = dpnp.array(a) if (axis in [-1, 0, 1] and ord in ["nuc", "fro"]) or ( (isinstance(axis, tuple) or axis is None) and ord == 3 ): # Invalid norm order for vectors with pytest.raises(ValueError): - inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) else: - result = inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + result = dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) expected = numpy.linalg.norm( a, ord=ord, axis=axis, keepdims=keepdims ) @@ -2319,7 +2241,7 @@ def test_norm_2D_complex(self, dtype, ord, axis, keepdims): @pytest.mark.usefixtures("suppress_divide_numpy_warnings") @pytest.mark.parametrize("dtype", get_all_dtypes(no_complex=True)) @pytest.mark.parametrize( - "ord", [None, -inp.inf, -2, -1, 1, 2, 3, inp.inf, "fro", "nuc"] + "ord", [None, -dpnp.inf, -2, -1, 1, 2, 3, dpnp.inf, "fro", "nuc"] ) 
@pytest.mark.parametrize( "axis", @@ -2327,23 +2249,21 @@ def test_norm_2D_complex(self, dtype, ord, axis, keepdims): ids=["-1", "0", "1", "(0, 1)", "(-1, -2)", "None"], ) @pytest.mark.parametrize("keepdims", [True, False]) - def test_norm_ND(self, dtype, ord, axis, keepdims): - a = numpy.array(numpy.random.uniform(-5, 5, 120), dtype=dtype).reshape( - 2, 3, 4, 5 - ) - ia = inp.array(a) + def test_ND(self, dtype, ord, axis, keepdims): + a = generate_random_numpy_array((2, 3, 4, 5), dtype) + ia = dpnp.array(a) if (axis in [-1, 0, 1] and ord in ["nuc", "fro"]) or ( isinstance(axis, tuple) and ord == 3 ): # Invalid norm order for vectors with pytest.raises(ValueError): - inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) elif axis is None and ord is not None: # Improper number of dimensions to norm with pytest.raises(ValueError): - inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) else: - result = inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + result = dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) expected = numpy.linalg.norm( a, ord=ord, axis=axis, keepdims=keepdims ) @@ -2352,7 +2272,7 @@ def test_norm_ND(self, dtype, ord, axis, keepdims): @pytest.mark.usefixtures("suppress_divide_numpy_warnings") @pytest.mark.parametrize("dtype", get_complex_dtypes()) @pytest.mark.parametrize( - "ord", [None, -inp.inf, -2, -1, 1, 2, 3, inp.inf, "fro", "nuc"] + "ord", [None, -dpnp.inf, -2, -1, 1, 2, 3, dpnp.inf, "fro", "nuc"] ) @pytest.mark.parametrize( "axis", @@ -2360,23 +2280,23 @@ def test_norm_ND(self, dtype, ord, axis, keepdims): ids=["-1", "0", "1", "(0, 1)", "(-1, -2)", "None"], ) @pytest.mark.parametrize("keepdims", [True, False]) - def test_norm_ND_complex(self, dtype, ord, axis, keepdims): + def test_ND_complex(self, dtype, ord, axis, keepdims): x1 = numpy.random.uniform(-5, 5, 120) x2 = numpy.random.uniform(-5, 5, 120) a = numpy.array(x1 + 1j * x2, dtype=dtype).reshape(2, 3, 4, 5) - ia = inp.array(a) + ia = dpnp.array(a) if (axis in [-1, 0, 1] and ord in ["nuc", "fro"]) or ( isinstance(axis, tuple) and ord == 3 ): # Invalid norm order for vectors with pytest.raises(ValueError): - inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) elif axis is None and ord is not None: # Improper number of dimensions to norm with pytest.raises(ValueError): - inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) else: - result = inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + result = dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) expected = numpy.linalg.norm( a, ord=ord, axis=axis, keepdims=keepdims ) @@ -2385,7 +2305,7 @@ def test_norm_ND_complex(self, dtype, ord, axis, keepdims): @pytest.mark.usefixtures("suppress_divide_numpy_warnings") @pytest.mark.parametrize("dtype", get_all_dtypes()) @pytest.mark.parametrize( - "ord", [None, -inp.inf, -2, -1, 1, 2, 3, inp.inf, "fro", "nuc"] + "ord", [None, -dpnp.inf, -2, -1, 1, 2, 3, dpnp.inf, "fro", "nuc"] ) @pytest.mark.parametrize( "axis", @@ -2393,7 +2313,7 @@ def test_norm_ND_complex(self, dtype, ord, axis, keepdims): ids=["-1", "0", "1", "(0, 1)", "(-2, -1)", "None"], ) @pytest.mark.parametrize("keepdims", [True, False]) - def test_norm_usm_ndarray(self, dtype, ord, axis, keepdims): + def test_usm_ndarray(self, dtype, ord, axis, keepdims): a 
= numpy.array(numpy.random.uniform(-5, 5, 120), dtype=dtype).reshape( 2, 3, 4, 5 ) @@ -2403,45 +2323,43 @@ def test_norm_usm_ndarray(self, dtype, ord, axis, keepdims): ): # Invalid norm order for vectors with pytest.raises(ValueError): - inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) elif axis is None and ord is not None: # Improper number of dimensions to norm with pytest.raises(ValueError): - inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) else: - result = inp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) + result = dpnp.linalg.norm(ia, ord=ord, axis=axis, keepdims=keepdims) expected = numpy.linalg.norm( a, ord=ord, axis=axis, keepdims=keepdims ) assert_dtype_allclose(result, expected) @pytest.mark.parametrize("stride", [3, -1, -5]) - def test_norm_strided_1D(self, stride): + def test_strided_1D(self, stride): a = numpy.arange(25) - ia = inp.array(a) + ia = dpnp.array(a) - result = inp.linalg.norm(ia[::stride]) + result = dpnp.linalg.norm(ia[::stride]) expected = numpy.linalg.norm(a[::stride]) assert_dtype_allclose(result, expected) @pytest.mark.parametrize( - "axis", - [-1, 0, (0, 1), None], - ids=["-1", "0", "(0, 1)", "None"], + "axis", [-1, 0, (0, 1), None], ids=["-1", "0", "(0, 1)", "None"] ) @pytest.mark.parametrize( "stride", [(-2, -4), (2, 4), (-3, 5), (3, -1)], ids=["(-2, -4)", "(2, 4)", "(-3, 5)", "(3, -1)"], ) - def test_norm_strided_2D(self, axis, stride): + def test_strided_2D(self, axis, stride): A = numpy.random.rand(20, 30) - B = inp.asarray(A) + B = dpnp.asarray(A) slices = tuple(slice(None, None, stride[i]) for i in range(A.ndim)) a = A[slices] b = B[slices] - result = inp.linalg.norm(b, axis=axis) + result = dpnp.linalg.norm(b, axis=axis) expected = numpy.linalg.norm(a, axis=axis) assert_dtype_allclose(result, expected) @@ -2455,54 +2373,54 @@ def test_norm_strided_2D(self, axis, stride): [(-2, -3, -1, -4), (-2, 4, -3, 5), (2, 3, 1, 4)], ids=["(-2, -3, -1, -4)", "(-2, 4, -3, 5)", "(2, 3, 1, 4)"], ) - def test_norm_strided_ND(self, axis, stride): + def test_strided_ND(self, axis, stride): A = numpy.random.rand(12, 16, 20, 24) - B = inp.asarray(A) + B = dpnp.asarray(A) slices = tuple(slice(None, None, stride[i]) for i in range(A.ndim)) a = A[slices] b = B[slices] - result = inp.linalg.norm(b, axis=axis) + result = dpnp.linalg.norm(b, axis=axis) expected = numpy.linalg.norm(a, axis=axis) assert_dtype_allclose(result, expected) @testing.with_requires("numpy>=2.0") @pytest.mark.parametrize( "ord", - [None, -inp.inf, -2, -1, 1, 2, inp.inf, "fro", "nuc"], + [None, -dpnp.inf, -2, -1, 1, 2, dpnp.inf, "fro", "nuc"], ) @pytest.mark.parametrize("keepdims", [True, False]) def test_matrix_norm(self, ord, keepdims): a = numpy.array(numpy.random.uniform(-5, 5, 15)).reshape(3, 5) - ia = inp.array(a) + ia = dpnp.array(a) - result = inp.linalg.matrix_norm(ia, ord=ord, keepdims=keepdims) + result = dpnp.linalg.matrix_norm(ia, ord=ord, keepdims=keepdims) expected = numpy.linalg.matrix_norm(a, ord=ord, keepdims=keepdims) assert_dtype_allclose(result, expected) @testing.with_requires("numpy>=2.0") @pytest.mark.parametrize( - "ord", [None, -inp.inf, -2, -1, 0, 1, 2, 3.5, inp.inf] + "ord", [None, -dpnp.inf, -2, -1, 0, 1, 2, 3.5, dpnp.inf] ) def test_vector_norm_0D(self, ord): a = numpy.array(2) - ia = inp.array(a) + ia = dpnp.array(a) - result = inp.linalg.vector_norm(ia, ord=ord) + result = dpnp.linalg.vector_norm(ia, ord=ord) expected = 
numpy.linalg.vector_norm(a, ord=ord) assert_dtype_allclose(result, expected) @testing.with_requires("numpy>=2.0") @pytest.mark.parametrize( - "ord", [None, -inp.inf, -2, -1, 0, 1, 2, 3.5, inp.inf] + "ord", [None, -dpnp.inf, -2, -1, 0, 1, 2, 3.5, dpnp.inf] ) @pytest.mark.parametrize("axis", [0, None]) @pytest.mark.parametrize("keepdims", [True, False]) def test_vector_norm_1D(self, ord, axis, keepdims): a = numpy.array(numpy.random.uniform(-5, 5, 10)) - ia = inp.array(a) + ia = dpnp.array(a) - result = inp.linalg.vector_norm( + result = dpnp.linalg.vector_norm( ia, ord=ord, axis=axis, keepdims=keepdims ) expected = numpy.linalg.vector_norm( @@ -2513,7 +2431,7 @@ def test_vector_norm_1D(self, ord, axis, keepdims): @testing.with_requires("numpy>=2.0") @pytest.mark.usefixtures("suppress_divide_numpy_warnings") @pytest.mark.parametrize( - "ord", [None, -inp.inf, -2, -1, 1, 2, 3.5, inp.inf] + "ord", [None, -dpnp.inf, -2, -1, 1, 2, 3.5, dpnp.inf] ) @pytest.mark.parametrize( "axis", @@ -2523,9 +2441,9 @@ def test_vector_norm_1D(self, ord, axis, keepdims): @pytest.mark.parametrize("keepdims", [True, False]) def test_vector_norm_ND(self, ord, axis, keepdims): a = numpy.arange(120).reshape(2, 3, 4, 5) - ia = inp.array(a) + ia = dpnp.array(a) - result = inp.linalg.vector_norm( + result = dpnp.linalg.vector_norm( ia, ord=ord, axis=axis, keepdims=keepdims ) expected = numpy.linalg.vector_norm( @@ -2533,20 +2451,20 @@ def test_vector_norm_ND(self, ord, axis, keepdims): ) assert_dtype_allclose(result, expected) - def test_norm_error(self): - ia = inp.arange(120).reshape(2, 3, 4, 5) + def test_error(self): + ia = dpnp.arange(120).reshape(2, 3, 4, 5) # Duplicate axes given with pytest.raises(ValueError): - inp.linalg.norm(ia, axis=(2, 2)) + dpnp.linalg.norm(ia, axis=(2, 2)) #'axis' must be None, an integer or a tuple of integers with pytest.raises(TypeError): - inp.linalg.norm(ia, axis=[2]) + dpnp.linalg.norm(ia, axis=[2]) # Invalid norm order for vectors with pytest.raises(ValueError): - inp.linalg.norm(ia, axis=1, ord=[3]) + dpnp.linalg.norm(ia, axis=1, ord=[3]) class TestQr: @@ -2564,35 +2482,31 @@ class TestQr: "(2, 2, 4)", ], ) - @pytest.mark.parametrize( - "mode", - ["r", "raw", "complete", "reduced"], - ids=["r", "raw", "complete", "reduced"], - ) + @pytest.mark.parametrize("mode", ["r", "raw", "complete", "reduced"]) def test_qr(self, dtype, shape, mode): # Set seed_value=81 to prevent # random generation of the input singular matrix a = generate_random_numpy_array(shape, dtype, seed_value=81) - ia = inp.array(a) + ia = dpnp.array(a) if mode == "r": np_r = numpy.linalg.qr(a, mode) - dpnp_r = inp.linalg.qr(ia, mode) + dpnp_r = dpnp.linalg.qr(ia, mode) else: np_q, np_r = numpy.linalg.qr(a, mode) - dpnp_q, dpnp_r = inp.linalg.qr(ia, mode) + dpnp_q, dpnp_r = dpnp.linalg.qr(ia, mode) # check decomposition if mode in ("complete", "reduced"): if a.ndim == 2: assert_almost_equal( - inp.dot(dpnp_q, dpnp_r), + dpnp.dot(dpnp_q, dpnp_r), a, decimal=5, ) else: # a.ndim > 2 assert_almost_equal( - inp.matmul(dpnp_q, dpnp_r), + dpnp.matmul(dpnp_q, dpnp_r), a, decimal=5, ) @@ -2615,42 +2529,34 @@ def test_qr(self, dtype, shape, mode): "(0, 2, 3)", ], ) - @pytest.mark.parametrize( - "mode", - ["r", "raw", "complete", "reduced"], - ids=["r", "raw", "complete", "reduced"], - ) + @pytest.mark.parametrize("mode", ["r", "raw", "complete", "reduced"]) def test_qr_empty(self, dtype, shape, mode): a = numpy.empty(shape, dtype=dtype) - ia = inp.array(a) + ia = dpnp.array(a) if mode == "r": np_r = numpy.linalg.qr(a, mode) - 
dpnp_r = inp.linalg.qr(ia, mode) + dpnp_r = dpnp.linalg.qr(ia, mode) else: np_q, np_r = numpy.linalg.qr(a, mode) - dpnp_q, dpnp_r = inp.linalg.qr(ia, mode) + dpnp_q, dpnp_r = dpnp.linalg.qr(ia, mode) assert_dtype_allclose(dpnp_q, np_q) assert_dtype_allclose(dpnp_r, np_r) - @pytest.mark.parametrize( - "mode", - ["r", "raw", "complete", "reduced"], - ids=["r", "raw", "complete", "reduced"], - ) + @pytest.mark.parametrize("mode", ["r", "raw", "complete", "reduced"]) def test_qr_strides(self, mode): a = generate_random_numpy_array((5, 5)) - ia = inp.array(a) + ia = dpnp.array(a) # positive strides if mode == "r": np_r = numpy.linalg.qr(a[::2, ::2], mode) - dpnp_r = inp.linalg.qr(ia[::2, ::2], mode) + dpnp_r = dpnp.linalg.qr(ia[::2, ::2], mode) else: np_q, np_r = numpy.linalg.qr(a[::2, ::2], mode) - dpnp_q, dpnp_r = inp.linalg.qr(ia[::2, ::2], mode) + dpnp_q, dpnp_r = dpnp.linalg.qr(ia[::2, ::2], mode) assert_dtype_allclose(dpnp_q, np_q) @@ -2659,38 +2565,38 @@ def test_qr_strides(self, mode): # negative strides if mode == "r": np_r = numpy.linalg.qr(a[::-2, ::-2], mode) - dpnp_r = inp.linalg.qr(ia[::-2, ::-2], mode) + dpnp_r = dpnp.linalg.qr(ia[::-2, ::-2], mode) else: np_q, np_r = numpy.linalg.qr(a[::-2, ::-2], mode) - dpnp_q, dpnp_r = inp.linalg.qr(ia[::-2, ::-2], mode) + dpnp_q, dpnp_r = dpnp.linalg.qr(ia[::-2, ::-2], mode) assert_dtype_allclose(dpnp_q, np_q) assert_dtype_allclose(dpnp_r, np_r) def test_qr_errors(self): - a_dp = inp.array([[1, 2], [3, 5]], dtype="float32") + a_dp = dpnp.array([[1, 2], [3, 5]], dtype="float32") # unsupported type - a_np = inp.asnumpy(a_dp) - assert_raises(TypeError, inp.linalg.qr, a_np) + a_np = dpnp.asnumpy(a_dp) + assert_raises(TypeError, dpnp.linalg.qr, a_np) # a.ndim < 2 a_dp_ndim_1 = a_dp.flatten() - assert_raises(inp.linalg.LinAlgError, inp.linalg.qr, a_dp_ndim_1) + assert_raises(dpnp.linalg.LinAlgError, dpnp.linalg.qr, a_dp_ndim_1) # invalid mode - assert_raises(ValueError, inp.linalg.qr, a_dp, "c") + assert_raises(ValueError, dpnp.linalg.qr, a_dp, "c") class TestSolve: @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) def test_solve(self, dtype): a_np = numpy.array([[1, 0.5], [0.5, 1]], dtype=dtype) - a_dp = inp.array(a_np) + a_dp = dpnp.array(a_np) expected = numpy.linalg.solve(a_np, a_np) - result = inp.linalg.solve(a_dp, a_dp) + result = dpnp.linalg.solve(a_dp, a_dp) assert_allclose(expected, result, rtol=1e-06) @@ -2716,11 +2622,11 @@ def test_solve_broadcast(self, a_shape, b_shape, dtype): # random generation of the input singular matrix b_np = generate_random_numpy_array(b_shape, dtype, seed_value=76) - a_dp = inp.array(a_np) - b_dp = inp.array(b_np) + a_dp = dpnp.array(a_np) + b_dp = dpnp.array(b_np) expected = numpy.linalg.solve(a_np, b_np) - result = inp.linalg.solve(a_dp, b_dp) + result = dpnp.linalg.solve(a_dp, b_dp) assert_dtype_allclose(result, expected) @@ -2731,11 +2637,11 @@ def test_solve_nrhs_greater_n(self, dtype): a_np = numpy.array([[1, 2], [3, 5]], dtype=dtype) b_np = numpy.array([[1, 1, 1], [2, 2, 2]], dtype=dtype) - a_dp = inp.array(a_np) - b_dp = inp.array(b_np) + a_dp = dpnp.array(a_np) + b_dp = dpnp.array(b_np) expected = numpy.linalg.solve(a_np, b_np) - result = inp.linalg.solve(a_dp, b_dp) + result = dpnp.linalg.solve(a_dp, b_dp) assert_dtype_allclose(result, expected) @@ -2744,12 +2650,11 @@ def test_solve_nrhs_greater_n(self, dtype): def test_solve_diff_type(self, a_dtype, b_dtype): a_np = numpy.array([[1, 2], [3, -5]], dtype=a_dtype) b_np = numpy.array([4, 1], dtype=b_dtype) - - a_dp = inp.array(a_np) - 
b_dp = inp.array(b_np) + a_dp = dpnp.array(a_np) + b_dp = dpnp.array(b_np) expected = numpy.linalg.solve(a_np, b_np) - result = inp.linalg.solve(a_dp, b_dp) + result = dpnp.linalg.solve(a_dp, b_dp) assert_dtype_allclose(result, expected) @@ -2765,17 +2670,17 @@ def test_solve_strides(self): ) b_np = numpy.array([5, 8, 9, 2, 1]) - a_dp = inp.array(a_np) - b_dp = inp.array(b_np) + a_dp = dpnp.array(a_np) + b_dp = dpnp.array(b_np) # positive strides expected = numpy.linalg.solve(a_np[::2, ::2], b_np[::2]) - result = inp.linalg.solve(a_dp[::2, ::2], b_dp[::2]) + result = dpnp.linalg.solve(a_dp[::2, ::2], b_dp[::2]) assert_allclose(expected, result, rtol=1e-05) # negative strides expected = numpy.linalg.solve(a_np[::-2, ::-2], b_np[::-2]) - result = inp.linalg.solve(a_dp[::-2, ::-2], b_dp[::-2]) + result = dpnp.linalg.solve(a_dp[::-2, ::-2], b_dp[::-2]) assert_allclose(expected, result, rtol=1e-05) @pytest.mark.parametrize( @@ -2801,48 +2706,48 @@ def test_solve_singular_matrix(self, matrix, vector): a_np = numpy.array(matrix, dtype="float32") b_np = numpy.array(vector, dtype="float32") - a_dp = inp.array(a_np) - b_dp = inp.array(b_np) + a_dp = dpnp.array(a_np) + b_dp = dpnp.array(b_np) assert_raises(numpy.linalg.LinAlgError, numpy.linalg.solve, a_np, b_np) - assert_raises(inp.linalg.LinAlgError, inp.linalg.solve, a_dp, b_dp) + assert_raises(dpnp.linalg.LinAlgError, dpnp.linalg.solve, a_dp, b_dp) def test_solve_errors(self): - a_dp = inp.array([[1, 0.5], [0.5, 1]], dtype="float32") - b_dp = inp.array(a_dp, dtype="float32") + a_dp = dpnp.array([[1, 0.5], [0.5, 1]], dtype="float32") + b_dp = dpnp.array(a_dp, dtype="float32") # diffetent queue a_queue = dpctl.SyclQueue() b_queue = dpctl.SyclQueue() - a_dp_q = inp.array(a_dp, sycl_queue=a_queue) - b_dp_q = inp.array(b_dp, sycl_queue=b_queue) - assert_raises(ValueError, inp.linalg.solve, a_dp_q, b_dp_q) + a_dp_q = dpnp.array(a_dp, sycl_queue=a_queue) + b_dp_q = dpnp.array(b_dp, sycl_queue=b_queue) + assert_raises(ValueError, dpnp.linalg.solve, a_dp_q, b_dp_q) # unsupported type - a_np = inp.asnumpy(a_dp) - b_np = inp.asnumpy(b_dp) - assert_raises(TypeError, inp.linalg.solve, a_np, b_dp) - assert_raises(TypeError, inp.linalg.solve, a_dp, b_np) + a_np = dpnp.asnumpy(a_dp) + b_np = dpnp.asnumpy(b_dp) + assert_raises(TypeError, dpnp.linalg.solve, a_np, b_dp) + assert_raises(TypeError, dpnp.linalg.solve, a_dp, b_np) # a.ndim < 2 a_dp_ndim_1 = a_dp.flatten() assert_raises( - inp.linalg.LinAlgError, inp.linalg.solve, a_dp_ndim_1, b_dp + dpnp.linalg.LinAlgError, dpnp.linalg.solve, a_dp_ndim_1, b_dp ) # b.ndim == 0 - b_dp_ndim_0 = inp.array(2) - assert_raises(ValueError, inp.linalg.solve, a_dp, b_dp_ndim_0) + b_dp_ndim_0 = dpnp.array(2) + assert_raises(ValueError, dpnp.linalg.solve, a_dp, b_dp_ndim_0) class TestSlogdet: @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) def test_slogdet_2d(self, dtype): a_np = numpy.array([[1, 2], [3, 4]], dtype=dtype) - a_dp = inp.array(a_np) + a_dp = dpnp.array(a_np) sign_expected, logdet_expected = numpy.linalg.slogdet(a_np) - sign_result, logdet_result = inp.linalg.slogdet(a_dp) + sign_result, logdet_result = dpnp.linalg.slogdet(a_dp) assert_allclose(sign_expected, sign_result) assert_allclose(logdet_expected, logdet_result, rtol=1e-3, atol=1e-4) @@ -2857,10 +2762,10 @@ def test_slogdet_3d(self, dtype): ], dtype=dtype, ) - a_dp = inp.array(a_np) + a_dp = dpnp.array(a_np) sign_expected, logdet_expected = numpy.linalg.slogdet(a_np) - sign_result, logdet_result = inp.linalg.slogdet(a_dp) + sign_result, 
logdet_result = dpnp.linalg.slogdet(a_dp) assert_allclose(sign_expected, sign_result) assert_allclose(logdet_expected, logdet_result, rtol=1e-3, atol=1e-4) @@ -2876,17 +2781,17 @@ def test_slogdet_strides(self): ] ) - a_dp = inp.array(a_np) + a_dp = dpnp.array(a_np) # positive strides sign_expected, logdet_expected = numpy.linalg.slogdet(a_np[::2, ::2]) - sign_result, logdet_result = inp.linalg.slogdet(a_dp[::2, ::2]) + sign_result, logdet_result = dpnp.linalg.slogdet(a_dp[::2, ::2]) assert_allclose(sign_expected, sign_result) assert_allclose(logdet_expected, logdet_result, rtol=1e-3, atol=1e-4) # negative strides sign_expected, logdet_expected = numpy.linalg.slogdet(a_np[::-2, ::-2]) - sign_result, logdet_result = inp.linalg.slogdet(a_dp[::-2, ::-2]) + sign_result, logdet_result = dpnp.linalg.slogdet(a_dp[::-2, ::-2]) assert_allclose(sign_expected, sign_result) assert_allclose(logdet_expected, logdet_result, rtol=1e-3, atol=1e-4) @@ -2911,10 +2816,10 @@ def test_slogdet_strides(self): ) def test_slogdet_singular_matrix(self, matrix): a_np = numpy.array(matrix, dtype="float32") - a_dp = inp.array(a_np) + a_dp = dpnp.array(a_np) sign_expected, logdet_expected = numpy.linalg.slogdet(a_np) - sign_result, logdet_result = inp.linalg.slogdet(a_dp) + sign_result, logdet_result = dpnp.linalg.slogdet(a_dp) assert_allclose(sign_expected, sign_result) assert_allclose(logdet_expected, logdet_result, rtol=1e-3, atol=1e-4) @@ -2927,30 +2832,30 @@ def test_slogdet_singular_matrix_3D(self): a_np = numpy.array( [[[1, 2], [3, 4]], [[1, 2], [1, 2]], [[1, 3], [3, 1]]] ) - a_dp = inp.array(a_np) + a_dp = dpnp.array(a_np) sign_expected, logdet_expected = numpy.linalg.slogdet(a_np) - sign_result, logdet_result = inp.linalg.slogdet(a_dp) + sign_result, logdet_result = dpnp.linalg.slogdet(a_dp) assert_allclose(sign_expected, sign_result) assert_allclose(logdet_expected, logdet_result, rtol=1e-3, atol=1e-4) def test_slogdet_errors(self): - a_dp = inp.array([[1, 2], [3, 5]], dtype="float32") + a_dp = dpnp.array([[1, 2], [3, 5]], dtype="float32") # unsupported type - a_np = inp.asnumpy(a_dp) - assert_raises(TypeError, inp.linalg.slogdet, a_np) + a_np = dpnp.asnumpy(a_dp) + assert_raises(TypeError, dpnp.linalg.slogdet, a_np) class TestSvd: def get_tol(self, dtype): tol = 1e-06 - if dtype in (inp.float32, inp.complex64): + if dtype in (dpnp.float32, dpnp.complex64): tol = 1e-04 elif not has_support_aspect64() and dtype in ( - inp.int32, - inp.int64, + dpnp.int32, + dpnp.int64, None, ): tol = 1e-04 @@ -2985,10 +2890,10 @@ def check_decomposition( ): tol = self._tol if compute_vt: - dpnp_diag_s = inp.zeros_like(dp_a, dtype=dp_s.dtype) + dpnp_diag_s = dpnp.zeros_like(dp_a, dtype=dp_s.dtype) for i in range(min(dp_a.shape[-2], dp_a.shape[-1])): dpnp_diag_s[..., i, i] = dp_s[..., i] - reconstructed = inp.dot(dp_u, inp.dot(dpnp_diag_s, dp_vt)) + reconstructed = dpnp.dot(dp_u, dpnp.dot(dpnp_diag_s, dp_vt)) # TODO: use assert dpnp.allclose() inside check_decomposition() # when it will support complex dtypes assert_allclose(dp_a, reconstructed, rtol=tol, atol=1e-4) @@ -3002,13 +2907,13 @@ def check_decomposition( np_vt[..., i, :] = -np_vt[..., i, :] for i in range(numpy.count_nonzero(np_s > tol)): assert_allclose( - inp.asnumpy(dp_u[..., :, i]), + dpnp.asnumpy(dp_u[..., :, i]), np_u[..., :, i], rtol=tol, atol=tol, ) assert_allclose( - inp.asnumpy(dp_vt[..., i, :]), + dpnp.asnumpy(dp_vt[..., i, :]), np_vt[..., i, :], rtol=tol, atol=tol, @@ -3018,14 +2923,14 @@ def check_decomposition( @pytest.mark.parametrize( "shape", [(2, 2), 
(3, 4), (5, 3), (16, 16)], - ids=["(2,2)", "(3,4)", "(5,3)", "(16,16)"], + ids=["(2, 2)", "(3, 4)", "(5, 3)", "(16, 16)"], ) def test_svd(self, dtype, shape): a = numpy.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) - dp_a = inp.array(a) + dp_a = dpnp.array(a) np_u, np_s, np_vt = numpy.linalg.svd(a) - dp_u, dp_s, dp_vt = inp.linalg.svd(dp_a) + dp_u, dp_s, dp_vt = dpnp.linalg.svd(dp_a) self.check_types_shapes(dp_u, dp_s, dp_vt, np_u, np_s, np_vt) self.get_tol(dtype) @@ -3034,11 +2939,9 @@ def test_svd(self, dtype, shape): ) @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) - @pytest.mark.parametrize("compute_vt", [True, False], ids=["True", "False"]) + @pytest.mark.parametrize("compute_vt", [True, False]) @pytest.mark.parametrize( - "shape", - [(2, 2), (16, 16)], - ids=["(2, 2)", "(16, 16)"], + "shape", [(2, 2), (16, 16)], ids=["(2, 2)", "(16, 16)"] ) def test_svd_hermitian(self, dtype, compute_vt, shape): # Set seed_value=81 to prevent @@ -3046,18 +2949,18 @@ def test_svd_hermitian(self, dtype, compute_vt, shape): a = generate_random_numpy_array( shape, dtype, hermitian=True, seed_value=81 ) - dp_a = inp.array(a) + dp_a = dpnp.array(a) if compute_vt: np_u, np_s, np_vt = numpy.linalg.svd( a, compute_uv=compute_vt, hermitian=True ) - dp_u, dp_s, dp_vt = inp.linalg.svd( + dp_u, dp_s, dp_vt = dpnp.linalg.svd( dp_a, compute_uv=compute_vt, hermitian=True ) else: np_s = numpy.linalg.svd(a, compute_uv=compute_vt, hermitian=True) - dp_s = inp.linalg.svd(dp_a, compute_uv=compute_vt, hermitian=True) + dp_s = dpnp.linalg.svd(dp_a, compute_uv=compute_vt, hermitian=True) np_u = np_vt = dp_u = dp_vt = None self.check_types_shapes( @@ -3071,15 +2974,15 @@ def test_svd_hermitian(self, dtype, compute_vt, shape): ) def test_svd_errors(self): - a_dp = inp.array([[1, 2], [3, 4]], dtype="float32") + a_dp = dpnp.array([[1, 2], [3, 4]], dtype="float32") # unsupported type - a_np = inp.asnumpy(a_dp) - assert_raises(TypeError, inp.linalg.svd, a_np) + a_np = dpnp.asnumpy(a_dp) + assert_raises(TypeError, dpnp.linalg.svd, a_np) # a.ndim < 2 a_dp_ndim_1 = a_dp.flatten() - assert_raises(inp.linalg.LinAlgError, inp.linalg.svd, a_dp_ndim_1) + assert_raises(dpnp.linalg.LinAlgError, dpnp.linalg.svd, a_dp_ndim_1) # numpy.linalg.svdvals() is available since numpy >= 2.0 @@ -3089,51 +2992,51 @@ class TestSvdvals: @pytest.mark.parametrize( "shape", [(3, 5), (4, 2), (2, 3, 3), (3, 5, 2)], - ids=["(3,5)", "(4,2)", "(2,3,3)", "(3,5,2)"], + ids=["(3, 5)", "(4, 2)", "(2, 3, 3)", "(3, 5, 2)"], ) def test_svdvals(self, dtype, shape): a = numpy.arange(numpy.prod(shape), dtype=dtype).reshape(shape) - dp_a = inp.array(a) + dp_a = dpnp.array(a) expected = numpy.linalg.svdvals(a) - result = inp.linalg.svdvals(dp_a) + result = dpnp.linalg.svdvals(dp_a) assert_dtype_allclose(result, expected) @pytest.mark.parametrize( "shape", [(0, 0), (1, 0, 0), (0, 2, 2)], - ids=["(0,0)", "(1,0,0)", "(0,2,2)"], + ids=["(0, 0)", "(1, 0, 0)", "(0, 2, 2)"], ) def test_svdvals_empty(self, shape): - a = generate_random_numpy_array(shape, inp.default_float_type()) - dp_a = inp.array(a) + a = generate_random_numpy_array(shape, dpnp.default_float_type()) + dp_a = dpnp.array(a) expected = numpy.linalg.svdvals(a) - result = inp.linalg.svdvals(dp_a) + result = dpnp.linalg.svdvals(dp_a) assert_dtype_allclose(result, expected) def test_svdvals_errors(self): - a_dp = inp.array([[1, 2], [3, 4]], dtype="float32") + a_dp = dpnp.array([[1, 2], [3, 4]], dtype="float32") # unsupported type - a_np = inp.asnumpy(a_dp) - assert_raises(TypeError, 
inp.linalg.svdvals, a_np) + a_np = dpnp.asnumpy(a_dp) + assert_raises(TypeError, dpnp.linalg.svdvals, a_np) # a.ndim < 2 a_dp_ndim_1 = a_dp.flatten() - assert_raises(inp.linalg.LinAlgError, inp.linalg.svdvals, a_dp_ndim_1) + assert_raises(dpnp.linalg.LinAlgError, dpnp.linalg.svdvals, a_dp_ndim_1) class TestPinv: def get_tol(self, dtype): tol = 1e-06 - if dtype in (inp.float32, inp.complex64): + if dtype in (dpnp.float32, dpnp.complex64): tol = 1e-04 elif not has_support_aspect64() and dtype in ( - inp.int32, - inp.int64, + dpnp.int32, + dpnp.int64, None, ): tol = 1e-04 @@ -3165,10 +3068,10 @@ def test_pinv(self, dtype, shape): # Set seed_value=81 to prevent # random generation of the input singular matrix a = generate_random_numpy_array(shape, dtype, seed_value=81) - a_dp = inp.array(a) + a_dp = dpnp.array(a) B = numpy.linalg.pinv(a) - B_dp = inp.linalg.pinv(a_dp) + B_dp = dpnp.linalg.pinv(a_dp) self.check_types_shapes(B_dp, B) self.get_tol(dtype) @@ -3176,17 +3079,15 @@ def test_pinv(self, dtype, shape): assert_allclose(B_dp, B, rtol=tol, atol=tol) if a.ndim == 2: - reconstructed = inp.dot(a_dp, inp.dot(B_dp, a_dp)) + reconstructed = dpnp.dot(a_dp, dpnp.dot(B_dp, a_dp)) else: # a.ndim > 2 - reconstructed = inp.matmul(a_dp, inp.matmul(B_dp, a_dp)) + reconstructed = dpnp.matmul(a_dp, dpnp.matmul(B_dp, a_dp)) assert_allclose(reconstructed, a_dp, rtol=tol, atol=tol) @pytest.mark.parametrize("dtype", get_float_complex_dtypes()) @pytest.mark.parametrize( - "shape", - [(2, 2), (16, 16)], - ids=["(2, 2)", "(16, 16)"], + "shape", [(2, 2), (16, 16)], ids=["(2, 2)", "(16, 16)"] ) def test_pinv_hermitian(self, dtype, shape): # Set seed_value=81 to prevent @@ -3194,26 +3095,26 @@ def test_pinv_hermitian(self, dtype, shape): a = generate_random_numpy_array( shape, dtype, hermitian=True, seed_value=81 ) - a_dp = inp.array(a) + a_dp = dpnp.array(a) B = numpy.linalg.pinv(a, hermitian=True) - B_dp = inp.linalg.pinv(a_dp, hermitian=True) + B_dp = dpnp.linalg.pinv(a_dp, hermitian=True) self.check_types_shapes(B_dp, B) self.get_tol(dtype) tol = self._tol - reconstructed = inp.dot(inp.dot(a_dp, B_dp), a_dp) + reconstructed = dpnp.dot(dpnp.dot(a_dp, B_dp), a_dp) assert_allclose(reconstructed, a_dp, rtol=tol, atol=tol) # rtol kwarg was added in numpy 2.0 @testing.with_requires("numpy>=2.0") def test_pinv_rtol(self): a = numpy.ones((2, 2)) - a_dp = inp.array(a) + a_dp = dpnp.array(a) expected = numpy.linalg.pinv(a, rtol=1e-15) - result = inp.linalg.pinv(a_dp, rtol=1e-15) + result = dpnp.linalg.pinv(a_dp, rtol=1e-15) assert_dtype_allclose(result, expected) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) @@ -3231,60 +3132,60 @@ def test_pinv_rtol(self): ) def test_pinv_empty(self, dtype, shape): a = numpy.empty(shape, dtype=dtype) - a_dp = inp.array(a) + a_dp = dpnp.array(a) B = numpy.linalg.pinv(a) - B_dp = inp.linalg.pinv(a_dp) + B_dp = dpnp.linalg.pinv(a_dp) assert_dtype_allclose(B_dp, B) def test_pinv_strides(self): a = generate_random_numpy_array((5, 5)) - a_dp = inp.array(a) + a_dp = dpnp.array(a) self.get_tol(a_dp.dtype) tol = self._tol # positive strides B = numpy.linalg.pinv(a[::2, ::2]) - B_dp = inp.linalg.pinv(a_dp[::2, ::2]) + B_dp = dpnp.linalg.pinv(a_dp[::2, ::2]) assert_allclose(B_dp, B, rtol=tol, atol=tol) # negative strides B = numpy.linalg.pinv(a[::-2, ::-2]) - B_dp = inp.linalg.pinv(a_dp[::-2, ::-2]) + B_dp = dpnp.linalg.pinv(a_dp[::-2, ::-2]) assert_allclose(B_dp, B, rtol=tol, atol=tol) def test_pinv_errors(self): - a_dp = inp.array([[1, 2], [3, 4]], dtype="float32") + a_dp = 
dpnp.array([[1, 2], [3, 4]], dtype="float32") # unsupported type `a` - a_np = inp.asnumpy(a_dp) - assert_raises(TypeError, inp.linalg.pinv, a_np) + a_np = dpnp.asnumpy(a_dp) + assert_raises(TypeError, dpnp.linalg.pinv, a_np) # unsupported type `rcond` rcond = numpy.array(0.5, dtype="float32") - assert_raises(TypeError, inp.linalg.pinv, a_dp, rcond) - assert_raises(TypeError, inp.linalg.pinv, a_dp, [0.5]) + assert_raises(TypeError, dpnp.linalg.pinv, a_dp, rcond) + assert_raises(TypeError, dpnp.linalg.pinv, a_dp, [0.5]) # non-broadcastable `rcond` - rcond_dp = inp.array([0.5], dtype="float32") - assert_raises(ValueError, inp.linalg.pinv, a_dp, rcond_dp) + rcond_dp = dpnp.array([0.5], dtype="float32") + assert_raises(ValueError, dpnp.linalg.pinv, a_dp, rcond_dp) # a.ndim < 2 a_dp_ndim_1 = a_dp.flatten() - assert_raises(inp.linalg.LinAlgError, inp.linalg.pinv, a_dp_ndim_1) + assert_raises(dpnp.linalg.LinAlgError, dpnp.linalg.pinv, a_dp_ndim_1) # diffetent queue a_queue = dpctl.SyclQueue() rcond_queue = dpctl.SyclQueue() - a_dp_q = inp.array(a_dp, sycl_queue=a_queue) - rcond_dp_q = inp.array([0.5], dtype="float32", sycl_queue=rcond_queue) - assert_raises(ValueError, inp.linalg.pinv, a_dp_q, rcond_dp_q) + a_dp_q = dpnp.array(a_dp, sycl_queue=a_queue) + rcond_dp_q = dpnp.array([0.5], dtype="float32", sycl_queue=rcond_queue) + assert_raises(ValueError, dpnp.linalg.pinv, a_dp_q, rcond_dp_q) # both rcond and rtol are given assert_raises( - ValueError, inp.linalg.pinv, a_dp, rcond=1e-06, rtol=1e-04 + ValueError, dpnp.linalg.pinv, a_dp, rcond=1e-06, rtol=1e-04 ) @@ -3292,39 +3193,33 @@ class TestTensorinv: @pytest.mark.parametrize("dtype", get_all_dtypes()) @pytest.mark.parametrize( "shape, ind", - [ - ((4, 6, 8, 3), 2), - ((24, 8, 3), 1), - ], - ids=[ - "(4, 6, 8, 3)", - "(24, 8, 3)", - ], + [((4, 6, 8, 3), 2), ((24, 8, 3), 1)], + ids=["(4, 6, 8, 3)", "(24, 8, 3)"], ) def test_tensorinv(self, dtype, shape, ind): a = numpy.eye(24, dtype=dtype).reshape(shape) - a_dp = inp.array(a) + a_dp = dpnp.array(a) ainv = numpy.linalg.tensorinv(a, ind=ind) - ainv_dp = inp.linalg.tensorinv(a_dp, ind=ind) + ainv_dp = dpnp.linalg.tensorinv(a_dp, ind=ind) assert ainv.shape == ainv_dp.shape assert_dtype_allclose(ainv_dp, ainv) def test_test_tensorinv_errors(self): - a_dp = inp.eye(24, dtype="float32").reshape(4, 6, 8, 3) + a_dp = dpnp.eye(24, dtype="float32").reshape(4, 6, 8, 3) # unsupported type `a` - a_np = inp.asnumpy(a_dp) - assert_raises(TypeError, inp.linalg.pinv, a_np) + a_np = dpnp.asnumpy(a_dp) + assert_raises(TypeError, dpnp.linalg.pinv, a_np) # unsupported type `ind` - assert_raises(TypeError, inp.linalg.tensorinv, a_dp, 2.0) - assert_raises(TypeError, inp.linalg.tensorinv, a_dp, [2.0]) - assert_raises(ValueError, inp.linalg.tensorinv, a_dp, -1) + assert_raises(TypeError, dpnp.linalg.tensorinv, a_dp, 2.0) + assert_raises(TypeError, dpnp.linalg.tensorinv, a_dp, [2.0]) + assert_raises(ValueError, dpnp.linalg.tensorinv, a_dp, -1) # non-square - assert_raises(inp.linalg.LinAlgError, inp.linalg.tensorinv, a_dp, 1) + assert_raises(dpnp.linalg.LinAlgError, dpnp.linalg.tensorinv, a_dp, 1) class TestTensorsolve: @@ -3332,40 +3227,36 @@ class TestTensorsolve: @pytest.mark.parametrize( "axes", [None, (1,), (2,)], - ids=[ - "None", - "(1,)", - "(2,)", - ], + ids=["None", "(1,)", "(2,)"], ) def test_tensorsolve_axes(self, dtype, axes): a = numpy.eye(12).reshape(12, 3, 4).astype(dtype) b = numpy.ones(a.shape[0], dtype=dtype) - a_dp = inp.array(a) - b_dp = inp.array(b) + a_dp = dpnp.array(a) + b_dp = dpnp.array(b) res_np 
= numpy.linalg.tensorsolve(a, b, axes=axes) - res_dp = inp.linalg.tensorsolve(a_dp, b_dp, axes=axes) + res_dp = dpnp.linalg.tensorsolve(a_dp, b_dp, axes=axes) assert res_np.shape == res_dp.shape assert_dtype_allclose(res_dp, res_np) def test_tensorsolve_errors(self): - a_dp = inp.eye(24, dtype="float32").reshape(4, 6, 8, 3) - b_dp = inp.ones(a_dp.shape[:2], dtype="float32") + a_dp = dpnp.eye(24, dtype="float32").reshape(4, 6, 8, 3) + b_dp = dpnp.ones(a_dp.shape[:2], dtype="float32") # unsupported type `a` and `b` - a_np = inp.asnumpy(a_dp) - b_np = inp.asnumpy(b_dp) - assert_raises(TypeError, inp.linalg.tensorsolve, a_np, b_dp) - assert_raises(TypeError, inp.linalg.tensorsolve, a_dp, b_np) + a_np = dpnp.asnumpy(a_dp) + b_np = dpnp.asnumpy(b_dp) + assert_raises(TypeError, dpnp.linalg.tensorsolve, a_np, b_dp) + assert_raises(TypeError, dpnp.linalg.tensorsolve, a_dp, b_np) # unsupported type `axes` - assert_raises(TypeError, inp.linalg.tensorsolve, a_dp, 2.0) - assert_raises(TypeError, inp.linalg.tensorsolve, a_dp, -2) + assert_raises(TypeError, dpnp.linalg.tensorsolve, a_dp, 2.0) + assert_raises(TypeError, dpnp.linalg.tensorsolve, a_dp, -2) # incorrect axes assert_raises( - inp.linalg.LinAlgError, inp.linalg.tensorsolve, a_dp, b_dp, (1,) + dpnp.linalg.LinAlgError, dpnp.linalg.tensorsolve, a_dp, b_dp, (1,) ) diff --git a/dpnp/tests/test_mathematical.py b/dpnp/tests/test_mathematical.py index baa9c60310c..7f250afaba8 100644 --- a/dpnp/tests/test_mathematical.py +++ b/dpnp/tests/test_mathematical.py @@ -3599,10 +3599,10 @@ def setup_method(self): numpy.random.seed(42) @pytest.mark.parametrize( - "order_pair", [("C", "C"), ("C", "F"), ("F", "C"), ("F", "F")] + "order1, order2", [("C", "C"), ("C", "F"), ("F", "C"), ("F", "F")] ) @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2", [ ((4,), (4,)), ((1, 4), (4, 1)), @@ -3651,9 +3651,7 @@ def setup_method(self): ((1, 3, 3, 1), (4, 1, 1, 2)), ], ) - def test_matmul(self, order_pair, shape_pair): - order1, order2 = order_pair - shape1, shape2 = shape_pair + def test_matmul(self, order1, order2, shape1, shape2): # input should be float type otherwise they are copied to c-contigous array # so testing order becomes meaningless dtype = dpnp.default_float_type() @@ -3669,10 +3667,10 @@ def test_matmul(self, order_pair, shape_pair): assert_dtype_allclose(result, expected) @pytest.mark.parametrize( - "order_pair", [("C", "C"), ("C", "F"), ("F", "C"), ("F", "F")] + "order1, order2", [("C", "C"), ("C", "F"), ("F", "C"), ("F", "F")] ) @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2", [ ((2, 0), (0, 3)), ((0, 4), (4, 3)), @@ -3695,15 +3693,12 @@ def test_matmul(self, order_pair, shape_pair): ((7, 4, 3), (0, 7, 3, 5)), ], ) - def test_matmul_empty(self, order_pair, shape_pair): - order1, order2 = order_pair - shape1, shape2 = shape_pair + def test_matmul_empty(self, order1, order2, shape1, shape2): dtype = dpnp.default_float_type() a1 = numpy.arange(numpy.prod(shape1), dtype=dtype).reshape(shape1) a2 = numpy.arange(numpy.prod(shape2), dtype=dtype).reshape(shape2) a1 = numpy.array(a1, order=order1) a2 = numpy.array(a2, order=order2) - b1 = dpnp.asarray(a1) b2 = dpnp.asarray(a2) @@ -3712,7 +3707,7 @@ def test_matmul_empty(self, order_pair, shape_pair): assert_dtype_allclose(result, expected) @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2", [ ((2, 4), (4, 3)), ((4, 2, 3), (4, 3, 5)), @@ -3724,15 +3719,10 @@ def test_matmul_empty(self, order_pair, shape_pair): "((6, 7, 4, 3), (6, 7, 3, 5))", ], ) - def test_matmul_bool(self, 
shape_pair): - shape1, shape2 = shape_pair - a1 = numpy.resize( - numpy.arange(2, dtype=numpy.bool_), numpy.prod(shape1) - ).reshape(shape1) - a2 = numpy.resize( - numpy.arange(2, dtype=numpy.bool_), numpy.prod(shape2) - ).reshape(shape2) - + def test_matmul_bool(self, shape1, shape2): + x = numpy.arange(2, dtype=numpy.bool_) + a1 = numpy.resize(x, numpy.prod(shape1)).reshape(shape1) + a2 = numpy.resize(x, numpy.prod(shape2)).reshape(shape2) b1 = dpnp.asarray(a1) b2 = dpnp.asarray(a2) @@ -3742,7 +3732,7 @@ def test_matmul_bool(self, shape_pair): @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2", [ ((2, 4), (4, 3)), ((4, 2, 3), (4, 3, 5)), @@ -3754,11 +3744,9 @@ def test_matmul_bool(self, shape_pair): "((6, 7, 4, 3), (6, 7, 3, 5))", ], ) - def test_matmul_dtype(self, dtype, shape_pair): - shape1, shape2 = shape_pair + def test_matmul_dtype(self, dtype, shape1, shape2): a1 = numpy.arange(numpy.prod(shape1)).reshape(shape1) a2 = numpy.arange(numpy.prod(shape2)).reshape(shape2) - b1 = dpnp.asarray(a1) b2 = dpnp.asarray(a2) @@ -3892,7 +3880,7 @@ def test_matmul_axes_out_1D(self, axes, b_shape, out_shape): "dtype2", get_all_dtypes(no_bool=True, no_none=True) ) @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2", [ ((2, 4), (4, 3)), ((4, 2, 3), (4, 3, 5)), @@ -3904,11 +3892,9 @@ def test_matmul_axes_out_1D(self, axes, b_shape, out_shape): "((6, 7, 4, 3), (6, 7, 3, 5))", ], ) - def test_matmul_dtype_matrix_inout(self, dtype1, dtype2, shape_pair): - shape1, shape2 = shape_pair + def test_matmul_dtype_matrix_inout(self, dtype1, dtype2, shape1, shape2): a1 = numpy.arange(numpy.prod(shape1), dtype=dtype1).reshape(shape1) a2 = numpy.arange(numpy.prod(shape2), dtype=dtype1).reshape(shape2) - b1 = dpnp.asarray(a1) b2 = dpnp.asarray(a2) @@ -3923,7 +3909,7 @@ def test_matmul_dtype_matrix_inout(self, dtype1, dtype2, shape_pair): @pytest.mark.parametrize("dtype1", get_all_dtypes(no_bool=True)) @pytest.mark.parametrize("dtype2", get_all_dtypes(no_bool=True)) @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2", [ ((2, 4), (4, 3)), ((4, 2, 3), (4, 3, 5)), @@ -3935,11 +3921,9 @@ def test_matmul_dtype_matrix_inout(self, dtype1, dtype2, shape_pair): "((6, 7, 4, 3), (6, 7, 3, 5))", ], ) - def test_matmul_dtype_matrix_inputs(self, dtype1, dtype2, shape_pair): - shape1, shape2 = shape_pair + def test_matmul_dtype_matrix_inputs(self, dtype1, dtype2, shape1, shape2): a1 = numpy.arange(numpy.prod(shape1), dtype=dtype1).reshape(shape1) a2 = numpy.arange(numpy.prod(shape2), dtype=dtype2).reshape(shape2) - b1 = dpnp.asarray(a1) b2 = dpnp.asarray(a2) @@ -3951,7 +3935,7 @@ def test_matmul_dtype_matrix_inputs(self, dtype1, dtype2, shape_pair): @pytest.mark.parametrize("order2", ["C", "F", "A"]) @pytest.mark.parametrize("order", ["C", "F", "K", "A"]) @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2", [ ((2, 4), (4, 3)), ((4, 2, 3), (4, 3, 5)), @@ -3963,21 +3947,20 @@ def test_matmul_dtype_matrix_inputs(self, dtype1, dtype2, shape_pair): "((6, 7, 4, 3), (6, 7, 3, 5))", ], ) - def test_matmul_order(self, order1, order2, order, shape_pair): - shape1, shape2 = shape_pair + def test_matmul_order(self, order1, order2, order, shape1, shape2): a1 = numpy.arange(numpy.prod(shape1)).reshape(shape1, order=order1) a2 = numpy.arange(numpy.prod(shape2)).reshape(shape2, order=order2) - b1 = dpnp.asarray(a1) b2 = dpnp.asarray(a2) result = dpnp.matmul(b1, b2, order=order) expected = numpy.matmul(a1, a2, order=order) - # For the special case 
of shape_pair == ((6, 7, 4, 3), (6, 7, 3, 5)) - # and order1 == "F" and order2 == "F", NumPy result is not c-contiguous + # For the special case of shape1 = (6, 7, 4, 3), shape2 = (6, 7, 3, 5) + # and order1 = "F" and order2 = "F", NumPy result is not c-contiguous # nor f-contiguous, while dpnp (and cupy) results are c-contiguous if not ( - shape_pair == ((6, 7, 4, 3), (6, 7, 3, 5)) + shape1 == (6, 7, 4, 3) + and shape2 == (6, 7, 3, 5) and order1 == "F" and order2 == "F" and order == "K" @@ -4253,15 +4236,14 @@ def test_matmul_out_0D(self, out_shape): @testing.slow @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2", [ ((5000, 5000, 2, 2), (5000, 5000, 2, 2)), ((2, 2), (5000, 5000, 2, 2)), ((5000, 5000, 2, 2), (2, 2)), ], ) - def test_matmul_large(self, shape_pair): - shape1, shape2 = shape_pair + def test_matmul_large(self, shape1, shape2): size1 = numpy.prod(shape1, dtype=int) size2 = numpy.prod(shape2, dtype=int) a = numpy.array(numpy.random.uniform(-5, 5, size1)).reshape(shape1) diff --git a/dpnp/tests/test_product.py b/dpnp/tests/test_product.py index 27437a0a29b..9778240c91c 100644 --- a/dpnp/tests/test_product.py +++ b/dpnp/tests/test_product.py @@ -26,10 +26,10 @@ class TestCross: def setup_method(self): numpy.random.seed(42) - @pytest.mark.parametrize("axis", [None, 0], ids=["None", "0"]) - @pytest.mark.parametrize("axisc", [-1, 0], ids=["-1", "0"]) - @pytest.mark.parametrize("axisb", [-1, 0], ids=["-1", "0"]) - @pytest.mark.parametrize("axisa", [-1, 0], ids=["-1", "0"]) + @pytest.mark.parametrize("axis", [None, 0]) + @pytest.mark.parametrize("axisc", [-1, 0]) + @pytest.mark.parametrize("axisb", [-1, 0]) + @pytest.mark.parametrize("axisa", [-1, 0]) @pytest.mark.parametrize( "x1", [[1, 2, 3], [1.0, 2.5, 6.0], [2, 4, 6]], @@ -40,7 +40,7 @@ def setup_method(self): [[4, 5, 6], [1.0, 5.0, 2.0], [6, 4, 3]], ids=["[4, 5, 6]", "[1., 5., 2.]", "[6, 4, 3]"], ) - def test_cross_3x3(self, x1, x2, axisa, axisb, axisc, axis): + def test_3x3(self, x1, x2, axisa, axisb, axisc, axis): np_x1 = numpy.array(x1) dpnp_x1 = dpnp.array(x1) @@ -65,7 +65,7 @@ def test_cross_3x3(self, x1, x2, axisa, axisb, axisc, axis): ((2, 3, 4, 5), (2, 4, 3, 5), -3, -2, 0), ], ) - def test_cross(self, dtype, shape1, shape2, axis_a, axis_b, axis_c): + def test_basic(self, dtype, shape1, shape2, axis_a, axis_b, axis_c): a = numpy.array( numpy.random.uniform(-5, 5, numpy.prod(shape1)), dtype=dtype ).reshape(shape1) @@ -91,7 +91,7 @@ def test_cross(self, dtype, shape1, shape2, axis_a, axis_b, axis_c): ((2, 3, 4, 5), (2, 4, 3, 5), -3, -2, 0), ], ) - def test_cross_complex(self, dtype, shape1, shape2, axis_a, axis_b, axis_c): + def test_complex(self, dtype, shape1, shape2, axis_a, axis_b, axis_c): x11 = numpy.random.uniform(-5, 5, numpy.prod(shape1)) x12 = numpy.random.uniform(-5, 5, numpy.prod(shape1)) x21 = numpy.random.uniform(-5, 5, numpy.prod(shape2)) @@ -114,7 +114,7 @@ def test_cross_complex(self, dtype, shape1, shape2, axis_a, axis_b, axis_c): ((2, 3, 4, 5), (2, 3, 4, 5), 1), ], ) - def test_cross_axis(self, dtype, shape1, shape2, axis): + def test_axis(self, dtype, shape1, shape2, axis): a = numpy.array( numpy.random.uniform(-5, 5, numpy.prod(shape1)), dtype=dtype ).reshape(shape1) @@ -130,7 +130,7 @@ def test_cross_axis(self, dtype, shape1, shape2, axis): @pytest.mark.parametrize("dtype1", get_all_dtypes()) @pytest.mark.parametrize("dtype2", get_all_dtypes()) - def test_cross_input_dtype_matrix(self, dtype1, dtype2): + def test_input_dtype_matrix(self, dtype1, dtype2): if dtype1 == dpnp.bool and dtype2 == 
dpnp.bool: pytest.skip("boolean input arrays is not supported.") a = numpy.array(numpy.random.uniform(-5, 5, 3), dtype=dtype1) @@ -156,9 +156,7 @@ def test_cross_input_dtype_matrix(self, dtype1, dtype2): ((2, 3, 4, 5), (1, 1, 3, 1), -3, -2, 0), ], ) - def test_cross_broadcast( - self, dtype, shape1, shape2, axis_a, axis_b, axis_c - ): + def test_broadcast(self, dtype, shape1, shape2, axis_a, axis_b, axis_c): a = numpy.array( numpy.random.uniform(-5, 5, numpy.prod(shape1)), dtype=dtype ).reshape(shape1) @@ -174,7 +172,7 @@ def test_cross_broadcast( @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) @pytest.mark.parametrize("stride", [3, -3]) - def test_cross_strided(self, dtype, stride): + def test_strided(self, dtype, stride): a = numpy.arange(1, 10, dtype=dtype) b = numpy.arange(1, 10, dtype=dtype) ia = dpnp.array(a) @@ -186,7 +184,7 @@ def test_cross_strided(self, dtype, stride): @testing.with_requires("numpy>=2.0") @pytest.mark.parametrize("axis", [0, 1, -1]) - def test_linalg_cross(self, axis): + def test_linalg(self, axis): a = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) b = numpy.array([[7, 8, 9], [4, 5, 6], [1, 2, 3]]) ia = dpnp.array(a) @@ -196,7 +194,7 @@ def test_linalg_cross(self, axis): expected = numpy.linalg.cross(a, b, axis=axis) assert_dtype_allclose(result, expected) - def test_cross_error(self): + def test_error(self): a = dpnp.arange(3) b = dpnp.arange(4) # Incompatible vector dimensions @@ -215,7 +213,7 @@ def test_cross_error(self): dpnp.cross(a, a) @testing.with_requires("numpy>=2.0") - def test_linalg_cross_error(self): + def test_linalg_error(self): a = dpnp.arange(4) b = dpnp.arange(4) # Both input arrays must be (arrays of) 3-dimensional vectors @@ -228,7 +226,7 @@ def setup_method(self): numpy.random.seed(42) @pytest.mark.parametrize("dtype", get_all_dtypes()) - def test_dot_ones(self, dtype): + def test_ones(self, dtype): n = 10**5 a = numpy.ones(n, dtype=dtype) b = numpy.ones(n, dtype=dtype) @@ -240,7 +238,7 @@ def test_dot_ones(self, dtype): assert_dtype_allclose(result, expected) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) - def test_dot_arange(self, dtype): + def test_arange(self, dtype): n = 10**2 m = 10**3 if dtype is not dpnp.float32 else 10**2 a = numpy.hstack((numpy.arange(n, dtype=dtype),) * m) @@ -253,7 +251,7 @@ def test_dot_arange(self, dtype): assert_dtype_allclose(result, expected) @pytest.mark.parametrize("dtype", get_all_dtypes()) - def test_dot_scalar(self, dtype): + def test_scalar(self, dtype): a = 2 b = numpy.array(numpy.random.uniform(-5, 5, 10), dtype=dtype) ib = dpnp.array(b) @@ -268,7 +266,7 @@ def test_dot_scalar(self, dtype): @pytest.mark.parametrize("dtype", get_all_dtypes(no_complex=True)) @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2", [ ((), (10,)), ((10,), ()), @@ -294,8 +292,7 @@ def test_dot_scalar(self, dtype): "3d_3d", ], ) - def test_dot(self, dtype, shape_pair): - shape1, shape2 = shape_pair + def test_basic(self, dtype, shape1, shape2): size1 = numpy.prod(shape1, dtype=int) size2 = numpy.prod(shape2, dtype=int) a = numpy.array( @@ -313,7 +310,7 @@ def test_dot(self, dtype, shape_pair): @pytest.mark.parametrize("dtype", get_complex_dtypes()) @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2", [ ((), (10,)), ((10,), ()), @@ -337,8 +334,7 @@ def test_dot(self, dtype, shape_pair): "3d_3d", ], ) - def test_dot_complex(self, dtype, shape_pair): - shape1, shape2 = shape_pair + def test_complex(self, dtype, shape1, shape2): size1 = numpy.prod(shape1, dtype=int) size2 = 
numpy.prod(shape2, dtype=int) x11 = numpy.random.uniform(-5, 5, size1) @@ -356,7 +352,7 @@ def test_dot_complex(self, dtype, shape_pair): @pytest.mark.parametrize("dtype", get_all_dtypes()) @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2", [ ((), (10,)), ((10,), ()), @@ -380,8 +376,7 @@ def test_dot_complex(self, dtype, shape_pair): "3d_3d", ], ) - def test_dot_ndarray(self, dtype, shape_pair): - shape1, shape2 = shape_pair + def test_ndarray(self, dtype, shape1, shape2): size1 = numpy.prod(shape1, dtype=int) size2 = numpy.prod(shape2, dtype=int) a = numpy.array( @@ -398,10 +393,8 @@ def test_dot_ndarray(self, dtype, shape_pair): assert_dtype_allclose(result, expected) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) - @pytest.mark.parametrize( - "stride", [3, -1, -2, -5], ids=["3", "-1", "-2", "-5"] - ) - def test_dot_strided(self, dtype, stride): + @pytest.mark.parametrize("stride", [3, -1, -2, -5]) + def test_strided(self, dtype, stride): a = numpy.arange(25, dtype=dtype) b = numpy.arange(25, dtype=dtype) ia = dpnp.array(a) @@ -412,15 +405,13 @@ def test_dot_strided(self, dtype, stride): assert_dtype_allclose(result, expected) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) - def test_dot_out_scalar(self, dtype): - size = 10 + def test_out_scalar(self, dtype): a = 2 - b = numpy.array(numpy.random.uniform(-5, 5, size), dtype=dtype) - ia = 2 + b = numpy.array(numpy.random.uniform(-5, 5, 10), dtype=dtype) ib = dpnp.array(b) - dp_out = dpnp.empty((size,), dtype=dtype) - result = dpnp.dot(ia, ib, out=dp_out) + dp_out = dpnp.empty((10,), dtype=dtype) + result = dpnp.dot(a, ib, out=dp_out) expected = numpy.dot(a, b) assert result is dp_out @@ -428,7 +419,7 @@ def test_dot_out_scalar(self, dtype): @pytest.mark.parametrize("dtype", get_all_dtypes()) @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2, out_shape", [ ((), (10,), (10,)), ((10,), (), (10,)), @@ -454,8 +445,7 @@ def test_dot_out_scalar(self, dtype): "3d_3d", ], ) - def test_dot_out(self, dtype, shape_pair): - shape1, shape2, out_shape = shape_pair + def test_out(self, dtype, shape1, shape2, out_shape): size1 = numpy.prod(shape1, dtype=int) size2 = numpy.prod(shape2, dtype=int) a = numpy.array( @@ -476,7 +466,7 @@ def test_dot_out(self, dtype, shape_pair): @pytest.mark.parametrize("dtype1", get_all_dtypes()) @pytest.mark.parametrize("dtype2", get_all_dtypes()) - def test_dot_input_dtype_matrix(self, dtype1, dtype2): + def test_input_dtype_matrix(self, dtype1, dtype2): a = numpy.array(numpy.random.uniform(-5, 5, 10), dtype=dtype1) b = numpy.array(numpy.random.uniform(-5, 5, 10), dtype=dtype2) ia = dpnp.array(a) @@ -486,14 +476,14 @@ def test_dot_input_dtype_matrix(self, dtype1, dtype2): expected = numpy.dot(a, b) assert_dtype_allclose(result, expected) - def test_dot_1d_error(self): + def test_1d_error(self): a = dpnp.ones(25) b = dpnp.ones(24) # size of input arrays differ with pytest.raises(ValueError): dpnp.dot(a, b) - def test_dot_sycl_queue_error(self): + def test_sycl_queue_error(self): a = dpnp.ones((5,), sycl_queue=dpctl.SyclQueue()) b = dpnp.ones((5,), sycl_queue=dpctl.SyclQueue()) with pytest.raises(ValueError): @@ -506,7 +496,7 @@ def test_dot_sycl_queue_error(self): dpnp.dot(a, b, out=out) @pytest.mark.parametrize("ia", [1, dpnp.ones((), dtype=dpnp.float32)]) - def test_dot_out_error_scalar(self, ia): + def test_out_error_scalar(self, ia): a = ia if dpnp.isscalar(ia) else ia.asnumpy() ib = dpnp.ones(10, dtype=dpnp.float32) b = ib.asnumpy() @@ -524,7 +514,7 @@ def 
test_dot_out_error_scalar(self, ia): assert_raises(ValueError, numpy.dot, a, b, out=out) @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2, out_shape", [ ((10,), (10,), ()), ((3, 4), (4, 2), (3, 2)), @@ -535,16 +525,15 @@ def test_dot_out_error_scalar(self, ia): ], ids=["1d_1d", "2d_2d", "2d_1d", "3d_1d", "1d_3d", "3d_3d"], ) - def test_dot_out_error(self, shape_pair): - shape1, shape2, shape_out = shape_pair + def test_out_error(self, shape1, shape2, out_shape): a = numpy.ones(shape1, dtype=numpy.int32) b = numpy.ones(shape2, dtype=numpy.int32) ia = dpnp.array(a) ib = dpnp.array(b) # output data type is incorrect - np_out = numpy.empty(shape_out, dtype=numpy.int64) - dp_out = dpnp.empty(shape_out, dtype=dpnp.int64) + np_out = numpy.empty(out_shape, dtype=numpy.int64) + dp_out = dpnp.empty(out_shape, dtype=dpnp.int64) with pytest.raises(TypeError): dpnp.dot(ia, ib, out=dp_out) with pytest.raises(ValueError): @@ -559,10 +548,10 @@ def test_dot_out_error(self, shape_pair): numpy.dot(a, b, out=np_out) # "F" or "C" is irrelevant for 0d or 1d arrays - if not (len(shape_out) in [0, 1]): + if not (len(out_shape) in [0, 1]): # output should be C-contiguous - np_out = numpy.empty(shape_out, dtype=numpy.int32, order="F") - dp_out = dpnp.empty(shape_out, dtype=dpnp.int32, order="F") + np_out = numpy.empty(out_shape, dtype=numpy.int32, order="F") + dp_out = dpnp.empty(out_shape, dtype=dpnp.int32, order="F") with pytest.raises(ValueError): dpnp.dot(ia, ib, out=dp_out) with pytest.raises(ValueError): @@ -574,7 +563,7 @@ def setup_method(self): numpy.random.seed(42) @pytest.mark.parametrize("dtype", get_all_dtypes()) - def test_inner_scalar(self, dtype): + def test_scalar(self, dtype): a = 2 b = numpy.array(numpy.random.uniform(-5, 5, 10), dtype=dtype) ib = dpnp.array(b) @@ -598,7 +587,7 @@ def test_inner_scalar(self, dtype): ((5,), ()), ], ) - def test_inner(self, dtype, shape1, shape2): + def test_basic(self, dtype, shape1, shape2): size1 = numpy.prod(shape1, dtype=int) size2 = numpy.prod(shape2, dtype=int) a = numpy.array( @@ -625,7 +614,7 @@ def test_inner(self, dtype, shape1, shape2): ((5,), ()), ], ) - def test_inner_complex(self, dtype, shape1, shape2): + def test_complex(self, dtype, shape1, shape2): size1 = numpy.prod(shape1, dtype=int) size2 = numpy.prod(shape2, dtype=int) x11 = numpy.random.uniform(-5, 5, size1) @@ -643,7 +632,7 @@ def test_inner_complex(self, dtype, shape1, shape2): @pytest.mark.parametrize("dtype1", get_all_dtypes()) @pytest.mark.parametrize("dtype2", get_all_dtypes()) - def test_inner_input_dtype_matrix(self, dtype1, dtype2): + def test_input_dtype_matrix(self, dtype1, dtype2): a = numpy.array(numpy.random.uniform(-5, 5, 10), dtype=dtype1) b = numpy.array(numpy.random.uniform(-5, 5, 10), dtype=dtype2) ia = dpnp.array(a) @@ -654,10 +643,8 @@ def test_inner_input_dtype_matrix(self, dtype1, dtype2): assert_dtype_allclose(result, expected) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) - @pytest.mark.parametrize( - "stride", [3, -1, -2, -4], ids=["3", "-1", "-2", "-4"] - ) - def test_inner_strided(self, dtype, stride): + @pytest.mark.parametrize("stride", [3, -1, -2, -4]) + def test_strided(self, dtype, stride): a = numpy.arange(20, dtype=dtype) b = numpy.arange(20, dtype=dtype) ia = dpnp.array(a) @@ -667,7 +654,7 @@ def test_inner_strided(self, dtype, stride): expected = numpy.inner(a[::stride], b[::stride]) assert_dtype_allclose(result, expected) - def test_inner_error(self): + def test_error(self): a = dpnp.arange(24) b = dpnp.arange(23) # shape of 
input arrays is not similar at the last axis @@ -677,7 +664,7 @@ def test_inner_error(self): class TestKron: @pytest.mark.parametrize("dtype", get_all_dtypes()) - def test_kron_scalar(self, dtype): + def test_scalar(self, dtype): a = 2 b = numpy.array(numpy.random.uniform(-5, 5, 10), dtype=dtype) ib = dpnp.array(b) @@ -704,7 +691,7 @@ def test_kron_scalar(self, dtype): ((5,), ()), ], ) - def test_kron(self, dtype, shape1, shape2): + def test_basic(self, dtype, shape1, shape2): size1 = numpy.prod(shape1, dtype=int) size2 = numpy.prod(shape2, dtype=int) a = numpy.array( @@ -734,7 +721,7 @@ def test_kron(self, dtype, shape1, shape2): ((5,), ()), ], ) - def test_kron(self, dtype, shape1, shape2): + def test_complex(self, dtype, shape1, shape2): size1 = numpy.prod(shape1, dtype=int) size2 = numpy.prod(shape2, dtype=int) x11 = numpy.random.uniform(-5, 5, size1) @@ -752,7 +739,7 @@ def test_kron(self, dtype, shape1, shape2): @pytest.mark.parametrize("dtype1", get_all_dtypes()) @pytest.mark.parametrize("dtype2", get_all_dtypes()) - def test_kron_input_dtype_matrix(self, dtype1, dtype2): + def test_input_dtype_matrix(self, dtype1, dtype2): a = numpy.array(numpy.random.uniform(-5, 5, 10), dtype=dtype1) b = numpy.array(numpy.random.uniform(-5, 5, 10), dtype=dtype2) ia = dpnp.array(a) @@ -763,10 +750,8 @@ def test_kron_input_dtype_matrix(self, dtype1, dtype2): assert_dtype_allclose(result, expected) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) - @pytest.mark.parametrize( - "stride", [3, -1, -2, -4], ids=["3", "-1", "-2", "-4"] - ) - def test_kron_strided1(self, dtype, stride): + @pytest.mark.parametrize("stride", [3, -1, -2, -4]) + def test_strided1(self, dtype, stride): a = numpy.arange(20, dtype=dtype) b = numpy.arange(20, dtype=dtype) ia = dpnp.array(a) @@ -776,8 +761,8 @@ def test_kron_strided1(self, dtype, stride): expected = numpy.kron(a[::stride], b[::stride]) assert_dtype_allclose(result, expected) - @pytest.mark.parametrize("stride", [2, -1, -2], ids=["2", "-1", "-2"]) - def test_kron_strided2(self, stride): + @pytest.mark.parametrize("stride", [2, -1, -2]) + def test_strided2(self, stride): a = numpy.arange(48).reshape(6, 8) b = numpy.arange(480).reshape(6, 8, 10) ia = dpnp.array(a) @@ -792,7 +777,7 @@ def test_kron_strided2(self, stride): assert_dtype_allclose(result, expected) @pytest.mark.parametrize("order", ["C", "F", "A"]) - def test_kron_order(self, order): + def test_order(self, order): a = numpy.arange(48).reshape(6, 8, order=order) b = numpy.arange(480).reshape(6, 8, 10, order=order) ia = dpnp.array(a) @@ -800,8 +785,8 @@ def test_kron_order(self, order): result = dpnp.kron(ia, ib) expected = numpy.kron(a, b) - assert result.flags["C_CONTIGUOUS"] == expected.flags["C_CONTIGUOUS"] - assert result.flags["F_CONTIGUOUS"] == expected.flags["F_CONTIGUOUS"] + assert result.flags.c_contiguous == expected.flags.c_contiguous + assert result.flags.f_contiguous == expected.flags.f_contiguous assert_dtype_allclose(result, expected) @@ -809,23 +794,6 @@ class TestMultiDot: def setup_method(self): numpy.random.seed(70) - @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) - def test_multi_dot_all_2d(self, dtype): - n = 16 - a = dpnp.reshape(dpnp.arange(n, dtype=dtype), (4, 4)) - b = dpnp.reshape(dpnp.arange(n, dtype=dtype), (4, 4)) - c = dpnp.reshape(dpnp.arange(n, dtype=dtype), (4, 4)) - d = dpnp.reshape(dpnp.arange(n, dtype=dtype), (4, 4)) - - a1 = numpy.arange(n, dtype=dtype).reshape((4, 4)) - b1 = numpy.arange(n, dtype=dtype).reshape((4, 4)) - c1 = 
numpy.arange(n, dtype=dtype).reshape((4, 4)) - d1 = numpy.arange(n, dtype=dtype).reshape((4, 4)) - - result = dpnp.linalg.multi_dot([a, b, c, d]) - expected = numpy.linalg.multi_dot([a1, b1, c1, d1]) - assert_dtype_allclose(result, expected) - @pytest.mark.parametrize("dtype", get_all_dtypes(no_complex=True)) @pytest.mark.parametrize( "shapes", @@ -856,7 +824,7 @@ def test_multi_dot_all_2d(self, dtype): "five_arrays", ], ) - def test_multi_dot(self, shapes, dtype): + def test_basic(self, shapes, dtype): numpy_array_list = [] dpnp_array_list = [] for shape in shapes: @@ -902,7 +870,7 @@ def test_multi_dot(self, shapes, dtype): "five_arrays", ], ) - def test_multi_dot_complex(self, shapes, dtype): + def test_complex(self, shapes, dtype): numpy_array_list = [] dpnp_array_list = [] for shape in shapes: @@ -948,7 +916,7 @@ def test_multi_dot_complex(self, shapes, dtype): "five_arrays", ], ) - def test_multi_dot_out(self, shapes, dtype): + def test_out(self, shapes, dtype): numpy_array_list = [] dpnp_array_list = [] for shape in shapes[:-1]: @@ -971,7 +939,7 @@ def test_multi_dot_out(self, shapes, dtype): [(-2, -2), (2, 2), (-2, 2), (2, -2)], ids=["(-2, -2)", "(2, 2)", "(-2, 2)", "(2, -2)"], ) - def test_multi_dot_strided(self, stride): + def test_strided(self, stride): numpy_array_list = [] dpnp_array_list = [] for num_array in [2, 3, 4, 5]: # number of arrays in multi_dot @@ -993,7 +961,7 @@ def test_multi_dot_strided(self, stride): expected = numpy.linalg.multi_dot(numpy_array_list) assert_dtype_allclose(result, expected) - def test_multi_dot_error(self): + def test_error(self): a = dpnp.ones(25) # Expecting at least two arrays with pytest.raises(ValueError): @@ -1034,7 +1002,7 @@ def setup_method(self): numpy.random.seed(87) @pytest.mark.parametrize("dtype", get_all_dtypes()) - def test_tensordot_scalar(self, dtype): + def test_scalar(self, dtype): a = 2 b = numpy.array(numpy.random.uniform(-5, 5, 10), dtype=dtype) ib = dpnp.array(b) @@ -1049,7 +1017,7 @@ def test_tensordot_scalar(self, dtype): @pytest.mark.parametrize("dtype", get_all_dtypes(no_complex=True)) @pytest.mark.parametrize("axes", [0, 1, 2]) - def test_tensordot(self, dtype, axes): + def test_basic(self, dtype, axes): a = numpy.array(numpy.random.uniform(-10, 10, 64), dtype=dtype).reshape( 4, 4, 4 ) @@ -1065,7 +1033,7 @@ def test_tensordot(self, dtype, axes): @pytest.mark.parametrize("dtype", get_complex_dtypes()) @pytest.mark.parametrize("axes", [0, 1, 2]) - def test_tensordot_complex(self, dtype, axes): + def test_complex(self, dtype, axes): x11 = numpy.random.uniform(-10, 10, 64) x12 = numpy.random.uniform(-10, 10, 64) x21 = numpy.random.uniform(-10, 10, 64) @@ -1090,7 +1058,7 @@ def test_tensordot_complex(self, dtype, axes): ((3, 1), (0, 2)), ], ) - def test_tensordot_axes(self, dtype, axes): + def test_axes(self, dtype, axes): a = numpy.array( numpy.random.uniform(-10, 10, 120), dtype=dtype ).reshape(2, 5, 3, 4) @@ -1106,7 +1074,7 @@ def test_tensordot_axes(self, dtype, axes): @pytest.mark.parametrize("dtype1", get_all_dtypes()) @pytest.mark.parametrize("dtype2", get_all_dtypes()) - def test_tensordot_input_dtype_matrix(self, dtype1, dtype2): + def test_input_dtype_matrix(self, dtype1, dtype2): a = numpy.array( numpy.random.uniform(-10, 10, 60), dtype=dtype1 ).reshape(3, 4, 5) @@ -1125,7 +1093,7 @@ def test_tensordot_input_dtype_matrix(self, dtype1, dtype2): [(-2, -2, -2, -2), (2, 2, 2, 2), (-2, 2, -2, 2), (2, -2, 2, -2)], ids=["-2", "2", "(-2, 2)", "(2, -2)"], ) - def test_tensordot_strided(self, stride): + def 
test_strided(self, stride): for dim in [1, 2, 3, 4]: axes = 1 if dim == 1 else 2 A = numpy.random.rand(*([20] * dim)) @@ -1143,7 +1111,7 @@ def test_tensordot_strided(self, stride): "axes", [([0, 1]), ([0, 1], [1, 2]), ([-2, -3], [3, 2])], ) - def test_linalg_tensordot(self, axes): + def test_linalg(self, axes): a = numpy.array(numpy.random.uniform(-10, 10, 120)).reshape(2, 5, 3, 4) b = numpy.array(numpy.random.uniform(-10, 10, 120)).reshape(4, 2, 5, 3) ia = dpnp.array(a) @@ -1153,7 +1121,7 @@ def test_linalg_tensordot(self, axes): expected = numpy.linalg.tensordot(a, b, axes=axes) assert_dtype_allclose(result, expected) - def test_tensordot_error(self): + def test_error(self): a = 5 b = 2 # both inputs are scalar @@ -1196,7 +1164,7 @@ def setup_method(self): numpy.random.seed(42) @pytest.mark.parametrize("dtype", get_all_dtypes()) - def test_vdot_scalar(self, dtype): + def test_scalar(self, dtype): a = numpy.array([3.5], dtype=dtype) ia = dpnp.array(a) b = 2 + 3j @@ -1211,7 +1179,7 @@ def test_vdot_scalar(self, dtype): @pytest.mark.parametrize("dtype", get_all_dtypes(no_complex=True)) @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2", [ ((), ()), ((10,), (10,)), @@ -1231,8 +1199,7 @@ def test_vdot_scalar(self, dtype): "3d_3d", ], ) - def test_vdot(self, dtype, shape_pair): - shape1, shape2 = shape_pair + def test_basic(self, dtype, shape1, shape2): size1 = numpy.prod(shape1, dtype=int) size2 = numpy.prod(shape2, dtype=int) a = numpy.array( @@ -1250,7 +1217,7 @@ def test_vdot(self, dtype, shape_pair): @pytest.mark.parametrize("dtype", get_complex_dtypes()) @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2", [ ((), ()), ((10,), (10,)), @@ -1270,8 +1237,7 @@ def test_vdot(self, dtype, shape_pair): "3d_3d", ], ) - def test_vdot_complex(self, dtype, shape_pair): - shape1, shape2 = shape_pair + def test_vdot_complex(self, dtype, shape1, shape2): size1 = numpy.prod(shape1, dtype=int) size2 = numpy.prod(shape2, dtype=int) x11 = numpy.random.uniform(-5, 5, size1) @@ -1288,10 +1254,8 @@ def test_vdot_complex(self, dtype, shape_pair): assert_dtype_allclose(result, expected) @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True)) - @pytest.mark.parametrize( - "stride", [3, -1, -2, -4], ids=["3", "-1", "-2", "-4"] - ) - def test_vdot_strided(self, dtype, stride): + @pytest.mark.parametrize("stride", [3, -1, -2, -4]) + def test_strided(self, dtype, stride): a = numpy.arange(25, dtype=dtype) b = numpy.arange(25, dtype=dtype) ia = dpnp.array(a) @@ -1303,7 +1267,7 @@ def test_vdot_strided(self, dtype, stride): @pytest.mark.parametrize("dtype1", get_all_dtypes()) @pytest.mark.parametrize("dtype2", get_all_dtypes()) - def test_vdot_input_dtype_matrix(self, dtype1, dtype2): + def test_input_dtype_matrix(self, dtype1, dtype2): a = numpy.array(numpy.random.uniform(-5, 5, 10), dtype=dtype1) b = numpy.array(numpy.random.uniform(-5, 5, 10), dtype=dtype2) ia = dpnp.array(a) @@ -1313,7 +1277,7 @@ def test_vdot_input_dtype_matrix(self, dtype1, dtype2): expected = numpy.vdot(a, b) assert_dtype_allclose(result, expected) - def test_vdot_error(self): + def test_error(self): a = dpnp.ones(25) b = dpnp.ones(24) # size of input arrays differ @@ -1340,7 +1304,7 @@ def setup_method(self): "dtype", get_all_dtypes(no_none=True, no_complex=True) ) @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2", [ ((4,), (4,)), # call_flag: dot ((1, 1, 4), (1, 1, 4)), # call_flag: dot @@ -1357,8 +1321,7 @@ def setup_method(self): ((1, 4, 5), (1, 3, 1, 5)), ], ) - def test_basic(self, dtype, 
shape_pair): - shape1, shape2 = shape_pair + def test_basic(self, dtype, shape1, shape2): size1 = numpy.prod(shape1, dtype=int) size2 = numpy.prod(shape2, dtype=int) x1 = numpy.random.uniform(-5, 5, size1) @@ -1374,7 +1337,7 @@ def test_basic(self, dtype, shape_pair): @pytest.mark.parametrize("dtype", get_complex_dtypes()) @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2", [ ((4,), (4,)), # call_flag: dot ((1, 1, 4), (1, 1, 4)), # call_flag: dot @@ -1391,8 +1354,7 @@ def test_basic(self, dtype, shape_pair): ((1, 4, 5), (1, 3, 1, 5)), ], ) - def test_complex(self, dtype, shape_pair): - shape1, shape2 = shape_pair + def test_complex(self, dtype, shape1, shape2): size1 = numpy.prod(shape1, dtype=int) size2 = numpy.prod(shape2, dtype=int) x11 = numpy.random.uniform(-5, 5, size1) @@ -1410,11 +1372,10 @@ def test_complex(self, dtype, shape_pair): @pytest.mark.parametrize("axis", [0, 2, -2]) @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2", [((4,), (4, 4, 4)), ((3, 4, 5), (3, 4, 5))], ) - def test_axis1(self, axis, shape_pair): - shape1, shape2 = shape_pair + def test_axis1(self, axis, shape1, shape2): size1 = numpy.prod(shape1, dtype=int) size2 = numpy.prod(shape2, dtype=int) a = numpy.array(numpy.random.uniform(-5, 5, size1)).reshape(shape1) @@ -1579,7 +1540,7 @@ def test_out_order(self, order1, order2, out_order): @pytest.mark.parametrize("dtype1", get_all_dtypes(no_none=True)) @pytest.mark.parametrize("dtype2", get_all_dtypes(no_none=True)) @pytest.mark.parametrize( - "shape_pair", + "shape1, shape2", [ ((4,), ()), ((1, 1, 4), (1, 1)), @@ -1588,8 +1549,7 @@ def test_out_order(self, order1, order2, out_order): ((3, 0, 4), (3, 0)), # zero-size output ], ) - def test_out_dtype(self, dtype1, dtype2, shape_pair): - shape1, shape2 = shape_pair + def test_out_dtype(self, dtype1, dtype2, shape1, shape2): a = numpy.ones(shape1, dtype=dtype1) b = dpnp.asarray(a) @@ -1626,7 +1586,7 @@ def test_out_0D(self, out_shape): assert_dtype_allclose(result, expected) @pytest.mark.parametrize("axis", [0, 1, 2, -1, -2, -3]) - def test_linalg_vecdot(self, axis): + def test_linalg(self, axis): x11 = numpy.random.uniform(-5, 5, 4) x12 = numpy.random.uniform(-5, 5, 4) x21 = numpy.random.uniform(-5, 5, 64) diff --git a/dpnp/tests/test_sort.py b/dpnp/tests/test_sort.py index 4735e1747de..941910648f4 100644 --- a/dpnp/tests/test_sort.py +++ b/dpnp/tests/test_sort.py @@ -16,14 +16,16 @@ class TestArgsort: @pytest.mark.parametrize("kind", [None, "stable", "mergesort", "radixsort"]) - @pytest.mark.parametrize("dtype", get_all_dtypes(no_complex=True)) + @pytest.mark.parametrize( + "dtype", get_all_dtypes(no_none=True, no_complex=True) + ) def test_basic(self, kind, dtype): a = numpy.random.uniform(-5, 5, 10) - np_array = numpy.array(a, dtype=dtype) - dp_array = dpnp.array(np_array) + a = numpy.array(a, dtype=dtype) + ia = dpnp.array(a) - result = dpnp.argsort(dp_array, kind=kind) - expected = numpy.argsort(np_array, kind="stable") + result = dpnp.argsort(ia, kind=kind) + expected = numpy.argsort(a, kind="stable") assert_dtype_allclose(result, expected) @pytest.mark.parametrize("kind", [None, "stable", "mergesort", "radixsort"]) @@ -31,24 +33,24 @@ def test_basic(self, kind, dtype): def test_complex(self, kind, dtype): a = numpy.random.uniform(-5, 5, 10) b = numpy.random.uniform(-5, 5, 10) - np_array = numpy.array(a + b * 1j, dtype=dtype) - dp_array = dpnp.array(np_array) + a = numpy.array(a + b * 1j, dtype=dtype) + ia = dpnp.array(a) if kind == "radixsort": - assert_raises(ValueError, dpnp.argsort, 
dp_array, kind=kind) + assert_raises(ValueError, dpnp.argsort, ia, kind=kind) else: - result = dpnp.argsort(dp_array, kind=kind) - expected = numpy.argsort(np_array) + result = dpnp.argsort(ia, kind=kind) + expected = numpy.argsort(a) assert_dtype_allclose(result, expected) @pytest.mark.parametrize("axis", [None, -2, -1, 0, 1, 2]) def test_axis(self, axis): a = numpy.random.uniform(-10, 10, 36) - np_array = numpy.array(a).reshape(3, 4, 3) - dp_array = dpnp.array(np_array) + a = numpy.array(a).reshape(3, 4, 3) + ia = dpnp.array(a) - result = dpnp.argsort(dp_array, axis=axis) - expected = numpy.argsort(np_array, axis=axis) + result = dpnp.argsort(ia, axis=axis) + expected = numpy.argsort(a, axis=axis) assert_dtype_allclose(result, expected) @pytest.mark.parametrize("dtype", get_all_dtypes()) @@ -60,45 +62,45 @@ def test_ndarray(self, dtype, axis): ) else: a = numpy.random.uniform(-10, 10, 12) - np_array = numpy.array(a, dtype=dtype).reshape(6, 2) - dp_array = dpnp.array(np_array) + a = numpy.array(a, dtype=dtype).reshape(6, 2) + ia = dpnp.array(a) - result = dp_array.argsort(axis=axis) - expected = np_array.argsort(axis=axis) + result = ia.argsort(axis=axis) + expected = a.argsort(axis=axis) assert_dtype_allclose(result, expected) # this test validates that all different options of kind in dpnp are stable @pytest.mark.parametrize("kind", [None, "stable", "mergesort", "radixsort"]) def test_kind(self, kind): - np_array = numpy.repeat(numpy.arange(10), 10) - dp_array = dpnp.array(np_array) + a = numpy.repeat(numpy.arange(10), 10) + ia = dpnp.array(a) - result = dpnp.argsort(dp_array, kind=kind) - expected = numpy.argsort(np_array, kind="stable") + result = dpnp.argsort(ia, kind=kind) + expected = numpy.argsort(a, kind="stable") assert_dtype_allclose(result, expected) # `stable` keyword is supported in numpy 2.0 and above @testing.with_requires("numpy>=2.0") @pytest.mark.parametrize("stable", [None, False, True]) def test_stable(self, stable): - np_array = numpy.repeat(numpy.arange(10), 10) - dp_array = dpnp.array(np_array) + a = numpy.repeat(numpy.arange(10), 10) + ia = dpnp.array(a) - result = dpnp.argsort(dp_array, stable="stable") - expected = numpy.argsort(np_array, stable=True) + result = dpnp.argsort(ia, stable=stable) + expected = numpy.argsort(a, stable=True) assert_dtype_allclose(result, expected) def test_zero_dim(self): - np_array = numpy.array(2.5) - dp_array = dpnp.array(np_array) + a = numpy.array(2.5) + ia = dpnp.array(a) # with default axis=-1 with pytest.raises(AxisError): - dpnp.argsort(dp_array) + dpnp.argsort(ia) # with axis = None - result = dpnp.argsort(dp_array, axis=None) - expected = numpy.argsort(np_array, axis=None) + result = dpnp.argsort(ia, axis=None) + expected = numpy.argsort(a, axis=None) assert_dtype_allclose(result, expected) @@ -107,13 +109,13 @@ class TestSearchSorted: @pytest.mark.parametrize("dtype", get_float_dtypes(no_float16=False)) def test_nans_float(self, side, dtype): a = numpy.array([0, 1, numpy.nan], dtype=dtype) - dp_a = dpnp.array(a) + ia = dpnp.array(a) - result = dp_a.searchsorted(dp_a, side=side) + result = ia.searchsorted(ia, side=side) expected = a.searchsorted(a, side=side) assert_equal(result, expected) - result = dpnp.searchsorted(dp_a, dp_a[-1], side=side) + result = dpnp.searchsorted(ia, ia[-1], side=side) expected = numpy.searchsorted(a, a[-1], side=side) assert_equal(result, expected) @@ -123,9 +125,9 @@ def test_nans_complex(self, side, dtype): a = numpy.zeros(9, dtype=dtype) a.real += [0, 0, 1, 1, 0, 1, numpy.nan, numpy.nan, 
numpy.nan] a.imag += [0, 1, 0, 1, numpy.nan, numpy.nan, 0, 1, numpy.nan] - dp_a = dpnp.array(a) + ia = dpnp.array(a) - result = dp_a.searchsorted(dp_a, side=side) + result = ia.searchsorted(ia, side=side) expected = a.searchsorted(a, side=side) assert_equal(result, expected) @@ -133,24 +135,24 @@ def test_nans_complex(self, side, dtype): @pytest.mark.parametrize("side", ["left", "right"]) def test_n_elements(self, n, side): a = numpy.ones(n) - dp_a = dpnp.array(a) + ia = dpnp.array(a) v = numpy.array([0, 1, 2]) dp_v = dpnp.array(v) - result = dp_a.searchsorted(dp_v, side=side) + result = ia.searchsorted(dp_v, side=side) expected = a.searchsorted(v, side=side) assert_equal(result, expected) @pytest.mark.parametrize("side", ["left", "right"]) def test_smart_resetting(self, side): a = numpy.arange(5) - dp_a = dpnp.array(a) + ia = dpnp.array(a) v = numpy.array([6, 5, 4]) dp_v = dpnp.array(v) - result = dp_a.searchsorted(dp_v, side=side) + result = ia.searchsorted(dp_v, side=side) expected = a.searchsorted(v, side=side) assert_equal(result, expected) @@ -161,16 +163,16 @@ def test_type_specific(self, side, dtype): a = numpy.arange(2, dtype=dtype) else: a = numpy.arange(0, 5, dtype=dtype) - dp_a = dpnp.array(a) + ia = dpnp.array(a) - result = dp_a.searchsorted(dp_a, side=side) + result = ia.searchsorted(ia, side=side) expected = a.searchsorted(a, side=side) assert_equal(result, expected) e = numpy.ndarray(shape=0, buffer=b"", dtype=dtype) dp_e = dpnp.array(e) - result = dp_e.searchsorted(dp_a, side=side) + result = dp_e.searchsorted(ia, side=side) expected = e.searchsorted(a, side=side) assert_array_equal(result, expected) @@ -180,11 +182,11 @@ def test_sorter(self, dtype): s = a.argsort() k = numpy.linspace(0, 1, 20, dtype=dtype) - dp_a = dpnp.array(a) + ia = dpnp.array(a) dp_s = dpnp.array(s) dp_k = dpnp.array(k) - result = dp_a.searchsorted(dp_k, sorter=dp_s) + result = ia.searchsorted(dp_k, sorter=dp_s) expected = a.searchsorted(k, sorter=s) assert_equal(result, expected) @@ -194,11 +196,11 @@ def test_sorter_with_side(self, side): s = a.argsort() k = [0, 1, 2, 3, 5] - dp_a = dpnp.array(a) + ia = dpnp.array(a) dp_s = dpnp.array(s) dp_k = dpnp.array(k) - result = dp_a.searchsorted(dp_k, side=side, sorter=dp_s) + result = ia.searchsorted(dp_k, side=side, sorter=dp_s) expected = a.searchsorted(k, side=side, sorter=s) assert_equal(result, expected) @@ -216,10 +218,10 @@ def test_sorter_type_specific(self, side, dtype): # from np.intp in all platforms s = numpy.array([4, 2, 3, 0, 1], dtype=numpy.int16) - dp_a = dpnp.array(a) + ia = dpnp.array(a) dp_s = dpnp.array(s) - result = dp_a.searchsorted(dp_a, side, dp_s) + result = ia.searchsorted(ia, side, dp_s) expected = a.searchsorted(a, side, s) assert_equal(result, expected) @@ -231,10 +233,10 @@ def test_sorter_non_contiguous(self, side): srt[::2] = [4, 2, 3, 0, 1] s = srt[::2] - dp_a = dpnp.array(a) + ia = dpnp.array(a) dp_s = dpnp.array(s) - result = dp_a.searchsorted(dp_a, side=side, sorter=dp_s) + result = ia.searchsorted(ia, side=side, sorter=dp_s) expected = a.searchsorted(a, side=side, sorter=s) assert_equal(result, expected) @@ -264,23 +266,25 @@ def test_invalid_sorter(self): def test_v_scalar(self): v = 0 a = numpy.array([-8, -5, -1, 3, 6, 10]) - dp_a = dpnp.array(a) + ia = dpnp.array(a) - result = dpnp.searchsorted(dp_a, v) + result = dpnp.searchsorted(ia, v) expected = numpy.searchsorted(a, v) assert_equal(result, expected) class TestSort: @pytest.mark.parametrize("kind", [None, "stable", "mergesort", "radixsort"]) - 
@pytest.mark.parametrize("dtype", get_all_dtypes(no_complex=True)) + @pytest.mark.parametrize( + "dtype", get_all_dtypes(no_none=True, no_complex=True) + ) def test_basic(self, kind, dtype): a = numpy.random.uniform(-5, 5, 10) - np_array = numpy.array(a, dtype=dtype) - dp_array = dpnp.array(np_array) + a = numpy.array(a, dtype=dtype) + ia = dpnp.array(a) - result = dpnp.sort(dp_array, kind=kind) - expected = numpy.sort(np_array) + result = dpnp.sort(ia, kind=kind) + expected = numpy.sort(a) assert_dtype_allclose(result, expected) @pytest.mark.parametrize("kind", [None, "stable", "mergesort", "radixsort"]) @@ -288,90 +292,94 @@ def test_basic(self, kind, dtype): def test_complex(self, kind, dtype): a = numpy.random.uniform(-5, 5, 10) b = numpy.random.uniform(-5, 5, 10) - np_array = numpy.array(a + b * 1j, dtype=dtype) - dp_array = dpnp.array(np_array) + a = numpy.array(a + b * 1j, dtype=dtype) + ia = dpnp.array(a) if kind == "radixsort": - assert_raises(ValueError, dpnp.argsort, dp_array, kind=kind) + assert_raises(ValueError, dpnp.argsort, ia, kind=kind) else: - result = dpnp.sort(dp_array, kind=kind) - expected = numpy.sort(np_array) + result = dpnp.sort(ia, kind=kind) + expected = numpy.sort(a) assert_dtype_allclose(result, expected) @pytest.mark.parametrize("axis", [None, -2, -1, 0, 1, 2]) def test_axis(self, axis): a = numpy.random.uniform(-10, 10, 36) - np_array = numpy.array(a).reshape(3, 4, 3) - dp_array = dpnp.array(np_array) + a = numpy.array(a).reshape(3, 4, 3) + ia = dpnp.array(a) - result = dpnp.sort(dp_array, axis=axis) - expected = numpy.sort(np_array, axis=axis) + result = dpnp.sort(ia, axis=axis) + expected = numpy.sort(a, axis=axis) assert_dtype_allclose(result, expected) @pytest.mark.parametrize("dtype", get_all_dtypes()) @pytest.mark.parametrize("axis", [-2, -1, 0, 1]) def test_ndarray(self, dtype, axis): a = numpy.random.uniform(-10, 10, 12) - np_array = numpy.array(a, dtype=dtype).reshape(6, 2) - dp_array = dpnp.array(np_array) + a = numpy.array(a, dtype=dtype).reshape(6, 2) + ia = dpnp.array(a) - dp_array.sort(axis=axis) - np_array.sort(axis=axis) - assert_dtype_allclose(dp_array, np_array) + ia.sort(axis=axis) + a.sort(axis=axis) + assert_dtype_allclose(ia, a) # this test validates that all different options of kind in dpnp are stable @pytest.mark.parametrize("kind", [None, "stable", "mergesort", "radixsort"]) def test_kind(self, kind): - np_array = numpy.repeat(numpy.arange(10), 10) - dp_array = dpnp.array(np_array) + a = numpy.repeat(numpy.arange(10), 10) + ia = dpnp.array(a) - result = dpnp.sort(dp_array, kind=kind) - expected = numpy.sort(np_array, kind="stable") + result = dpnp.sort(ia, kind=kind) + expected = numpy.sort(a, kind="stable") assert_dtype_allclose(result, expected) # `stable` keyword is supported in numpy 2.0 and above @testing.with_requires("numpy>=2.0") @pytest.mark.parametrize("stable", [None, False, True]) def test_stable(self, stable): - np_array = numpy.repeat(numpy.arange(10), 10) - dp_array = dpnp.array(np_array) + a = numpy.repeat(numpy.arange(10), 10) + ia = dpnp.array(a) - result = dpnp.sort(dp_array, stable="stable") - expected = numpy.sort(np_array, stable=True) + result = dpnp.sort(ia, stable=stable) + expected = numpy.sort(a, stable=True) assert_dtype_allclose(result, expected) def test_ndarray_axis_none(self): a = numpy.random.uniform(-10, 10, 12) - dp_array = dpnp.array(a).reshape(6, 2) + ia = dpnp.array(a).reshape(6, 2) with pytest.raises(TypeError): - dp_array.sort(axis=None) + ia.sort(axis=None) def test_zero_dim(self): - 
np_array = numpy.array(2.5) - dp_array = dpnp.array(np_array) + a = numpy.array(2.5) + ia = dpnp.array(a) # with default axis=-1 with pytest.raises(AxisError): - dpnp.sort(dp_array) + dpnp.sort(ia) # with axis = None - result = dpnp.sort(dp_array, axis=None) - expected = numpy.sort(np_array, axis=None) + result = dpnp.sort(ia, axis=None) + expected = numpy.sort(a, axis=None) assert_dtype_allclose(result, expected) def test_error(self): - dp_array = dpnp.arange(10) + ia = dpnp.arange(10) # quicksort is currently not supported with pytest.raises(ValueError): - dpnp.sort(dp_array, kind="quicksort") + dpnp.sort(ia, kind="quicksort") with pytest.raises(NotImplementedError): - dpnp.sort(dp_array, order=["age"]) + dpnp.sort(ia, order=["age"]) # both kind and stable are given with pytest.raises(ValueError): - dpnp.sort(dp_array, kind="mergesort", stable=True) + dpnp.sort(ia, kind="mergesort", stable=True) + + # stable is not valid + with pytest.raises(ValueError): + dpnp.sort(ia, stable="invalid") class TestSortComplex: diff --git a/dpnp/tests/test_statistics.py b/dpnp/tests/test_statistics.py index 8dbccb379d9..6eb831aace4 100644 --- a/dpnp/tests/test_statistics.py +++ b/dpnp/tests/test_statistics.py @@ -19,36 +19,32 @@ class TestAverage: - @pytest.mark.parametrize("dtype", get_all_dtypes()) + @pytest.mark.parametrize("dtype", get_all_dtypes(no_none=True)) @pytest.mark.parametrize("axis", [None, 0, 1]) @pytest.mark.parametrize("returned", [True, False]) def test_avg_no_wgt(self, dtype, axis, returned): - dp_array = dpnp.array([[1, 1, 2], [3, 4, 5]], dtype=dtype) - np_array = dpnp.asnumpy(dp_array) + ia = dpnp.array([[1, 1, 2], [3, 4, 5]], dtype=dtype) + a = dpnp.asnumpy(ia) - result = dpnp.average(dp_array, axis=axis, returned=returned) - expected = numpy.average(np_array, axis=axis, returned=returned) + result = dpnp.average(ia, axis=axis, returned=returned) + expected = numpy.average(a, axis=axis, returned=returned) if returned: assert_dtype_allclose(result[0], expected[0]) assert_dtype_allclose(result[1], expected[1]) else: assert_dtype_allclose(result, expected) - @pytest.mark.parametrize("dtype", get_all_dtypes()) + @pytest.mark.parametrize("dtype", get_all_dtypes(no_none=True)) @pytest.mark.parametrize("axis", [None, 0, 1, (0, 1)]) @pytest.mark.parametrize("returned", [True, False]) def test_avg(self, dtype, axis, returned): - dp_array = dpnp.array([[1, 1, 2], [3, 4, 5]], dtype=dtype) - dp_wgt = dpnp.array([[3, 1, 2], [3, 4, 2]], dtype=dtype) - np_array = dpnp.asnumpy(dp_array) - np_wgt = dpnp.asnumpy(dp_wgt) + ia = dpnp.array([[1, 1, 2], [3, 4, 5]], dtype=dtype) + iw = dpnp.array([[3, 1, 2], [3, 4, 2]], dtype=dtype) + a = dpnp.asnumpy(ia) + w = dpnp.asnumpy(iw) - result = dpnp.average( - dp_array, axis=axis, weights=dp_wgt, returned=returned - ) - expected = numpy.average( - np_array, axis=axis, weights=np_wgt, returned=returned - ) + result = dpnp.average(ia, axis=axis, weights=iw, returned=returned) + expected = numpy.average(a, axis=axis, weights=w, returned=returned) if returned: assert_dtype_allclose(result[0], expected[0]) @@ -75,37 +71,36 @@ def test_avg_complex(self, dtype): ids=["list", "tuple"], ) def test_avg_weight_array_like(self, weight): - dp_array = dpnp.array([[1, 1, 2], [3, 4, 5]]) - wgt = weight - np_array = dpnp.asnumpy(dp_array) + ia = dpnp.array([[1, 1, 2], [3, 4, 5]]) + a = dpnp.asnumpy(ia) - res = dpnp.average(dp_array, weights=wgt) - exp = numpy.average(np_array, weights=wgt) + res = dpnp.average(ia, weights=weight) + exp = numpy.average(a, weights=weight) 
         assert_dtype_allclose(res, exp)
 
     def test_avg_weight_1D(self):
-        dp_array = dpnp.arange(12).reshape(3, 4)
+        ia = dpnp.arange(12).reshape(3, 4)
         wgt = [1, 2, 3]
-        np_array = dpnp.asnumpy(dp_array)
+        a = dpnp.asnumpy(ia)
 
-        res = dpnp.average(dp_array, axis=0, weights=wgt)
-        exp = numpy.average(np_array, axis=0, weights=wgt)
+        res = dpnp.average(ia, axis=0, weights=wgt)
+        exp = numpy.average(a, axis=0, weights=wgt)
         assert_dtype_allclose(res, exp)
 
     @pytest.mark.parametrize("dtype", get_all_dtypes(no_bool=True))
     def test_avg_strided(self, dtype):
-        dp_array = dpnp.arange(20, dtype=dtype)
-        dp_wgt = dpnp.arange(-10, 10, dtype=dtype)
-        np_array = dpnp.asnumpy(dp_array)
-        np_wgt = dpnp.asnumpy(dp_wgt)
+        ia = dpnp.arange(20, dtype=dtype)
+        iw = dpnp.arange(-10, 10, dtype=dtype)
+        a = dpnp.asnumpy(ia)
+        w = dpnp.asnumpy(iw)
 
-        result = dpnp.average(dp_array[::-1], weights=dp_wgt[::-1])
-        expected = numpy.average(np_array[::-1], weights=np_wgt[::-1])
-        assert_allclose(expected, result)
+        result = dpnp.average(ia[::-1], weights=iw[::-1])
+        expected = numpy.average(a[::-1], weights=w[::-1])
+        assert_allclose(result, expected)
 
-        result = dpnp.average(dp_array[::2], weights=dp_wgt[::2])
-        expected = numpy.average(np_array[::2], weights=np_wgt[::2])
-        assert_allclose(expected, result)
+        result = dpnp.average(ia[::2], weights=iw[::2])
+        expected = numpy.average(a[::2], weights=w[::2])
+        assert_allclose(result, expected)
 
     def test_avg_error(self):
         a = dpnp.arange(5)
@@ -148,9 +143,9 @@ def test_func(self, func, axis, keepdims, dtype):
         a = numpy.arange(768, dtype=dtype).reshape((4, 4, 6, 8))
         ia = dpnp.array(a)
 
-        np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims)
-        dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims)
-        assert_dtype_allclose(dpnp_res, np_res)
+        expected = getattr(numpy, func)(a, axis=axis, keepdims=keepdims)
+        result = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims)
+        assert_dtype_allclose(result, expected)
 
     @pytest.mark.parametrize("func", ["max", "min"])
     @pytest.mark.parametrize("axis", [None, 0, 1, -1])
@@ -160,9 +155,9 @@ def test_bool(self, func, axis, keepdims):
         a = numpy.tile(a, (2, 2))
         ia = dpnp.array(a)
 
-        np_res = getattr(numpy, func)(a, axis=axis, keepdims=keepdims)
-        dpnp_res = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims)
-        assert_dtype_allclose(dpnp_res, np_res)
+        expected = getattr(numpy, func)(a, axis=axis, keepdims=keepdims)
+        result = getattr(dpnp, func)(ia, axis=axis, keepdims=keepdims)
+        assert_dtype_allclose(result, expected)
 
     @pytest.mark.parametrize("func", ["max", "min"])
     def test_out(self, func):
@@ -170,38 +165,34 @@ def test_out(self, func):
         ia = dpnp.array(a)
 
         # out is dpnp_array
-        np_res = getattr(numpy, func)(a, axis=0)
-        dpnp_out = dpnp.empty(np_res.shape, dtype=np_res.dtype)
-        dpnp_res = getattr(dpnp, func)(ia, axis=0, out=dpnp_out)
-        assert dpnp_out is dpnp_res
-        assert_allclose(dpnp_res, np_res)
+        expected = getattr(numpy, func)(a, axis=0)
+        dpnp_out = dpnp.empty(expected.shape, dtype=expected.dtype)
+        result = getattr(dpnp, func)(ia, axis=0, out=dpnp_out)
+        assert dpnp_out is result
+        assert_allclose(result, expected)
 
         # out is usm_ndarray
-        dpt_out = dpt.empty(np_res.shape, dtype=np_res.dtype)
-        dpnp_res = getattr(dpnp, func)(ia, axis=0, out=dpt_out)
-        assert dpt_out is dpnp_res.get_array()
-        assert_allclose(dpnp_res, np_res)
+        dpt_out = dpt.empty(expected.shape, dtype=expected.dtype)
+        result = getattr(dpnp, func)(ia, axis=0, out=dpt_out)
+        assert dpt_out is result.get_array()
+        assert_allclose(result, expected)
 
         # output is numpy array -> Error
-        dpnp_res = numpy.empty_like(np_res)
+        result = numpy.empty_like(expected)
         with pytest.raises(TypeError):
-            getattr(dpnp, func)(ia, axis=0, out=dpnp_res)
+            getattr(dpnp, func)(ia, axis=0, out=result)
 
         # output has incorrect shape -> Error
-        dpnp_res = dpnp.array(numpy.zeros((4, 2)))
+        result = dpnp.array(numpy.zeros((4, 2)))
         with pytest.raises(ValueError):
-            getattr(dpnp, func)(ia, axis=0, out=dpnp_res)
+            getattr(dpnp, func)(ia, axis=0, out=result)
 
     @pytest.mark.usefixtures("suppress_complex_warning")
     @pytest.mark.parametrize("func", ["max", "min"])
     @pytest.mark.parametrize("arr_dt", get_all_dtypes(no_none=True))
     @pytest.mark.parametrize("out_dt", get_all_dtypes(no_none=True))
     def test_out_dtype(self, func, arr_dt, out_dt):
-        a = (
-            numpy.arange(12, dtype=numpy.float32)
-            .reshape((2, 2, 3))
-            .astype(dtype=arr_dt)
-        )
+        a = numpy.arange(12).reshape(2, 2, 3).astype(arr_dt)
         out = numpy.zeros_like(a, shape=(2, 3), dtype=out_dt)
         ia = dpnp.array(a)
 
@@ -209,7 +200,7 @@ def test_out_dtype(self, func, arr_dt, out_dt):
 
         result = getattr(dpnp, func)(ia, out=iout, axis=1)
         expected = getattr(numpy, func)(a, out=out, axis=1)
-        assert_array_equal(expected, result)
+        assert_array_equal(result, expected)
         assert result is iout
 
     @pytest.mark.parametrize("func", ["max", "min"])
@@ -229,11 +220,11 @@ class TestMean:
     @pytest.mark.parametrize("axis", [None, 0, 1, (0, 1)])
     @pytest.mark.parametrize("keepdims", [True, False])
     def test_mean(self, dtype, axis, keepdims):
-        dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype)
-        np_array = dpnp.asnumpy(dp_array)
+        ia = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype)
+        a = dpnp.asnumpy(ia)
 
-        result = dpnp.mean(dp_array, axis=axis, keepdims=keepdims)
-        expected = numpy.mean(np_array, axis=axis, keepdims=keepdims)
+        result = dpnp.mean(ia, axis=axis, keepdims=keepdims)
+        expected = numpy.mean(a, axis=axis, keepdims=keepdims)
         assert_dtype_allclose(result, expected)
 
     @pytest.mark.parametrize("dtype", get_all_dtypes())
@@ -262,15 +253,6 @@ def test_mean_complex(self, dtype):
         result = dpnp.mean(ia)
         assert_dtype_allclose(result, expected)
 
-    @pytest.mark.parametrize("dtype", get_all_dtypes())
-    def test_mean_dtype(self, dtype):
-        dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]])
-        np_array = dpnp.asnumpy(dp_array)
-
-        expected = numpy.mean(np_array, dtype=dtype)
-        result = dpnp.mean(dp_array, dtype=dtype)
-        assert_allclose(expected, result)
-
     @pytest.mark.usefixtures(
         "suppress_invalid_numpy_warnings",
         "suppress_mean_empty_slice_numpy_warnings",
@@ -278,20 +260,20 @@ def test_mean_dtype(self, dtype):
     )
     @pytest.mark.parametrize("axis", [0, 1, (0, 1)])
     @pytest.mark.parametrize("shape", [(2, 3), (2, 0), (0, 3)])
     def test_mean_empty(self, axis, shape):
-        dp_array = dpnp.empty(shape, dtype=dpnp.int64)
-        np_array = dpnp.asnumpy(dp_array)
+        ia = dpnp.empty(shape, dtype=dpnp.int64)
+        a = dpnp.asnumpy(ia)
 
-        result = dpnp.mean(dp_array, axis=axis)
-        expected = numpy.mean(np_array, axis=axis)
-        assert_allclose(expected, result)
+        result = dpnp.mean(ia, axis=axis)
+        expected = numpy.mean(a, axis=axis)
+        assert_allclose(result, expected)
 
     def test_mean_scalar(self):
-        dp_array = dpnp.array(5)
-        np_array = dpnp.asnumpy(dp_array)
+        ia = dpnp.array(5)
+        a = dpnp.asnumpy(ia)
 
-        result = dp_array.mean()
-        expected = np_array.mean()
-        assert_allclose(expected, result)
+        result = ia.mean()
+        expected = a.mean()
+        assert_allclose(result, expected)
 
     def test_mean_NotImplemented(self):
         ia = dpnp.arange(5)
@@ -402,7 +384,6 @@ def test_usm_ndarray(self, axis, overwrite_input):
 
         expected = numpy.median(a, axis=axis, overwrite_input=overwrite_input)
         result = dpnp.median(ia, axis=axis, overwrite_input=overwrite_input)
-
         assert_dtype_allclose(result, expected)
 
 
@@ -410,16 +391,16 @@ class TestVar:
     @pytest.mark.usefixtures(
         "suppress_divide_invalid_numpy_warnings", "suppress_dof_numpy_warnings"
     )
-    @pytest.mark.parametrize("dtype", get_all_dtypes())
+    @pytest.mark.parametrize("dtype", get_all_dtypes(no_none=True))
     @pytest.mark.parametrize("axis", [None, 0, 1, (0, 1)])
     @pytest.mark.parametrize("keepdims", [True, False])
     @pytest.mark.parametrize("ddof", [0, 0.5, 1, 1.5, 2])
     def test_var(self, dtype, axis, keepdims, ddof):
-        dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype)
-        np_array = dpnp.asnumpy(dp_array)
+        ia = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype)
+        a = dpnp.asnumpy(ia)
 
-        expected = numpy.var(np_array, axis=axis, keepdims=keepdims, ddof=ddof)
-        result = dpnp.var(dp_array, axis=axis, keepdims=keepdims, ddof=ddof)
+        expected = numpy.var(a, axis=axis, keepdims=keepdims, ddof=ddof)
+        result = dpnp.var(ia, axis=axis, keepdims=keepdims, ddof=ddof)
 
         if axis == 0 and ddof == 2:
             assert dpnp.all(dpnp.isnan(result))
@@ -429,20 +410,20 @@ def test_var(self, dtype, axis, keepdims, ddof):
     @pytest.mark.usefixtures(
         "suppress_divide_invalid_numpy_warnings", "suppress_dof_numpy_warnings"
    )
-    @pytest.mark.parametrize("dtype", get_all_dtypes())
+    @pytest.mark.parametrize("dtype", get_all_dtypes(no_none=True))
     @pytest.mark.parametrize("axis", [None, 0, 1])
     @pytest.mark.parametrize("ddof", [0, 1])
     def test_var_out(self, dtype, axis, ddof):
-        dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype)
-        np_array = dpnp.asnumpy(dp_array)
+        ia = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype)
+        a = dpnp.asnumpy(ia)
 
-        expected = numpy.var(np_array, axis=axis, ddof=ddof)
+        expected = numpy.var(a, axis=axis, ddof=ddof)
         if has_support_aspect64():
             res_dtype = expected.dtype
         else:
-            res_dtype = dpnp.default_float_type(dp_array.device)
+            res_dtype = dpnp.default_float_type(ia.device)
         out = dpnp.empty(expected.shape, dtype=res_dtype)
-        result = dpnp.var(dp_array, axis=axis, out=out, ddof=ddof)
+        result = dpnp.var(ia, axis=axis, out=out, ddof=ddof)
         assert result is out
         assert_dtype_allclose(result, expected)
 
@@ -452,32 +433,32 @@ def test_var_out(self, dtype, axis, ddof):
     @pytest.mark.parametrize("axis", [0, 1, (0, 1)])
     @pytest.mark.parametrize("shape", [(2, 3), (2, 0), (0, 3)])
     def test_var_empty(self, axis, shape):
-        dp_array = dpnp.empty(shape, dtype=dpnp.int64)
-        np_array = dpnp.asnumpy(dp_array)
+        ia = dpnp.empty(shape, dtype=dpnp.int64)
+        a = dpnp.asnumpy(ia)
 
-        result = dpnp.var(dp_array, axis=axis)
-        expected = numpy.var(np_array, axis=axis)
+        result = dpnp.var(ia, axis=axis)
+        expected = numpy.var(a, axis=axis)
         assert_dtype_allclose(result, expected)
 
     @pytest.mark.usefixtures("suppress_complex_warning")
     @pytest.mark.parametrize("dt_in", get_all_dtypes(no_bool=True))
     @pytest.mark.parametrize("dt_out", get_float_complex_dtypes())
     def test_var_dtype(self, dt_in, dt_out):
-        dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dt_in)
-        np_array = dpnp.asnumpy(dp_array)
+        ia = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dt_in)
+        a = dpnp.asnumpy(ia)
 
-        expected = numpy.var(np_array, dtype=dt_out)
-        result = dpnp.var(dp_array, dtype=dt_out)
+        expected = numpy.var(a, dtype=dt_out)
+        result = dpnp.var(ia, dtype=dt_out)
         assert expected.dtype == result.dtype
         assert_allclose(result, expected, rtol=1e-06)
 
     def test_var_scalar(self):
-        dp_array = dpnp.array(5)
-        np_array = dpnp.asnumpy(dp_array)
+        ia = dpnp.array(5)
+        a = dpnp.asnumpy(ia)
 
-        result = dp_array.var()
-        expected = np_array.var()
-        assert_allclose(expected, result)
+        result = ia.var()
+        expected = a.var()
+        assert_allclose(result, expected)
 
     def test_var_error(self):
         ia = dpnp.arange(5)
@@ -494,16 +475,16 @@ class TestStd:
     @pytest.mark.usefixtures(
         "suppress_divide_invalid_numpy_warnings", "suppress_dof_numpy_warnings"
     )
-    @pytest.mark.parametrize("dtype", get_all_dtypes())
+    @pytest.mark.parametrize("dtype", get_all_dtypes(no_none=True))
     @pytest.mark.parametrize("axis", [0, 1, (0, 1)])
     @pytest.mark.parametrize("keepdims", [True, False])
     @pytest.mark.parametrize("ddof", [0, 0.5, 1, 1.5, 2])
     def test_std(self, dtype, axis, keepdims, ddof):
-        dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype)
-        np_array = dpnp.asnumpy(dp_array)
+        ia = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype)
+        a = dpnp.asnumpy(ia)
 
-        expected = numpy.std(np_array, axis=axis, keepdims=keepdims, ddof=ddof)
-        result = dpnp.std(dp_array, axis=axis, keepdims=keepdims, ddof=ddof)
+        expected = numpy.std(a, axis=axis, keepdims=keepdims, ddof=ddof)
+        result = dpnp.std(ia, axis=axis, keepdims=keepdims, ddof=ddof)
         if axis == 0 and ddof == 2:
             assert dpnp.all(dpnp.isnan(result))
         else:
@@ -512,20 +493,20 @@ def test_std(self, dtype, axis, keepdims, ddof):
     @pytest.mark.usefixtures(
         "suppress_divide_invalid_numpy_warnings", "suppress_dof_numpy_warnings"
    )
-    @pytest.mark.parametrize("dtype", get_all_dtypes())
+    @pytest.mark.parametrize("dtype", get_all_dtypes(no_none=True))
    @pytest.mark.parametrize("axis", [0, 1])
     @pytest.mark.parametrize("ddof", [0, 1])
     def test_std_out(self, dtype, axis, ddof):
-        dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype)
-        np_array = dpnp.asnumpy(dp_array)
+        ia = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype)
+        a = dpnp.asnumpy(ia)
 
-        expected = numpy.std(np_array, axis=axis, ddof=ddof)
+        expected = numpy.std(a, axis=axis, ddof=ddof)
         if has_support_aspect64():
             res_dtype = expected.dtype
         else:
-            res_dtype = dpnp.default_float_type(dp_array.device)
+            res_dtype = dpnp.default_float_type(ia.device)
         out = dpnp.empty(expected.shape, dtype=res_dtype)
-        result = dpnp.std(dp_array, axis=axis, out=out, ddof=ddof)
+        result = dpnp.std(ia, axis=axis, out=out, ddof=ddof)
         assert out is result
         assert_dtype_allclose(result, expected)
 
@@ -535,31 +516,31 @@ def test_std_out(self, dtype, axis, ddof):
     @pytest.mark.parametrize("axis", [None, 0, 1, (0, 1)])
     @pytest.mark.parametrize("shape", [(2, 3), (2, 0), (0, 3)])
     def test_std_empty(self, axis, shape):
-        dp_array = dpnp.empty(shape, dtype=dpnp.int64)
-        np_array = dpnp.asnumpy(dp_array)
+        ia = dpnp.empty(shape, dtype=dpnp.int64)
+        a = dpnp.asnumpy(ia)
 
-        result = dpnp.std(dp_array, axis=axis)
-        expected = numpy.std(np_array, axis=axis)
+        result = dpnp.std(ia, axis=axis)
+        expected = numpy.std(a, axis=axis)
         assert_dtype_allclose(result, expected)
 
     @pytest.mark.usefixtures("suppress_complex_warning")
     @pytest.mark.parametrize("dt_in", get_all_dtypes(no_bool=True))
     @pytest.mark.parametrize("dt_out", get_float_complex_dtypes())
     def test_std_dtype(self, dt_in, dt_out):
-        dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dt_in)
-        np_array = dpnp.asnumpy(dp_array)
+        ia = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dt_in)
+        a = dpnp.asnumpy(ia)
 
-        expected = numpy.std(np_array, dtype=dt_out)
-        result = dpnp.std(dp_array, dtype=dt_out)
+        expected = numpy.std(a, dtype=dt_out)
+        result = dpnp.std(ia, dtype=dt_out)
         assert expected.dtype == result.dtype
         assert_allclose(result, expected, rtol=1e-6)
 
     def test_std_scalar(self):
-        dp_array = dpnp.array(5)
-        np_array = dpnp.asnumpy(dp_array)
+        ia = dpnp.array(5)
+        a = dpnp.asnumpy(ia)
 
-        result = dp_array.std()
-        expected = np_array.std()
+        result = ia.std()
+        expected = a.std()
         assert_dtype_allclose(result, expected)
 
     def test_std_error(self):
@@ -581,11 +562,11 @@ class TestCorrcoef:
     @pytest.mark.parametrize("dtype", get_all_dtypes())
     @pytest.mark.parametrize("rowvar", [True, False])
     def test_corrcoef(self, dtype, rowvar):
-        dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype)
-        np_array = dpnp.asnumpy(dp_array)
+        ia = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dtype)
+        a = dpnp.asnumpy(ia)
 
-        expected = numpy.corrcoef(np_array, rowvar=rowvar)
-        result = dpnp.corrcoef(dp_array, rowvar=rowvar)
+        expected = numpy.corrcoef(a, rowvar=rowvar)
+        result = dpnp.corrcoef(ia, rowvar=rowvar)
 
         assert_dtype_allclose(result, expected)
 
@@ -596,22 +577,22 @@ def test_corrcoef(self, dtype, rowvar):
     )
     @pytest.mark.parametrize("shape", [(2, 0), (0, 2)])
     def test_corrcoef_empty(self, shape):
-        dp_array = dpnp.empty(shape, dtype=dpnp.int64)
-        np_array = dpnp.asnumpy(dp_array)
+        ia = dpnp.empty(shape, dtype=dpnp.int64)
+        a = dpnp.asnumpy(ia)
 
-        result = dpnp.corrcoef(dp_array)
-        expected = numpy.corrcoef(np_array)
+        result = dpnp.corrcoef(ia)
+        expected = numpy.corrcoef(a)
         assert_dtype_allclose(result, expected)
 
     @pytest.mark.usefixtures("suppress_complex_warning")
     @pytest.mark.parametrize("dt_in", get_all_dtypes(no_bool=True))
     @pytest.mark.parametrize("dt_out", get_float_complex_dtypes())
     def test_corrcoef_dtype(self, dt_in, dt_out):
-        dp_array = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dt_in)
-        np_array = dpnp.asnumpy(dp_array)
+        ia = dpnp.array([[0, 1, 2], [3, 4, 0]], dtype=dt_in)
+        a = dpnp.asnumpy(ia)
 
-        expected = numpy.corrcoef(np_array, dtype=dt_out)
-        result = dpnp.corrcoef(dp_array, dtype=dt_out)
+        expected = numpy.corrcoef(a, dtype=dt_out)
+        result = dpnp.corrcoef(ia, dtype=dt_out)
         assert expected.dtype == result.dtype
         assert_allclose(result, expected, rtol=1e-6)
 
@@ -620,11 +601,11 @@ def test_corrcoef_dtype(self, dt_in, dt_out):
         "suppress_dof_numpy_warnings",
     )
     def test_corrcoef_scalar(self):
-        dp_array = dpnp.array(5)
-        np_array = dpnp.asnumpy(dp_array)
+        ia = dpnp.array(5)
+        a = dpnp.asnumpy(ia)
 
-        result = dpnp.corrcoef(dp_array)
-        expected = numpy.corrcoef(np_array)
+        result = dpnp.corrcoef(ia)
+        expected = numpy.corrcoef(a)
         assert_dtype_allclose(result, expected)
 
 
@@ -647,11 +628,7 @@ def test_cov_1D_rowvar(dtype):
     assert_allclose(numpy.cov(b, rowvar=False), dpnp.cov(a, rowvar=False))
 
 
-@pytest.mark.parametrize(
-    "axis",
-    [None, 0, 1],
-    ids=["None", "0", "1"],
-)
+@pytest.mark.parametrize("axis", [None, 0, 1])
 @pytest.mark.parametrize(
     "v",
     [
@@ -674,14 +651,10 @@ def test_ptp(v, axis):
     ia = dpnp.array(a)
     expected = numpy.ptp(a, axis)
     result = dpnp.ptp(ia, axis)
-    assert_array_equal(expected, result)
+    assert_array_equal(result, expected)
 
 
-@pytest.mark.parametrize(
-    "axis",
-    [None, 0, 1],
-    ids=["None", "0", "1"],
-)
+@pytest.mark.parametrize("axis", [None, 0, 1])
 @pytest.mark.parametrize(
     "v",
    [
@@ -705,4 +678,4 @@ def test_ptp_out(v, axis):
     expected = numpy.ptp(a, axis)
     result = dpnp.array(numpy.empty_like(expected))
     dpnp.ptp(ia, axis, out=result)
-    assert_array_equal(expected, result)
+    assert_array_equal(result, expected)