From 1d6943ca1d034909306ba3c0613b6a65c739c36f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?=
Date: Thu, 21 Dec 2023 15:34:39 +0100
Subject: [PATCH] API: Array API support - Part 1

---
 sparse/__init__.py                  |  76 ++++++++++
 sparse/_common.py                   | 226 ++++++++++++++++++++++++++--
 sparse/_compressed/compressed.py    |  12 +-
 sparse/_compressed/indexing.py      |   7 +-
 sparse/_coo/__init__.py             |   4 +
 sparse/_coo/common.py               | 125 ++++++++++++++-
 sparse/_coo/core.py                 |  32 +++-
 sparse/_sparse_array.py             |  33 ++++
 sparse/tests/test_array_function.py |  42 ++++++
 sparse/tests/test_coo.py            |  53 +++++++
 sparse/tests/test_coo_numba.py      |   2 +-
 11 files changed, 591 insertions(+), 21 deletions(-)

diff --git a/sparse/__init__.py b/sparse/__init__.py
index b51d54b5..f5940c1f 100644
--- a/sparse/__init__.py
+++ b/sparse/__init__.py
@@ -10,3 +10,79 @@
 
 __version__ = get_versions()["version"]
 del get_versions
+
+from numpy import (
+    bool_ as bool,
+    float16,
+    float32,
+    float64,
+    complex64,
+    complex128,
+    uint8,
+    uint16,
+    uint32,
+    uint64,
+    int8,
+    int16,
+    int32,
+    int64,
+    pi,
+    e,
+    nan,
+    inf,
+    newaxis,
+    sin,
+    sinh,
+    cos,
+    cosh,
+    tan,
+    tanh,
+    arcsin as asin,
+    arcsinh as asinh,
+    arccos as acos,
+    arccosh as acosh,
+    arctan as atan,
+    arctan2 as atan2,
+    arctanh as atanh,
+    log,
+    log2,
+    log1p,
+    log10,
+    logaddexp,
+    power as pow,
+    sign,
+    square,
+    sqrt,
+    logical_and,
+    logical_not,
+    logical_or,
+    logical_xor,
+    bitwise_and,
+    bitwise_or,
+    bitwise_xor,
+    bitwise_not,
+    trunc,
+    add,
+    subtract,
+    remainder,
+    positive,
+    not_equal,
+    negative,
+    multiply,
+    less_equal,
+    less,
+    greater_equal,
+    greater,
+    floor_divide,
+    floor,
+    exp,
+    expm1,
+    divide,
+    ceil,
+    left_shift as bitwise_left_shift,
+    right_shift as bitwise_right_shift,
+    invert as bitwise_invert,
+    finfo,
+    iinfo,
+    can_cast,
+)
diff --git a/sparse/_common.py b/sparse/_common.py
index 8b497f7b..d8a37d97 100644
--- a/sparse/_common.py
+++ b/sparse/_common.py
@@ -1,13 +1,15 @@
-import numpy as np
-import numba
-import scipy.sparse
+import builtins
+from collections.abc import Iterable
 from functools import wraps, reduce
 from itertools import chain
 from operator import mul, index
-from collections.abc import Iterable
+import warnings
+
+import numpy as np
+import numba
+import scipy.sparse
 from scipy.sparse import spmatrix
 from numba import literal_unroll
-import warnings
 
 from ._sparse_array import SparseArray
 from ._utils import (
@@ -33,6 +35,8 @@
     roll,
     kron,
     argwhere,
+    argmax,
+    argmin,
     isposinf,
     isneginf,
     result_type,
@@ -187,7 +191,7 @@ def tensordot(a, b, axes=2, *, return_type=None):
     newshape_b = (N2, -1)
     oldb = [bs[axis] for axis in notin]
 
-    if any(dim == 0 for dim in chain(newshape_a, newshape_b)):
+    if builtins.any(dim == 0 for dim in chain(newshape_a, newshape_b)):
         res = asCOO(np.empty(olda + oldb), check=False)
         if isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
             res = res.todense()
@@ -268,12 +272,12 @@ def _matmul_recurser(a, b):
         if a.ndim == 2:
             return dot(a, b)
         res = []
-        for i in range(max(a.shape[0], b.shape[0])):
+        for i in range(builtins.max(a.shape[0], b.shape[0])):
            a_i = a[0] if a.shape[0] == 1 else a[i]
            b_i = b[0] if b.shape[0] == 1 else b[i]
            res.append(_matmul_recurser(a_i, b_i))
         mask = [isinstance(x, SparseArray) for x in res]
-        if all(mask):
+        if builtins.all(mask):
             return stack(res)
         else:
             res = [x.todense() if isinstance(x, SparseArray) else x for x in res]
@@ -334,7 +338,7 @@ def _dot(a, b, return_type=None):
     from ._sparse_array import SparseArray
 
     out_shape = (a.shape[0], b.shape[1])
-    if all(isinstance(arr, SparseArray) for arr in [a, b]) and any(
+    if builtins.all(isinstance(arr, SparseArray) for arr in [a, b]) and builtins.any(
        isinstance(arr, GCXS) for arr in [a, b]
    ):
        a = a.asformat("gcxs")
@@ -1333,7 +1337,7 @@ def _parse_einsum_input(operands):
                if operands[num].shape == ():
                    ellipse_count = 0
                else:
-                    ellipse_count = max(operands[num].ndim, 1)
+                    ellipse_count = builtins.max(operands[num].ndim, 1)
                    ellipse_count -= len(sub) - 3
 
                if ellipse_count > longest:
@@ -1573,7 +1577,7 @@ def stack(arrays, axis=0, compressed_axes=None):
     """
     from ._compressed import GCXS
 
-    if not all(isinstance(arr, GCXS) for arr in arrays):
+    if not builtins.all(isinstance(arr, GCXS) for arr in arrays):
         from ._coo import stack as coo_stack
 
         return coo_stack(arrays, axis)
@@ -1612,7 +1616,7 @@ def concatenate(arrays, axis=0, compressed_axes=None):
     """
     from ._compressed import GCXS
 
-    if not all(isinstance(arr, GCXS) for arr in arrays):
+    if not builtins.all(isinstance(arr, GCXS) for arr in arrays):
         from ._coo import concatenate as coo_concat
 
         return coo_concat(arrays, axis)
@@ -1622,6 +1626,9 @@ def concatenate(arrays, axis=0, compressed_axes=None):
     return gcxs_concat(arrays, axis, compressed_axes)
 
 
+concat = concatenate
+
+
 def eye(N, M=None, k=0, dtype=float, format="coo", **kwargs):
     """Return a 2-D array in the specified format with ones on the diagonal
     and zeros elsewhere.
@@ -1665,14 +1672,14 @@ def eye(N, M=None, k=0, dtype=float, format="coo", **kwargs):
     M = int(M)
     k = int(k)
 
-    data_length = min(N, M)
+    data_length = builtins.min(N, M)
 
     if k > 0:
-        data_length = max(min(data_length, M - k), 0)
+        data_length = builtins.max(builtins.min(data_length, M - k), 0)
         n_coords = np.arange(data_length, dtype=np.intp)
         m_coords = n_coords + k
     elif k < 0:
-        data_length = max(min(data_length, N + k), 0)
+        data_length = builtins.max(builtins.min(data_length, N + k), 0)
         m_coords = np.arange(data_length, dtype=np.intp)
         n_coords = m_coords - k
     else:
@@ -1905,6 +1912,20 @@ def ones_like(a, dtype=None, shape=None, format=None, **kwargs):
     return full_like(a, 1, dtype=dtype, shape=shape, format=format, **kwargs)
 
 
+def empty(shape, dtype=float, format="coo", **kwargs):
+    return full(shape, 0, np.dtype(dtype)).asformat(format, **kwargs)
+
+
+empty.__doc__ = zeros.__doc__
+
+
+def empty_like(a, dtype=None, shape=None, format=None, **kwargs):
+    return full_like(a, 0, dtype=dtype, shape=shape, format=format, **kwargs)
+
+
+empty_like.__doc__ = zeros_like.__doc__
+
+
 def outer(a, b, out=None):
     """
     Return outer product of two sparse arrays.
@@ -2088,3 +2109,178 @@ def format_to_string(format):
         return format
 
     raise ValueError(f"invalid format: {format}")
+
+
+def asarray(
+    obj, /, *, dtype=None, format="coo", backend="pydata", device=None, copy=False
+):
+    """
+    Convert the input to a sparse array.
+
+    Parameters
+    ----------
+    obj : array_like
+        Object to be converted to an array.
+    dtype : dtype, optional
+        Output array data type.
+    format : str, optional
+        Output array sparse format.
+    backend : str, optional
+        Backend for the output array.
+    device : str, optional
+        Device on which to place the created array.
+    copy : bool, optional
+        Boolean indicating whether or not to copy the input.
+
+    Returns
+    -------
+    out : Union[SparseArray, numpy.ndarray]
+        Sparse or 0-D array containing the data from `obj`.
+
+    Examples
+    --------
+    >>> x = np.eye(8, dtype='i8')
+    >>> sparse.asarray(x, format="COO")
+
+    """
+    if format not in ["coo", "dok", "gcxs"]:
+        raise ValueError(f"{format} format not supported.")
+
+    if backend not in ["pydata", "taco"]:
+        raise ValueError(f"{backend} backend not supported.")
+
+    from ._coo import COO
+    from ._dok import DOK
+    from ._compressed import GCXS
+
+    format_dict = {"coo": COO, "dok": DOK, "gcxs": GCXS}
+
+    if backend == "pydata":
+        if isinstance(obj, (COO, DOK, GCXS)):
+            # TODO: consider `format` argument
+            warnings.warn("`format` argument was ignored")
+            return obj
+
+        elif isinstance(obj, spmatrix):
+            return format_dict[format].from_scipy_sparse(
+                obj.astype(dtype=dtype, copy=copy)
+            )
+
+        # check for scalars and 0-D arrays
+        elif np.isscalar(obj) or (isinstance(obj, np.ndarray) and obj.shape == ()):
+            return np.asarray(obj, dtype=dtype)
+
+        elif isinstance(obj, np.ndarray):
+            return format_dict[format].from_numpy(obj).astype(dtype=dtype, copy=copy)
+
+        else:
+            raise ValueError(f"{type(obj)} not supported.")
+
+    elif backend == "taco":
+        raise ValueError("Taco not yet supported.")
+
+
+def _support_numpy(func):
+    """
+    In case a NumPy array is passed to `sparse` namespace function
+    we want to flag it and dispatch to NumPy.
+    """
+
+    def wrapper_func(*args, **kwargs):
+        x = args[0]
+        if isinstance(x, (np.ndarray, np.number)):
+            warnings.warn(
+                f"Sparse {func.__name__} received dense NumPy array instead "
+                "of sparse array. Dispatching to NumPy function."
+            )
+            return getattr(np, func.__name__)(*args, **kwargs)
+        else:
+            return func(*args, **kwargs)
+
+    return wrapper_func
+
+
+def all(x, /, *, axis=None, keepdims=False):
+    return x.all(axis=axis, keepdims=keepdims)
+
+
+def any(x, /, *, axis=None, keepdims=False):
+    return x.any(axis=axis, keepdims=keepdims)
+
+
+def permute_dims(x, /, axes=None):
+    return x.transpose(axes=axes)
+
+
+def max(x, /, *, axis=None, keepdims=False):
+    return x.max(axis=axis, keepdims=keepdims)
+
+
+def mean(x, /, *, axis=None, keepdims=False, dtype=None):
+    return x.mean(axis=axis, keepdims=keepdims, dtype=dtype)
+
+
+def min(x, /, *, axis=None, keepdims=False):
+    return x.min(axis=axis, keepdims=keepdims)
+
+
+def prod(x, /, *, axis=None, dtype=None, keepdims=False):
+    return x.prod(axis=axis, keepdims=keepdims, dtype=dtype)
+
+
+def std(x, /, *, axis=None, correction=0.0, keepdims=False):
+    return x.std(axis=axis, ddof=correction, keepdims=keepdims)
+
+
+def sum(x, /, *, axis=None, dtype=None, keepdims=False):
+    return x.sum(axis=axis, keepdims=keepdims, dtype=dtype)
+
+
+def var(x, /, *, axis=None, correction=0.0, keepdims=False):
+    return x.var(axis=axis, ddof=correction, keepdims=keepdims)
+
+
+def abs(x, /):
+    return x.__abs__()
+
+
+def reshape(x, /, shape, *, copy=None):
+    return x.reshape(shape=shape)
+
+
+def astype(x, dtype, /, *, copy=True):
+    return x.astype(dtype, copy=copy)
+
+
+@_support_numpy
+def broadcast_to(x, /, shape):
+    return x.broadcast_to(shape)
+
+
+def broadcast_arrays(*arrays):
+    shape = np.broadcast_shapes(*[a.shape for a in arrays])
+    return [a.broadcast_to(shape) for a in arrays]
+
+
+def equal(x1, x2, /):
+    return x1 == x2
+
+
+@_support_numpy
+def round(x, /, decimals=0, out=None):
+    return x.round(decimals=decimals, out=out)
+
+
+@_support_numpy
+def isinf(x, /):
+    return x.isinf()
+
+
+@_support_numpy
+def isnan(x, /):
+    return x.isnan()
+
+
+def isfinite(x, /):
+    return ~isinf(x)
+
+
+def nonzero(x, /):
+    return x.nonzero()
diff --git a/sparse/_compressed/compressed.py b/sparse/_compressed/compressed.py
index 70f4125d..9c846c2c 100644
--- a/sparse/_compressed/compressed.py
+++ b/sparse/_compressed/compressed.py
@@ -205,7 +205,7 @@ def copy(self, deep=True):
 
     @classmethod
     def from_numpy(cls, x, compressed_axes=None, fill_value=0, idx_dtype=None):
-        coo = COO(x, fill_value=fill_value, idx_dtype=idx_dtype)
+        coo = COO.from_numpy(x, fill_value=fill_value, idx_dtype=idx_dtype)
         return cls.from_coo(coo, compressed_axes, idx_dtype)
 
     @classmethod
@@ -843,6 +843,16 @@ def _prune(self):
         else:
             self.indices = self.indices[mask]
 
+    def isinf(self):
+        return (
+            self.tocoo().isinf().asformat("gcxs", compressed_axes=self.compressed_axes)
+        )
+
+    def isnan(self):
+        return (
+            self.tocoo().isnan().asformat("gcxs", compressed_axes=self.compressed_axes)
+        )
+
 
 class _Compressed2d(GCXS):
     def __init__(
diff --git a/sparse/_compressed/indexing.py b/sparse/_compressed/indexing.py
index b554e0e5..230078dc 100644
--- a/sparse/_compressed/indexing.py
+++ b/sparse/_compressed/indexing.py
@@ -18,8 +18,10 @@ def getitem(x, key):
     from .compressed import GCXS
 
     if x.ndim == 1:
-        coo = x.tocoo()[key]
-        return GCXS.from_coo(coo)
+        result = x.tocoo()[key]
+        if np.isscalar(result):
+            return result
+        return GCXS.from_coo(result)
 
     key = list(normalize_index(key, x.shape))
 
@@ -283,6 +285,7 @@ def get_single_element(x, key):
     A convience function for indexing when returning
     a single element.
     """
+    key = np.array(key)[x._axis_order]  # reordering the input
     ind = np.ravel_multi_index(key, x._reordered_shape)
     row, col = np.unravel_index(ind, x._compressed_shape)
 
diff --git a/sparse/_coo/__init__.py b/sparse/_coo/__init__.py
index 4a3c910b..be5ce7cd 100644
--- a/sparse/_coo/__init__.py
+++ b/sparse/_coo/__init__.py
@@ -15,6 +15,8 @@
     roll,
     kron,
     argwhere,
+    argmax,
+    argmin,
     isposinf,
     isneginf,
     result_type,
@@ -40,6 +42,8 @@
     "roll",
     "kron",
     "argwhere",
+    "argmax",
+    "argmin",
     "isposinf",
     "isneginf",
     "result_type",
diff --git a/sparse/_coo/common.py b/sparse/_coo/common.py
index eb5ba92c..62500c26 100644
--- a/sparse/_coo/common.py
+++ b/sparse/_coo/common.py
@@ -2,6 +2,7 @@
 import operator
 import warnings
 from collections.abc import Iterable
+from typing import Callable, Optional
 
 import numpy as np
 import scipy.sparse
@@ -160,6 +161,9 @@
     check_consistent_fill_value(arrays)
 
+    if axis is None:
+        arrays = [x.flatten() for x in arrays]
+
     arrays = [x if isinstance(x, COO) else COO(x) for x in arrays]
     axis = normalize_axis(axis, arrays[0].ndim)
     assert all(
@@ -590,7 +594,7 @@ def argwhere(a):
 
     Returns
     -------
-    index_array: numpy.ndarray
+    index_array : numpy.ndarray
 
     See Also
     --------
@@ -609,6 +613,125 @@
     return np.transpose(a.nonzero())
 
 
+def _arg_minmax_common(
+    x: SparseArray,
+    axis: Optional[int],
+    keepdims: bool,
+    comp_op: Callable,
+    np_arg_func: Callable,
+):
+    """ """
+    assert comp_op in (operator.lt, operator.gt)
+    assert np_arg_func in (np.argmax, np.argmin)
+
+    if not isinstance(axis, (int, type(None))):
+        raise ValueError(f"axis must be int or None, but it's: {type(axis)}")
+
+    if isinstance(axis, int) and axis >= x.ndim:
+        raise ValueError(
+            f"axis {axis} is out of bounds for array of dimension {x.ndim}"
+        )
+
+    if x.fill_value != 0.0:
+        raise ValueError(
+            f"Only 0.0 fill value is supported, but found: {x.fill_value}."
+        )
+
+    if np.any(comp_op(x.data, 0.0)):
+        raise ValueError(
+            f"None of the non-zero values can be {comp_op.__name__} the fill value."
+        )
+
+    # fast path
+    if axis is None or x.ndim == 1:
+        x_flat = x.reshape(-1)
+        result = x_flat.coords[0, np_arg_func(x_flat.data)]
+        return np.array(result).reshape([1] * x.ndim) if keepdims else result
+
+    # search for min/max value & index for each retained axis
+    minmax_indexes = {}
+    minmax_values = {}
+
+    for idx, coord in enumerate(x.coords.T):
+        coord = list(coord)
+        axis_index = coord[axis]
+        coord[axis] = 0
+        coord = tuple(coord)
+        if not coord in minmax_values or comp_op(minmax_values[coord], x.data[idx]):
+            minmax_values[coord] = x.data[idx]
+            minmax_indexes[coord] = axis_index
+
+    new_shape = list(x.shape)
+    new_shape[axis] = 1
+    new_shape = tuple(new_shape)
+
+    result = np.zeros(shape=new_shape, dtype=np.intp)
+    for idx, minmax_index in minmax_indexes.items():
+        result[idx] = minmax_index
+
+    return result if keepdims else result.squeeze()
+
+
+def argmax(x, /, *, axis=None, keepdims=False):
+    """
+    Returns the indices of the maximum values along a specified axis.
+    When the maximum value occurs multiple times, only the indices
+    corresponding to the first occurrence are returned.
+
+    Parameters
+    ----------
+    x : SparseArray
+        Input array. The fill value must be ``0.0`` and all non-zero values
+        must be greater than ``0.0``.
+    axis : int, optional
+        Axis along which to search. If ``None``, the function must return
+        the index of the maximum value of the flattened array. Default: ``None``.
+    keepdims : bool, optional
+        If ``True``, the reduced axes (dimensions) must be included in the result
+        as singleton dimensions, and, accordingly, the result must be compatible
+        with the input array. Otherwise, if ``False``, the reduced axes (dimensions)
+        must not be included in the result. Default: ``False``.
+
+    Returns
+    -------
+    out : numpy.ndarray
+        If ``axis`` is ``None``, a zero-dimensional array containing the index of
+        the first occurrence of the maximum value. Otherwise, a non-zero-dimensional
+        array containing the indices of the maximum values.
+    """
+    return _arg_minmax_common(
+        x, axis=axis, keepdims=keepdims, comp_op=operator.lt, np_arg_func=np.argmax
+    )
+
+
+def argmin(x, /, *, axis=None, keepdims=False):
+    """
+    Returns the indices of the minimum values along a specified axis.
+    When the minimum value occurs multiple times, only the indices
+    corresponding to the first occurrence are returned.
+
+    Parameters
+    ----------
+    x : SparseArray
+        Input array. The fill value must be ``0.0`` and all non-zero values
+        must be less than ``0.0``.
+    axis : int, optional
+        Axis along which to search. If ``None``, the function must return
+        the index of the minimum value of the flattened array. Default: ``None``.
+    keepdims : bool, optional
+        If ``True``, the reduced axes (dimensions) must be included in the result
+        as singleton dimensions, and, accordingly, the result must be compatible
+        with the input array. Otherwise, if ``False``, the reduced axes (dimensions)
+        must not be included in the result. Default: ``False``.
+
+    Returns
+    -------
+    out : numpy.ndarray
+        If ``axis`` is ``None``, a zero-dimensional array containing the index of
+        the first occurrence of the minimum value. Otherwise, a non-zero-dimensional
+        array containing the indices of the minimum values.
+    """
+    return _arg_minmax_common(
+        x, axis=axis, keepdims=keepdims, comp_op=operator.gt, np_arg_func=np.argmin
+    )
+
+
 def _replace_nan(array, value):
     """
     Replaces ``NaN``s in ``array`` with ``value``.
diff --git a/sparse/_coo/core.py b/sparse/_coo/core.py
index 1579c011..eff98bd7 100644
--- a/sparse/_coo/core.py
+++ b/sparse/_coo/core.py
@@ -1485,6 +1485,36 @@ def asformat(self, format, **kwargs):
 
         return self.asformat("gcxs", **kwargs).asformat(format, **kwargs)
 
+    def isinf(self):
+        """
+        Tests each element ``x_i`` of the array to determine if equal to positive or negative infinity.
+        """
+        new_fill_value = True if np.isinf(self.fill_value) else False
+        new_data = np.isinf(self.data)
+
+        return COO(
+            self.coords,
+            new_data,
+            shape=self.shape,
+            fill_value=new_fill_value,
+            prune=True,
+        )
+
+    def isnan(self):
+        """
+        Tests each element ``x_i`` of the array to determine whether the element is ``NaN``.
+        """
+        new_fill_value = True if np.isnan(self.fill_value) else False
+        new_data = np.isnan(self.data)
+
+        return COO(
+            self.coords,
+            new_data,
+            shape=self.shape,
+            fill_value=new_fill_value,
+            prune=True,
+        )
+
 
 def as_coo(x, shape=None, fill_value=None, idx_dtype=None):
     """
@@ -1528,7 +1558,7 @@ def as_coo(x, shape=None, fill_value=None, idx_dtype=None):
     if isinstance(x, SparseArray):
         return x.asformat("coo")
 
-    if isinstance(x, np.ndarray):
+    if isinstance(x, np.ndarray) or np.isscalar(x):
         return COO.from_numpy(x, fill_value=fill_value, idx_dtype=idx_dtype)
 
     if isinstance(x, scipy.sparse.spmatrix):
diff --git a/sparse/_sparse_array.py b/sparse/_sparse_array.py
index deb90b1b..da2a100d 100644
--- a/sparse/_sparse_array.py
+++ b/sparse/_sparse_array.py
@@ -986,3 +986,36 @@ def conj(self):
         numpy.conj : NumPy equivalent function.
         """
         return np.conj(self)
+
+    def __bool__(self):
+        """ """
+        return self._to_scalar(bool)
+
+    def __float__(self):
+        """ """
+        return self._to_scalar(float)
+
+    def __int__(self):
+        """ """
+        return self._to_scalar(int)
+
+    def __index__(self):
+        """ """
+        return self._to_scalar(int)
+
+    def __complex__(self):
+        """ """
+        return self._to_scalar(complex)
+
+    def _to_scalar(self, builtin):
+        if self.size != 1 or self.shape != ():
+            raise ValueError(f"{builtin} can be computed for one-element arrays only.")
+        return builtin(self.todense().flatten()[0])
+
+    @abstractmethod
+    def isinf(self):
+        """ """
+
+    @abstractmethod
+    def isnan(self):
+        """ """
diff --git a/sparse/tests/test_array_function.py b/sparse/tests/test_array_function.py
index 87c0e89b..2bbbed28 100644
--- a/sparse/tests/test_array_function.py
+++ b/sparse/tests/test_array_function.py
@@ -3,6 +3,7 @@
 from sparse._utils import assert_eq
 import numpy as np
 import pytest
+import scipy
 
 
 if not NEP18_ENABLED:
@@ -98,3 +99,44 @@ def test_zeros_like_order():
 def test_format(format):
     s = sparse.random((5, 5), density=0.2, format=format)
     assert s.format == format
+
+
+class TestAsarray:
+    np_eye = np.eye(5)
+
+    @pytest.mark.parametrize(
+        "input",
+        [
+            np_eye,
+            scipy.sparse.csr_matrix(np_eye),
+            scipy.sparse.csc_matrix(np_eye),
+            4,
+            np.array(5),
+            np.arange(12).reshape((2, 3, 2)),
+            sparse.COO.from_numpy(np_eye),
+            sparse.GCXS.from_numpy(np_eye),
+            sparse.DOK.from_numpy(np_eye),
+        ],
+    )
+    @pytest.mark.parametrize("dtype", [np.int64, np.float64, np.complex128])
+    @pytest.mark.parametrize("format", ["dok", "gcxs", "coo"])
+    def test_asarray(self, input, dtype, format):
+        s = sparse.asarray(input, dtype=dtype, format=format)
+
+        actual = s.todense() if hasattr(s, "todense") else s
+        expected = input.todense() if hasattr(input, "todense") else np.asarray(input)
+
+        np.testing.assert_equal(actual, expected)
+
+    def test_asarray_special_cases(self):
+        with pytest.raises(ValueError, match="Taco not yet supported."):
+            sparse.asarray(self.np_eye, backend="taco")
+
+        with pytest.raises(ValueError, match=" not supported."):
+            sparse.asarray([1, 2, 3])
+
+        with pytest.raises(ValueError, match="any backend not supported."):
+            sparse.asarray(self.np_eye, backend="any")
+
+        with pytest.raises(ValueError, match="any format not supported."):
+            sparse.asarray(self.np_eye, format="any")
diff --git a/sparse/tests/test_coo.py b/sparse/tests/test_coo.py
index 7209982c..ebe8e10e 100644
--- a/sparse/tests/test_coo.py
+++ b/sparse/tests/test_coo.py
@@ -1690,3 +1690,56 @@ def test_array_as_shape():
     data = [10, 20, 30, 40, 50]
 
     s = sparse.COO(coords, data, shape=np.array((5, 5)))
+
+
+@pytest.mark.parametrize(
+    "arr",
+    [np.array([[0, 3, 0], [1, 2, 0]]), np.array([[[0, 0], [1, 0]], [[5, 0], [0, 3]]])],
+)
+@pytest.mark.parametrize("axis", [None, 0, 1])
+@pytest.mark.parametrize("keepdims", [True, False])
+@pytest.mark.parametrize(
+    "mode",
+    [(sparse.argmax, np.argmax, lambda x: x), (sparse.argmin, np.argmin, lambda x: -x)],
+)
+def test_argmax_argmin(arr, axis, keepdims, mode):
+    sparse_func, np_func, transform = mode
+    arr = transform(arr)
+
+    s_arr = sparse.COO.from_numpy(arr)
+
+    result = sparse_func(s_arr, axis=axis, keepdims=keepdims)
+    expected = np_func(arr, axis=axis, keepdims=keepdims)
+
+    np.testing.assert_equal(result, expected)
+
+
+@pytest.mark.parametrize("func", [np.argmax, np.argmin])
+def test_argmax_argmin_value_constraint(func):
+    s = sparse.COO.from_numpy(np.full((2, 2), 2), fill_value=2)
+
+    with pytest.raises(
+        ValueError, match="Only 0.0 fill value is supported, but found: 2."
+    ):
+        func(s)
+
+    arr = np.array([[-2, 0], [0, 2]])
+    s = sparse.COO.from_numpy(arr)
+
+    with pytest.raises(
+        ValueError, match=r"None of the non-zero values can be (lt|gt) the fill value"
+    ):
+        func(s, axis=0)
+
+
+@pytest.mark.parametrize("config", [(np.inf, "isinf"), (np.nan, "isnan")])
+def test_isinf_isnan(config):
+    obj, func_name = config
+
+    arr = np.array([[1, 1, obj], [-obj, 1, 1]])
+    s = sparse.COO.from_numpy(arr)
+
+    result = getattr(s, func_name)().todense()
+    expected = getattr(np, func_name)(arr)
+
+    np.testing.assert_equal(result, expected)
diff --git a/sparse/tests/test_coo_numba.py b/sparse/tests/test_coo_numba.py
index 84bb088f..4c71df28 100644
--- a/sparse/tests/test_coo_numba.py
+++ b/sparse/tests/test_coo_numba.py
@@ -22,7 +22,7 @@ def get_it():
 
 
 def assert_coo_equal(c1, c2):
     assert c1.shape == c2.shape
-    assert c1 == c2
+    assert sparse.all(c1 == c2)
     assert c1.data.dtype == c2.data.dtype
     assert c1.fill_value == c2.fill_value