diff --git a/tests/python/contrib/test_hexagon/pytest_util.py b/tests/python/contrib/test_hexagon/pytest_util.py
index fb28ebeb6823..0264fc1bc281 100644
--- a/tests/python/contrib/test_hexagon/pytest_util.py
+++ b/tests/python/contrib/test_hexagon/pytest_util.py
@@ -59,6 +59,22 @@ def get_single_param_chunk(param_val, param_desc: Optional[str]):
             val_str = "F"
         need_prefix_separator = True
 
+    elif type(param_val) == TensorContentConstant:
+        val_str = f"const({param_val.elem_value})"
+        need_prefix_separator = True
+
+    elif type(param_val) == TensorContentDtypeMin:
+        val_str = "min"
+        need_prefix_separator = True
+
+    elif type(param_val) == TensorContentDtypeMax:
+        val_str = "max"
+        need_prefix_separator = True
+
+    elif type(param_val) == TensorContentRandom:
+        val_str = "random"
+        need_prefix_separator = True
+
     else:
         val_str = str(param_val)
         need_prefix_separator = True
@@ -91,3 +107,53 @@ def get_multitest_ids(
         get_test_id(*single_test_param_list, test_param_descs=param_descs)
         for single_test_param_list in multitest_params_list
     ]
+
+
+def get_numpy_dtype_info(np_dtype_name: str) -> Union[np.finfo, np.iinfo]:
+    """
+    Return an appropriate 'np.iinfo' or 'np.finfo' object corresponding to
+    the specified dtype.
+    """
+    np_dtype = np.dtype(np_dtype_name)
+    kind = np_dtype.kind
+
+    if kind == "f":
+        return np.finfo(np_dtype_name)
+    elif kind == "i":
+        return np.iinfo(np_dtype_name)
+    else:
+        raise TypeError(
+            f"np_dtype_name ({np_dtype_name}) must indicate some floating-point or integral data type"
+        )
+
+
+TensorContentConstant = collections.namedtuple("TensorContentConstant", ["elem_value"])
+TensorContentRandom = collections.namedtuple("TensorContentRandom", [])
+TensorContentDtypeMin = collections.namedtuple("TensorContentDtypeMin", [])
+TensorContentDtypeMax = collections.namedtuple("TensorContentDtypeMax", [])
+
+
+def create_populated_numpy_ndarray(
+    input_shape: Union[list, tuple], dtype: str, input_tensor_populator
+) -> np.ndarray:
+    """
+    Create a numpy tensor with the specified shape, dtype, and content.
+    """
+    itp = input_tensor_populator  # just for brevity
+
+    if type(itp) == TensorContentConstant:
+        return np.full(tuple(input_shape), itp.elem_value, dtype=dtype)
+
+    elif type(itp) == TensorContentDtypeMin:
+        info = get_numpy_dtype_info(dtype)
+        return np.full(tuple(input_shape), info.min, dtype=dtype)
+
+    elif type(itp) == TensorContentDtypeMax:
+        info = get_numpy_dtype_info(dtype)
+        return np.full(tuple(input_shape), info.max, dtype=dtype)
+
+    elif type(itp) == TensorContentRandom:
+        return np.random.random(input_shape).astype(dtype)
+
+    else:
+        raise ValueError(f"Unexpected input_tensor_populator type: {type(itp)}")
diff --git a/tests/python/contrib/test_hexagon/topi/test_avg_pool2d_slice.py b/tests/python/contrib/test_hexagon/topi/test_avg_pool2d_slice.py
index 34e9b751b9c6..af60e0f2e084 100644
--- a/tests/python/contrib/test_hexagon/topi/test_avg_pool2d_slice.py
+++ b/tests/python/contrib/test_hexagon/topi/test_avg_pool2d_slice.py
@@ -18,7 +18,6 @@
 import pytest
 import numpy as np
 from typing import *
-import collections
 
 from tvm import te
 import tvm.testing
@@ -27,7 +26,14 @@ from tvm.contrib.hexagon.session import Session
 import tvm.topi.hexagon.slice_ops as sl
 
 from ..infrastructure import allocate_hexagon_array, transform_numpy
-from ..pytest_util import get_multitest_ids
+from ..pytest_util import (
+    get_multitest_ids,
+    create_populated_numpy_ndarray,
+    TensorContentConstant,
+    TensorContentRandom,
+    TensorContentDtypeMin,
+    TensorContentDtypeMax,
+)
 
 
 input_layout = tvm.testing.parameter(
@@ -36,8 +42,8 @@
 
 
 @tvm.testing.fixture
-def input_np(input_shape, dtype):
-    return np.random.random(input_shape).astype(dtype)
+def input_np(input_shape, dtype: str, input_tensor_populator):
+    return create_populated_numpy_ndarray(input_shape, dtype, input_tensor_populator)
 
 
 @tvm.testing.fixture
@@ -61,6 +67,7 @@ class TestAvgPool2dSlice:
         "cnt_padded",  # count_include_pad
         "out_layout",  # output_layout
         None,  # dtype
+        None,  # input_tensor_populator
     ]
 
     _multitest_params = [
@@ -74,6 +81,7 @@ class TestAvgPool2dSlice:
             True,
             "nhwc-8h2w32c2w-2d",
             "float16",
+            TensorContentRandom(),
         ),
         (
             [1, 16, 16, 32],
@@ -85,6 +93,7 @@ class TestAvgPool2dSlice:
             True,
             "nhwc-8h2w32c2w-2d",
             "float16",
+            TensorContentRandom(),
         ),
         (
             [1, 8, 8, 32],
@@ -96,6 +105,7 @@ class TestAvgPool2dSlice:
             True,
             "nhwc-8h2w32c2w-2d",
             "float16",
+            TensorContentRandom(),
         ),
         # Test non-one stride and dilation
         (
             [1, 8, 8, 32],
@@ -108,6 +118,7 @@ class TestAvgPool2dSlice:
             True,
             "nhwc-8h2w32c2w-2d",
             "float16",
+            TensorContentRandom(),
         ),
         (
             [1, 8, 8, 32],
@@ -119,6 +130,7 @@ class TestAvgPool2dSlice:
             True,
             "nhwc-8h2w32c2w-2d",
             "float16",
+            TensorContentRandom(),
         ),
         (
             [1, 8, 8, 32],
@@ -130,6 +142,7 @@ class TestAvgPool2dSlice:
             True,
             "nhwc-8h2w32c2w-2d",
             "float16",
+            TensorContentRandom(),
         ),
         # Test non-zero padding
         (
             [1, 8, 8, 32],
@@ -142,6 +155,7 @@ class TestAvgPool2dSlice:
             True,
             "nhwc-8h2w32c2w-2d",
             "float16",
+            TensorContentRandom(),
         ),
         (
             [1, 8, 8, 32],
@@ -153,6 +167,7 @@ class TestAvgPool2dSlice:
             True,
             "nhwc-8h2w32c2w-2d",
             "float16",
+            TensorContentRandom(),
         ),
         (
             [1, 8, 8, 32],
@@ -164,6 +179,7 @@ class TestAvgPool2dSlice:
             True,
             "nhwc-8h2w32c2w-2d",
             "float16",
+            TensorContentRandom(),
         ),
         (
             [1, 8, 8, 32],
@@ -175,6 +191,7 @@ class TestAvgPool2dSlice:
             True,
             "nhwc-8h2w32c2w-2d",
             "float16",
+            TensorContentRandom(),
         ),
         # Test n11c-1024c-2d layout which will require input and output to have different layout
         (
             [1, 1, 1, 2048],
@@ -187,6 +204,7 @@ class TestAvgPool2dSlice:
             True,
             "n11c-1024c-2d",
             "float16",
+            TensorContentRandom(),
         ),
         (
             [1, 1, 1, 2048],
@@ -198,6 +216,7 @@ class TestAvgPool2dSlice:
             True,
             "n11c-1024c-2d",
             "float16",
+            TensorContentRandom(),
         ),
         (
             [1, 1, 1, 2048],
@@ -209,6 +228,7 @@ class TestAvgPool2dSlice:
             True,
             "n11c-1024c-2d",
             "float16",
+            TensorContentRandom(),
         ),
         (
             [1, 1, 1, 2048],
@@ -220,6 +240,7 @@ class TestAvgPool2dSlice:
             True,
             "n11c-1024c-2d",
             "float16",
+            TensorContentRandom(),
         ),
     ]
 
@@ -236,6 +257,7 @@ class TestAvgPool2dSlice:
         count_include_pad,
         output_layout,
         dtype,
+        input_tensor_populator,
     ) = tvm.testing.parameters(*_multitest_params, ids=_param_ids)
 
     @tvm.testing.fixture
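
For reference, below is a minimal usage sketch (not part of the diff) of the populator types and create_populated_numpy_ndarray helper added in pytest_util.py above. The shape, dtype, and assertions are illustrative only, and the relative import assumes the snippet lives in a module directly under tests/python/contrib/test_hexagon/, analogous to how the modified test file imports the helpers.

# Illustrative sketch only -- not part of the change above.  Assumes it is
# placed in a module under tests/python/contrib/test_hexagon/ so the
# relative import of pytest_util resolves, as in test_avg_pool2d_slice.py.
import numpy as np

from .pytest_util import (
    create_populated_numpy_ndarray,
    TensorContentConstant,
    TensorContentDtypeMax,
    TensorContentRandom,
)

shape = (1, 8, 8, 32)  # hypothetical NHWC input shape
dtype = "float16"

# Every element set to the same constant value.
const_arr = create_populated_numpy_ndarray(shape, dtype, TensorContentConstant(0.5))
assert np.all(const_arr == np.float16(0.5))

# Every element set to the dtype's largest representable value.
max_arr = create_populated_numpy_ndarray(shape, dtype, TensorContentDtypeMax())
assert np.all(max_arr == np.finfo(dtype).max)

# Uniform-random contents, which is what the avg_pool2d test cases request
# via TensorContentRandom() in their parameter tuples.
rand_arr = create_populated_numpy_ndarray(shape, dtype, TensorContentRandom())
assert rand_arr.shape == shape and rand_arr.dtype == np.dtype(dtype)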