diff --git a/pyproject.toml b/pyproject.toml
index 4ee9599a6bd181..e3a29fbadcba00 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -39,7 +39,7 @@ select = [
 
     # NumPy-specific rules
    "NPY001",
-    # "NPY003",
+    "NPY003",
 
     # Bugbear
     "B002",
diff --git a/python/paddle/hapi/dynamic_flops.py b/python/paddle/hapi/dynamic_flops.py
index c721bc26ed7661..609f1c917a5ed8 100644
--- a/python/paddle/hapi/dynamic_flops.py
+++ b/python/paddle/hapi/dynamic_flops.py
@@ -128,7 +128,7 @@ def flops(net, input_size, custom_ops=None, print_detail=False):
 
 def count_convNd(m, x, y):
     x = x[0]
-    kernel_ops = np.product(m.weight.shape[2:])
+    kernel_ops = np.prod(m.weight.shape[2:])
     bias_ops = 1 if m.bias is not None else 0
     total_ops = int(y.numel()) * (
         x.shape[1] / m._groups * kernel_ops + bias_ops
@@ -167,7 +167,7 @@ def count_avgpool(m, x, y):
 
 def count_adap_avgpool(m, x, y):
     kernel = np.array(x[0].shape[2:]) // np.array(y.shape[2:])
-    total_add = np.product(kernel)
+    total_add = np.prod(kernel)
     total_div = 1
     kernel_ops = total_add + total_div
     num_elements = y.numel()
diff --git a/python/paddle/hapi/static_flops.py b/python/paddle/hapi/static_flops.py
index 21a4359743685a..5b67049fc4e081 100644
--- a/python/paddle/hapi/static_flops.py
+++ b/python/paddle/hapi/static_flops.py
@@ -124,21 +124,21 @@ def var(self, name):
 
 def count_convNd(op):
     filter_shape = op.inputs("Filter")[0].shape()
-    filter_ops = np.product(filter_shape[1:])
+    filter_ops = np.prod(filter_shape[1:])
     bias_ops = 1 if len(op.inputs("Bias")) > 0 else 0
-    output_numel = np.product(op.outputs("Output")[0].shape()[1:])
+    output_numel = np.prod(op.outputs("Output")[0].shape()[1:])
     total_ops = output_numel * (filter_ops + bias_ops)
     total_ops = abs(total_ops)
     return total_ops
 
 
 def count_leaky_relu(op):
-    total_ops = np.product(op.outputs("Output")[0].shape()[1:])
+    total_ops = np.prod(op.outputs("Output")[0].shape()[1:])
     return total_ops
 
 
 def count_bn(op):
-    output_numel = np.product(op.outputs("Y")[0].shape()[1:])
+    output_numel = np.prod(op.outputs("Y")[0].shape()[1:])
     total_ops = 2 * output_numel
     total_ops = abs(total_ops)
     return total_ops
@@ -146,7 +146,7 @@ def count_bn(op):
 
 def count_linear(op):
     total_mul = op.inputs("Y")[0].shape()[0]
-    numel = np.product(op.outputs("Out")[0].shape()[1:])
+    numel = np.prod(op.outputs("Out")[0].shape()[1:])
     total_ops = total_mul * numel
     total_ops = abs(total_ops)
     return total_ops
@@ -156,10 +156,10 @@ def count_pool2d(op):
     input_shape = op.inputs("X")[0].shape()
     output_shape = op.outputs('Out')[0].shape()
     kernel = np.array(input_shape[2:]) // np.array(output_shape[2:])
-    total_add = np.product(kernel)
+    total_add = np.prod(kernel)
     total_div = 1
     kernel_ops = total_add + total_div
-    num_elements = np.product(output_shape[1:])
+    num_elements = np.prod(output_shape[1:])
     total_ops = kernel_ops * num_elements
     total_ops = abs(total_ops)
     return total_ops
@@ -167,7 +167,7 @@ def count_pool2d(op):
 
 def count_element_op(op):
     input_shape = op.inputs("X")[0].shape()
-    total_ops = np.product(input_shape[1:])
+    total_ops = np.prod(input_shape[1:])
     total_ops = abs(total_ops)
     return total_ops
 
diff --git a/test/autograd/utils.py b/test/autograd/utils.py
index f077f520deff62..bfa311ad4104ac 100644
--- a/test/autograd/utils.py
+++ b/test/autograd/utils.py
@@ -26,7 +26,7 @@
 # Finite Difference Utils
 ##########################################################
 def _product(t):
-    return int(np.product(t))
+    return int(np.prod(t))
 
 
 def _get_item(t, idx):
diff --git a/test/dygraph_to_static/test_cast.py b/test/dygraph_to_static/test_cast.py
index 23e31f2b4cc99c..156d25d7471373 100644
--- a/test/dygraph_to_static/test_cast.py
+++ b/test/dygraph_to_static/test_cast.py
@@ -74,7 +74,7 @@ def prepare(self):
         self.input_shape = (16, 32)
         self.input_dtype = 'float32'
         self.input = (
-            np.random.binomial(4, 0.3, size=np.product(self.input_shape))
+            np.random.binomial(4, 0.3, size=np.prod(self.input_shape))
             .reshape(self.input_shape)
             .astype(self.input_dtype)
         )
@@ -114,7 +114,7 @@ def prepare(self):
         self.input_shape = (1,)
         self.input_dtype = 'float32'
         self.input = (
-            np.random.normal(loc=6, scale=10, size=np.product(self.input_shape))
+            np.random.normal(loc=6, scale=10, size=np.prod(self.input_shape))
             .reshape(self.input_shape)
             .astype(self.input_dtype)
         )
@@ -129,7 +129,7 @@ def prepare(self):
         self.input_shape = (8, 16)
         self.input_dtype = 'bool'
         self.input = (
-            np.random.binomial(2, 0.5, size=np.product(self.input_shape))
+            np.random.binomial(2, 0.5, size=np.prod(self.input_shape))
             .reshape(self.input_shape)
             .astype(self.input_dtype)
         )
@@ -144,7 +144,7 @@ def prepare(self):
         self.input_shape = (8, 32)
         self.input_dtype = 'float32'
         self.input = (
-            np.random.normal(loc=6, scale=10, size=np.product(self.input_shape))
+            np.random.normal(loc=6, scale=10, size=np.prod(self.input_shape))
             .reshape(self.input_shape)
             .astype(self.input_dtype)
         )
diff --git a/test/legacy_test/gradient_checker.py b/test/legacy_test/gradient_checker.py
index 085100540d11f8..d146c22f08cf31 100644
--- a/test/legacy_test/gradient_checker.py
+++ b/test/legacy_test/gradient_checker.py
@@ -25,7 +25,7 @@
 
 
 def _product(t):
-    return int(np.product(t))
+    return int(np.prod(t))
 
 
 def dtype_to_np_dtype(dtype):
diff --git a/test/legacy_test/test_fake_quantize_op.py b/test/legacy_test/test_fake_quantize_op.py
index 8fdfd5f142c781..f3512d5c3ceba8 100644
--- a/test/legacy_test/test_fake_quantize_op.py
+++ b/test/legacy_test/test_fake_quantize_op.py
@@ -312,9 +312,7 @@ def _fake_quantize_moving_average_abs_max(
         self.dtype = dtype
         self.check_output(check_dygraph=False)
         if with_gradient:
-            gradient = [
-                np.ones(input_data.shape) / np.product(input_data.shape)
-            ]
+            gradient = [np.ones(input_data.shape) / np.prod(input_data.shape)]
             self.check_grad(['X'], 'Out', user_defined_grads=gradient)
 
     def test_fake_quantize_moving_average_abs_max(self):
@@ -367,7 +365,7 @@ def _fake_quantize_dequantize_abs_max(
         }
         self.dtype = dtype
         self.check_output(check_dygraph=False)
-        gradient = [np.ones(input_data.shape) / np.product(input_data.shape)]
+        gradient = [np.ones(input_data.shape) / np.prod(input_data.shape)]
         self.check_grad(['X'], 'Out', user_defined_grads=gradient)
 
     def test_fake_quantize_dequantize_abs_max(self):
@@ -424,7 +422,7 @@ def _fake_channel_wise_quantize_dequantize_abs_max(
         self.dtype = dtype
         self.attrs['quant_axis'] = quant_axis
         self.check_output(check_dygraph=False)
-        gradient = [np.ones(input_data.shape) / np.product(input_data.shape)]
+        gradient = [np.ones(input_data.shape) / np.prod(input_data.shape)]
         self.check_grad(['X'], 'Out', user_defined_grads=gradient)
 
     def test_channel_wise_fake_quant_dequant_abs_max(self):
diff --git a/test/legacy_test/test_isfinite_v2_op.py b/test/legacy_test/test_isfinite_v2_op.py
index 5c9faa507897da..adde698e3d4b6c 100644
--- a/test/legacy_test/test_isfinite_v2_op.py
+++ b/test/legacy_test/test_isfinite_v2_op.py
@@ -68,7 +68,7 @@ def np_data_generator(
     for i, v in enumerate(sv_list):
         x_np[i] = v
     ori_shape = x_np.shape
-    x_np = x_np.reshape((np.product(ori_shape),))
+    x_np = x_np.reshape((np.prod(ori_shape),))
     np.random.shuffle(x_np)
     x_np = x_np.reshape(ori_shape)
     result_np = getattr(np, op_str)(x_np)
diff --git a/test/legacy_test/test_lu_op.py b/test/legacy_test/test_lu_op.py
index 7cd92444992909..b875be084fffc2 100644
--- a/test/legacy_test/test_lu_op.py
+++ b/test/legacy_test/test_lu_op.py
@@ -32,7 +32,7 @@ def scipy_lu(A, pivot):
         return scipy.linalg.lu(A, permute_l=not pivot)
     else:
         preshape = shape[:-2]
-        batchsize = np.product(shape) // (shape[-2] * shape[-1])
+        batchsize = np.prod(shape) // (shape[-2] * shape[-1])
         PP = []
         PL = []
         PU = []
@@ -57,7 +57,7 @@ def Pmat_to_perm(Pmat_org, cut):
     shape = Pmat.shape
     rows = shape[-2]
     cols = shape[-1]
-    batchsize = max(1, np.product(shape[:-2]))
+    batchsize = max(1, np.prod(shape[:-2]))
     P = Pmat.reshape(batchsize, rows, cols)
     permmat = []
     for b in range(batchsize):
@@ -85,7 +85,7 @@ def Pmat_to_perm(Pmat_org, cut):
 
 def perm_to_Pmat(perm, dim):
     pshape = perm.shape
-    bs = int(np.product(perm.shape[:-1]).item())
+    bs = int(np.prod(perm.shape[:-1]).item())
     perm = perm.reshape((bs, pshape[-1]))
     oneslst = []
     for i in range(bs):
diff --git a/test/legacy_test/test_lu_unpack_op.py b/test/legacy_test/test_lu_unpack_op.py
index ad5f809614c987..57cabc4872d98f 100644
--- a/test/legacy_test/test_lu_unpack_op.py
+++ b/test/legacy_test/test_lu_unpack_op.py
@@ -32,7 +32,7 @@ def scipy_lu_unpack(A):
         return scipy.linalg.lu(A)
     else:
         preshape = shape[:-2]
-        batchsize = np.product(shape) // (shape[-2] * shape[-1])
+        batchsize = np.prod(shape) // (shape[-2] * shape[-1])
         Plst = []
         Llst = []
         Ulst = []
@@ -62,7 +62,7 @@ def Pmat_to_perm(Pmat_org, cut):
     shape = Pmat.shape
     rows = shape[-2]
     cols = shape[-1]
-    batchsize = max(1, np.product(shape[:-2]))
+    batchsize = max(1, np.prod(shape[:-2]))
     P = Pmat.reshape(batchsize, rows, cols)
     permmat = []
     for b in range(batchsize):
@@ -91,7 +91,7 @@ def Pmat_to_perm(Pmat_org, cut):
 
 def perm_to_Pmat(perm, dim):
     pshape = perm.shape
-    bs = int(np.product(perm.shape[:-1]).item())
+    bs = int(np.prod(perm.shape[:-1]).item())
     perm = perm.reshape((bs, pshape[-1]))
     oneslst = []
     for i in range(bs):
diff --git a/test/legacy_test/test_nn_functional_hot_op.py b/test/legacy_test/test_nn_functional_hot_op.py
index 1144c3062272ad..89a140a1f031ce 100644
--- a/test/legacy_test/test_nn_functional_hot_op.py
+++ b/test/legacy_test/test_nn_functional_hot_op.py
@@ -33,9 +33,9 @@ def setUp(self):
         x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
         x = np.array(x).astype('int32').reshape([sum(x_lod[0])])
 
-        out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32')
+        out = np.zeros(shape=(np.prod(x.shape), depth)).astype('float32')
 
-        for i in range(np.product(x.shape)):
+        for i in range(np.prod(x.shape)):
             out[i, x[i]] = 1.0
 
         self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np}
@@ -55,11 +55,11 @@ def setUp(self):
         x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
         x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1])
 
-        out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype(
+        out = np.zeros(shape=(np.prod(x.shape[:-1]), 1, depth)).astype(
             'float32'
         )
 
-        for i in range(np.product(x.shape)):
+        for i in range(np.prod(x.shape)):
             out[i, 0, x[i]] = 1.0
 
         self.inputs = {'X': (x, x_lod)}
@@ -80,9 +80,9 @@ def setUp(self):
         x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
         x = np.array(x).astype('int32').reshape([sum(x_lod[0])])
 
-        out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32')
+        out = np.zeros(shape=(np.prod(x.shape), depth)).astype('float32')
 
-        for i in range(np.product(x.shape)):
+        for i in range(np.prod(x.shape)):
             out[i, x[i]] = 1.0
 
         self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np}
@@ -102,11 +102,11 @@ def setUp(self):
         x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
         x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1])
 
-        out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype(
+        out = np.zeros(shape=(np.prod(x.shape[:-1]), 1, depth)).astype(
             'float32'
         )
 
-        for i in range(np.product(x.shape)):
+        for i in range(np.prod(x.shape)):
             out[i, 0, x[i]] = 1.0
 
         self.inputs = {'X': (x, x_lod)}
diff --git a/test/legacy_test/test_one_hot_v2_op.py b/test/legacy_test/test_one_hot_v2_op.py
index 40423dcb20f898..2116b2d494dc2f 100644
--- a/test/legacy_test/test_one_hot_v2_op.py
+++ b/test/legacy_test/test_one_hot_v2_op.py
@@ -38,9 +38,9 @@ def setUp(self):
         x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
         x = np.array(x).astype('int32').reshape([sum(x_lod[0])])
 
-        out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32')
+        out = np.zeros(shape=(np.prod(x.shape), depth)).astype('float32')
 
-        for i in range(np.product(x.shape)):
+        for i in range(np.prod(x.shape)):
             out[i, x[i]] = 1.0
 
         self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np}
@@ -62,11 +62,11 @@ def setUp(self):
         x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
         x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1])
 
-        out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype(
+        out = np.zeros(shape=(np.prod(x.shape[:-1]), 1, depth)).astype(
             'float32'
         )
 
-        for i in range(np.product(x.shape)):
+        for i in range(np.prod(x.shape)):
             out[i, 0, x[i]] = 1.0
 
         self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np}
@@ -88,9 +88,9 @@ def setUp(self):
         x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
         x = np.array(x).astype('int32').reshape([sum(x_lod[0])])
 
-        out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32')
+        out = np.zeros(shape=(np.prod(x.shape), depth)).astype('float32')
 
-        for i in range(np.product(x.shape)):
+        for i in range(np.prod(x.shape)):
             out[i, x[i]] = 1.0
 
         self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np}
@@ -112,11 +112,11 @@ def setUp(self):
         x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
         x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1])
 
-        out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype(
+        out = np.zeros(shape=(np.prod(x.shape[:-1]), 1, depth)).astype(
             'float32'
         )
 
-        for i in range(np.product(x.shape)):
+        for i in range(np.prod(x.shape)):
             out[i, 0, x[i]] = 1.0
 
         self.inputs = {'X': (x, x_lod)}
diff --git a/test/legacy_test/test_overlap_add_op.py b/test/legacy_test/test_overlap_add_op.py
index 82c902dfdcaf50..7534e42d5aeff2 100644
--- a/test/legacy_test/test_overlap_add_op.py
+++ b/test/legacy_test/test_overlap_add_op.py
@@ -46,15 +46,15 @@ def overlap_add(x, hop_length, axis=-1):
         reshape_output = True
         if axis == 0:
             target_shape = [seq_length] + list(x.shape[2:])
-            x = x.reshape(n_frames, frame_length, np.product(x.shape[2:]))
+            x = x.reshape(n_frames, frame_length, np.prod(x.shape[2:]))
         else:
             target_shape = list(x.shape[:-2]) + [seq_length]
-            x = x.reshape(np.product(x.shape[:-2]), frame_length, n_frames)
+            x = x.reshape(np.prod(x.shape[:-2]), frame_length, n_frames)
 
     if axis == 0:
         x = x.transpose((2, 1, 0))
 
-    y = np.zeros(shape=[np.product(x.shape[:-2]), seq_length], dtype=x.dtype)
+    y = np.zeros(shape=[np.prod(x.shape[:-2]), seq_length], dtype=x.dtype)
     for i in range(x.shape[0]):
         for frame in range(x.shape[-1]):
             sample = frame * hop_length
diff --git a/test/legacy_test/test_signal.py b/test/legacy_test/test_signal.py
index 464ad427210163..013ea22fe6f51a 100644
--- a/test/legacy_test/test_signal.py
+++ b/test/legacy_test/test_signal.py
@@ -541,15 +541,15 @@ def overlap_add_for_api_test(x, hop_length, axis=-1):
         reshape_output = True
         if axis == 0:
             target_shape = [seq_length] + list(x.shape[2:])
-            x = x.reshape(n_frames, frame_length, np.product(x.shape[2:]))
+            x = x.reshape(n_frames, frame_length, np.prod(x.shape[2:]))
         else:
             target_shape = list(x.shape[:-2]) + [seq_length]
-            x = x.reshape(np.product(x.shape[:-2]), frame_length, n_frames)
+            x = x.reshape(np.prod(x.shape[:-2]), frame_length, n_frames)
 
     if axis == 0:
         x = x.transpose((2, 1, 0))
 
-    y = np.zeros(shape=[np.product(x.shape[:-2]), seq_length], dtype=x.dtype)
+    y = np.zeros(shape=[np.prod(x.shape[:-2]), seq_length], dtype=x.dtype)
     for i in range(x.shape[0]):
         for frame in range(x.shape[-1]):
             sample = frame * hop_length
diff --git a/test/legacy_test/test_tensor.py b/test/legacy_test/test_tensor.py
index bb30cc98d8fb3d..7adeaed67f0fce 100644
--- a/test/legacy_test/test_tensor.py
+++ b/test/legacy_test/test_tensor.py
@@ -175,7 +175,7 @@ def test_int_lod_tensor(self):
         lod_tensor.set_recursive_sequence_lengths([[2, 2]])
 
         lod_v = np.array(lod_tensor)
-        self.assertTrue(np.alltrue(array == lod_v))
+        self.assertTrue(np.all(array == lod_v))
 
         lod = lod_tensor.recursive_sequence_lengths()
         self.assertEqual(2, lod[0][0])
diff --git a/test/legacy_test/test_var_base.py b/test/legacy_test/test_var_base.py
index 026675e45b5189..748ac4ca608ab8 100644
--- a/test/legacy_test/test_var_base.py
+++ b/test/legacy_test/test_var_base.py
@@ -1574,7 +1574,7 @@ def test_numel_normal(self):
         np_x = np.random.random((3, 8, 8))
         x = paddle.to_tensor(np_x, dtype="float64")
         x_actual_numel = x._numel()
-        x_expected_numel = np.product((3, 8, 8))
+        x_expected_numel = np.prod((3, 8, 8))
         self.assertEqual(x_actual_numel, x_expected_numel)
 
     def test_numel_without_holder(self):
diff --git a/test/quantization/imperative_test_utils.py b/test/quantization/imperative_test_utils.py
index 36e931091543f1..12244f1a8bc403 100644
--- a/test/quantization/imperative_test_utils.py
+++ b/test/quantization/imperative_test_utils.py
@@ -47,7 +47,7 @@ def fix_model_dict(model):
             value = np.zeros_like(p_value).astype('float32')
         else:
             value = (
-                np.random.normal(loc=0.0, scale=0.01, size=np.product(p_shape))
+                np.random.normal(loc=0.0, scale=0.01, size=np.prod(p_shape))
                 .reshape(p_shape)
                 .astype('float32')
             )
diff --git a/test/quantization/test_imperative_qat_user_defined.py b/test/quantization/test_imperative_qat_user_defined.py
index bce0dae5e91dd7..300845e09bbf58 100644
--- a/test/quantization/test_imperative_qat_user_defined.py
+++ b/test/quantization/test_imperative_qat_user_defined.py
@@ -174,9 +174,7 @@ def func_quant_aware_training(self):
                 value = np.zeros_like(p_value).astype('float32')
             else:
                 value = (
-                    np.random.normal(
-                        loc=0.0, scale=0.01, size=np.product(p_shape)
-                    )
+                    np.random.normal(loc=0.0, scale=0.01, size=np.prod(p_shape))
                     .reshape(p_shape)
                     .astype('float32')
                 )
diff --git a/test/xpu/test_one_hot_op_xpu.py b/test/xpu/test_one_hot_op_xpu.py
index 322f5f29d2707e..9536a8202919b7 100644
--- a/test/xpu/test_one_hot_op_xpu.py
+++ b/test/xpu/test_one_hot_op_xpu.py
@@ -57,9 +57,9 @@ def set_data(self):
         )
 
         self.out = np.zeros(
-            shape=(np.product(self.x.shape[:-1]), self.depth)
+            shape=(np.prod(self.x.shape[:-1]), self.depth)
         ).astype('float32')
-        for i in range(np.product(self.x.shape)):
+        for i in range(np.prod(self.x.shape)):
            self.out[i, self.x[i]] = 1.0
 
         self.outputs = {'Out': (self.out, self.x_lod)}
@@ -113,7 +113,7 @@ def set_data(self):
         )
 
         self.out = np.zeros(
-            shape=(np.product(self.x.shape[:-1]), self.depth)
+            shape=(np.prod(self.x.shape[:-1]), self.depth)
         ).astype('float32')
 
         self.outputs = {'Out': (self.out, self.x_lod)}
diff --git a/test/xpu/test_one_hot_v2_op_xpu.py b/test/xpu/test_one_hot_v2_op_xpu.py
index f32c3960c6dfc8..e7d0ae440ad82b 100644
--- a/test/xpu/test_one_hot_v2_op_xpu.py
+++ b/test/xpu/test_one_hot_v2_op_xpu.py
@@ -49,11 +49,9 @@ def setUp(self):
         x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
         x = np.array(x).astype('int32').reshape([sum(x_lod[0])])
 
-        out = np.zeros(shape=(np.product(x.shape), depth)).astype(
-            self.dtype
-        )
+        out = np.zeros(shape=(np.prod(x.shape), depth)).astype(self.dtype)
 
-        for i in range(np.product(x.shape)):
+        for i in range(np.prod(x.shape)):
             out[i, x[i]] = 1.0
 
         self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np}
@@ -72,11 +70,11 @@ def setUp(self):
         x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
         x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1])
 
-        out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype(
+        out = np.zeros(shape=(np.prod(x.shape[:-1]), 1, depth)).astype(
             self.dtype
         )
 
-        for i in range(np.product(x.shape)):
+        for i in range(np.prod(x.shape)):
             out[i, 0, x[i]] = 1.0
 
         self.inputs = {'X': (x, x_lod)}
@@ -96,11 +94,9 @@ def setUp(self):
         x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
         x = np.array(x).astype('int32').reshape([sum(x_lod[0])])
 
-        out = np.zeros(shape=(np.product(x.shape), depth)).astype(
-            self.dtype
-        )
+        out = np.zeros(shape=(np.prod(x.shape), depth)).astype(self.dtype)
 
-        for i in range(np.product(x.shape)):
+        for i in range(np.prod(x.shape)):
             out[i, x[i]] = 1.0
 
         self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np}
@@ -116,11 +112,11 @@ def setUp(self):
         x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
         x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1])
 
-        out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype(
+        out = np.zeros(shape=(np.prod(x.shape[:-1]), 1, depth)).astype(
             self.dtype
         )
 
-        for i in range(np.product(x.shape)):
+        for i in range(np.prod(x.shape)):
             out[i, 0, x[i]] = 1.0
 
         self.inputs = {'X': (x, x_lod)}
@@ -135,9 +131,7 @@ def setUp(self):
         x = [np.random.choice([-1, depth]) for i in range(sum(x_lod[0]))]
         x = np.array(x).astype('int32').reshape([sum(x_lod[0])])
 
-        out = np.zeros(shape=(np.product(x.shape), depth)).astype(
-            self.dtype
-        )
+        out = np.zeros(shape=(np.prod(x.shape), depth)).astype(self.dtype)
 
         self.inputs = {'X': (x, x_lod)}
         self.attrs = {'depth': depth, 'allow_out_of_range': True}
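
Note: `np.product` and `np.alltrue` are long-deprecated aliases of `np.prod` and `np.all` (deprecated since NumPy 1.25 and removed in NumPy 2.0), so every replacement above is a drop-in, behavior-preserving rename; un-commenting `"NPY003"` (Ruff's numpy-deprecated-function rule) in pyproject.toml then keeps the aliases from being reintroduced. A minimal sanity check of the equivalence, using an illustrative shape rather than one taken from the tests above:

import numpy as np

shape = (8, 16)  # illustrative shape, not from the tests above
x = np.ones(shape)

# np.prod over a shape tuple yields the element count, the idiom the
# tests rely on (formerly spelled np.product).
assert int(np.prod(shape)) == x.size == 128

# np.all replaces np.alltrue for reducing an elementwise comparison.
assert np.all(x == x.reshape(np.prod(shape)).reshape(shape))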