[CodeStyle][task 4] enable ruff NPY003 rule in python/paddle/base (P…
enkilee authored Sep 19, 2023
1 parent f5b99e5 commit 48d3630
Showing 20 changed files with 65 additions and 75 deletions.
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -39,7 +39,7 @@ select = [

# NumPy-specific rules
"NPY001",
# "NPY003",
"NPY003",

# Bugbear
"B002",
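Note: NPY003 is ruff's check for deprecated NumPy function aliases; np.product is one such alias, and np.prod is the supported spelling with identical behaviour. A minimal illustrative sketch (the array values are made up, not taken from this diff):

import numpy as np

x = np.arange(1, 5)
# Before: np.product(x)  -- deprecated alias that NPY003 flags (dropped from newer NumPy releases)
# After: the canonical function gives the same result
print(np.prod(x))  # 24

With "NPY003" uncommented in the select list above, a plain ruff check run will report any remaining uses of the deprecated alias.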
4 changes: 2 additions & 2 deletions python/paddle/hapi/dynamic_flops.py
@@ -128,7 +128,7 @@ def flops(net, input_size, custom_ops=None, print_detail=False):

def count_convNd(m, x, y):
x = x[0]
- kernel_ops = np.product(m.weight.shape[2:])
+ kernel_ops = np.prod(m.weight.shape[2:])
bias_ops = 1 if m.bias is not None else 0
total_ops = int(y.numel()) * (
x.shape[1] / m._groups * kernel_ops + bias_ops
@@ -167,7 +167,7 @@ def count_avgpool(m, x, y):

def count_adap_avgpool(m, x, y):
kernel = np.array(x[0].shape[2:]) // np.array(y.shape[2:])
- total_add = np.product(kernel)
+ total_add = np.prod(kernel)
total_div = 1
kernel_ops = total_add + total_div
num_elements = y.numel()
16 changes: 8 additions & 8 deletions python/paddle/hapi/static_flops.py
@@ -124,29 +124,29 @@ def var(self, name):

def count_convNd(op):
filter_shape = op.inputs("Filter")[0].shape()
- filter_ops = np.product(filter_shape[1:])
+ filter_ops = np.prod(filter_shape[1:])
bias_ops = 1 if len(op.inputs("Bias")) > 0 else 0
- output_numel = np.product(op.outputs("Output")[0].shape()[1:])
+ output_numel = np.prod(op.outputs("Output")[0].shape()[1:])
total_ops = output_numel * (filter_ops + bias_ops)
total_ops = abs(total_ops)
return total_ops


def count_leaky_relu(op):
- total_ops = np.product(op.outputs("Output")[0].shape()[1:])
+ total_ops = np.prod(op.outputs("Output")[0].shape()[1:])
return total_ops


def count_bn(op):
- output_numel = np.product(op.outputs("Y")[0].shape()[1:])
+ output_numel = np.prod(op.outputs("Y")[0].shape()[1:])
total_ops = 2 * output_numel
total_ops = abs(total_ops)
return total_ops


def count_linear(op):
total_mul = op.inputs("Y")[0].shape()[0]
- numel = np.product(op.outputs("Out")[0].shape()[1:])
+ numel = np.prod(op.outputs("Out")[0].shape()[1:])
total_ops = total_mul * numel
total_ops = abs(total_ops)
return total_ops
@@ -156,18 +156,18 @@ def count_pool2d(op):
input_shape = op.inputs("X")[0].shape()
output_shape = op.outputs('Out')[0].shape()
kernel = np.array(input_shape[2:]) // np.array(output_shape[2:])
- total_add = np.product(kernel)
+ total_add = np.prod(kernel)
total_div = 1
kernel_ops = total_add + total_div
- num_elements = np.product(output_shape[1:])
+ num_elements = np.prod(output_shape[1:])
total_ops = kernel_ops * num_elements
total_ops = abs(total_ops)
return total_ops


def count_element_op(op):
input_shape = op.inputs("X")[0].shape()
- total_ops = np.product(input_shape[1:])
+ total_ops = np.prod(input_shape[1:])
total_ops = abs(total_ops)
return total_ops

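In these FLOPs counters np.prod is used only to collapse a shape slice into an element count, so the rename is a drop-in change. A small sketch with a hypothetical NCHW output shape:

import numpy as np

output_shape = (8, 64, 28, 28)  # hypothetical batch, channels, height, width
per_sample_elems = int(np.prod(output_shape[1:]))  # 64 * 28 * 28 = 50176
print(per_sample_elems)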
2 changes: 1 addition & 1 deletion test/autograd/utils.py
@@ -26,7 +26,7 @@
# Finite Difference Utils
##########################################################
def _product(t):
- return int(np.product(t))
+ return int(np.prod(t))


def _get_item(t, idx):
8 changes: 4 additions & 4 deletions test/dygraph_to_static/test_cast.py
@@ -74,7 +74,7 @@ def prepare(self):
self.input_shape = (16, 32)
self.input_dtype = 'float32'
self.input = (
- np.random.binomial(4, 0.3, size=np.product(self.input_shape))
+ np.random.binomial(4, 0.3, size=np.prod(self.input_shape))
.reshape(self.input_shape)
.astype(self.input_dtype)
)
@@ -114,7 +114,7 @@ def prepare(self):
self.input_shape = (1,)
self.input_dtype = 'float32'
self.input = (
- np.random.normal(loc=6, scale=10, size=np.product(self.input_shape))
+ np.random.normal(loc=6, scale=10, size=np.prod(self.input_shape))
.reshape(self.input_shape)
.astype(self.input_dtype)
)
@@ -129,7 +129,7 @@ def prepare(self):
self.input_shape = (8, 16)
self.input_dtype = 'bool'
self.input = (
- np.random.binomial(2, 0.5, size=np.product(self.input_shape))
+ np.random.binomial(2, 0.5, size=np.prod(self.input_shape))
.reshape(self.input_shape)
.astype(self.input_dtype)
)
@@ -144,7 +144,7 @@ def prepare(self):
self.input_shape = (8, 32)
self.input_dtype = 'float32'
self.input = (
- np.random.normal(loc=6, scale=10, size=np.product(self.input_shape))
+ np.random.normal(loc=6, scale=10, size=np.prod(self.input_shape))
.reshape(self.input_shape)
.astype(self.input_dtype)
)
2 changes: 1 addition & 1 deletion test/legacy_test/gradient_checker.py
@@ -25,7 +25,7 @@


def _product(t):
- return int(np.product(t))
+ return int(np.prod(t))


def dtype_to_np_dtype(dtype):
8 changes: 3 additions & 5 deletions test/legacy_test/test_fake_quantize_op.py
@@ -312,9 +312,7 @@ def _fake_quantize_moving_average_abs_max(
self.dtype = dtype
self.check_output(check_dygraph=False)
if with_gradient:
- gradient = [
- np.ones(input_data.shape) / np.product(input_data.shape)
- ]
+ gradient = [np.ones(input_data.shape) / np.prod(input_data.shape)]
self.check_grad(['X'], 'Out', user_defined_grads=gradient)

def test_fake_quantize_moving_average_abs_max(self):
@@ -367,7 +365,7 @@ def _fake_quantize_dequantize_abs_max(
}
self.dtype = dtype
self.check_output(check_dygraph=False)
- gradient = [np.ones(input_data.shape) / np.product(input_data.shape)]
+ gradient = [np.ones(input_data.shape) / np.prod(input_data.shape)]
self.check_grad(['X'], 'Out', user_defined_grads=gradient)

def test_fake_quantize_dequantize_abs_max(self):
@@ -424,7 +422,7 @@ def _fake_channel_wise_quantize_dequantize_abs_max(
self.dtype = dtype
self.attrs['quant_axis'] = quant_axis
self.check_output(check_dygraph=False)
- gradient = [np.ones(input_data.shape) / np.product(input_data.shape)]
+ gradient = [np.ones(input_data.shape) / np.prod(input_data.shape)]
self.check_grad(['X'], 'Out', user_defined_grads=gradient)

def test_channel_wise_fake_quant_dequant_abs_max(self):
2 changes: 1 addition & 1 deletion test/legacy_test/test_isfinite_v2_op.py
@@ -68,7 +68,7 @@ def np_data_generator(
for i, v in enumerate(sv_list):
x_np[i] = v
ori_shape = x_np.shape
- x_np = x_np.reshape((np.product(ori_shape),))
+ x_np = x_np.reshape((np.prod(ori_shape),))
np.random.shuffle(x_np)
x_np = x_np.reshape(ori_shape)
result_np = getattr(np, op_str)(x_np)
6 changes: 3 additions & 3 deletions test/legacy_test/test_lu_op.py
@@ -32,7 +32,7 @@ def scipy_lu(A, pivot):
return scipy.linalg.lu(A, permute_l=not pivot)
else:
preshape = shape[:-2]
- batchsize = np.product(shape) // (shape[-2] * shape[-1])
+ batchsize = np.prod(shape) // (shape[-2] * shape[-1])
PP = []
PL = []
PU = []
@@ -57,7 +57,7 @@ def Pmat_to_perm(Pmat_org, cut):
shape = Pmat.shape
rows = shape[-2]
cols = shape[-1]
- batchsize = max(1, np.product(shape[:-2]))
+ batchsize = max(1, np.prod(shape[:-2]))
P = Pmat.reshape(batchsize, rows, cols)
permmat = []
for b in range(batchsize):
@@ -85,7 +85,7 @@ def Pmat_to_perm(Pmat_org, cut):

def perm_to_Pmat(perm, dim):
pshape = perm.shape
- bs = int(np.product(perm.shape[:-1]).item())
+ bs = int(np.prod(perm.shape[:-1]).item())
perm = perm.reshape((bs, pshape[-1]))
oneslst = []
for i in range(bs):
6 changes: 3 additions & 3 deletions test/legacy_test/test_lu_unpack_op.py
@@ -32,7 +32,7 @@ def scipy_lu_unpack(A):
return scipy.linalg.lu(A)
else:
preshape = shape[:-2]
- batchsize = np.product(shape) // (shape[-2] * shape[-1])
+ batchsize = np.prod(shape) // (shape[-2] * shape[-1])
Plst = []
Llst = []
Ulst = []
@@ -62,7 +62,7 @@ def Pmat_to_perm(Pmat_org, cut):
shape = Pmat.shape
rows = shape[-2]
cols = shape[-1]
- batchsize = max(1, np.product(shape[:-2]))
+ batchsize = max(1, np.prod(shape[:-2]))
P = Pmat.reshape(batchsize, rows, cols)
permmat = []
for b in range(batchsize):
@@ -91,7 +91,7 @@ def Pmat_to_perm(Pmat_org, cut):

def perm_to_Pmat(perm, dim):
pshape = perm.shape
- bs = int(np.product(perm.shape[:-1]).item())
+ bs = int(np.prod(perm.shape[:-1]).item())
perm = perm.reshape((bs, pshape[-1]))
oneslst = []
for i in range(bs):
16 changes: 8 additions & 8 deletions test/legacy_test/test_nn_functional_hot_op.py
@@ -33,9 +33,9 @@ def setUp(self):
x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
x = np.array(x).astype('int32').reshape([sum(x_lod[0])])

- out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32')
+ out = np.zeros(shape=(np.prod(x.shape), depth)).astype('float32')

- for i in range(np.product(x.shape)):
+ for i in range(np.prod(x.shape)):
out[i, x[i]] = 1.0

self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np}
@@ -55,11 +55,11 @@ def setUp(self):
x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1])

- out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype(
+ out = np.zeros(shape=(np.prod(x.shape[:-1]), 1, depth)).astype(
'float32'
)

- for i in range(np.product(x.shape)):
+ for i in range(np.prod(x.shape)):
out[i, 0, x[i]] = 1.0

self.inputs = {'X': (x, x_lod)}
@@ -80,9 +80,9 @@ def setUp(self):
x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
x = np.array(x).astype('int32').reshape([sum(x_lod[0])])

- out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32')
+ out = np.zeros(shape=(np.prod(x.shape), depth)).astype('float32')

- for i in range(np.product(x.shape)):
+ for i in range(np.prod(x.shape)):
out[i, x[i]] = 1.0

self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np}
@@ -102,11 +102,11 @@ def setUp(self):
x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1])

- out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype(
+ out = np.zeros(shape=(np.prod(x.shape[:-1]), 1, depth)).astype(
'float32'
)

- for i in range(np.product(x.shape)):
+ for i in range(np.prod(x.shape)):
out[i, 0, x[i]] = 1.0

self.inputs = {'X': (x, x_lod)}
16 changes: 8 additions & 8 deletions test/legacy_test/test_one_hot_v2_op.py
@@ -38,9 +38,9 @@ def setUp(self):
x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
x = np.array(x).astype('int32').reshape([sum(x_lod[0])])

- out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32')
+ out = np.zeros(shape=(np.prod(x.shape), depth)).astype('float32')

- for i in range(np.product(x.shape)):
+ for i in range(np.prod(x.shape)):
out[i, x[i]] = 1.0

self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np}
@@ -62,11 +62,11 @@ def setUp(self):
x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1])

- out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype(
+ out = np.zeros(shape=(np.prod(x.shape[:-1]), 1, depth)).astype(
'float32'
)

- for i in range(np.product(x.shape)):
+ for i in range(np.prod(x.shape)):
out[i, 0, x[i]] = 1.0

self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np}
@@ -88,9 +88,9 @@ def setUp(self):
x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
x = np.array(x).astype('int32').reshape([sum(x_lod[0])])

- out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32')
+ out = np.zeros(shape=(np.prod(x.shape), depth)).astype('float32')

- for i in range(np.product(x.shape)):
+ for i in range(np.prod(x.shape)):
out[i, x[i]] = 1.0

self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np}
@@ -112,11 +112,11 @@ def setUp(self):
x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1])

- out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype(
+ out = np.zeros(shape=(np.prod(x.shape[:-1]), 1, depth)).astype(
'float32'
)

- for i in range(np.product(x.shape)):
+ for i in range(np.prod(x.shape)):
out[i, 0, x[i]] = 1.0

self.inputs = {'X': (x, x_lod)}
6 changes: 3 additions & 3 deletions test/legacy_test/test_overlap_add_op.py
@@ -46,15 +46,15 @@ def overlap_add(x, hop_length, axis=-1):
reshape_output = True
if axis == 0:
target_shape = [seq_length] + list(x.shape[2:])
- x = x.reshape(n_frames, frame_length, np.product(x.shape[2:]))
+ x = x.reshape(n_frames, frame_length, np.prod(x.shape[2:]))
else:
target_shape = list(x.shape[:-2]) + [seq_length]
- x = x.reshape(np.product(x.shape[:-2]), frame_length, n_frames)
+ x = x.reshape(np.prod(x.shape[:-2]), frame_length, n_frames)

if axis == 0:
x = x.transpose((2, 1, 0))

- y = np.zeros(shape=[np.product(x.shape[:-2]), seq_length], dtype=x.dtype)
+ y = np.zeros(shape=[np.prod(x.shape[:-2]), seq_length], dtype=x.dtype)
for i in range(x.shape[0]):
for frame in range(x.shape[-1]):
sample = frame * hop_length
6 changes: 3 additions & 3 deletions test/legacy_test/test_signal.py
@@ -541,15 +541,15 @@ def overlap_add_for_api_test(x, hop_length, axis=-1):
reshape_output = True
if axis == 0:
target_shape = [seq_length] + list(x.shape[2:])
- x = x.reshape(n_frames, frame_length, np.product(x.shape[2:]))
+ x = x.reshape(n_frames, frame_length, np.prod(x.shape[2:]))
else:
target_shape = list(x.shape[:-2]) + [seq_length]
- x = x.reshape(np.product(x.shape[:-2]), frame_length, n_frames)
+ x = x.reshape(np.prod(x.shape[:-2]), frame_length, n_frames)

if axis == 0:
x = x.transpose((2, 1, 0))

- y = np.zeros(shape=[np.product(x.shape[:-2]), seq_length], dtype=x.dtype)
+ y = np.zeros(shape=[np.prod(x.shape[:-2]), seq_length], dtype=x.dtype)
for i in range(x.shape[0]):
for frame in range(x.shape[-1]):
sample = frame * hop_length
2 changes: 1 addition & 1 deletion test/legacy_test/test_tensor.py
@@ -175,7 +175,7 @@ def test_int_lod_tensor(self):
lod_tensor.set_recursive_sequence_lengths([[2, 2]])

lod_v = np.array(lod_tensor)
- self.assertTrue(np.alltrue(array == lod_v))
+ self.assertTrue(np.all(array == lod_v))

lod = lod_tensor.recursive_sequence_lengths()
self.assertEqual(2, lod[0][0])
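np.alltrue is another legacy alias cleaned up here; np.all is the supported form and returns the same result for the element-wise comparison. A minimal sketch with made-up arrays:

import numpy as np

array = np.array([1, 2, 3])
lod_v = np.array([1, 2, 3])
# Before: np.alltrue(array == lod_v)  -- deprecated alias
print(np.all(array == lod_v))  # True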
2 changes: 1 addition & 1 deletion test/legacy_test/test_var_base.py
@@ -1574,7 +1574,7 @@ def test_numel_normal(self):
np_x = np.random.random((3, 8, 8))
x = paddle.to_tensor(np_x, dtype="float64")
x_actual_numel = x._numel()
- x_expected_numel = np.product((3, 8, 8))
+ x_expected_numel = np.prod((3, 8, 8))
self.assertEqual(x_actual_numel, x_expected_numel)

def test_numel_without_holder(self):
2 changes: 1 addition & 1 deletion test/quantization/imperative_test_utils.py
@@ -47,7 +47,7 @@ def fix_model_dict(model):
value = np.zeros_like(p_value).astype('float32')
else:
value = (
- np.random.normal(loc=0.0, scale=0.01, size=np.product(p_shape))
+ np.random.normal(loc=0.0, scale=0.01, size=np.prod(p_shape))
.reshape(p_shape)
.astype('float32')
)