Fix some typos (test_fill_any_eqaul, LeraningRate, etc.) (#61800)
co63oc authored Feb 20, 2024
1 parent 0964401 commit 796a71f
Showing 11 changed files with 28 additions and 28 deletions.
2 changes: 1 addition & 1 deletion test/legacy_test/test_fill_any_op.py
@@ -86,7 +86,7 @@ def test_fill_any_version(self):
var.fill_(0)
self.assertEqual(var.inplace_version, 3)

-def test_fill_any_eqaul(self):
+def test_fill_any_equal(self):
with paddle.base.dygraph.guard():
tensor = paddle.to_tensor(
np.random.random((20, 30)).astype(np.float32)
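As context for the assertions in this hunk: Paddle bumps a tensor's inplace_version counter on every successful in-place op such as fill_. A minimal sketch of that behavior, assuming a fresh tensor starts at version 0 (shapes and values are illustrative):

import numpy as np
import paddle

with paddle.base.dygraph.guard():
    var = paddle.to_tensor(np.random.random((20, 30)).astype(np.float32))
    assert var.inplace_version == 0  # assumption: a new tensor starts at 0
    var.fill_(0)                     # one in-place modification ...
    assert var.inplace_version == 1  # ... bumps the counter by one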
2 changes: 1 addition & 1 deletion test/xpu/test_activation_op_xpu.py
@@ -664,7 +664,7 @@ def __init__(self):
self.op_name = 'reciprocal'
self.use_dynamic_create_class = False

-class XPUTestRecipocal(TestActivationOPBase):
+class XPUTestReciprocal(TestActivationOPBase):
def set_case(self):
self.op_type = "reciprocal"
self.dtype = self.in_type
6 changes: 3 additions & 3 deletions test/xpu/test_collective_api_base.py
@@ -606,19 +606,19 @@ def convertbf16(origin):
result1 = []
result2 = []

-def is_empyt_list(x):
+def is_empty_list(x):
if isinstance(x, list) and len(x) == 0:
return True
return False

for i in range(tot_expert):
for arr in output1[i]:
-if is_empyt_list(arr):
+if is_empty_list(arr):
continue
result1.append(arr)
for i in range(tot_expert):
for arr in output2[i]:
-if is_empyt_list(arr):
+if is_empty_list(arr):
continue
result2.append(arr)

10 changes: 5 additions & 5 deletions test/xpu/test_collective_base_xpu.py
@@ -339,12 +339,12 @@ def check_with_place(
np.testing.assert_allclose(tr0_out, need_result1, rtol=0, atol=0)
np.testing.assert_allclose(tr1_out, need_result2, rtol=0, atol=0)
elif col_type == "reduce_slicegather":
-slicesize = input1.shape[0] // 2
-tmp10 = input1[0:slicesize]
-tmp11 = input2[0:slicesize]
+slice_size = input1.shape[0] // 2
+tmp10 = input1[0:slice_size]
+tmp11 = input2[0:slice_size]
need_result1 = np.concatenate((tmp10, tmp11), axis=1)
-tmp20 = input1[slicesize:]
-tmp21 = input2[slicesize:]
+tmp20 = input1[slice_size:]
+tmp21 = input2[slice_size:]
need_result2 = np.concatenate((tmp20, tmp21), axis=1)
np.testing.assert_allclose(tr0_out, need_result1)
np.testing.assert_allclose(tr1_out, need_result2)
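The renamed slice_size variable above builds the expected output of reduce_slicegather: each rank keeps one row-slice of every input and gathers the slices along axis 1. A minimal numpy sketch of the same construction with toy arrays (shapes are illustrative):

import numpy as np

input1 = np.arange(8).reshape(4, 2)      # stand-in for rank 0's input
input2 = np.arange(8, 16).reshape(4, 2)  # stand-in for rank 1's input

slice_size = input1.shape[0] // 2
# Rank 0's expected result: top halves of both inputs, side by side.
need_result1 = np.concatenate((input1[:slice_size], input2[:slice_size]), axis=1)
# Rank 1's expected result: bottom halves of both inputs.
need_result2 = np.concatenate((input1[slice_size:], input2[slice_size:]), axis=1)
print(need_result1.shape, need_result2.shape)  # (2, 4) (2, 4)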
2 changes: 1 addition & 1 deletion test/xpu/test_fill_any_op_xpu.py
@@ -88,7 +88,7 @@ def test_fill_any_version(self):
var.fill_(0)
self.assertEqual(var.inplace_version, 3)

-def test_fill_any_eqaul(self):
+def test_fill_any_equal(self):
with paddle.base.dygraph.guard():
tensor = paddle.to_tensor(
np.random.random((20, 30)).astype(np.float32)
12 changes: 6 additions & 6 deletions test/xpu/test_matmul_op_xpu.py
@@ -297,23 +297,23 @@ def dynamic_create_class(self):
for dims in xpu_support_dims_list:
dim_X = dims[0]
dim_Y = dims[1]
-for transose_x in [True, False]:
-for transose_y in [True, False]:
+for transpose_x in [True, False]:
+for transpose_y in [True, False]:
for batch in batch_size:
no_need_check_grad = False
if batch >= 5:
no_need_check_grad = True
class_name = 'TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}_batch_{}'.format(
-dim_X, dim_Y, transose_x, transose_y, batch
+dim_X, dim_Y, transpose_x, transpose_y, batch
)
shape_x, shape_y = generate_compatible_shapes(
-dim_X, dim_Y, transose_x, transose_y, batch
+dim_X, dim_Y, transpose_x, transpose_y, batch
)
attr_dict = {
'shape_X': shape_x,
'shape_Y': shape_y,
-'transpose_X': transose_x,
-'transpose_Y': transose_y,
+'transpose_X': transpose_x,
+'transpose_Y': transpose_y,
'no_need_check_grad': no_need_check_grad,
'op_type': "matmul",
}
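The transpose_x/transpose_y loop variables renamed above feed dynamically created test classes, one per flag combination. A minimal sketch of how such a name/attribute pair typically becomes a class; the type() call and base class are assumptions about the dynamic-class machinery, not code from this diff:

attr_dict = {
    'transpose_X': True,
    'transpose_Y': False,
    'op_type': "matmul",
}
class_name = 'TestMatMulOp_dimX_2_dim_Y_2_transX_True_transY_False_batch_1'

class TestMatMulBase:  # hypothetical stand-in for the real base test class
    pass

# Build the test class dynamically from the generated name and attributes.
generated = type(class_name, (TestMatMulBase,), attr_dict)
assert generated.__name__ == class_name and generated.transpose_X is True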
2 changes: 1 addition & 1 deletion test/xpu/test_prior_box_op_xpu.py
@@ -201,7 +201,7 @@ def init_test_output(self):
]
idx += 1

-# clip the prior's coordidate such that it is within[0, 1]
+# clip the prior's coordinate such that it is within[0, 1]
if self.clip:
out_boxes = np.clip(out_boxes, 0.0, 1.0)
# set the variance.
4 changes: 2 additions & 2 deletions test/xpu/test_refactor_op_xpu.py
@@ -115,7 +115,7 @@ def setUp(self):
self.__class__.no_need_check_grad = True

self.init_dtype()
-self.init_inputshape()
+self.init_input_shape()
self.init_axis()
self.init_direction()

@@ -145,7 +145,7 @@ def get_output(self):
)
self.sorted_x = np.sort(self.x, kind='heapsort', axis=self.axis)

-def init_inputshape(self):
+def init_input_shape(self):
self.input_shape = (2, 2, 2, 3, 3)

def init_dtype(self):
2 changes: 1 addition & 1 deletion test/xpu/test_sgd_op_xpu.py
@@ -109,7 +109,7 @@ def check_with_place(self, place):
param_array = np.full((height, self.row_numel), 5.0).astype("float32")
param.set(param_array, place)

-# create and initialize LeraningRate Variable
+# create and initialize LearningRate Variable
lr = scope.var('LearningRate').get_tensor()
lr_array = np.full((1), 2.0).astype("float32")
lr.set(lr_array, place)
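The LearningRate tensor initialized above drives a plain SGD step, param_out = param - lr * grad. A minimal numpy sketch of the expected update, reusing the test's fill value of 5.0 and learning rate 2.0 (the gradient here is a hypothetical all-ones array):

import numpy as np

height, row_numel = 10, 12  # illustrative sizes
param = np.full((height, row_numel), 5.0).astype("float32")
lr = np.full((1,), 2.0).astype("float32")
grad = np.ones_like(param)  # hypothetical gradient

param_out = param - lr * grad  # plain SGD update
assert np.allclose(param_out, 3.0)  # 5.0 - 2.0 * 1.0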
10 changes: 5 additions & 5 deletions test/xpu/test_sigmoid_cross_entropy_with_logits_op_xpu.py
@@ -86,7 +86,7 @@ def init_dtype(self):
class TestSigmoidCrossEntropyWithLogitsOp2(
TestSigmoidCrossEntropyWithLogitsOp
):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label"""
"""Test sigmoid_cross_entropy_with_logit_op with probabilistic label"""

def set_inputs(self):
batch_size = 64
@@ -119,7 +119,7 @@ def set_output(self):
class TestSigmoidCrossEntropyWithLogitsOp3(
TestSigmoidCrossEntropyWithLogitsOp
):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label"""
"""Test sigmoid_cross_entropy_with_logit_op with probabilistic label"""

def set_inputs(self):
batch_size = 64
@@ -148,7 +148,7 @@ def set_output(self):
class TestSigmoidCrossEntropyWithLogitsOp4(
TestSigmoidCrossEntropyWithLogitsOp
):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label"""
"""Test sigmoid_cross_entropy_with_logit_op with probabilistic label"""

def set_inputs(self):
batch_size = 64
@@ -185,7 +185,7 @@ def set_output(self):
class TestSigmoidCrossEntropyWithLogitsOp5(
TestSigmoidCrossEntropyWithLogitsOp
):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label"""
"""Test sigmoid_cross_entropy_with_logit_op with probabilistic label"""

def set_inputs(self):
batch_size = [10, 10]
@@ -278,7 +278,7 @@ def set_output(self):
class TestSigmoidCrossEntropyWithLogitsNorm(
TestSigmoidCrossEntropyWithLogitsOp
):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label"""
"""Test sigmoid_cross_entropy_with_logit_op with probabilistic label"""

def set_inputs(self):
batch_size = [10, 10]
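All the docstrings fixed above describe tests of sigmoid cross entropy with probabilistic (soft) labels z in [0, 1]. A reference numpy sketch of the elementwise loss using the common numerically stable form; this is the textbook formula, not Paddle's internal implementation:

import numpy as np

def sigmoid_cross_entropy_with_logits(x, z):
    # Stable rewrite of -z * log(sigmoid(x)) - (1 - z) * log(1 - sigmoid(x)).
    return np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))

x = np.random.randn(64, 20).astype(np.float32)            # logits
z = np.random.uniform(0, 1, (64, 20)).astype(np.float32)  # soft labels
print(sigmoid_cross_entropy_with_logits(x, z).shape)      # (64, 20)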
4 changes: 2 additions & 2 deletions test/xpu/test_warpctc_op_xpu.py
@@ -116,7 +116,7 @@ def forward_a_sequence(self, softmax_a_sequence, labels_a_sequence):
required_times = labels_a_sequence.shape[0]
old_label = -1
for i in range(labels_a_sequence.shape[0]):
-# two contingous labels with the same value
+# two contiguous labels with the same value
if labels_a_sequence[i, 0] == old_label:
required_times = required_times + 1
old_label = labels_a_sequence[i, 0]
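The loop in the hunk above computes the minimum number of timesteps a valid CTC alignment needs: one per label, plus one extra for every pair of contiguous equal labels, since CTC must emit a blank between repeats. A standalone sketch of that counting, reconstructed from the visible part of the loop:

import numpy as np

def min_required_times(labels_a_sequence):
    # One timestep per label, plus one per contiguous repeat.
    required_times = labels_a_sequence.shape[0]
    old_label = -1
    for i in range(labels_a_sequence.shape[0]):
        if labels_a_sequence[i, 0] == old_label:
            required_times = required_times + 1
        old_label = labels_a_sequence[i, 0]
    return required_times

labels = np.array([[1], [1], [2], [2], [2]])
assert min_required_times(labels) == 8  # 5 labels + 3 contiguous repeats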
@@ -425,7 +425,7 @@ def test_dygraph_with_lod():
paddle.enable_static()

class TestCTCLossAPICase(unittest.TestCase):
-def test_functinal_api(self):
+def test_functional_api(self):
self.dtype = self.in_type
self.place = paddle.XPUPlace(0)
self.batch_size = 4