[CodeStyle] Cleanup useless conditions (part1) (PaddlePaddle#66004)
SigureMo authored and lixcli committed Jul 22, 2024
1 parent a4ca1a5 commit c2a4880
Showing 12 changed files with 52 additions and 134 deletions.
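Most hunks below remove the same kind of dead branching: an if/else whose two branches are identical, so the condition decides nothing and the shared statement can stand on its own. A minimal, self-contained sketch of the before/after shape (hypothetical names, not taken from the changed files):

def normalize_before(value, flag):
    # Before: both branches return the same expression, so `flag` is useless here.
    if flag:
        return str(value).strip()
    else:
        return str(value).strip()


def normalize_after(value):
    # After: keep the duplicated expression once and drop the dead condition.
    return str(value).strip()


assert normalize_before(" 42 ", True) == normalize_after(" 42 ")
assert normalize_before(" 42 ", False) == normalize_after(" 42 ")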
11 changes: 3 additions & 8 deletions test/auto_parallel/semi_auto_parallel_shard_optimizer.py
@@ -109,14 +109,9 @@ def test_adamw_shard_optimizer(self, stage1=False):
                         v, self._mesh, [dist.Replicate()]
                     )
                 else:
-                    if 'w' in k:
-                        opt._accumulators[key][k] = dist.shard_tensor(
-                            v, self._mesh, [dist.Shard(0)]
-                        )
-                    else:
-                        opt._accumulators[key][k] = dist.shard_tensor(
-                            v, self._mesh, [dist.Shard(0)]
-                        )
+                    opt._accumulators[key][k] = dist.shard_tensor(
+                        v, self._mesh, [dist.Shard(0)]
+                    )
         for _ in range(5):
             loss = linear(batch)
             loss.backward()
11 changes: 3 additions & 8 deletions test/auto_parallel/test_shard_layer_api.py
@@ -48,14 +48,9 @@ def forward(self, x):
 def shard_fn(layer_name, layer, process_mesh):
     if isinstance(layer, nn.Linear):
         for name, param in layer.named_parameters():
-            if 'weight' in name:
-                dist_param = dist.shard_tensor(
-                    param, process_mesh, [dist.Replicate()]
-                )
-            else:
-                dist_param = dist.shard_tensor(
-                    param, process_mesh, [dist.Replicate()]
-                )
+            dist_param = dist.shard_tensor(
+                param, process_mesh, [dist.Replicate()]
+            )
             layer.add_parameter(name, dist_param)


5 changes: 1 addition & 4 deletions test/cinn/conv2d_utils.py
@@ -93,7 +93,4 @@ def conv2d_native(inputs_data, input_shape, filter_size, attrs, is_depthwise):
 
     res_shape = output.shape[1:]
 
-    if is_depthwise:
-        return output, [res_shape]
-    else:
-        return output, [res_shape]
+    return output, [res_shape]
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
 import unittest
 
 import numpy as np
@@ -85,17 +84,14 @@ def assertInputData(self, batch_id, input_data, dev_cnt):
             start_val += 1
 
     def get_places(self):
-        place_list = [base.cpu_places(1)]
-        if base.is_compiled_with_cuda():
-            if os.name == "nt":
-                place_list.extend([base.cuda_places(0)])
-            else:
-                place_list.extend([base.cuda_places(0)])
-        return place_list
+        if paddle.is_compiled_with_cuda():
+            places = base.cuda_places(0)
+        else:
+            places = base.cpu_places(1)
+        return places
 
     def test_main(self):
-        for p in self.get_places():
-            self.run_main_with_place(p)
+        self.run_main_with_place(self.get_places())
 
     def run_main_with_place(self, places):
         with base.scope_guard(base.Scope()):
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
 import unittest
 
 import numpy as np
@@ -105,17 +104,14 @@ def assertInputData(
             start_val += 1
 
     def get_places(self):
-        place_list = [base.cpu_places(1)]
-        if base.is_compiled_with_cuda():
-            if os.name == "nt":
-                place_list.extend([base.cuda_places(0)])
-            else:
-                place_list.extend([base.cuda_places(0)])
-        return place_list
+        if paddle.is_compiled_with_cuda():
+            places = base.cuda_places(0)
+        else:
+            places = base.cpu_places(1)
+        return places
 
     def test_main(self):
-        for p in self.get_places():
-            self.run_main_with_place(p)
+        self.run_main_with_place(self.get_places())
 
     def run_main_with_place(self, places):
         with base.scope_guard(base.Scope()):
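Unlike the purely mechanical collapses elsewhere in this commit, the two get_places hunks above also rework the helper: the dead os.name check goes away, base.is_compiled_with_cuda() becomes paddle.is_compiled_with_cuda(), the places are returned directly instead of wrapped in a list, and test_main calls run_main_with_place once rather than looping. Note that this slightly narrows coverage on CUDA builds, which previously also ran a CPU pass. A rough sketch of the new selection logic, assuming an installed Paddle build (the surrounding test class is omitted):

import paddle
from paddle import base


def get_places():
    # Prefer CUDA places when the wheel was built with CUDA support;
    # otherwise fall back to a single CPU place.
    if paddle.is_compiled_with_cuda():
        return base.cuda_places(0)
    return base.cpu_places(1)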
11 changes: 3 additions & 8 deletions test/deprecated/legacy_test/test_model.py
@@ -590,14 +590,9 @@ def test_dynamic_load(self):
             net = LeNet()
             inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
             labels = [InputSpec([None, 1], 'int64', 'label')]
-            if new_optimizer:
-                optim = paddle.optimizer.Adam(
-                    learning_rate=0.001, parameters=net.parameters()
-                )
-            else:
-                optim = paddle.optimizer.Adam(
-                    learning_rate=0.001, parameters=net.parameters()
-                )
+            optim = paddle.optimizer.Adam(
+                learning_rate=0.001, parameters=net.parameters()
+            )
             model = Model(net, inputs, labels)
             model.prepare(
                 optimizer=optim, loss=CrossEntropyLoss(reduction="sum")
10 changes: 2 additions & 8 deletions test/deprecated/legacy_test/test_prune_deprecated.py
@@ -498,10 +498,7 @@ def test_prune_with_cache_program(self):
                     fetch_list=[loss1.name],
                     use_prune=True,
                 )
-                if i == 0:
-                    self.assertEqual(exe.prune_called_times, 1)
-                else:
-                    self.assertEqual(exe.prune_called_times, 1)
+                self.assertEqual(exe.prune_called_times, 1)
 
     def test_prune_with_cache_program2(self):
         '''
@@ -601,10 +598,7 @@ def test_prune_with_cache_compiled_program(self):
                     fetch_list=[loss1.name],
                     use_prune=True,
                 )
-                if i == 0:
-                    self.assertEqual(exe.prune_called_times, 1)
-                else:
-                    self.assertEqual(exe.prune_called_times, 1)
+                self.assertEqual(exe.prune_called_times, 1)
 
     def test_prune_with_multi_optimizers(self):
         '''
23 changes: 6 additions & 17 deletions test/deprecated/legacy_test/test_warprnnt_op.py
@@ -19,7 +19,6 @@
 
 import paddle
 from paddle import _C_ops
-from paddle.base import core
 from paddle.pir_utils import test_with_pir_api
 
 paddle.enable_static()
@@ -232,14 +231,9 @@ def test_check_output(self):
 
     def test_check_grad(self):
         self.outputs["warprnntgrad"] = self.gradient
-        if core.is_compiled_with_rocm():
-            self.check_grad(
-                ["input"], "loss", numeric_grad_delta=0.009, check_pir=True
-            )
-        else:
-            self.check_grad(
-                ["input"], "loss", numeric_grad_delta=0.009, check_pir=True
-            )
+        self.check_grad(
+            ["input"], "loss", numeric_grad_delta=0.009, check_pir=True
+        )
 
 
 class TestWarpRNNTFP64Op(TestWarpRNNTOp):
@@ -250,14 +244,9 @@ def test_check_output(self):
     def test_check_grad(self):
         self.acts.astype(np.float64)
         self.outputs["warprnntgrad"] = self.gradient
-        if core.is_compiled_with_rocm():
-            self.check_grad(
-                ["input"], "loss", numeric_grad_delta=0.009, check_pir=True
-            )
-        else:
-            self.check_grad(
-                ["input"], "loss", numeric_grad_delta=0.009, check_pir=True
-            )
+        self.check_grad(
+            ["input"], "loss", numeric_grad_delta=0.009, check_pir=True
+        )
 
 
 class TestWarpRNNTOpError(unittest.TestCase):
35 changes: 10 additions & 25 deletions test/ir/pir/test_special_op_translator.py
@@ -51,14 +51,9 @@ def cond_with_inplace():
             running_variance = paddle.to_tensor([1], dtype="float32")
             weight = paddle.to_tensor([2], dtype="float32")
             bias = paddle.to_tensor([1], dtype="float32")
-            if x > y:
-                y = paddle.nn.functional.batch_norm(
-                    x, running_mean, running_variance, weight, bias
-                )
-            else:
-                y = paddle.nn.functional.batch_norm(
-                    x, running_mean, running_variance, weight, bias
-                )
+            y = paddle.nn.functional.batch_norm(
+                x, running_mean, running_variance, weight, bias
+            )
 
         legacy_program = paddle.jit.to_static(
             cond_with_inplace,
@@ -80,24 +75,14 @@ def cond_with_inplace():
             running_variance = paddle.to_tensor([1], dtype="float32")
             weight = paddle.to_tensor([2], dtype="float32")
             bias = paddle.to_tensor([1], dtype="float32")
-            if x > y:
-                if y > z:
-                    z = paddle.nn.functional.batch_norm(
-                        z, running_mean, running_variance, weight, bias
-                    )
-                else:
-                    y = paddle.nn.functional.batch_norm(
-                        x, running_mean, running_variance, weight, bias
-                    )
+            if y > z:
+                z = paddle.nn.functional.batch_norm(
+                    z, running_mean, running_variance, weight, bias
+                )
             else:
-                if y > z:
-                    z = paddle.nn.functional.batch_norm(
-                        z, running_mean, running_variance, weight, bias
-                    )
-                else:
-                    y = paddle.nn.functional.batch_norm(
-                        x, running_mean, running_variance, weight, bias
-                    )
+                y = paddle.nn.functional.batch_norm(
+                    x, running_mean, running_variance, weight, bias
+                )
 
         legacy_program = paddle.jit.to_static(
             cond_with_inplace,
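The nested case above is the subtlest cleanup in this commit: the outer x > y test is useless because both of its branches contain the same inner if/else, so only the inner condition survives. A toy, self-contained illustration of why dropping the outer condition cannot change the result (plain integer updates stand in for the real batch_norm calls):

def before(x, y, z):
    if x > y:
        if y > z:
            z = z + 1
        else:
            y = y + 1
    else:
        if y > z:
            z = z + 1
        else:
            y = y + 1
    return y, z


def after(x, y, z):
    if y > z:
        z = z + 1
    else:
        y = y + 1
    return y, z


# The two versions agree on every combination of inputs.
assert all(
    before(x, y, z) == after(x, y, z)
    for x in range(3)
    for y in range(3)
    for z in range(3)
)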
10 changes: 2 additions & 8 deletions test/legacy_test/test_activation_op.py
@@ -5601,10 +5601,7 @@ def test_check_grad(self):
 create_test_act_fp16_class(TestCELU, check_pir=True)
 create_test_act_fp16_class(TestReciprocal, check_pir=True)
 create_test_act_fp16_class(TestLog, check_prim=True, check_pir=True)
-if core.is_compiled_with_rocm():
-    create_test_act_fp16_class(TestLog2, check_pir=True)
-else:
-    create_test_act_fp16_class(TestLog2, check_pir=True)
+create_test_act_fp16_class(TestLog2, check_pir=True)
 create_test_act_fp16_class(TestLog10, check_pir=True)
 create_test_act_fp16_class(TestLog1p, check_pir=True)
 create_test_act_fp16_class(TestSquare, check_pir=True, check_prim_pir=True)
@@ -5773,10 +5770,7 @@ def test_check_grad(self):
 create_test_act_bf16_class(TestCELU, check_pir=True)
 create_test_act_bf16_class(TestReciprocal, check_pir=True)
 create_test_act_bf16_class(TestLog, check_prim=True, check_pir=True)
-if core.is_compiled_with_rocm():
-    create_test_act_bf16_class(TestLog2, check_pir=True)
-else:
-    create_test_act_bf16_class(TestLog2, check_pir=True)
+create_test_act_bf16_class(TestLog2, check_pir=True)
 create_test_act_bf16_class(TestLog10, check_pir=True)
 create_test_act_bf16_class(TestLog1p, check_pir=True)
 create_test_act_bf16_class(TestSquare, check_pir=True, check_prim_pir=True)
23 changes: 7 additions & 16 deletions test/legacy_test/test_dist_base.py
@@ -1697,22 +1697,13 @@ def check_with_place(
         need_envs={},
         log_name="",
     ):
-        if self._dygraph and (self._gloo_mode or self._nccl2_mode):
-            self.check_with_place_func(
-                model_file=model_file,
-                delta=delta,
-                check_error_log=check_error_log,
-                need_envs=need_envs,
-                log_name=log_name,
-            )
-        else:
-            self.check_with_place_func(
-                model_file=model_file,
-                delta=delta,
-                check_error_log=check_error_log,
-                need_envs=need_envs,
-                log_name=log_name,
-            )
+        self.check_with_place_func(
+            model_file=model_file,
+            delta=delta,
+            check_error_log=check_error_log,
+            need_envs=need_envs,
+            log_name=log_name,
+        )
 
     def check_with_place_func(
         self,
15 changes: 3 additions & 12 deletions test/legacy_test/test_elementwise_mod_op.py
@@ -45,10 +45,7 @@ def setUp(self):
         self.outputs = {'Out': self.out}
 
     def test_check_output(self):
-        if self.attrs['axis'] == -1:
-            self.check_output(check_pir=True)
-        else:
-            self.check_output(check_pir=True)
+        self.check_output(check_pir=True)
 
     def init_input_output(self):
         self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype)
@@ -102,10 +99,7 @@ def init_input_output(self):
         self.out = np.fmod(self.y + np.fmod(self.x, self.y), self.y)
 
     def test_check_output(self):
-        if self.attrs['axis'] == -1:
-            self.check_output(check_pir=True)
-        else:
-            self.check_output(check_pir=True)
+        self.check_output(check_pir=True)
 
 
 @unittest.skipIf(
@@ -121,10 +115,7 @@ def init_input_output(self):
         self.out = np.fmod(self.y + np.fmod(self.x, self.y), self.y)
 
     def test_check_output(self):
-        if self.attrs['axis'] == -1:
-            self.check_output(check_pir=True)
-        else:
-            self.check_output(check_pir=True)
+        self.check_output(check_pir=True)
 
 
 class TestElementwiseModFP16Op_ZeroDim1(TestElementwiseModFP16Op):
