
Commit

[CodeStyle][task 3] enable Ruff PLR1711 rule in python/paddle/base (P…
zrr1999 authored Sep 19, 2023
1 parent 702d4e4 commit caa6d06
Showing 69 changed files with 11 additions and 171 deletions.
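For context, PLR1711 is Ruff's useless-return rule: it flags a bare `return` (or `return None`) that appears as the last statement of a function, since the function would return None at that point anyway. The sketch below is illustrative only; the helper is loosely modeled on the touched code and is not taken from the repository:

```python
def _set_blacklist(*items):
    """Illustrative helper, not from the Paddle codebase."""
    blacklist = set()
    for item in items:
        if not isinstance(item, str):
            raise TypeError("all items must be strings")
        blacklist.add(item)
    return  # flagged by PLR1711: the function falls off the end here anyway
```

Ruff's autofix simply deletes that final statement, which is what almost every hunk below does.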
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -70,7 +70,7 @@ select = [
"PLR0206",
"PLR0402",
"PLR1701",
# "PLR1711", # Confirmation required
"PLR1711",
"PLR1722",
"PLW3301",
]
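With "PLR1711" added to the `select` list above, Ruff now reports useless returns instead of skipping the rule. The deletions in the rest of this commit are behavior-preserving, because a Python function that falls off the end of its body already returns None. A quick self-contained check, independent of the Paddle code:

```python
def explicit():
    return None


def implicit():
    pass


# Both forms return None, so dropping a trailing `return None`
# (or a bare `return`) never changes a function's result.
assert explicit() is None
assert implicit() is None
```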
2 changes: 0 additions & 2 deletions python/paddle/base/core.py
@@ -507,7 +507,6 @@ def _set_prim_forward_blacklist(*args):
raise TypeError("ops set in forward_blacklist must belong to str")
else:
prim_config["forward_blacklist"].add(item)
return


def _set_prim_backward_blacklist(*args):
@@ -516,7 +515,6 @@ def _set_prim_backward_blacklist(*args):
if not isinstance(item, str):
raise TypeError("all items in set must belong to string")
_set_bwd_prim_blacklist(ops)
return


def _set_prim_backward_enabled(value):
2 changes: 0 additions & 2 deletions python/paddle/base/incubate/checkpoint/auto_checkpoint.py
@@ -633,8 +633,6 @@ def _normal_yield(max_epoch_num):
max_epoch_num = sys.maxint
yield from range(0, max_epoch_num)

return


def train_epoch_range(max_epoch_num, save_checkpoint_inter=None):
global g_acp_type
1 change: 0 additions & 1 deletion python/paddle/base/layers/math_op_patch.py
@@ -236,7 +236,6 @@ def place(self):
warnings.warn(
"Variable do not have 'place' interface for static graph mode, try not to use it. None will be returned."
)
return None

def astype(self, dtype):
"""
1 change: 0 additions & 1 deletion python/paddle/check_import_scipy.py
@@ -26,4 +26,3 @@ def check_import_scipy(OsName):
print_info
+ "\nplease download Visual C++ Redistributable from https://support.microsoft.com/en-us/topic/the-latest-supported-visual-c-downloads-2647da03-1eea-4433-9aff-95f26a218cc0"
)
return
@@ -92,8 +92,6 @@ def prim_operator_data_parallel_functor(ctx, src_op):
op_attr.set_input_dims_mapping(grad_var.name, dims_mapping)
ctx.set_op_dist_attr_for_program(allreduce_op, op_attr)

return


class DistributedDefault(DistributedOperatorImplContainer):
def __init__(self, op_type):
1 change: 0 additions & 1 deletion python/paddle/distributed/auto_tuner/recorder.py
@@ -46,7 +46,6 @@ def sort_metric(self, direction, metric_name) -> None:
else float('inf'),
reverse=False,
)
return

def get_best(self, metric, direction) -> Tuple[dict, bool]:
self.sort_metric(direction=direction, metric_name=metric)
2 changes: 0 additions & 2 deletions python/paddle/distributed/communication/stream/all_reduce.py
@@ -68,8 +68,6 @@ def _all_reduce_in_static_mode(tensor, op, group, sync_op, use_calc_stream):
attrs={'ring_id': ring_id, 'use_calc_stream': sync_op},
)

return None


def all_reduce(
tensor, op=ReduceOp.SUM, group=None, sync_op=True, use_calc_stream=False
2 changes: 0 additions & 2 deletions python/paddle/distributed/communication/stream/all_to_all.py
@@ -123,8 +123,6 @@ def _all_to_all_in_static_mode(
paddle.split(out_tensor, nranks, 0)
)

return None


def alltoall(
out_tensor_or_tensor_list,
@@ -69,7 +69,6 @@ def _broadcast_in_static_mode(
'ring_id': ring_id,
},
)
return None


def broadcast(tensor, src, group=None, sync_op=True, use_calc_stream=False):
1 change: 0 additions & 1 deletion python/paddle/distributed/communication/stream/recv.py
@@ -59,7 +59,6 @@ def _recv_in_static_mode(
'use_calc_stream': sync_op,
},
)
return None


def recv(tensor, src=0, group=None, sync_op=True, use_calc_stream=False):
1 change: 0 additions & 1 deletion python/paddle/distributed/communication/stream/reduce.py
@@ -73,7 +73,6 @@ def _reduce_in_static_mode(
'root_id': dst_rank_in_group,
},
)
return None


def reduce(
@@ -99,7 +99,6 @@ def _reduce_scatter_in_static_mode(tensor, tensor_or_tensor_list, group):
'nranks': nranks,
},
)
return None


def reduce_scatter(
2 changes: 0 additions & 2 deletions python/paddle/distributed/communication/stream/scatter.py
@@ -129,8 +129,6 @@ def _scatter_in_static_mode(
},
)

return None


def scatter(
tensor,
1 change: 0 additions & 1 deletion python/paddle/distributed/communication/stream/send.py
@@ -58,7 +58,6 @@ def _send_in_static_mode(
'use_calc_stream': sync_op,
},
)
return None


def send(tensor, dst=0, group=None, sync_op=True, use_calc_stream=False):
2 changes: 0 additions & 2 deletions python/paddle/distributed/communicator.py
@@ -229,14 +229,12 @@ def start_coordinator(self, self_endpoint, trainer_endpoints):
self.communicator_.start_coordinator(
self_endpoint, trainer_endpoints
)
return

def save_fl_strategy(self, mp):
if self.communicator_ is not None:
self.communicator_.save_fl_strategy(mp)
else:
raise ValueError("self.communicator_ is null")
return

def query_fl_clients_info(self):
info_mp = {}
2 changes: 0 additions & 2 deletions python/paddle/distributed/fleet/base/role_maker.py
@@ -497,7 +497,6 @@ def to_string(self):

def _all_gather(self, input, comm_world="worker"):
print("warning: RoleMakerBase does not have all gather worker.")
return None

def _all_reduce(self, input, mode="sum", comm_world="worker"):
"""
@@ -507,7 +506,6 @@ def _all_reduce(self, input, mode="sum", comm_world="worker"):
mode(str): "sum" or "min" or "max"
"""
print("warning: RoleMakerBase does not have all reduce worker.")
return None

def _barrier(self, comm_world):
"""
1 change: 0 additions & 1 deletion python/paddle/distributed/fleet/launch_utils.py
@@ -843,7 +843,6 @@ def direct_start(args):
] + args.training_script_args
proc = subprocess.Popen(cmd)
proc.wait()
return


def get_custom_endpoints(origin_endpoints, offset=0):
11 changes: 0 additions & 11 deletions python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py
@@ -93,7 +93,6 @@ def check_broadcast(block):
< last_sync_comm_op_idx
)
assert last_sync_comm_op_idx < idx
return


def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1):
@@ -254,8 +253,6 @@ def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1):
if idx_gradient_clip_allreduce != -1:
assert idx_gradient_clip_allreduce > idx_last_grad_allreduce

return


def get_valid_op_role(block, insert_idx):
"""
@@ -284,7 +281,6 @@ def insert_sync_calc_op(block, insert_idx, calc_dep_vars):
outputs={'Out': calc_dep_vars},
attrs={OP_ROLE_KEY: op_role},
)
return


def insert_sync_comm_op(block, insert_idx, ring_id, comm_dep_vars):
@@ -339,7 +335,6 @@ def insert_fill_constant_ops(block, insert_idx, fill_constant_vars):
OP_ROLE_KEY: op_role,
},
)
return


def insert_cast_ops(block, insert_idx, cast_ops):
@@ -359,7 +354,6 @@ def insert_cast_ops(block, insert_idx, cast_ops):
OP_ROLE_KEY: op_role,
},
)
return


def insert_allreduce_ops(
@@ -873,8 +867,6 @@ def insert_broadcast_ops(block, insert_idx, ring_id, broadcast2root):
},
)

return


DtypeToSize = {
core.VarDesc.VarType.FP16: 2,
@@ -994,7 +986,6 @@ def add_sync_comm(program, sharding_ring_id):
'op_role': core.op_proto_and_checker_maker.OpRole.Forward,
},
)
return


def save_persistables(exe, dirname, main_program, filename=None):
@@ -1055,8 +1046,6 @@ def sharding_predicate(var):
filename=None,
)

return


def append_naive_sync(block, sync_var, ring_id):
# NOTE (JZ-LIANG) update this to use barrier sync for more elegent logic
@@ -980,7 +980,6 @@ def _split_program(self, block):
].desc.input_arg_names(),
)
)
return

def _prune_main_program(self, block, shard, rings):
"""
@@ -1096,7 +1095,6 @@ def _prune_main_program(self, block, shard, rings):
reserved_x.append(var_name)
op.desc.set_input('X', reserved_x)
block._sync_with_cpp()
return

def _add_broadcast_allreduce(self, block):
"""
@@ -1665,8 +1663,6 @@ def _build_groups(self):
logger.info(f"pure dp ring id: {self.dp_ring_id}")
logger.info("#####" * 6)

return

def _recreate_not_persist_param_as_var(self):
def recreate_not_persist_param_as_var(program):
block = program.global_block()
@@ -959,7 +959,6 @@ def _release_param(

if offload:
param.fw_storage = _device2cpu(param.fw_storage)
return


def _wait_layer(
@@ -67,7 +67,6 @@ def param_hook(tmp_grad):
param.main_grad.add_(tmp_grad)

tmp_grad._clear_data()
return None

return param_hook

1 change: 0 additions & 1 deletion python/paddle/distributed/passes/auto_parallel_sharding.py
@@ -1430,7 +1430,6 @@ def _insert_init_and_broadcast_op(
broadcast_var_dist_attr.dims_mapping,
dist_context,
)
return


def _insert_reduce_op(
2 changes: 0 additions & 2 deletions python/paddle/distributed/passes/ps_trainer_pass.py
@@ -1391,7 +1391,6 @@ def _insert_partA_communicate_op(self, block, idx):
RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE,
},
)
return

def _insert_partB_communicate_op(self, block, idx):
comm_info = f"backward_joint_{2}_{1}@fl_ps"
@@ -1416,7 +1415,6 @@ def _insert_partB_communicate_op(self, block, idx):
RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE,
},
)
return

def _create_var_for_block(self, vars, block):
for var in vars:
1 change: 0 additions & 1 deletion python/paddle/distributed/ps/coordinator.py
@@ -246,7 +246,6 @@ def train_loop(self):
def push_fl_client_info_sync(self, state_info):
str_msg = self.__build_fl_client_info_desc(state_info)
self._client_ptr.push_fl_client_info_sync(str_msg)
return

def pull_fl_strategy(self):
strategy_dict = {}
11 changes: 0 additions & 11 deletions python/paddle/distributed/ps/utils/ps_program_builder.py
@@ -125,7 +125,6 @@ def _build_pserver_programs(self):
add_listen_and_serv_pass.apply(
[self.attrs['_main_server']], [None], self.pass_ctx
)
return


class NuPsProgramBuilder(PsProgramBuilder):
@@ -174,8 +173,6 @@ def _build_trainer_programs(self):
if self.launch_barrier and self.launch_barrier_flag:
wait_server_ready(self.server_endpoints)

return


class CpuSyncPsProgramBuilder(PsProgramBuilder):
def __init__(self, pass_ctx):
@@ -226,8 +223,6 @@ def _build_trainer_programs(self):
if self.launch_barrier and self.launch_barrier_flag:
wait_server_ready(self.server_endpoints)

return


class CpuAsyncPsProgramBuilder(CpuSyncPsProgramBuilder):
def __init__(self, pass_ctx):
@@ -296,8 +291,6 @@ def _build_trainer_programs(self):
if self.launch_barrier and self.launch_barrier_flag:
wait_server_ready(self.server_endpoints)

return


class HeterAsyncPsProgramBuilder(PsProgramBuilder):
def __init__(self, pass_ctx):
@@ -355,8 +348,6 @@ def _build_trainer_programs(self):
if self.launch_barrier and self.launch_barrier_flag:
wait_server_ready(self.server_endpoints)

return

def _build_programs(self):
if self.attrs['is_worker'] or self.attrs['is_heter_worker']:
self._build_trainer_programs()
@@ -458,8 +449,6 @@ def _build_trainer_programs(self):
],
)

return

def _build_pserver_programs(self):
self.loss.block.program = self.attrs['_main_server']

1 change: 0 additions & 1 deletion python/paddle/incubate/distributed/fleet/role_maker.py
@@ -160,7 +160,6 @@ def all_gather(self, input):
return a list of values
"""
print("warning: RoleMakerBase does not have all gather.")
return None

def all_reduce_worker(self, input, output, mode="sum"):
"""
4 changes: 0 additions & 4 deletions python/paddle/incubate/optimizer/recompute.py
@@ -274,8 +274,6 @@ def _append_fill_constant_ops(self, startup_program):
},
)

return

def _insert_async_memcpy_op(
self, insert_idx, src_varname, dst_varname, op_role, dst_place_type
):
@@ -639,8 +637,6 @@ def _offload(self, loss, startup_program=None):
# step 4. verify the correctness
self._check_offload_fetch()

return

def backward(
self,
loss,
1 change: 0 additions & 1 deletion python/paddle/jit/api.py
@@ -821,7 +821,6 @@ def set_property(meta, key, val):
meta.set_strings(key, val)
else:
raise ValueError(f"Note support val type: {type(val)}")
return

with open(filename, 'wb') as f:
meta = paddle.framework.core.Property()
(The remaining changed files are not shown in this view.)
