[CodeStyle][C416][C417] rewrite unnecessary comprehension with function call and use generator instead of map #52140

Merged: 10 commits, Mar 30, 2023
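
For readers unfamiliar with the two flake8-comprehensions rules named in the title: C416 flags a comprehension that merely copies its iterable and suggests the corresponding constructor call, while C417 flags map (usually paired with a lambda) and suggests a comprehension or generator expression instead. The following minimal sketch illustrates both rewrites; the variable names are hypothetical and not taken from this PR.

raw_names = ["alpha", "beta", "gamma"]

# C416: a comprehension that only copies the iterable
names = [n for n in raw_names]                        # flagged
names = list(raw_names)                               # suggested rewrite

# C417: map(lambda ...) over an iterable
lengths = list(map(lambda s: len(s), raw_names))      # flagged
lengths = [len(s) for s in raw_names]                 # suggested rewrite

# C417 inside all()/any(): a generator expression avoids building a list
ok = all(map(lambda s: s.isidentifier(), raw_names))  # flagged
ok = all(s.isidentifier() for s in raw_names)         # suggested rewrite
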
10 changes: 5 additions & 5 deletions pyproject.toml
@@ -39,12 +39,12 @@ select = [
"C408",
"C409",
"C410",
"C411",
# "C413",
# "C414",
# "C411",
"C413",
"C414",
# "C415",
# "C416",
# "C417",
"C416",
"C417",

# Pyupgrade
"UP001",
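
This hunk also enables C413 and C414, which are not named in the PR title. As I understand the flake8-comprehensions rules, C413 targets an unnecessary list() or reversed() call wrapped around sorted(), and C414 targets an unnecessary inner cast such as list() inside another list/set/sorted/tuple call. A small illustrative sketch, not code from this PR:

values = [3, 1, 2]

# C413: redundant wrapper around sorted(), which already returns a new list
ordered = list(sorted(values))                 # flagged
ordered = sorted(values)                       # suggested rewrite
descending = reversed(sorted(values))          # flagged
descending = sorted(values, reverse=True)      # suggested rewrite

# C414: redundant inner call inside another call
ordered = sorted(list(values))                 # flagged
ordered = sorted(values)                       # suggested rewrite
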
4 changes: 2 additions & 2 deletions python/paddle/distributed/auto_parallel/dist_saver.py
@@ -170,8 +170,8 @@ def save_inference_model(self, path, feed_vars, fetch_vars, exe, **kwargs):
global_block = dist_main_prog.global_block()

ops = global_block.ops
feed_vars_names = list(map(lambda x: x.name, feed_vars))
fetch_vars_names = list(map(lambda x: x.name, fetch_vars))
feed_vars_names = [x.name for x in feed_vars]
fetch_vars_names = [x.name for x in fetch_vars]

last_idx = -1
for idx, op in enumerate(ops):
14 changes: 7 additions & 7 deletions python/paddle/distributed/auto_parallel/dist_tensor.py
@@ -39,7 +39,7 @@ def _validate_sizes_and_dist_attr(
):
if not (
isinstance(sizes, (list, tuple))
and all(map(lambda x: isinstance(x, int) and x >= 0, sizes))
and all((isinstance(x, int) and x >= 0 for x in sizes))
):
raise ValueError(
"The sizes must be list or tuple and item in sizes must be non-negative integer, but got {}".format(
@@ -48,7 +48,7 @@ def _validate_sizes_and_dist_attr(
)
if not (
isinstance(dims_mapping, (list, tuple))
and all(map(lambda x: isinstance(x, int) and x >= -1, dims_mapping))
and all((isinstance(x, int) and x >= -1 for x in dims_mapping))
):
raise ValueError(
"The dims_mapping must be list or tuple and item in dims_mapping must >= -1, but got {}".format(
@@ -57,7 +57,7 @@ def _validate_sizes_and_dist_attr(
)
if not (
isinstance(processes, (list, tuple))
and all(map(lambda x: isinstance(x, int) and x >= 0, processes))
and all((isinstance(x, int) and x >= 0 for x in processes))
):
raise ValueError(
"The processes must be list or tuple and item in processes must be integer, but got {}".format(
@@ -66,7 +66,7 @@ def _validate_sizes_and_dist_attr(
)
if not (
isinstance(topology, (list, tuple))
and all(map(lambda x: isinstance(x, int) and x > 0, topology))
and all((isinstance(x, int) and x > 0 for x in topology))
):
raise ValueError(
"The topology must be list or tuple and item in topology must be non-negative integer, but got {}".format(
@@ -162,9 +162,9 @@ def get_local_shard(
len(local_sizes), len(local_offsets)
)

local_end_offsets = list(
map(lambda x: x[0] + x[1], zip(local_offsets, local_sizes))
)
local_end_offsets = [
x[0] + x[1] for x in zip(local_offsets, local_sizes)
]
local_shard = list(zip(local_offsets, local_end_offsets))
return local_shard

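As context for the rewritten get_local_shard lines above, a tiny worked example with hypothetical values shows what the comprehension computes: each offset is paired with its size via zip and summed to get the end offset, and the offsets are then zipped with the end offsets to form (start, end) pairs per dimension.

local_offsets = [0, 4]      # hypothetical values, not taken from the PR
local_sizes = [4, 4]
local_end_offsets = [x[0] + x[1] for x in zip(local_offsets, local_sizes)]  # [4, 8]
local_shard = list(zip(local_offsets, local_end_offsets))                   # [(0, 4), (4, 8)]
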
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/planner.py
@@ -337,7 +337,7 @@ def enum_valid_dist_attr_for_program(
vars = program.global_block().vars

processes = reduce(lambda x, y: x * y, process_mesh_topology)
global_group = [i for i in range(processes)]
global_group = list(range(processes))
global_process_mesh = None
pipeline_process_meshes = None

26 changes: 12 additions & 14 deletions python/paddle/distributed/auto_parallel/reshard.py
@@ -1340,14 +1340,14 @@ def need_reshard(self, dist_tensor, dist_attr, op_input=True, dist_op=None):
if op_input:
op_input_dims_mapping = dist_attr[1]
if all(
map(
lambda x: x,
[
(
x
for x in [
tensor_dims_mapping,
tensor_process_mesh,
op_input_dims_mapping,
op_process_mesh,
],
]
)
):
# judge whether need reshard by dims_mapping
@@ -1379,14 +1379,14 @@ def need_reshard(self, dist_tensor, dist_attr, op_input=True, dist_op=None):
else:
op_output_dims_mapping = dist_attr[1]
if all(
map(
lambda x: x,
[
(
x
for x in [
tensor_dims_mapping,
tensor_process_mesh,
op_output_dims_mapping,
op_process_mesh,
],
]
)
):
if tensor_dims_mapping != op_output_dims_mapping:
@@ -1554,7 +1554,7 @@ def find_op_desc_seq(self, dist_tensor, dist_attr, serial=False):
i += 1

if i == len(has_used):
has_used = list(map(lambda x: False, has_used))
has_used = [False for x in has_used]
to_send_process = process_list[0]
has_used[0] = True
assert (
@@ -1744,11 +1744,9 @@ def parse_op_desc(
if isinstance(op_desc, AllGatherOpDesc): # noqa: F401
if var_name not in self.has_allgather.keys():
self.has_allgather[var_name] = []
if not self.has_allgather[
var_name
] or op_desc.group not in list(
map(lambda x: x[0], self.has_allgather[var_name])
):
if not self.has_allgather[var_name] or op_desc.group not in [
x[0] for x in self.has_allgather[var_name]
]:
if op_desc.is_bool:
# for bool data allgather, cast to int64 -> allgather -> cast bool
out_cast = Inserter.insert_cast_op(
@@ -290,7 +290,7 @@ def _generate_dims_mapping_candidates(
return self._cached_dims_mapping_candidates[key]
candidates = []
dims_mapping = [-1 for i in range(dims_mapping_len)]
dims_list = [i for i in range(process_mesh_len)]
dims_list = list(range(process_mesh_len))
visited = [False for i in range(process_mesh_len)]
self._generate_dims_mapping_candidates_helper(
dims_mapping, dims_list, 0, visited, candidates
@@ -115,7 +115,7 @@ def __init__(self, name, values, default=None):
default = bool(default)
else:
self._is_unknown_type = True
self._indices = [i for i in range(len(values))]
self._indices = list(range(len(values)))
self.values = values

if default is not None and default not in values:
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/utils.py
@@ -1684,7 +1684,7 @@ def _compute_runtime(op_cost, op, vars):
shape = info[
shape_left_boundary + 1 : shape_right_boundary
].split(",")
shape = list(map(lambda x: int(x.strip()), shape))
shape = [int(x.strip()) for x in shape]
dtype_factor = 1
total_static_input_size += reduce(lambda x, y: x * y, shape)
if op.type == "c_embedding":
4 changes: 1 addition & 3 deletions python/paddle/distributed/cloud_utils.py
@@ -87,9 +87,7 @@ def get_cloud_cluster(args_node_ips, args_node_ip, args_port, selected_devices):

if started_port is None:
started_port = 6170
ports = [
x for x in range(started_port, started_port + len(selected_devices))
]
ports = list(range(started_port, started_port + len(selected_devices)))
trainer_endpoints = []
for ip in node_ips:
trainer_endpoints.append(["%s:%d" % (ip, port) for port in ports])
4 changes: 1 addition & 3 deletions python/paddle/distributed/fleet/ascend_utils.py
@@ -129,9 +129,7 @@ def get_cloud_cluster(
device_count = 1

devices_per_proc = [str(x) for x in range(device_count)]
free_ports = [
x for x in range(start_port, start_port + len(devices_per_proc))
]
free_ports = list(range(start_port, start_port + len(devices_per_proc)))

trainer_endpoints = []
for ip in node_ips:
2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/base/util_factory.py
@@ -382,7 +382,7 @@ def _proto_check(self, config):
if paddle.static.io.is_persistable(v)
]
pruned_vars = OrderedDict(pruned_vars)
pruned_vars_name = [name for name in pruned_vars]
pruned_vars_name = list(pruned_vars)
print("persistable vars in pruned program: {}".format(pruned_vars_name))

# feed and fetch op is added in pruned program when pruning, not need to be found in train program
4 changes: 1 addition & 3 deletions python/paddle/distributed/fleet/cloud_utils.py
@@ -75,9 +75,7 @@ def get_cloud_cluster(

if started_port is None:
started_port = 6170
ports = [
x for x in range(started_port, started_port + len(devices_per_proc))
]
ports = list(range(started_port, started_port + len(devices_per_proc)))
trainer_endpoints = []
for ip in node_ips:
trainer_endpoints.append(["%s:%d" % (ip, port) for port in ports])
4 changes: 2 additions & 2 deletions python/paddle/distributed/fleet/elastic/manager.py
@@ -338,7 +338,7 @@ def _host_to_endpoints(
ip = endpoints
port = start_port

ports = [x for x in range(port, port + len(devices_per_proc))]
ports = list(range(port, port + len(devices_per_proc)))
endpoint_list.extend(["%s:%d" % (ip, port) for port in ports])

dist_endpoints = ','.join(endpoint_list)
@@ -360,7 +360,7 @@ def exit(self, completed=False):
self.etcd.cancel_watch(watch)
self.etcd.delete(self.host_path)

hosts = [i for i in self.etcd.get_prefix(self.node_prefix)]
hosts = list(self.etcd.get_prefix(self.node_prefix))
if len(hosts) == 0:
self.etcd.delete_prefix(self.prefix)

4 changes: 1 addition & 3 deletions python/paddle/distributed/fleet/launch.py
@@ -314,9 +314,7 @@ def get_cluster_from_args(args, device_mode, devices_per_proc):
if os.environ.get('FLAGS_START_PORT') is not None:
start_port = int(os.environ.get('FLAGS_START_PORT'))

free_ports = [
x for x in range(start_port, start_port + len(devices_per_proc))
]
free_ports = list(range(start_port, start_port + len(devices_per_proc)))

trainer_endpoints = []
for ip in node_ips:
38 changes: 13 additions & 25 deletions python/paddle/distributed/fleet/launch_utils.py
@@ -951,7 +951,7 @@ def get_device_proc_info(args):
if args.nproc_per_node is None:
devices_per_proc = [0]
else:
devices_per_proc = [x for x in range(0, args.nproc_per_node)]
devices_per_proc = list(range(0, args.nproc_per_node))
else:
raise AssertionError(
"Can't support device_mode:{}, support only cpu|gpu|xpu now.".format(
@@ -1107,20 +1107,14 @@ def get_mapped_cluster_from_args_without_rank_mapping(args, device_mode):
node_rank = node_ips.index(ip)
if os.environ.get('PADDLE_PORT') is not None:
start_port = int(os.getenv("PADDLE_PORT", ""))
free_ports = [
x
for x in range(
start_port, start_port + len(node_ranks[node_rank])
)
]
free_ports = list(
range(start_port, start_port + len(node_ranks[node_rank]))
)
elif os.environ.get('FLAGS_START_PORT') is not None:
start_port = int(os.environ.get('FLAGS_START_PORT'))
free_ports = [
x
for x in range(
start_port, start_port + len(node_ranks[node_rank])
)
]
free_ports = list(
range(start_port, start_port + len(node_ranks[node_rank]))
)
else:
free_ports = find_free_ports(len(node_ranks[node_rank]))
trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])
@@ -1250,20 +1244,14 @@ def get_mapped_cluster_from_args_with_rank_mapping(args, device_mode):
node_rank = node_ips.index(ip)
if os.environ.get('PADDLE_PORT') is not None:
start_port = int(os.getenv("PADDLE_PORT", ""))
free_ports = [
x
for x in range(
start_port, start_port + len(node_ranks[node_rank])
)
]
free_ports = list(
range(start_port, start_port + len(node_ranks[node_rank]))
)
elif os.environ.get('FLAGS_START_PORT') is not None:
start_port = int(os.environ.get('FLAGS_START_PORT'))
free_ports = [
x
for x in range(
start_port, start_port + len(node_ranks[node_rank])
)
]
free_ports = list(
range(start_port, start_port + len(node_ranks[node_rank]))
)
else:
free_ports = find_free_ports(len(node_ranks[node_rank]))
trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])
@@ -196,7 +196,7 @@ def parse_program(
HcomGroupConfig(
name="hcom_group_0",
nranks=fleet.world_size(),
rank_ids=[x for x in range(fleet.world_size())],
rank_ids=list(range(fleet.world_size())),
)
)

@@ -2422,7 +2422,7 @@ def _apply(self):

x_shape = self.op.block.var(self.op.input_arg_names[1]).shape[1:]
out_grad_shape = self.op.block.var(self.op.input_arg_names[0]).shape
assert list(map(lambda x: out_grad_shape[x], perm)) == list(x_shape)
assert [out_grad_shape[x] for x in perm] == list(x_shape)

x_grad = (
core.GEOperatorFactory.create_operator(
@@ -27,7 +27,7 @@ def _obtain_optimizer_parameters_list(optimizer):
for param in group['params']:
parameters_list.append(param)
else:
parameters_list = [param for param in optimizer._parameter_list]
parameters_list = list(optimizer._parameter_list)

return parameters_list

@@ -37,7 +37,7 @@ def _obtain_optimizer_parameters_list(optimizer):
for param in group['params']:
parameters_list.append(param)
else:
parameters_list = [param for param in optimizer._parameter_list]
parameters_list = list(optimizer._parameter_list)

return parameters_list

@@ -337,7 +337,7 @@ def _backward_step(self, input_tensor, output_tensor, output_tensor_grad):
assert len(outputs) == len(output_tensor_grad)
paddle.autograd.backward(
tensors=outputs,
grad_tensors=[t for t in output_tensor_grad],
grad_tensors=list(output_tensor_grad),
)
else:
paddle.autograd.backward(
@@ -438,7 +438,7 @@ def _integration_params(self):

if self.offload:
self._optim._master_weights = self._master_params
cpu_master_params = [p for p in self._master_params.values()]
cpu_master_params = list(self._master_params.values())
for param in cpu_master_params:
size = param._numel() * align[Type.fp32.value]
remaining = size % alignment[self.offload_device]
@@ -79,12 +79,10 @@ def __init__(
else sharding_optimizer
)
assert all(
list(
map(
lambda opt: isinstance(opt, GroupShardedOptimizerStage2),
self._sharding_optimizers,
)
)
[
isinstance(opt, GroupShardedOptimizerStage2)
for opt in self._sharding_optimizers
]
), "Please use GroupShardedOptimizerStage2 optimizer"
self._sync_buffers = sync_buffers
self._auto_refresh_trainable = auto_refresh_trainable
2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/runtime/the_one_ps.py
@@ -1456,7 +1456,7 @@ def _ps_inference_save_inference_model(
generate_vars = self.context[
"user_defined_strategy"
].trainer_desc_configs["stat_var_names"]
generate_vars = [var for var in generate_vars]
generate_vars = list(generate_vars)
remaining_vars = list(
filter(
TheOnePSRuntime.__exclude_vars(sparse_names),
2 changes: 1 addition & 1 deletion python/paddle/distributed/launch/controllers/master.py
@@ -207,7 +207,7 @@ def sync_peers(self, prefix, key, value, size, rank=-1) -> (list, int):
while not self.ctx.status.is_done():
self.client.put(path, value.encode('latin-1'))

result = [i for i in self.client.get_prefix(prefix)]
result = list(self.client.get_prefix(prefix))
result = copy.deepcopy(result)
self.ctx.logger.debug("sync peers {}".format(result))
