[CodeStyle][C403][C404] Replace unnecessary-list-comprehension-set/dict #51964
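
The pair of lint rules in the title (flake8-comprehensions C403 and C404) flag a set() or dict() call wrapped around a list comprehension. As a rough sketch of the rewrite pattern, not code taken from this PR (the sample data and names are made up):

    # made-up sample data, only to make the snippet runnable
    endpoints = ["10.0.0.1:6170", "10.0.0.2:6170"]
    pairs = [("lr", 0.1), ("momentum", 0.9)]

    # C403: list comprehension inside set() -> set comprehension
    hosts_before = set([ep.split(':')[0] for ep in endpoints])
    hosts_after = {ep.split(':')[0] for ep in endpoints}

    # C404: list of (key, value) tuples inside dict() -> dict comprehension
    conf_before = dict([(k, v) for k, v in pairs])
    conf_after = {k: v for k, v in pairs}

    assert hosts_before == hosts_after and conf_before == conf_after

The comprehension forms avoid building a throwaway intermediate list, so they are slightly cheaper and easier to read, while producing an identical set or dict.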

16 changes: 4 additions & 12 deletions python/paddle/distributed/fleet/base/role_maker.py
@@ -1060,9 +1060,7 @@ def _ps_env(self):  # each role will execute it
        self._trainers_num = trainers_num
        self._role = role
        self._current_id = current_id
-        self._nodes_num = len(
-            set([x.split(':')[0] for x in self._worker_endpoints])
-        )
+        self._nodes_num = len({x.split(':')[0] for x in self._worker_endpoints})

    def _collective_env(self):
        self._current_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
@@ -1078,9 +1076,7 @@ def _collective_env(self):
        self._non_distributed = True
        self._worker_endpoints = self._worker_endpoints.split(",")
        self._trainers_num = len(self._worker_endpoints)
-        self._nodes_num = len(
-            set([x.split(':')[0] for x in self._worker_endpoints])
-        )
+        self._nodes_num = len({x.split(':')[0] for x in self._worker_endpoints})
        self._local_rank = os.getenv("PADDLE_RANK_IN_NODE")
        self._local_device_ids = os.getenv("PADDLE_LOCAL_DEVICE_IDS")
        self._world_device_ids = os.getenv("PADDLE_WORLD_DEVICE_IDS")
@@ -1206,18 +1202,14 @@ def _user_defined_ps_env(self):
            self._cur_endpoint = self._worker_endpoints[self._current_id]
        elif self._role == Role.SERVER:
            self._cur_endpoint = self._server_endpoints[self._current_id]
-        self._nodes_num = len(
-            set([x.split(':')[0] for x in self._worker_endpoints])
-        )
+        self._nodes_num = len({x.split(':')[0] for x in self._worker_endpoints})

    def _user_defined_collective_env(self):
        self._worker_endpoints = self._kwargs.get("worker_endpoints")
        self._current_id = self._kwargs.get("current_id")
        self._trainers_num = len(self._worker_endpoints)
        self._training_role = Role.WORKER
-        self._nodes_num = len(
-            set([x.split(':')[0] for x in self._worker_endpoints])
-        )
+        self._nodes_num = len({x.split(':')[0] for x in self._worker_endpoints})

    def _generate_role(self):
        """
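
For context on the role_maker.py hunks above: each worker endpoint is an "ip:port" string, so splitting on the colon and collecting the host part into a set leaves one entry per distinct host, and its length is the node count. A minimal standalone sketch with made-up endpoint values (not taken from the PR):

    worker_endpoints = ["10.0.0.1:6170", "10.0.0.1:6171", "10.0.0.2:6170"]
    # two distinct host IPs -> two nodes, regardless of how many workers run per host
    nodes_num = len({ep.split(':')[0] for ep in worker_endpoints})
    assert nodes_num == 2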
@@ -176,7 +176,7 @@ def minimize(
        # NOTE in dygraph mode, the only different between step and minimize is that minimize
        # allow user to customize the parameters for updating on each step

-        input_param_names = set([param.name for param in parameters])
+        input_param_names = {param.name for param in parameters}
        parameters = list(
            filter(
                lambda x: x.name in input_param_names,
@@ -35,7 +35,7 @@ def __init__(

    def setup(self, params_grads, worker_idx, worker_num):
        # param names of all devices
-        self.global_params = set([x[0].name for x in params_grads])
+        self.global_params = {x[0].name for x in params_grads}
        # _param(str) -> device_id(int)
        self.worker_idx = worker_idx
        self.worker_num = worker_num
@@ -907,7 +907,7 @@ def _init_comm(self):

    def _build_shard(self, params_grads, shard_rank, shard_size):
        # step 2: split params
-        self._params = set([x[0].name for x in params_grads])
+        self._params = {x[0].name for x in params_grads}
        self._shard.setup(params_grads, shard_rank, shard_size)

        # step 3: get broadcast vars
4 changes: 2 additions & 2 deletions python/paddle/distributed/launch/plugins/__init__.py
@@ -45,7 +45,7 @@ def process_args(ctx):
def collective_compatible(ctx):
    if 'PADDLE_TRAINER_ENDPOINTS' in ctx.envs:
        eps = ctx.envs['PADDLE_TRAINER_ENDPOINTS'].split(',')
-        hosts = set([h.split(':')[0] for h in eps])
+        hosts = {h.split(':')[0] for h in eps}
        ctx.args.master = eps[0] if ':' in eps[0] else '{}:6768'.format(eps[0])
        ctx.args.nnodes = len(hosts)
        ctx.logger.info(
@@ -54,7 +54,7 @@ def collective_compatible(ctx):

    if 'DISTRIBUTED_TRAINER_ENDPOINTS' in ctx.envs:
        eps = ctx.envs['DISTRIBUTED_TRAINER_ENDPOINTS'].split(',')
-        hosts = set([h.split(':')[0] for h in eps])
+        hosts = {h.split(':')[0] for h in eps}
        ctx.args.master = eps[0]
        ctx.args.nnodes = len(hosts)
        ctx.logger.info(
2 changes: 1 addition & 1 deletion python/paddle/distributed/parallel.py
@@ -1116,7 +1116,7 @@ def train():
        paddle.distributed.barrier(group=group)
        return group

-    node_num = set([i.split(":")[0] for i in parallel_env.trainer_endpoints])
+    node_num = {i.split(":")[0] for i in parallel_env.trainer_endpoints}
    # 3: init gloo context (step 1: httpsever start)
    init_gloo = int(os.getenv("PADDLE_WITH_GLOO", "0"))
    if is_cpu_only or init_gloo or backend == "heter":
@@ -430,7 +430,7 @@ def collect_group(cur_group, grad_var, ring_id, i):

        def op_depend_on_group(op, group):
            vars_ = set(op.input_arg_names + op.output_arg_names)
-            grad_names = set([grad.name for grad in group.gradients])
+            grad_names = {grad.name for grad in group.gradients}
            return len(vars_.intersection(grad_names)) > 0

        for i, op in enumerate(ops):
4 changes: 2 additions & 2 deletions python/paddle/distributed/passes/auto_parallel_sharding.py
@@ -971,7 +971,7 @@ def _group_grads(

        def op_depend_on_group(op, group):
            vars_ = set(op.input_arg_names + op.output_arg_names)
-            var_names = set([var.name for var in group.vars])
+            var_names = {var.name for var in group.vars}
            return len(vars_.intersection(var_names)) > 0

        # analyze groups
@@ -1790,7 +1790,7 @@ def group_param(sharding_info, fuse_size):
class ShardingInfo:
    def __init__(self, group, rank, params_grads, partition_algor):
        self.group = group
-        self.params_grads = dict([(p.name, (p, g)) for p, g in params_grads])
+        self.params_grads = {p.name: (p, g) for p, g in params_grads}
        assert len(self.params_grads) == len(
            set(self.params_grads)
        ), "found duplicated param in params_grads"
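
The ShardingInfo change above is the only C404 case visible in this diff: a dict() call over a list of (key, value) tuples becomes a dict comprehension keyed by parameter name. A minimal sketch showing the two forms build the same mapping, using hypothetical stand-ins for the (param, grad) pairs:

    from collections import namedtuple

    Param = namedtuple("Param", ["name"])  # stand-in for a real parameter object
    params_grads = [(Param("w0"), "w0@GRAD"), (Param("b0"), "b0@GRAD")]

    old_style = dict([(p.name, (p, g)) for p, g in params_grads])
    new_style = {p.name: (p, g) for p, g in params_grads}
    assert old_style == new_style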
2 changes: 1 addition & 1 deletion python/paddle/distributed/transpiler/collective.py
@@ -510,7 +510,7 @@ def __init__(self):
    def _transpile_startup_program(self):
        nodes_num = 0
        if len(self.endpoints) > 1:
-            nodes_num = len(set([x.split(':')[0] for x in self.endpoints]))
+            nodes_num = len({x.split(':')[0] for x in self.endpoints})
        # diffent ip num is multi node
        if nodes_num > 1:
            self.nranks = nodes_num
4 changes: 2 additions & 2 deletions python/paddle/fluid/backward.py
@@ -2207,7 +2207,7 @@ def _get_output_names(cur_block, targets):
    """

    block = targets[0].block if targets else cur_block
-    current_output_names = set([out.name for out in targets])
+    current_output_names = {out.name for out in targets}

    # 1. If `targets` in cur_block or the ancestral block of `cur_block`
    if block.idx == cur_block.idx or _is_ancestor_block(block, cur_block):
@@ -2277,7 +2277,7 @@ def _find_op_path_(
        The forward op path of block corresponding to backward op.
    """

-    input_names = set([inp.name for inp in inputs])
+    input_names = {inp.name for inp in inputs}
    output_names = _get_output_names(block, targets)
    if op_path_dict is None:
        op_path_dict = dict()
@@ -98,7 +98,7 @@ def check_save_inference_model(
        self, model, inputs, gt_out, feed=None, fetch=None
    ):

-        expected_persistable_vars = set([p.name for p in model.parameters()])
+        expected_persistable_vars = {p.name for p in model.parameters()}

        infer_model_prefix = os.path.join(
            self.temp_dir.name, "test_dy2stat_inference/model"
@@ -34,7 +34,7 @@ def gen_match_and_neg_indices(num_prior, gt_lod, neg_lod):
        ids = random.sample([i for i in range(num_prior)], gt_num)
        match_indices[n, ids] = [i for i in range(gt_num)]

-        ret_ids = set([i for i in range(num_prior)]) - set(ids)
+        ret_ids = {i for i in range(num_prior)} - set(ids)
        l = neg_lod[n]
        neg_ids = random.sample(ret_ids, l)
        neg_indices[offset : offset + neg_lod[n], :] = (
2 changes: 1 addition & 1 deletion python/paddle/incubate/autograd/composite_rules.py
@@ -462,7 +462,7 @@ def squeeze2_composite(x, axis):
    if len(axis) == 0:
        dims = set(range(rank))
    else:
-        dims = set([ax % rank for ax in axis])
+        dims = {ax % rank for ax in axis}
    new_shape = []
    for d, s in enumerate(x.shape):
        if not (s == 1 and (d in dims)):
2 changes: 1 addition & 1 deletion python/paddle/static/io.py
@@ -1498,7 +1498,7 @@ def load(program, model_path, executor=None, var_list=None):
                "var_list is required when loading model file saved with [ save_params, save_persistables, save_vars ]"
            )
        program_var_list = program.list_vars()
-        program_var_name_set = set([var.name for var in program_var_list])
+        program_var_name_set = {var.name for var in program_var_list}

        # check all the variable inlcuded in program
        for var in var_list:
2 changes: 1 addition & 1 deletion python/paddle/utils/cpp_extension/extension_utils.py
@@ -1273,7 +1273,7 @@ def regex(content):
        pattern = re.compile(r'PD_BUILD_OP\(([^,\)]+)\)')
        content = re.sub(r'\s|\t|\n', '', content)
        op_name = pattern.findall(content)
-        op_name = set([re.sub('_grad', '', name) for name in op_name])
+        op_name = {re.sub('_grad', '', name) for name in op_name}

        return op_name

2 changes: 1 addition & 1 deletion tools/diff_use_default_grad_op_maker.py
@@ -30,7 +30,7 @@ def generate_spec(filename):

def read_spec(filename):
    with open(filename, 'r') as f:
-        return set([line.strip() for line in f.readlines()])
+        return {line.strip() for line in f.readlines()}


def get_spec_diff(dev_filename, pr_filename):