[CodeStyle][Ruff][BUAA][K-[1-10]] Fix Ruff RUF019 diagnostic for 10 files in paddle/ and test/ #67184

Merged: 1 commit, Aug 9, 2024
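Ruff's RUF019 rule (unnecessary-key-check) flags guards of the form
"'key' in mapping and mapping['key']": the membership test plus the second
lookup can be collapsed into a single mapping.get('key'), which returns None
for a missing key and is therefore falsy in exactly the same cases. A minimal
before/after sketch of the pattern, using illustrative names rather than code
from the diffs below:

    op_config = {'inplace': {'Out': 'X'}}

    # Before: two dictionary lookups -- one for membership, one for the value.
    if 'inplace' in op_config and op_config['inplace']:
        print('has a non-empty inplace map')

    # After: dict.get returns None when the key is absent, so one lookup
    # covers both the membership test and the truthiness test.
    if op_config.get('inplace'):
        print('has a non-empty inplace map')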
16 changes: 5 additions & 11 deletions paddle/fluid/operators/generator/generate_op.py
@@ -106,10 +106,7 @@ def process_scalar(op_item, scalar_configs):
 
                 scalar_config = scalar_configs[attr_item['name']]
                 attr_item['is_support_tensor'] = (
-                    True
-                    if 'support_tensor' in scalar_config
-                    and scalar_config['support_tensor']
-                    else False
+                    True if scalar_config.get('support_tensor') else False
                 )
                 attr_item['data_type'] = (
                     scalar_config['data_type']
@@ -144,10 +141,7 @@ def process_int_array(op_item, int_array_configs):
 
                 int_array_config = int_array_configs[attr_item['name']]
                 attr_item['is_support_tensor'] = (
-                    True
-                    if 'support_tensor' in int_array_config
-                    and int_array_config['support_tensor']
-                    else False
+                    True if int_array_config.get('support_tensor') else False
                 )
                 attr_item['data_type'] = (
                     data_type_map[int_array_config['data_type']]
@@ -235,7 +229,7 @@ def get_param_list_alias(param_list, args_map):
 def update_common_params_name(
     op_item, args_name_map, scalar_configs, int_array_configs
 ):
-    if 'inplace' in op_item and op_item['inplace']:
+    if op_item.get('inplace'):
         inplace_map = {}
         for key, val in op_item['inplace'].items():
             if key in args_map:
@@ -244,11 +238,11 @@ def update_common_params_name(
                 val = args_map[val]
             inplace_map[key] = val
         op_item['inplace'] = inplace_map
-    if 'no_need_buffer' in op_item and op_item['no_need_buffer']:
+    if op_item.get('no_need_buffer'):
         op_item['no_need_buffer'] = get_param_list_alias(
             op_item['no_need_buffer'], args_map
         )
-    if 'data_transform' in op_item and op_item['data_transform']:
+    if op_item.get('data_transform'):
         data_trans_item = op_item['data_transform']
         if 'skip_transform' in data_trans_item:
             data_trans_item['skip_transform'] = get_param_list_alias(
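Every file in this PR applies the same recipe as the hunks above. A quick
property check of the equivalence the rewrite relies on, run over a
hypothetical key (this snippet is illustrative, not part of the PR):

    # In a boolean context, "'k' in d and d['k']" and "d.get('k')" agree for a
    # missing key, a falsy value, and a truthy value alike.
    for d in ({}, {'support_tensor': None}, {'support_tensor': False},
              {'support_tensor': True}, {'support_tensor': 1}):
        old = bool('support_tensor' in d and d['support_tensor'])
        new = bool(d.get('support_tensor'))
        assert old == new, (d, old, new)
    print('old and new spellings agree in boolean contexts')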
12 changes: 3 additions & 9 deletions paddle/fluid/pir/dialect/op_generator/op_gen.py
@@ -1041,10 +1041,7 @@ def parse_invoke_map(self):
         return None
 
     def parse_data_transform_info(self):
-        if (
-            'data_transform' in self.op_yaml_item
-            and self.op_yaml_item['data_transform']
-        ):
+        if self.op_yaml_item.get('data_transform'):
             data_trans_item = self.op_yaml_item['data_transform']
             return data_trans_item
         return None
@@ -1775,10 +1772,7 @@ def AutoCodeGen(
         if op_kernel_map is not None:
             kernel_func_str = kernel_func_name
             kernel_param_str = '", "'.join(op_kernel_map['param'])
-            if (
-                'data_type' in op_kernel_map
-                and op_kernel_map['data_type']
-            ):
+            if op_kernel_map.get('data_type'):
                 for idx in range(
                     len(op_kernel_map['data_type']['candidates'])
                 ):
@@ -1804,7 +1798,7 @@ def AutoCodeGen(
                 )
             if kernel_key_dtype != "":
                 kernel_key_dtype = '"' + kernel_key_dtype[:-3]
-            if 'backend' in op_kernel_map and op_kernel_map['backend']:
+            if op_kernel_map.get('backend'):
                 kernel_key_backend = '", "'.join(
                     op_kernel_map['backend']['candidates']
                 )
10 changes: 2 additions & 8 deletions python/paddle/base/executor.py
@@ -2885,10 +2885,7 @@ def _run_using_fleet_executor(
             self._add_scope_cache(cache_key, cached_scope)
             if micro_cached_scopes is None:
                 micro_cached_scopes = []
-                if (
-                    "inference_generation" in fleet_opt
-                    and fleet_opt["inference_generation"]
-                ):
+                if fleet_opt.get("inference_generation"):
                     for _ in range(int(fleet_opt["num_micro_batches"])):
                         micro_cached_scopes.append(cached_scope.new_scope())
                 self._add_micro_scopes_cache(cache_key, micro_cached_scopes)
@@ -2957,10 +2954,7 @@ def _run_using_fleet_executor(
         fetch_task.set_program(fetch_program)
 
         micro_scope_list = []
-        if (
-            "inference_generation" in fleet_opt
-            and fleet_opt["inference_generation"]
-        ):
+        if fleet_opt.get("inference_generation"):
             for i in range(int(fleet_opt["num_micro_batches"])):
                 micro_scope_list.append(cached_scope.new_scope())
 
6 changes: 3 additions & 3 deletions python/paddle/distributed/auto_parallel/static/utils.py
@@ -1308,10 +1308,10 @@ def set_var_dist_attr(dist_context, var, dims_mapping, process_mesh, **kwargs):
         raise ValueError(
             f"{process_mesh} must be a instance of ProcessMesh or list, but receive {type(process_mesh)}"
         )
-    if "mark_annotated" in kwargs and kwargs["mark_annotated"]:
+    if kwargs.get("mark_annotated"):
         tensor_dist_attr.mark_annotated("dims_mapping")
         tensor_dist_attr.mark_annotated("process_mesh")
-    if "chunk_id" in kwargs and kwargs["chunk_id"]:
+    if kwargs.get("chunk_id"):
         tensor_dist_attr.chunk_id = kwargs["chunk_id"]
     dist_context.set_tensor_dist_attr_for_program(var, tensor_dist_attr)
     return tensor_dist_attr
@@ -1331,7 +1331,7 @@ def naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
         new_op_dist_attr.set_output_dims_mapping(output_varname, ref_mapping)
 
     new_op_dist_attr.process_mesh = process_mesh
-    if "chunk_id" in kwargs and kwargs["chunk_id"]:
+    if kwargs.get("chunk_id"):
         new_op_dist_attr.chunk_id = kwargs["chunk_id"]
     ctx.set_op_dist_attr_for_program(new_op, new_op_dist_attr)
 
2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/base/runtime_factory.py
@@ -23,7 +23,7 @@ def __init__(self):
 
     def _create_runtime(self, context):
         # add collective && pslib mode
-        if "use_fleet_ps" in context and context["use_fleet_ps"]:
+        if context.get("use_fleet_ps"):
             ps_runtime = TheOnePSRuntime()
             ps_runtime._set_basic_info(context)
             return ps_runtime
12 changes: 3 additions & 9 deletions python/paddle/distributed/launch/main.py
@@ -708,10 +708,7 @@ def launch() -> None:
                 logger.info(
                     "Get best config failed. Currently no config can be run."
                 )
-                if (
-                    "sharding_overlap" in cur_cfg
-                    and cur_cfg["sharding_overlap"]
-                ):
+                if cur_cfg.get("sharding_overlap"):
                     add_overlap_performance(
                         cur_cfg, tuner_cfg, recorder.history
                     )
@@ -811,10 +808,7 @@ def launch() -> None:
                 logger.info(
                     "Get best config failed. Currently no config can be run."
                 )
-                if (
-                    "sharding_overlap" in cur_cfg
-                    and cur_cfg["sharding_overlap"]
-                ):
+                if cur_cfg.get("sharding_overlap"):
                     add_overlap_performance(
                         cur_cfg, tuner_cfg, recorder.history
                     )
@@ -1221,7 +1215,7 @@ def launch() -> None:
             logger.info("Get best config failed, no config can be run.")
 
         # record history
-        if "sharding_overlap" in cur_cfg and cur_cfg["sharding_overlap"]:
+        if cur_cfg.get("sharding_overlap"):
            add_overlap_performance(cur_cfg, tuner_cfg, recorder.history)
 
        recorder.store_history(history_file_path)
2 changes: 1 addition & 1 deletion python/paddle/distributed/ps/utils/ps_factory.py
@@ -42,7 +42,7 @@ def _create_ps_program_builder(self, pass_ctx):
             return globals()['GpuPsProgramBuilder'](pass_ctx)
         elif attrs['is_heter_ps_mode'] and not attrs['is_fl_ps_mode']:
             return globals()['HeterAsyncPsProgramBuilder'](pass_ctx)
-        elif 'is_fl_ps_mode' in attrs and attrs['is_fl_ps_mode']:
+        elif attrs.get('is_fl_ps_mode'):
             return globals()['FlPsProgramBuilder'](pass_ctx)
         elif attrs['ps_mode'] == DistributedMode.SYNC:
             return globals()['CpuSyncPsProgramBuilder'](pass_ctx)
2 changes: 1 addition & 1 deletion python/paddle/static/quantization/quanter.py
@@ -327,7 +327,7 @@ def quant_aware(
     sub_graphs = list(main_graph.all_sub_graphs())
     transform_pass_ops = []
     quant_dequant_ops = []
-    if 'quant_config' in config and config['quant_config']:
+    if config.get('quant_config'):
         transform_pass_ops = config[
             'quant_config'
         ].weight_quant_operation_types
5 changes: 1 addition & 4 deletions python/paddle/static/quantization/quantization_pass.py
@@ -3375,10 +3375,7 @@ def apply(self, graph):
             else:
                 var_names = utils._get_op_input_var_names(op_node)
             for var_name in var_names:
-                if (
-                    var_name in dequant_node_map
-                    and dequant_node_map[var_name]
-                ):
+                if dequant_node_map.get(var_name):
                     in_node = graph._find_node_by_name(
                         op_node.inputs, var_name
                     )
2 changes: 1 addition & 1 deletion test/legacy_test/op_test.py
@@ -3394,7 +3394,7 @@ def check_grad_with_place(
 
         # oneDNN numeric gradient should use CPU kernel
         use_onednn = False
-        if "use_mkldnn" in op_attrs and op_attrs["use_mkldnn"]:
+        if op_attrs.get("use_mkldnn"):
             op_attrs["use_mkldnn"] = False
             use_onednn = True
         if hasattr(self, "attrs"):
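All ten call sites in this diff use the result directly in an if statement or
a conditional expression, so the change is behavior-preserving. One caveat
when applying the rewrite elsewhere (a general note, not something this PR
needs to change): the two spellings only coincide in a boolean context, and a
pure membership test must stay a plain "in" check:

    d = {'flag': False}

    old = 'flag' in d and d['flag']  # False (the stored value)
    new = d.get('flag')              # False here, but None if 'flag' were absent

    # A bare membership test is not a RUF019 case and keeps its meaning:
    if 'flag' in d:                  # True even though d['flag'] is falsy
        print('key present')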