[CodeStyle][task 1] enable Ruff UP032 rule with . except `python/paddle/base` (PaddlePaddle#57409)

* update up032

* update up032

* Update api_gen.py

* Update api_gen.py

* Update sampcd_processor_utils.py
Liyulingyue authored Sep 22, 2023
1 parent 9cd8996 commit 4d093df
Showing 316 changed files with 1,038 additions and 2,770 deletions.
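For context, UP032 is Ruff's pyupgrade-derived rule that rewrites `str.format()` calls into f-strings; per the title, `python/paddle/base` is excluded and handled separately. A minimal sketch of the transformation these files received (names and values invented for illustration):

```python
# Hypothetical example of the UP032 autofix; `api` stands in for
# attributes like `self.api` seen in the generator diffs below.
api = "concat"

# Before: str.format() with positional placeholders.
old = "{} : Output error: the output should not be empty.".format(api)

# After: the equivalent f-string Ruff produces.
new = f"{api} : Output error: the output should not be empty."

assert old == new  # the rewrite is behavior-preserving
```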
4 changes: 1 addition & 3 deletions paddle/phi/api/yaml/generator/api_gen.py
@@ -305,9 +305,7 @@ def gene_output(
             )
         else:
             raise ValueError(
-                "{} : Output error: the output should not be empty.".format(
-                    self.api
-                )
+                f"{self.api} : Output error: the output should not be empty."
             )
 
     return kernel_output, output_names, output_create
4 changes: 1 addition & 3 deletions paddle/phi/api/yaml/generator/backward_api_gen.py
@@ -237,9 +237,7 @@ def gene_output(
 
         else:
             raise ValueError(
-                "{} : Output error: the output should not be empty.".format(
-                    self.api
-                )
+                f"{self.api} : Output error: the output should not be empty."
             )
 
     return kernel_output, output_names, output_create
8 changes: 2 additions & 6 deletions paddle/phi/api/yaml/generator/dist_api_gen.py
@@ -595,9 +595,7 @@ def generate_output_creation_code(self) -> str:
             )
         else:
             raise ValueError(
-                "{} : Output error: the output should not be empty.".format(
-                    self.api
-                )
+                f"{self.api} : Output error: the output should not be empty."
             )
 
         return output_creation_code
@@ -1073,9 +1071,7 @@ def generate_reshard_partial_out_to_replicated_code(self) -> str:
                 self.vector_output_size_assertion_check()
             else:
                 raise ValueError(
-                    "{} : Output error: the output should not be empty.".format(
-                        self.api
-                    )
+                    f"{self.api} : Output error: the output should not be empty."
                 )
         else:
             reshard_p2r_code = (
8 changes: 2 additions & 6 deletions paddle/phi/api/yaml/generator/dist_bw_api_gen.py
@@ -173,9 +173,7 @@ def generate_output_creation_code(self) -> str:
                 self.vector_output_size_assertion_check()
             else:
                 raise ValueError(
-                    "{} : Output error: the output should not be empty.".format(
-                        self.api
-                    )
+                    f"{self.api} : Output error: the output should not be empty."
                 )
 
         return output_creation_code
@@ -249,9 +247,7 @@ def generate_reshard_output_code(self):
                 self.vector_output_size_assertion_check()
             else:
                 raise ValueError(
-                    "{} : Output error: the output should not be empty.".format(
-                        self.api
-                    )
+                    f"{self.api} : Output error: the output should not be empty."
                 )
         else:
             # do nothing
4 changes: 1 addition & 3 deletions paddle/phi/api/yaml/generator/sparse_api_gen.py
@@ -88,9 +88,7 @@ def gene_output(
 
         else:
             raise ValueError(
-                "{} : Output error: the output should not be empty.".format(
-                    self.api
-                )
+                f"{self.api} : Output error: the output should not be empty."
             )
 
     return kernel_output, output_names, output_create
4 changes: 1 addition & 3 deletions paddle/phi/api/yaml/generator/sparse_bw_api_gen.py
@@ -98,9 +98,7 @@ def gene_output(
 
         else:
             raise ValueError(
-                "{} : Output error: the output should not be empty.".format(
-                    self.api
-                )
+                f"{self.api} : Output error: the output should not be empty."
             )
 
     return kernel_output, output_names, output_create
4 changes: 1 addition & 3 deletions paddle/phi/api/yaml/generator/strings_api_gen.py
@@ -107,9 +107,7 @@ def gene_output(
 
         else:
             raise ValueError(
-                "{} : Output error: the output should not be empty.".format(
-                    self.api
-                )
+                f"{self.api} : Output error: the output should not be empty."
             )
 
     return kernel_output, output_names, output_create
@@ -445,13 +445,13 @@ def write_decl_impl(
 
 
 def write_main_header(forward_impl, backward_impl):
-    main_header_content = '''
+    main_header_content = f'''
 #pragma once
-#ifdef {}
+#ifdef {ENABLE_MACRO}
-#include "{}"
-#include "{}"
+#include "{forward_impl}"
+#include "{backward_impl}"
 #include "paddle/phi/common/data_type.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
@@ -528,11 +528,7 @@ def write_main_header(forward_impl, backward_impl):
 #include "./cutlass_backward.h"
 #endif
-'''.format(
-    ENABLE_MACRO,
-    forward_impl,
-    backward_impl,
-)
+'''
 
 path = Path(args.dst_path) / "autogen"
 os.makedirs(path, exist_ok=True)
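The hunk above is the one place in this commit where the conversion needs real care: the template is a multi-line string, so once it becomes an f-string, any brace intended literally must be doubled. A minimal, self-contained sketch under assumed values (the name `ENABLE_MACRO` comes from the diff; its value and the paths are hypothetical):

```python
# Sketch only: assumed stand-ins for module-level names the real generator defines.
ENABLE_MACRO = "PADDLE_WITH_MEMORY_EFFICIENT_ATTENTION"  # assumed value
forward_impl = "autogen/cutlass_forward.h"    # hypothetical path
backward_impl = "autogen/cutlass_backward.h"  # hypothetical path

main_header_content = f'''
#pragma once
#ifdef {ENABLE_MACRO}
#include "{forward_impl}"
#include "{backward_impl}"
namespace phi {{}}  // a literal brace must be written as {{ or }} in an f-string
#endif
'''
print(main_header_content)
```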
324 changes: 0 additions & 324 deletions pyproject.toml

Large diffs are not rendered by default.

20 changes: 4 additions & 16 deletions python/paddle/amp/accuracy_compare.py
@@ -705,35 +705,23 @@ def compare_accuracy(
         )
 
     for filename in sorted(workerlog_filenames):
-        print(
-            "-- [Step 1/4] Parsing FP32 logs under {}/{}".format(
-                dump_path, filename
-            )
-        )
+        print(f"-- [Step 1/4] Parsing FP32 logs under {dump_path}/{filename}")
         fp32_tensor_info_list, fp32_has_tensor_name = parse_log(
             dump_path, filename, None
         )
         print(
-            "-- [Step 2/4] Parsing FP16 logs under {}/{}".format(
-                another_dump_path, filename
-            )
+            f"-- [Step 2/4] Parsing FP16 logs under {another_dump_path}/{filename}"
         )
         fp16_tensor_info_list, fp16_has_tensor_name = parse_log(
             another_dump_path, filename, None
         )
 
-        print(
-            "-- [Step 3/4] Merge FP32 and FP16 tensor info for {}".format(
-                filename
-            )
-        )
+        print(f"-- [Step 3/4] Merge FP32 and FP16 tensor info for {filename}")
         mp_tensor_info_list = merge_tensor_info_list(
             fp32_tensor_info_list, fp16_tensor_info_list, grad_scale
         )
         print(
-            "-- [Step 4/4] Add worksheet for mixed precision tensor info of {}".format(
-                filename
-            )
+            f"-- [Step 4/4] Add worksheet for mixed precision tensor info of {filename}"
         )
         excel_writer.add_worksheet(
             mp_tensor_info_list,
4 changes: 2 additions & 2 deletions python/paddle/audio/backends/init_backend.py
@@ -83,9 +83,9 @@ def list_available_backends() -> List[str]:
     version = paddleaudio.__version__
     if not _check_version(version):
         err_msg = (
-            "the version of paddleaudio installed is {},\n"
+            f"the version of paddleaudio installed is {version},\n"
             "please ensure the paddleaudio >= 1.0.2."
-        ).format(version)
+        )
         raise ImportError(err_msg)
     backends = paddleaudio.backends.list_audio_backends()
     backends.append("wave_backend")
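This hunk shows the implicit-concatenation variant of the fix: in a parenthesized group of adjacent string literals, only the fragment that interpolates a value gains the `f` prefix, and the trailing `.format(...)` call drops off. A small sketch with invented values:

```python
version = "1.0.1"  # hypothetical installed paddleaudio version
err_msg = (
    f"the version of paddleaudio installed is {version},\n"
    "please ensure the paddleaudio >= 1.0.2."
)
# Adjacent literals concatenate at compile time, f-prefixed or not.
assert err_msg.startswith("the version of paddleaudio installed is 1.0.1")
```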
4 changes: 2 additions & 2 deletions python/paddle/audio/backends/wave_backend.py
@@ -28,9 +28,9 @@ def _error_message():
     warn_msg = (
         "only PCM16 WAV supportted. \n"
         "if want support more other audio types, please "
-        "manually installed (usually with `pip install {}`). \n "
+        f"manually installed (usually with `pip install {package}`). \n "
         "and use paddle.audio.backends.set_backend('soundfile') to set audio backend"
-    ).format(package)
+    )
     return warn_msg
 
 
2 changes: 1 addition & 1 deletion python/paddle/batch.py
@@ -67,7 +67,7 @@ def batch_reader():
     if batch_size <= 0:
         raise ValueError(
             "batch_size should be a positive integer value, "
-            "but got batch_size={}".format(batch_size)
+            f"but got batch_size={batch_size}"
         )
 
     return batch_reader
4 changes: 1 addition & 3 deletions python/paddle/dataset/common.py
@@ -91,9 +91,7 @@ def download(url, module_name, md5sum, save_name=None):
                 retry += 1
             else:
                 raise RuntimeError(
-                    "Cannot download {} within retry limit {}".format(
-                        url, retry_limit
-                    )
+                    f"Cannot download {url} within retry limit {retry_limit}"
                 )
         sys.stderr.write(
             f"Cache file {filename} not found, downloading {url} \n"
4 changes: 2 additions & 2 deletions python/paddle/device/cuda/__init__.py
@@ -476,9 +476,9 @@ def get_device_properties(device=None):
             )
         else:
             raise ValueError(
-                "The device type {} is not expected. Because paddle.device.cuda."
+                f"The device type {device} is not expected. Because paddle.device.cuda."
                 "get_device_properties only support int, str or paddle.CUDAPlace. "
-                "Please input appropriate device again!".format(device)
+                "Please input appropriate device again!"
             )
     else:
         device_id = -1
16 changes: 4 additions & 12 deletions python/paddle/distributed/auto_parallel/interface.py
@@ -70,9 +70,7 @@ def shard_tensor(x, process_mesh=None, shard_spec=None):
     if process_mesh is not None:
         assert isinstance(
             process_mesh, core.ProcessMesh
-        ), "Argument process_mesh {} is not an instance of ProcessMesh".format(
-            process_mesh
-        )
+        ), f"Argument process_mesh {process_mesh} is not an instance of ProcessMesh"
     else:
         process_mesh = get_current_process_mesh()
     assert (
@@ -163,9 +161,7 @@ def shard_op(op, process_mesh=None, in_shard_specs=None, out_shard_specs=None):
     if process_mesh is not None:
         assert isinstance(
             process_mesh, ProcessMesh
-        ), "Argument process_mesh {} is not an instance of ProcessMesh".format(
-            process_mesh
-        )
+        ), f"Argument process_mesh {process_mesh} is not an instance of ProcessMesh"
     else:
         process_mesh = get_current_process_mesh()
     assert (
@@ -176,9 +172,7 @@ def shard_op(op, process_mesh=None, in_shard_specs=None, out_shard_specs=None):
     assert all(
         (isinstance(shard_spec, list) or shard_spec is None)
         for shard_spec in in_shard_specs
-    ), "in_shard_spec {} is not a list of list or None".format(
-        in_shard_specs
-    )
+    ), f"in_shard_spec {in_shard_specs} is not a list of list or None"
     for shard_spec in in_shard_specs:
         if shard_spec is not None:
             in_dims_mappings.append(
@@ -191,9 +185,7 @@ def shard_op(op, process_mesh=None, in_shard_specs=None, out_shard_specs=None):
     assert all(
         (isinstance(shard_spec, list) or shard_spec is None)
         for shard_spec in out_shard_specs
-    ), "out_shard_spec {} is not a list of list or None".format(
-        out_shard_specs
-    )
+    ), f"out_shard_spec {out_shard_specs} is not a list of list or None"
     for shard_spec in out_shard_specs:
         if shard_spec is not None:
             out_dims_mappings.append(
8 changes: 2 additions & 6 deletions python/paddle/distributed/auto_parallel/static/completion.py
@@ -1211,9 +1211,7 @@ def _get_op_by_id(ops, id):
                 output_name = grad_op.output_arg_names[0]
                 assert (
                     output_name in grad_var_to_var[appended_grad_times]
-                ), "sum op's output '{}' has no corresponding var".format(
-                    output_name
-                )
+                ), f"sum op's output '{output_name}' has no corresponding var"
                 ref_fwd_var_name = grad_var_to_var[appended_grad_times][
                     output_name
                 ]
@@ -1513,9 +1511,7 @@ def _get_op_by_id(ops, id):
                 output_name = grad_op.output_arg_names[0]
                 assert (
                     output_name in grad_var_to_var
-                ), "sum op's output '{}' has no corresponding var".format(
-                    output_name
-                )
+                ), f"sum op's output '{output_name}' has no corresponding var"
                 ref_fwd_var_name = grad_var_to_var[output_name]
                 ref_fwd_var = vars[ref_fwd_var_name]
                 ref_fwd_dist_attr = (
16 changes: 5 additions & 11 deletions python/paddle/distributed/auto_parallel/static/converter.py
@@ -69,7 +69,7 @@ def _check_pre_strategy(self, pre_strategy):
         if not isinstance(pre_strategy, dict):
             raise TypeError(
                 "The type of 'pre_strategy' should be 'dict', "
-                "but got '{}'.".format(str(type(pre_strategy)))
+                f"but got '{str(type(pre_strategy))}'."
             )
         return pre_strategy
 
@@ -82,7 +82,7 @@ def _check_cur_strategy(self, cur_strategy):
         if not isinstance(cur_strategy, dict):
             raise TypeError(
                 "The type of 'cur_strategy' should be 'dict', "
-                "but got '{}'.".format(str(type(cur_strategy)))
+                f"but got '{str(type(cur_strategy))}'."
             )
         return cur_strategy
 
@@ -229,9 +227,7 @@ def convert_with_prefix_match(
                     + str(err)
                 )
             self._logger.info(
-                "tensor [{}] is matched with tensor [{}]".format(
-                    cur_name, pre_name
-                )
+                f"tensor [{cur_name}] is matched with tensor [{pre_name}]"
             )
             tensor_match_with_pre.append(cur_name)
             tensor_match_with_cur.append(pre_name)
@@ -309,9 +307,7 @@ def merge_with_dist_attr(tensor_list, dist_attr):
 
     if len(partition_tensor_list) != 1:
         raise ValueError(
-            "Fail to merge tensor with dist_attr '{}'.".format(
-                str(dist_attr)
-            )
+            f"Fail to merge tensor with dist_attr '{str(dist_attr)}'."
         )
     complete_tensor = partition_tensor_list[0][0]
     return complete_tensor
@@ -336,9 +332,7 @@ def slice_with_dist_attr(tensor, dist_attr):
     )
     if sliced_tensor_index not in range(len(sliced_tensor_list)):
         raise ValueError(
-            "Fail to slice tensor with dist_attr '{}'.".format(
-                str(dist_attr)
-            )
+            f"Fail to slice tensor with dist_attr '{str(dist_attr)}'."
         )
     sliced_tensor = sliced_tensor_list[sliced_tensor_index]
     return sliced_tensor
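One detail the autofix deliberately leaves alone: explicit `str(...)` calls survive inside the new f-strings (e.g. `f"...{str(dist_attr)}..."`), even though f-string interpolation already applies `str()` by default. A hedged sketch of why the two forms are equivalent:

```python
class DistAttr:  # hypothetical stand-in for the real dist_attr object
    def __repr__(self) -> str:
        return "DistAttr(process_mesh=[0, 1])"

attr = DistAttr()
# f"{attr}" falls back to str(attr), which falls back to __repr__ here,
# so wrapping the value in str() is redundant but harmless.
assert f"'{str(attr)}'" == f"'{attr}'"
```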
12 changes: 3 additions & 9 deletions python/paddle/distributed/auto_parallel/static/cost/base_cost.py
@@ -846,9 +846,7 @@ def group_ranks(self):
             process_group = get_process_group(ring_id)
             if process_group is None:
                 raise ValueError(
-                    "There not exists process group whose ring_id is {}.".format(
-                        ring_id
-                    )
+                    f"There not exists process group whose ring_id is {ring_id}."
                 )
             self._group_ranks = process_group.ranks
         return self._group_ranks
@@ -858,9 +856,7 @@ def _check_comm_op_type(cls):
         if cls.OP_TYPE != "COMM":
             if cls.OP_TYPE not in COMM_OP_TYPE:
                 raise TypeError(
-                    "Please Check op type in {}, but got {}.".format(
-                        COMM_OP_TYPE, cls.OP_TYPE
-                    )
+                    f"Please Check op type in {COMM_OP_TYPE}, but got {cls.OP_TYPE}."
                 )
 
 
@@ -931,9 +927,7 @@ def calc_time_by_cost_model(op, cluster=None):
     """Calc op time by cost model and the unit is microsecond."""
     if not isinstance(op, paddle.base.framework.Operator):
         raise TypeError(
-            "OP must be paddle.base.framework.Operator, but got {}.".format(
-                type(op)
-            )
+            f"OP must be paddle.base.framework.Operator, but got {type(op)}."
         )
     if not cluster:
         cluster = get_default_cluster()
@@ -54,9 +54,7 @@ def _check_args(self, tensor, dist_tensor, shape, dtype):
 
         if not isinstance(tensor, Variable):
             raise TypeError(
-                "Please check tensor type is Variable, but got {}".format(
-                    type(tensor)
-                )
+                f"Please check tensor type is Variable, but got {type(tensor)}"
             )
 
         elif dist_tensor is not None:
@@ -72,9 +70,7 @@ def _check_args(self, tensor, dist_tensor, shape, dtype):
         assert tensor is None and dist_tensor is None and dtype is not None
         if not isinstance(shape, (list, set)):
             raise TypeError(
-                "Please check shape type is list or set, but got {}".format(
-                    type(shape)
-                )
+                f"Please check shape type is list or set, but got {type(shape)}"
             )
 
         elif dtype is not None: