[CodeStyle][UP031] fix some `/python/paddle/{amp, base, dataset, distributed, audio}` - part 14 (#65574)

---------

Co-authored-by: Nyakku Shigure <sigure.qaq@gmail.com>
gouzil and SigureMo authored Jun 29, 2024
1 parent 87fa2d9 commit 5cc9895
Showing 14 changed files with 29 additions and 39 deletions.
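The changes below are mechanical: Ruff's UP031 rule flags printf-style `%` string interpolation, so each flagged call is rewritten as an f-string, and a few redundant `str(...)` wrappers inside existing f-strings are dropped along the way (for the objects involved, `{x}` renders the same text as `{str(x)}`). The one exception is the primitive-operator `backward` error message, which referenced an undefined `op` (noqa: F821) and is simplified to a static string instead. A minimal sketch of the before/after pattern, using an illustrative `place` value rather than code taken from the changed files:

```python
place = "CPUPlace"  # illustrative stand-in for a value such as tracer._expected_place

# Before: printf-style interpolation, plus a redundant str() inside an f-string
old_warning = "current place is %s, so it makes no effect." % place
old_assert_msg = f"forward op [{str(place)}] don't have dist attribute !"

# After: plain f-strings, the form applied throughout this commit
new_warning = f"current place is {place}, so it makes no effect."
new_assert_msg = f"forward op [{place}] don't have dist attribute !"

# Both spellings produce identical text for these values
assert old_warning == new_warning
assert old_assert_msg == new_assert_msg
```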
6 changes: 2 additions & 4 deletions python/paddle/amp/auto_cast.py
@@ -516,8 +516,7 @@ def amp_guard(
         or tracer._expected_place.is_custom_place()
     ):
         warnings.warn(
-            'amp_guard can only be enabled on CUDAPlace, XPUPlace, and CustomPlace, current place is %s, so it makes no effect.'
-            % tracer._expected_place
+            f'amp_guard can only be enabled on CUDAPlace, XPUPlace, and CustomPlace, current place is {tracer._expected_place}, so it makes no effect.'
         )
         enable = False
     if enable:
@@ -873,8 +872,7 @@ def amp_decorate(
     if save_dtype is not None:
         if save_dtype not in ['float16', 'bfloat16', 'float32', 'float64']:
             raise ValueError(
-                "save_dtype can only be float16 float32 or float64, but your input save_dtype is %s."
-                % save_dtype
+                f"save_dtype can only be float16 float32 or float64, but your input save_dtype is {save_dtype}."
             )
         for idx in range(len(models)):
             for layer in models[idx].sublayers(include_self=True):
6 changes: 2 additions & 4 deletions python/paddle/amp/grad_scaler.py
@@ -110,8 +110,7 @@ def __init__(
             or tracer._expected_place.is_custom_place()
         ):
             warnings.warn(
-                'AmpScaler can only be enabled on CUDAPlace, XPUPlace and CustomPlace, current place is %s, so it makes no effect.'
-                % tracer._expected_place
+                f'AmpScaler can only be enabled on CUDAPlace, XPUPlace and CustomPlace, current place is {tracer._expected_place}, so it makes no effect.'
             )
             enable = False
 
@@ -210,8 +209,7 @@ def scale(self, var):
             self._use_dynamic_loss_scaling = False
             self._init_loss_scaling = 1.0
             warnings.warn(
-                'It is not recommended to use dynamic loss scaling for %s, so GradScaler is disable by default.'
-                % (amp_global_state().amp_dtype)
+                f'It is not recommended to use dynamic loss scaling for {amp_global_state().amp_dtype}, so GradScaler is disable by default.'
             )
 
         if in_pir_mode():
4 changes: 1 addition & 3 deletions python/paddle/audio/functional/window.py
@@ -376,9 +376,7 @@ def get_window(
         else:
             winstr = window
     else:
-        raise ValueError(
-            "%s as window type is not supported." % str(type(window))
-        )
+        raise ValueError(f"{type(window)} as window type is not supported.")
 
     try:
         winfunc = window_function_register.get('_' + winstr)
2 changes: 1 addition & 1 deletion python/paddle/base/multiprocess_utils.py
@@ -71,7 +71,7 @@ def _func_executor():
 
         def _func_register(function):
             if not callable(function):
-                raise TypeError("%s is not callable object." % (function))
+                raise TypeError(f"{function} is not callable object.")
             # check function object whether hash-able
             if function not in cls._registered_func_set:
                 atexit.register(_func_executor)
6 changes: 3 additions & 3 deletions python/paddle/dataset/common.py
@@ -144,9 +144,9 @@ def fetch_all():
         x for x in dir(paddle.dataset) if not x.startswith("__")
     ]:
         if "fetch" in dir(
-            importlib.import_module("paddle.dataset.%s" % module_name)
+            importlib.import_module(f"paddle.dataset.{module_name}")
         ):
-            importlib.import_module('paddle.dataset.%s' % module_name).fetch()
+            importlib.import_module(f'paddle.dataset.{module_name}').fetch()
 
 
 def split(reader, line_count, suffix="%05d.pickle", dumper=pickle.dump):
@@ -210,7 +210,7 @@ def reader():
         my_file_list = []
         for idx, fn in enumerate(file_list):
             if idx % trainer_count == trainer_id:
-                print("append file: %s" % fn)
+                print(f"append file: {fn}")
                 my_file_list.append(fn)
         for fn in my_file_list:
             with open(fn, "r") as f:
4 changes: 1 addition & 3 deletions python/paddle/dataset/conll05.py
@@ -126,9 +126,7 @@ def reader():
                         lbl_seq.append('B-' + cur_tag)
                         is_in_bracket = True
                     else:
-                        raise RuntimeError(
-                            'Unexpected label: %s' % l
-                        )
+                        raise RuntimeError(f'Unexpected label: {l}')
 
                 yield sentences, verb_list[i], lbl_seq
 
Changed file under python/paddle/distributed (file name not shown)
@@ -422,7 +422,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"forward op [{str(src_op)}] don't have dist attribute !"
+        ), f"forward op [{src_op}] don't have dist attribute !"
 
         # check validation of inputs / outputs
         assert 'Ids' in kwargs, "input [{}] is not given".format('Ids')
@@ -578,7 +578,7 @@ def backward(ctx, *args, **kwargs):
         dist_attr = ctx.get_op_dist_attr_for_program(backward_op)
         assert (
             dist_attr is not None
-        ), f"backward op [{str(backward_op)}] don't have dist attribute !"
+        ), f"backward op [{backward_op}] don't have dist attribute !"
 
         # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
         if rank_id not in dist_attr.process_mesh.process_ids:
Changed file under python/paddle/distributed (file name not shown)
@@ -62,7 +62,7 @@ def forward(ctx, *args, **kwargs):
         ):
             assert (
                 op_dist_attr is not None
-            ), f"forward op [{str(src_op)}] don't have dist attribute !"
+            ), f"forward op [{src_op}] don't have dist attribute !"
 
             if (
                 len(kwargs.get('fixed_seed_offset', [])) > 0
Changed file under python/paddle/distributed (file name not shown)
@@ -74,7 +74,7 @@ def forward(ctx, *args, **kwargs):
         if is_enable_auto_rand_ctrl() and not op_dist_attr.is_recompute:
             assert (
                 op_dist_attr is not None
-            ), f"forward op [{str(src_op)}] don't have dist attribute !"
+            ), f"forward op [{src_op}] don't have dist attribute !"
 
             assert 'seed_tensor' in kwargs, "input [{}] is not given".format(
                 'seed_tensor'
@@ -101,7 +101,7 @@ def forward(ctx, *args, **kwargs):
                 assert (
                     pre_op.type == "seed"
                     and len(pre_op.attr("rng_name")) == 0
-                ), f"found exception op {str(pre_op)}"
+                ), f"found exception op {pre_op}"
 
             # determinate rng
             X_var = main_block._var_recursive(kwargs['x'][0])
Changed file under python/paddle/distributed (file name not shown)
@@ -315,7 +315,7 @@ def _right_operand_parameter_matmul_backward(ctx, *args, **kwargs):
     dist_attr = ctx.get_op_dist_attr_for_program(backward_op)
     assert (
         dist_attr is not None
-    ), f"backward op [{str(backward_op)}] don't have dist attribute !"
+    ), f"backward op [{backward_op}] don't have dist attribute !"
 
     # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
     if rank_id not in dist_attr.process_mesh.process_ids:
@@ -782,7 +782,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"backward op [{str(src_op)}] don't have dist attribute !"
+        ), f"backward op [{src_op}] don't have dist attribute !"
 
         # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
         if rank_id not in op_dist_attr.process_mesh.process_ids:
@@ -1037,7 +1037,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"backward op [{str(src_op)}] don't have dist attribute !"
+        ), f"backward op [{src_op}] don't have dist attribute !"
 
         # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
         if rank_id not in op_dist_attr.process_mesh.process_ids:
@@ -1475,7 +1475,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"backward op [{str(src_op)}] don't have dist attribute !"
+        ), f"backward op [{src_op}] don't have dist attribute !"
 
         # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
         if rank_id not in op_dist_attr.process_mesh.process_ids:
@@ -1724,7 +1724,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"backward op [{str(src_op)}] don't have dist attribute !"
+        ), f"backward op [{src_op}] don't have dist attribute !"
 
         # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
         if rank_id not in op_dist_attr.process_mesh.process_ids:
@@ -2155,7 +2155,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"backward op [{str(src_op)}] don't have dist attribute !"
+        ), f"backward op [{src_op}] don't have dist attribute !"
 
         # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
         if rank_id not in op_dist_attr.process_mesh.process_ids:
@@ -2398,7 +2398,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"backward op [{str(src_op)}] don't have dist attribute !"
+        ), f"backward op [{src_op}] don't have dist attribute !"
 
         # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
         if rank_id not in op_dist_attr.process_mesh.process_ids:
Changed file under python/paddle/distributed (file name not shown)
@@ -230,9 +230,7 @@ def forward(ctx, *args, **kwargs):
 
     @staticmethod
     def backward(ctx, *args, **kwargs):
-        raise RuntimeError(
-            f"primitive operator does NOT have backward function, op type: {str(op.type)}" # noqa: F821
-        )
+        raise RuntimeError("primitive operator does NOT have backward function")
 
 
 register_distributed_operator_impl(
Changed file under python/paddle/distributed (file name not shown)
@@ -295,7 +295,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"backward op [{str(src_op)}] don't have dist attribute !"
+        ), f"backward op [{src_op}] don't have dist attribute !"
 
         # check validation of inputs / outputs
         for input_name in src_op.desc.input_names():
@@ -551,7 +551,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"backward op [{str(src_op)}] don't have dist attribute !"
+        ), f"backward op [{src_op}] don't have dist attribute !"
 
         # check validation of inputs / outputs
         for input_name in src_op.desc.input_names():
@@ -800,7 +800,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"backward op [{str(src_op)}] don't have dist attribute !"
+        ), f"backward op [{src_op}] don't have dist attribute !"
 
         # check validation of inputs / outputs
         for input_name in src_op.desc.input_names():
Changed file under python/paddle/distributed (file name not shown)
@@ -74,7 +74,7 @@ def backward(ctx, *args, **kwargs):
         dist_attr = ctx.get_op_dist_attr_for_program(backward_op)
         assert (
             dist_attr is not None
-        ), f"backward op [{str(backward_op)}] don't have dist attribute !"
+        ), f"backward op [{backward_op}] don't have dist attribute !"
 
         assert rank_id in dist_attr.process_mesh.process_ids
 
Changed file under python/paddle/distributed (file name not shown)
@@ -87,7 +87,7 @@ def __init__(self, name, values, default=None):
         types = {type(v) for v in values}
         if len(types) > 1:
             raise TypeError(
-                f"Choice can contain only one type of value, but found values: {str(values)} with types: {str(types)}."
+                f"Choice can contain only one type of value, but found values: {values} with types: {types}."
             )
         self._is_unknown_type = False
 
@@ -185,7 +185,7 @@ def get_state(self):
     def _check_int(self, val):
         int_val = int(val)
         if int_val != val:
-            raise ValueError(f"Expects val is an int, but found: {str(val)}.")
+            raise ValueError(f"Expects val is an int, but found: {val}.")
         return int_val
 
     def __repr__(self):
