diff --git a/python/paddle/amp/auto_cast.py b/python/paddle/amp/auto_cast.py
index 02b2c3c12a881..b8a40df7160c3 100644
--- a/python/paddle/amp/auto_cast.py
+++ b/python/paddle/amp/auto_cast.py
@@ -516,8 +516,7 @@ def amp_guard(
             or tracer._expected_place.is_custom_place()
         ):
             warnings.warn(
-                'amp_guard can only be enabled on CUDAPlace, XPUPlace, and CustomPlace, current place is %s, so it makes no effect.'
-                % tracer._expected_place
+                f'amp_guard can only be enabled on CUDAPlace, XPUPlace, and CustomPlace, current place is {tracer._expected_place}, so it makes no effect.'
             )
             enable = False
     if enable:
@@ -873,8 +872,7 @@ def amp_decorate(
     if save_dtype is not None:
         if save_dtype not in ['float16', 'bfloat16', 'float32', 'float64']:
             raise ValueError(
-                "save_dtype can only be float16 float32 or float64, but your input save_dtype is %s."
-                % save_dtype
+                f"save_dtype can only be float16 float32 or float64, but your input save_dtype is {save_dtype}."
             )
         for idx in range(len(models)):
             for layer in models[idx].sublayers(include_self=True):
diff --git a/python/paddle/amp/grad_scaler.py b/python/paddle/amp/grad_scaler.py
index 020c13d6a337a..d0c1b021d9956 100644
--- a/python/paddle/amp/grad_scaler.py
+++ b/python/paddle/amp/grad_scaler.py
@@ -110,8 +110,7 @@ def __init__(
             or tracer._expected_place.is_custom_place()
         ):
             warnings.warn(
-                'AmpScaler can only be enabled on CUDAPlace, XPUPlace and CustomPlace, current place is %s, so it makes no effect.'
-                % tracer._expected_place
+                f'AmpScaler can only be enabled on CUDAPlace, XPUPlace and CustomPlace, current place is {tracer._expected_place}, so it makes no effect.'
             )
             enable = False
 
@@ -210,8 +209,7 @@ def scale(self, var):
             self._use_dynamic_loss_scaling = False
             self._init_loss_scaling = 1.0
             warnings.warn(
-                'It is not recommended to use dynamic loss scaling for %s, so GradScaler is disable by default.'
-                % (amp_global_state().amp_dtype)
+                f'It is not recommended to use dynamic loss scaling for {amp_global_state().amp_dtype}, so GradScaler is disable by default.'
             )
 
         if in_pir_mode():
diff --git a/python/paddle/audio/functional/window.py b/python/paddle/audio/functional/window.py
index 60eb9626ac693..1422702d97f8b 100644
--- a/python/paddle/audio/functional/window.py
+++ b/python/paddle/audio/functional/window.py
@@ -376,9 +376,7 @@ def get_window(
         else:
             winstr = window
     else:
-        raise ValueError(
-            "%s as window type is not supported." % str(type(window))
-        )
+        raise ValueError(f"{type(window)} as window type is not supported.")
 
     try:
         winfunc = window_function_register.get('_' + winstr)
diff --git a/python/paddle/base/multiprocess_utils.py b/python/paddle/base/multiprocess_utils.py
index 1445e985773cd..76800acfa84fb 100644
--- a/python/paddle/base/multiprocess_utils.py
+++ b/python/paddle/base/multiprocess_utils.py
@@ -71,7 +71,7 @@ def _func_executor():
 
         def _func_register(function):
             if not callable(function):
-                raise TypeError("%s is not callable object." % (function))
+                raise TypeError(f"{function} is not callable object.")
             # check function object whether hash-able
             if function not in cls._registered_func_set:
                 atexit.register(_func_executor)
diff --git a/python/paddle/dataset/common.py b/python/paddle/dataset/common.py
index 35155a2de2d22..79628cd045974 100644
--- a/python/paddle/dataset/common.py
+++ b/python/paddle/dataset/common.py
@@ -144,9 +144,9 @@ def fetch_all():
         x for x in dir(paddle.dataset) if not x.startswith("__")
     ]:
         if "fetch" in dir(
-            importlib.import_module("paddle.dataset.%s" % module_name)
+            importlib.import_module(f"paddle.dataset.{module_name}")
         ):
-            importlib.import_module('paddle.dataset.%s' % module_name).fetch()
+            importlib.import_module(f'paddle.dataset.{module_name}').fetch()
 
 
 def split(reader, line_count, suffix="%05d.pickle", dumper=pickle.dump):
@@ -210,7 +210,7 @@ def reader():
         my_file_list = []
         for idx, fn in enumerate(file_list):
             if idx % trainer_count == trainer_id:
-                print("append file: %s" % fn)
+                print(f"append file: {fn}")
                 my_file_list.append(fn)
         for fn in my_file_list:
             with open(fn, "r") as f:
diff --git a/python/paddle/dataset/conll05.py b/python/paddle/dataset/conll05.py
index 2295e9b1bd447..1e85fc1c46d5e 100644
--- a/python/paddle/dataset/conll05.py
+++ b/python/paddle/dataset/conll05.py
@@ -126,9 +126,7 @@ def reader():
                         lbl_seq.append('B-' + cur_tag)
                         is_in_bracket = True
                     else:
-                        raise RuntimeError(
-                            'Unexpected label: %s' % l
-                        )
+                        raise RuntimeError(f'Unexpected label: {l}')
 
                 yield sentences, verb_list[i], lbl_seq
 
diff --git a/python/paddle/distributed/auto_parallel/static/operators/dist_embedding.py b/python/paddle/distributed/auto_parallel/static/operators/dist_embedding.py
index 588a0f30ebb0b..33ddb3e53de32 100644
--- a/python/paddle/distributed/auto_parallel/static/operators/dist_embedding.py
+++ b/python/paddle/distributed/auto_parallel/static/operators/dist_embedding.py
@@ -422,7 +422,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"forward op [{str(src_op)}] don't have dist attribute !"
+        ), f"forward op [{src_op}] don't have dist attribute !"
 
         # check validation of inputs / outputs
         assert 'Ids' in kwargs, "input [{}] is not given".format('Ids')
@@ -578,7 +578,7 @@ def backward(ctx, *args, **kwargs):
         dist_attr = ctx.get_op_dist_attr_for_program(backward_op)
         assert (
             dist_attr is not None
-        ), f"backward op [{str(backward_op)}] don't have dist attribute !"
+        ), f"backward op [{backward_op}] don't have dist attribute !"
 
         # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
         if rank_id not in dist_attr.process_mesh.process_ids:
diff --git a/python/paddle/distributed/auto_parallel/static/operators/dist_flash_attn.py b/python/paddle/distributed/auto_parallel/static/operators/dist_flash_attn.py
index 841dc0a587044..cfbe575ec1f2d 100644
--- a/python/paddle/distributed/auto_parallel/static/operators/dist_flash_attn.py
+++ b/python/paddle/distributed/auto_parallel/static/operators/dist_flash_attn.py
@@ -62,7 +62,7 @@ def forward(ctx, *args, **kwargs):
         ):
             assert (
                 op_dist_attr is not None
-            ), f"forward op [{str(src_op)}] don't have dist attribute !"
+            ), f"forward op [{src_op}] don't have dist attribute !"
 
             if (
                 len(kwargs.get('fixed_seed_offset', [])) > 0
diff --git a/python/paddle/distributed/auto_parallel/static/operators/dist_fused_dropout_add.py b/python/paddle/distributed/auto_parallel/static/operators/dist_fused_dropout_add.py
index fbe74fe6c9503..03d83eacc3c5b 100644
--- a/python/paddle/distributed/auto_parallel/static/operators/dist_fused_dropout_add.py
+++ b/python/paddle/distributed/auto_parallel/static/operators/dist_fused_dropout_add.py
@@ -74,7 +74,7 @@ def forward(ctx, *args, **kwargs):
         if is_enable_auto_rand_ctrl() and not op_dist_attr.is_recompute:
             assert (
                 op_dist_attr is not None
-            ), f"forward op [{str(src_op)}] don't have dist attribute !"
+            ), f"forward op [{src_op}] don't have dist attribute !"
 
             assert 'seed_tensor' in kwargs, "input [{}] is not given".format(
                 'seed_tensor'
@@ -101,7 +101,7 @@ def forward(ctx, *args, **kwargs):
                 assert (
                     pre_op.type == "seed"
                     and len(pre_op.attr("rng_name")) == 0
-                ), f"found exception op {str(pre_op)}"
+                ), f"found exception op {pre_op}"
 
                 # determinate rng
                 X_var = main_block._var_recursive(kwargs['x'][0])
diff --git a/python/paddle/distributed/auto_parallel/static/operators/dist_matmul.py b/python/paddle/distributed/auto_parallel/static/operators/dist_matmul.py
index 4b44e17dea210..d60ea6a640865 100644
--- a/python/paddle/distributed/auto_parallel/static/operators/dist_matmul.py
+++ b/python/paddle/distributed/auto_parallel/static/operators/dist_matmul.py
@@ -315,7 +315,7 @@ def _right_operand_parameter_matmul_backward(ctx, *args, **kwargs):
     dist_attr = ctx.get_op_dist_attr_for_program(backward_op)
     assert (
         dist_attr is not None
-    ), f"backward op [{str(backward_op)}] don't have dist attribute !"
+    ), f"backward op [{backward_op}] don't have dist attribute !"
 
     # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
     if rank_id not in dist_attr.process_mesh.process_ids:
@@ -782,7 +782,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"backward op [{str(src_op)}] don't have dist attribute !"
+        ), f"backward op [{src_op}] don't have dist attribute !"
 
         # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
         if rank_id not in op_dist_attr.process_mesh.process_ids:
@@ -1037,7 +1037,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"backward op [{str(src_op)}] don't have dist attribute !"
+        ), f"backward op [{src_op}] don't have dist attribute !"
 
         # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
         if rank_id not in op_dist_attr.process_mesh.process_ids:
@@ -1475,7 +1475,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"backward op [{str(src_op)}] don't have dist attribute !"
+        ), f"backward op [{src_op}] don't have dist attribute !"
 
         # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
         if rank_id not in op_dist_attr.process_mesh.process_ids:
@@ -1724,7 +1724,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"backward op [{str(src_op)}] don't have dist attribute !"
+        ), f"backward op [{src_op}] don't have dist attribute !"
 
         # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
         if rank_id not in op_dist_attr.process_mesh.process_ids:
@@ -2155,7 +2155,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"backward op [{str(src_op)}] don't have dist attribute !"
+        ), f"backward op [{src_op}] don't have dist attribute !"
 
         # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
         if rank_id not in op_dist_attr.process_mesh.process_ids:
@@ -2398,7 +2398,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"backward op [{str(src_op)}] don't have dist attribute !"
+        ), f"backward op [{src_op}] don't have dist attribute !"
 
         # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
         if rank_id not in op_dist_attr.process_mesh.process_ids:
diff --git a/python/paddle/distributed/auto_parallel/static/operators/dist_reduce_sum_p.py b/python/paddle/distributed/auto_parallel/static/operators/dist_reduce_sum_p.py
index e99b57f8f97d8..e9bfa53bb0a9f 100644
--- a/python/paddle/distributed/auto_parallel/static/operators/dist_reduce_sum_p.py
+++ b/python/paddle/distributed/auto_parallel/static/operators/dist_reduce_sum_p.py
@@ -230,9 +230,7 @@ def forward(ctx, *args, **kwargs):
 
     @staticmethod
    def backward(ctx, *args, **kwargs):
-        raise RuntimeError(
-            f"primitive operator does NOT have backward function, op type: {str(op.type)}"  # noqa: F821
-        )
+        raise RuntimeError("primitive operator does NOT have backward function")
 
 
 register_distributed_operator_impl(
diff --git a/python/paddle/distributed/auto_parallel/static/operators/dist_reshape.py b/python/paddle/distributed/auto_parallel/static/operators/dist_reshape.py
index ec1472896f5b2..bc3c565ad0e45 100644
--- a/python/paddle/distributed/auto_parallel/static/operators/dist_reshape.py
+++ b/python/paddle/distributed/auto_parallel/static/operators/dist_reshape.py
@@ -295,7 +295,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"backward op [{str(src_op)}] don't have dist attribute !"
+        ), f"backward op [{src_op}] don't have dist attribute !"
 
         # check validation of inputs / outputs
         for input_name in src_op.desc.input_names():
@@ -551,7 +551,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"backward op [{str(src_op)}] don't have dist attribute !"
+        ), f"backward op [{src_op}] don't have dist attribute !"
 
         # check validation of inputs / outputs
         for input_name in src_op.desc.input_names():
@@ -800,7 +800,7 @@ def forward(ctx, *args, **kwargs):
         op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
         assert (
             op_dist_attr is not None
-        ), f"backward op [{str(src_op)}] don't have dist attribute !"
+        ), f"backward op [{src_op}] don't have dist attribute !"
 
         # check validation of inputs / outputs
         for input_name in src_op.desc.input_names():
diff --git a/python/paddle/distributed/auto_parallel/static/operators/dist_update_loss_scaling.py b/python/paddle/distributed/auto_parallel/static/operators/dist_update_loss_scaling.py
index 8ff358d14b1db..39d4fdfef974a 100644
--- a/python/paddle/distributed/auto_parallel/static/operators/dist_update_loss_scaling.py
+++ b/python/paddle/distributed/auto_parallel/static/operators/dist_update_loss_scaling.py
@@ -74,7 +74,7 @@ def backward(ctx, *args, **kwargs):
         dist_attr = ctx.get_op_dist_attr_for_program(backward_op)
         assert (
             dist_attr is not None
-        ), f"backward op [{str(backward_op)}] don't have dist attribute !"
+        ), f"backward op [{backward_op}] don't have dist attribute !"
 
         assert rank_id in dist_attr.process_mesh.process_ids
 
diff --git a/python/paddle/distributed/auto_parallel/static/tuner/tunable_variable.py b/python/paddle/distributed/auto_parallel/static/tuner/tunable_variable.py
index 83ed42c3fe1c0..fc30b377e7dff 100644
--- a/python/paddle/distributed/auto_parallel/static/tuner/tunable_variable.py
+++ b/python/paddle/distributed/auto_parallel/static/tuner/tunable_variable.py
@@ -87,7 +87,7 @@ def __init__(self, name, values, default=None):
         types = {type(v) for v in values}
         if len(types) > 1:
             raise TypeError(
-                f"Choice can contain only one type of value, but found values: {str(values)} with types: {str(types)}."
+                f"Choice can contain only one type of value, but found values: {values} with types: {types}."
             )
 
         self._is_unknown_type = False
@@ -185,7 +185,7 @@ def get_state(self):
     def _check_int(self, val):
         int_val = int(val)
         if int_val != val:
-            raise ValueError(f"Expects val is an int, but found: {str(val)}.")
+            raise ValueError(f"Expects val is an int, but found: {val}.")
         return int_val
 
     def __repr__(self):
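Note: every hunk above applies the same mechanical rewrite: %-style string formatting is replaced by an f-string, and explicit str(...) calls inside existing f-string placeholders are dropped. The following self-contained sketch (illustrative only, not part of the patch; FakeOp is a made-up stand-in for the operator objects referenced above) shows why the two spellings produce identical messages:

# Illustrative sketch only, not part of the patch. FakeOp stands in for any
# object with a __str__, such as the dist-op objects in the hunks above.
class FakeOp:
    def __str__(self):
        return "lookup_table_v2"


op = FakeOp()

# Old style: %-formatting, sometimes with a redundant explicit str() call.
old = "forward op [%s] don't have dist attribute !" % str(op)

# New style: an f-string; {op} already goes through format()/str(),
# so the explicit str() adds nothing.
new = f"forward op [{op}] don't have dist attribute !"

assert old == new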