diff --git a/pyproject.toml b/pyproject.toml
index 0e30f861bd4db..b974faad3bf71 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -105,7 +105,6 @@ ignore = [
 
 # Temporarily ignored
 "python/paddle/base/**" = [
-    "UP031",
     "C408",
     "UP030",
     "C405",
diff --git a/python/paddle/base/backward.py b/python/paddle/base/backward.py
index 2f03636c55d41..59bf66ebde7b6 100755
--- a/python/paddle/base/backward.py
+++ b/python/paddle/base/backward.py
@@ -231,7 +231,7 @@ def modify_forward_desc_for_recompute(self):
 
 
 def _pretty_op_desc_(op_desc, prefix):
-    out_s = "%s\tname:[%s]\n%s \tinputs:[%s]\n%s \toutputs:[%s]" % (
+    out_s = "{}\tname:[{}]\n{} \tinputs:[{}]\n{} \toutputs:[{}]".format(
         prefix + "_op",
         str(op_desc.type()),
         prefix + "_input",
@@ -2441,8 +2441,9 @@ def calc_gradient_helper(
             raise ValueError("all targets must be in the same block")
         if target.shape != grad.shape:
             raise ValueError(
-                "The shapes of target and grad are different: %s %s"
-                % (target.name, grad.name)
+                "The shapes of target and grad are different: {} {}".format(
+                    target.name, grad.name
+                )
             )
         target_grad_map[_append_grad_suffix_(target.name)] = grad.name
         input_grad_names_set.add(grad.name)
diff --git a/python/paddle/base/core.py b/python/paddle/base/core.py
index 158e556cd1afe..aca102364a1ec 100644
--- a/python/paddle/base/core.py
+++ b/python/paddle/base/core.py
@@ -47,11 +47,10 @@
     if os.name == 'nt':
         executable_path = os.path.abspath(os.path.dirname(sys.executable))
         raise ImportError(
-            """NOTE: You may need to run \"set PATH=%s;%%PATH%%\"
+            f"""NOTE: You may need to run \"set PATH={executable_path};%PATH%\"
             if you encounters \"DLL load failed\" errors. If you have python
-            installed in other directory, replace \"%s\" with your own
-            directory. The original error is: \n %s"""
-            % (executable_path, executable_path, str(e))
+            installed in other directory, replace \"{executable_path}\" with your own
+            directory. The original error is: \n {str(e)}"""
         )
     else:
         raise ImportError(
@@ -197,7 +196,7 @@ def run_shell_command(cmd):
 def get_dso_path(core_so, dso_name):
     if core_so and dso_name:
         return run_shell_command(
-            "ldd %s|grep %s|awk '{print $3}'" % (core_so, dso_name)
+            f"ldd {core_so}|grep {dso_name}|awk '{{print $3}}'"
         )
     else:
         return None
diff --git a/python/paddle/base/data_feeder.py b/python/paddle/base/data_feeder.py
index 6efb86ffcc9ab..5bf2bf2d60f03 100644
--- a/python/paddle/base/data_feeder.py
+++ b/python/paddle/base/data_feeder.py
@@ -184,8 +184,9 @@ def check_type(input, input_name, expected_type, op_name, extra_message=''):
         )
     if not isinstance(input, expected_type):
         raise TypeError(
-            "The type of '%s' in %s must be %s, but received %s. %s"
-            % (input_name, op_name, expected_type, type(input), extra_message)
+            "The type of '{}' in {} must be {}, but received {}. {}".format(
+                input_name, op_name, expected_type, type(input), extra_message
+            )
         )
 
 
@@ -197,8 +198,9 @@ def check_dtype(
         return
     if convert_dtype(input_dtype) in ['float16']:
         warnings.warn(
-            "The data type of '%s' in %s only support float16 in GPU now. %s"
-            % (input_name, op_name, extra_message)
+            "The data type of '{}' in {} only support float16 in GPU now. {}".format(
+                input_name, op_name, extra_message
+            )
         )
     if convert_dtype(input_dtype) in ['uint16'] and op_name not in [
         'reshape',
@@ -206,13 +208,13 @@
         'scale',
     ]:
         warnings.warn(
-            "The data type of '%s' in %s only support bfloat16 in OneDNN now. %s"
-            % (input_name, op_name, extra_message)
+            "The data type of '{}' in {} only support bfloat16 in OneDNN now. {}".format(
+                input_name, op_name, extra_message
+            )
         )
     if convert_dtype(input_dtype) not in expected_dtype:
         raise TypeError(
-            "The data type of '%s' in %s must be %s, but received %s. %s"
-            % (
+            "The data type of '{}' in {} must be {}, but received {}. {}".format(
                 input_name,
                 op_name,
                 expected_dtype,
diff --git a/python/paddle/base/dygraph/base.py b/python/paddle/base/dygraph/base.py
index 0997d24ad4db4..9c340b7eab1d1 100644
--- a/python/paddle/base/dygraph/base.py
+++ b/python/paddle/base/dygraph/base.py
@@ -884,8 +884,9 @@ def to_variable(value, name=None, zero_copy=None, dtype=None):
         )
     if not isinstance(value, support_type):
         raise TypeError(
-            "The type of 'value' in base.dygraph.to_variable must be %s, but received %s."
-            % (support_type, type(value))
+            "The type of 'value' in base.dygraph.to_variable must be {}, but received {}.".format(
+                support_type, type(value)
+            )
         )
     if isinstance(value, (core.eager.Tensor, framework.Variable)):
         return value
diff --git a/python/paddle/base/executor.py b/python/paddle/base/executor.py
index 2d2b53e71f1e6..b41bfb749dafc 100755
--- a/python/paddle/base/executor.py
+++ b/python/paddle/base/executor.py
@@ -256,8 +256,9 @@ def check_feed_shape_type(var, feed, num_places=1):
             else feed._dtype()
         )
         raise ValueError(
-            'The data type of fed Variable %r must be %r, but received %r'
-            % (var.name, var_dtype_format, feed_dtype_format)
+            'The data type of fed Variable {!r} must be {!r}, but received {!r}'.format(
+                var.name, var_dtype_format, feed_dtype_format
+            )
         )
     return True
 
@@ -305,8 +306,9 @@ def pir_check_feed_shape_type(feed, name, target_shape, dtype, num_places=1):
             else feed._dtype()
         )
         raise ValueError(
-            'The data type of fed Variable %r must be %r, but received %r'
-            % (name, var_dtype_format, feed_dtype_format)
+            'The data type of fed Variable {!r} must be {!r}, but received {!r}'.format(
+                name, var_dtype_format, feed_dtype_format
+            )
         )
     return True
 
@@ -487,7 +489,7 @@ def _add_feed_fetch_ops(
     for i, var in enumerate(fetch_list):
         assert isinstance(
             var, (Variable, str)
-        ), "Wrong type for fetch_list[%s]: %s" % (i, type(var))
+        ), f"Wrong type for fetch_list[{i}]: {type(var)}"
         global_block.append_op(
             type=fetch_op,
             inputs={'X': [var]},
@@ -510,7 +512,7 @@ def _add_pir_fetch_ops(program, fetch_list, fetch_var_name):
    for i, fetch_input in enumerate(fetch_list):
         assert isinstance(
             fetch_input, OpResult
-        ), "Wrong type for fetch_list[%s]: %s" % (i, type(fetch_input))
+        ), f"Wrong type for fetch_list[{i}]: {type(fetch_input)}"
         paddle._pir_ops.fetch(fetch_input, fetch_var_name + str(i), i)
 
 
@@ -2792,7 +2794,7 @@ def _add_fetch_ops(
     for i, var in enumerate(fetch_list):
         assert isinstance(
             var, (Variable, str)
-        ), "Wrong type for fetch_list[%s]: %s" % (i, type(var))
+        ), f"Wrong type for fetch_list[{i}]: {type(var)}"
         global_block.append_op(
             type=fetch_op,
             inputs={'X': [var]},
diff --git a/python/paddle/base/framework.py b/python/paddle/base/framework.py
index d6ec848283fc8..a2d136989e8c9 100644
--- a/python/paddle/base/framework.py
+++ b/python/paddle/base/framework.py
@@ -521,17 +521,19 @@ def version_cmp(ver_a, ver_b):
     if version_cmp(version_installed, zero_version) == 0:
         if max_version is not None:
             warnings.warn(
-                "PaddlePaddle version in [%s, %s] required, but %s installed. "
+                "PaddlePaddle version in [{}, {}] required, but {} installed. "
                 "Maybe you are using a develop version, "
-                "please make sure the version is good with your code."
-                % (min_version, max_version, fluid_version.full_version)
+                "please make sure the version is good with your code.".format(
+                    min_version, max_version, fluid_version.full_version
+                )
             )
         else:
             warnings.warn(
-                "PaddlePaddle version %s or higher is required, but %s installed, "
+                "PaddlePaddle version {} or higher is required, but {} installed, "
                 "Maybe you are using a develop version, "
-                "please make sure the version is good with your code."
-                % (min_version, fluid_version.full_version)
+                "please make sure the version is good with your code.".format(
+                    min_version, fluid_version.full_version
+                )
             )
         return
 
@@ -551,15 +553,17 @@ def version_cmp(ver_a, ver_b):
             or version_cmp(version_installed, min_version_to_check) < 0
         ):
             raise Exception(
-                "VersionError: PaddlePaddle version in [%s, %s] required, but %s installed."
-                % (min_version, max_version, fluid_version.full_version)
+                "VersionError: PaddlePaddle version in [{}, {}] required, but {} installed.".format(
+                    min_version, max_version, fluid_version.full_version
+                )
             )
     else:
         if version_cmp(version_installed, min_version_to_check) < 0:
             raise Exception(
-                "VersionError: PaddlePaddle version %s or higher is required, but %s installed, "
-                "please upgrade your PaddlePaddle to %s or other higher version."
-                % (min_version, fluid_version.full_version, min_version)
+                "VersionError: PaddlePaddle version {} or higher is required, but {} installed, "
+                "please upgrade your PaddlePaddle to {} or other higher version.".format(
+                    min_version, fluid_version.full_version, min_version
+                )
             )
 
 
@@ -623,11 +627,12 @@ def _set_pipeline_stage(stage):
 def _fake_interface_only_(func):
     def __impl__(*args, **kwargs):
         raise AssertionError(
-            "'%s' only can be called by `paddle.Tensor` in dynamic graph mode. Suggestions:\n"
+            "'{}' only can be called by `paddle.Tensor` in dynamic graph mode. Suggestions:\n"
            " 1. If you are in static graph mode, you can switch to dynamic graph mode by turning off `paddle.enable_static()` or calling `paddle.disable_static()`.\n"
            " 2. If you are using `@paddle.jit.to_static`, you can call `paddle.jit.enable_to_static(False)`. "
-            "If you have to translate dynamic graph to static graph, please use other API to replace '%s'."
-            % (func.__name__, func.__name__)
+            "If you have to translate dynamic graph to static graph, please use other API to replace '{}'.".format(
+                func.__name__, func.__name__
+            )
         )
 
     return __impl__
@@ -1882,7 +1887,7 @@ def to_string(self, throw_on_error, with_details=False):
         if with_details:
             additional_attr = ("error_clip",)
             for attr_name in additional_attr:
-                res_str += "%s: %s\n" % (attr_name, getattr(self, attr_name))
+                res_str += f"{attr_name}: {getattr(self, attr_name)}\n"
 
         return res_str
 
@@ -3055,20 +3060,14 @@ def find_name(var_list, name):
                     or m.intermediate
                 ):
                     raise ValueError(
-                        (
-                            "Incorrect setting for output(s) of "
-                            "operator \"%s\", should set: [%s]."
-                        )
-                        % (type, m.name)
+                        "Incorrect setting for output(s) of "
+                        f"operator \"{type}\", should set: [{m.name}]."
                     )
             else:
                 if not ((m.name in outputs) or m.dispensable):
                     raise ValueError(
-                        (
-                            "Incorrect setting for output(s) of "
-                            "operator \"%s\", should set: [%s]."
-                        )
-                        % (type, m.name)
+                        "Incorrect setting for output(s) of "
+                        f"operator \"{type}\", should set: [{m.name}]."
                     )
 
         for out_proto in proto.outputs:
@@ -3110,9 +3109,7 @@ def find_name(var_list, name):
                     self._update_desc_attr(attr_name, attr_val)
             for attr_name in extra_attrs_map.keys():
                 if os.environ.get('FLAGS_print_extra_attrs', '0') == '1':
-                    warnings.warn(
-                        "op %s use extra_attr: %s" % (type, attr_name)
-                    )
+                    warnings.warn(f"op {type} use extra_attr: {attr_name}")
 
                 if (attr_name not in op_attrs) or (
                     op_attrs[attr_name] is None
@@ -3129,7 +3126,7 @@ def find_name(var_list, name):
                 for attr in attrs:
                     if attr in op_attrs.keys():
                         warnings.warn(
-                            "op %s use extra_attr: %s" % (type, attr)
+                            f"op {type} use extra_attr: {attr}"
                         )
 
                 if type in special_op_attrs:
@@ -3142,8 +3139,7 @@ def find_name(var_list, name):
                         and default_value != op_attrs[a_name]
                     ):
                         warnings.warn(
-                            "op %s's attr %s = %s is not the default value: %s"
-                            % (
+                            "op {}'s attr {} = {} is not the default value: {}".format(
                                 type,
                                 a_name,
                                 op_attrs[a_name],
@@ -3718,8 +3714,9 @@ def check_if_to_static_diff_with_dygraph(op_type, inplace_map, outputs):
                 and inplace_map.get("Input", None) == "Out"
             ):
                 raise ValueError(
-                    'Sorry about what\'s happend. In to_static mode, %s\'s output variable %s is a viewed Tensor in dygraph. This will result in inconsistent calculation behavior between dynamic and static graphs. If you are sure it is safe, you can call with paddle.base.framework._stride_in_no_check_dy2st_diff() in your safe code block.'
-                    % (op_type, k)
+                    'Sorry about what\'s happened. In to_static mode, {}\'s output variable {} is a viewed Tensor in dygraph. This will result in inconsistent calculation behavior between dynamic and static graphs. If you are sure it is safe, you can call with paddle.base.framework._stride_in_no_check_dy2st_diff() in your safe code block.'.format(
+                        op_type, k
+                    )
                 )
         elif isinstance(v, list):
             for var in v:
@@ -3729,8 +3726,9 @@ def check_if_to_static_diff_with_dygraph(op_type, inplace_map, outputs):
                     and inplace_map.get("Input", None) == "Out"
                 ):
                     raise ValueError(
-                        'Sorry about what\'s happend. In to_static mode, %s\'s output variable %s is a viewed Tensor in dygraph. This will result in inconsistent calculation behavior between dynamic and static graphs. If you are sure it is safe, you can call with paddle.base.framework._stride_in_no_check_dy2st_diff() in your safe code block.'
-                        % (op_type, k)
+                        'Sorry about what\'s happened. In to_static mode, {}\'s output variable {} is a viewed Tensor in dygraph. This will result in inconsistent calculation behavior between dynamic and static graphs. If you are sure it is safe, you can call with paddle.base.framework._stride_in_no_check_dy2st_diff() in your safe code block.'.format(
+                            op_type, k
+                        )
                     )
 
 
@@ -7309,7 +7307,7 @@ def to_string(self, throw_on_error, with_details=False):
                 "need_clip",
             )
             for attr_name in additional_attr:
-                res_str += "%s: %s\n" % (attr_name, getattr(self, attr_name))
+                res_str += f"{attr_name}: {getattr(self, attr_name)}\n"
         else:
             res_str = Variable.to_string(self, throw_on_error, False)
         return res_str
diff --git a/python/paddle/base/layers/layer_function_generator.py b/python/paddle/base/layers/layer_function_generator.py
index 390096123ac4e..b0f35af4fefed 100644
--- a/python/paddle/base/layers/layer_function_generator.py
+++ b/python/paddle/base/layers/layer_function_generator.py
@@ -329,8 +329,9 @@ def func(x, name=None):
             and x.is_view_var
         ):
             raise ValueError(
-                'Sorry about what\'s happend. In to_static mode, %s\'s output variable %s is a viewed Tensor in dygraph. This will result in inconsistent calculation behavior between dynamic and static graphs. You mast find the location of the strided API be called, and call %s = %s.assign().'
-                % (inplace_op_type, x.name, x.name, x.nameb)
+                'Sorry about what\'s happened. In to_static mode, {}\'s output variable {} is a viewed Tensor in dygraph. This will result in inconsistent calculation behavior between dynamic and static graphs. You must find the location where the strided API is called, and call {} = {}.assign().'.format(
+                    inplace_op_type, x.name, x.name, x.name
+                )
             )
         return generate_activation_fn(origin_op_type)(x, name)
diff --git a/python/paddle/base/layers/math_op_patch.py b/python/paddle/base/layers/math_op_patch.py
index ba327411264ea..f2b1ac7c6d04d 100644
--- a/python/paddle/base/layers/math_op_patch.py
+++ b/python/paddle/base/layers/math_op_patch.py
@@ -548,10 +548,9 @@ def __impl__(self, other_var):
             file_name = stack[1]
             line_num = stack[2]
             warnings.warn(
-                "%s:%s\nThe behavior of expression %s has been unified with %s(X, Y, axis=-1) from Paddle 2.0. "
+                "{}:{}\nThe behavior of expression {} has been unified with {}(X, Y, axis=-1) from Paddle 2.0. "
                 "If your code works well in the older versions but crashes in this version, try to use "
-                "%s(X, Y, axis=0) instead of %s. This transitional warning will be dropped in the future."
-                % (
+                "{}(X, Y, axis=0) instead of {}. This transitional warning will be dropped in the future.".format(
                     file_name,
                     line_num,
                     EXPRESSION_MAP[method_name],
diff --git a/python/paddle/base/trainer_desc.py b/python/paddle/base/trainer_desc.py
index 1533647270dfb..255ddf05a580a 100644
--- a/python/paddle/base/trainer_desc.py
+++ b/python/paddle/base/trainer_desc.py
@@ -237,7 +237,7 @@ def _set_copy_table_config(self, config_dict):
         if len(src_sparse_tables) != len(dest_sparse_tables):
             raise ValueError(
                 "len(src_sparse_tables) != len(dest_sparse_tables),"
-                " %s vs %s" % (len(src_sparse_tables), len(dest_sparse_tables))
+                f" {len(src_sparse_tables)} vs {len(dest_sparse_tables)}"
             )
         for i in src_sparse_tables:
             config.src_sparse_tables.append(i)
@@ -253,7 +253,7 @@ def _set_copy_table_config(self, config_dict):
         if len(src_dense_tables) != len(dest_dense_tables):
             raise ValueError(
                 "len(src_dense_tables) != len(dest_dense_tables),"
-                " %s vs %s" % (len(src_dense_tables), len(dest_dense_tables))
+                f" {len(src_dense_tables)} vs {len(dest_dense_tables)}"
             )
         for i in src_dense_tables:
             config.src_dense_tables.append(i)
@@ -270,8 +270,8 @@ def _set_copy_table_config(self, config_dict):
             dest_var_list = [dest_var_list]
         if len(src_var_list) != len(dest_var_list):
             raise ValueError(
-                "len(src_var_list) != len(dest_var_list), %s vs"
-                " %s" % (len(src_var_list), len(dest_var_list))
+                f"len(src_var_list) != len(dest_var_list), {len(src_var_list)} vs"
+                f" {len(dest_var_list)}"
             )
         for i in src_var_list:
             config.src_var_list.append(i)
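
Note on the pattern: every hunk above applies the same mechanical rewrite. The "UP031" suppression is dropped from the per-file ignore list in pyproject.toml, and printf-style `%` interpolation is replaced with `str.format()` or f-strings. Below is a minimal sketch of the string equivalences these hunks rely on; the variable names are invented for illustration, not taken from the Paddle sources.

# Illustrative sketch only: names below are made up, not from the diff above.
op, attr = "scale", "bias"

# printf-style, str.format(), and f-strings render identical text;
# ruff rule UP031 flags the first form and prefers the other two.
assert (
    "op %s use extra_attr: %s" % (op, attr)
    == "op {} use extra_attr: {}".format(op, attr)
    == f"op {op} use extra_attr: {attr}"
)

# '%r' maps to the '!r' conversion, as in the executor.py hunks.
name = "x"
assert "fed Variable %r" % name == f"fed Variable {name!r}"

# Literal braces must be doubled inside an f-string, which is why the awk
# command in core.py becomes '{{print $3}}'.
so, dso = "core.so", "libcudnn"
assert (
    "ldd %s|grep %s|awk '{print $3}'" % (so, dso)
    == f"ldd {so}|grep {dso}|awk '{{print $3}}'"
)

Because `.format()` consumes its arguments in the same order as the old `%` tuple, these rewrites are behavior-preserving whenever the argument count matches the placeholder count.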