diff --git a/.flake8 b/.flake8
index 91137a006d088..5187a0cdefe03 100644
--- a/.flake8
+++ b/.flake8
@@ -28,9 +28,3 @@ per-file-ignores =
     # Ignore compare with True in sot unittest
     test/sot/test_dup_top.py:E712
-
-    # temp ignore base directory
-    python/paddle/base/*:
-        E712,
-        E266,
-        E714
diff --git a/python/paddle/base/backward.py b/python/paddle/base/backward.py
index 876db0abc3aa7..e62a5b9245a1b 100755
--- a/python/paddle/base/backward.py
+++ b/python/paddle/base/backward.py
@@ -2348,7 +2348,7 @@ def _find_op_path_(
         # If block is while block, dealing with op specifically again.
         # TODO(liym27): Consider special types of ops.
         for i, op in reversed(list(enumerate(block.ops))):
-            if relevant_op_flags[i] == False and _some_in_set_(
+            if relevant_op_flags[i] is False and _some_in_set_(
                 op.desc.output_arg_names(), output_names
             ):
                 relevant_op_flags[i] = True
diff --git a/python/paddle/base/device_worker.py b/python/paddle/base/device_worker.py
index 755f7257b735a..c20677f6acd5e 100644
--- a/python/paddle/base/device_worker.py
+++ b/python/paddle/base/device_worker.py
@@ -450,7 +450,7 @@ def _gen_worker_desc(self, trainer_desc):
                 if (
                     opt_info["use_cvm"]
                     or "no_cvm" in opt_info
-                    and opt_info["no_cvm"] == True
+                    and opt_info["no_cvm"] is True
                 ):
                     sparse_table.emb_dim = self._fleet_desc.server_param.downpour_server_param.downpour_table_param[
                         i
@@ -560,7 +560,7 @@ def _gen_worker_desc(self, trainer_desc):
                 if (
                     opt_info["use_cvm"]
                     or "no_cvm" in opt_info
-                    and opt_info["no_cvm"] == True
+                    and opt_info["no_cvm"] is True
                 ):
                     sparse_table.emb_dim = self._fleet_desc.server_param.downpour_server_param.downpour_table_param[
                         i
diff --git a/python/paddle/base/dygraph/base.py b/python/paddle/base/dygraph/base.py
index cadb6bcb08937..69ee71395b9aa 100644
--- a/python/paddle/base/dygraph/base.py
+++ b/python/paddle/base/dygraph/base.py
@@ -928,7 +928,7 @@ def to_variable(value, name=None, zero_copy=None, dtype=None):
         # (2): when used in flask framework, it may result in hang.
         # Details: https://github.com/PaddlePaddle/Paddle/issues/26635
         # So, we temporally diable the zero_copy strategy.
-        if zero_copy == True:
+        if zero_copy is True:
             warnings.warn(
                 "Currently, zero_copy is not supported, and it will be discarded."
             )
diff --git a/python/paddle/base/executor.py b/python/paddle/base/executor.py
index 65f460309d4a3..3abc40acc6c2f 100755
--- a/python/paddle/base/executor.py
+++ b/python/paddle/base/executor.py
@@ -1711,7 +1711,7 @@ def _run_impl(
         if isinstance(program, Program) and program._heter_pipeline_opt:
             # print("program._heter_pipeline_opt: {}".format(
             # program._heter_pipeline_opt))
-            ## change default executor
+            # change default executor
             heter_place = program._heter_pipeline_opt["heter_place"]
             heter_place = framework._get_paddle_place(heter_place)
             p = core.Place()
@@ -1868,12 +1868,12 @@ def _run_impl(
                     varobj = global_block.vars[varname]
                     if (
-                        vardesc.persistable() == False
+                        vardesc.persistable() is False
                         and vardesc.type() == core.VarDesc.VarType.LOD_TENSOR
-                        and vardesc.need_check_feed() == True
-                        and varobj.stop_gradient == True
-                        and varobj.is_data == True
-                        and varobj.belong_to_optimizer == False
+                        and vardesc.need_check_feed() is True
+                        and varobj.stop_gradient is True
+                        and varobj.is_data is True
+                        and varobj.belong_to_optimizer is False
                         and varname not in feed
                     ):
                         raise ValueError('Need feed data for variable %s' % varname)
@@ -2159,7 +2159,7 @@ def _prepare_trainer(
     ):
         is_heter = 0
         use_ps_gpu = 0
-        if not program._fleet_opt is None:
+        if program._fleet_opt is not None:
             if program._fleet_opt.get("worker_class", "") == "HeterCpuWorker":
                 is_heter = 1
             if program._fleet_opt.get("trainer", "") == "HeterXpuTrainer":
@@ -2285,7 +2285,7 @@ def _run_from_dataset(
                 raise RuntimeError(
                     "dataset is need and should be initialized"
                 )
-            ## change default executor
+            # change default executor
             heter_place = framework._get_paddle_place(heter_place)
             p = core.Place()
             p.set_place(heter_place)
diff --git a/python/paddle/base/framework.py b/python/paddle/base/framework.py
index ca9bcf5fd8db5..5d7aea27bacb3 100644
--- a/python/paddle/base/framework.py
+++ b/python/paddle/base/framework.py
@@ -2995,7 +2995,7 @@ def __init__(
                 if (
                     type == 'less_than'
                     and op_attrs['force_cpu'] is not None
-                ) or op_attrs['force_cpu'] != False:
+                ) or op_attrs['force_cpu'] is not False:
                     warnings.warn(
                         "The Attr(force_cpu) of Op(%s) will be deprecated in the future, "
                         "please use 'device_guard' instead. 'device_guard' has higher priority when they are "
@@ -4266,7 +4266,7 @@ def _rename_var(self, name, new_name):
         return var

     def _remove_var(self, name, sync=True):
-        if sync == True:
+        if sync is True:
             self._sync_with_cpp()
         self.desc._remove_var(name.encode())
         del self.vars[name]
@@ -4455,7 +4455,7 @@ def _remove_op(self, index, sync=True):
         Returns:
             None
         """
-        if sync == True:
+        if sync is True:
             self._sync_with_cpp()
         self.desc._remove_op(index, index + 1)
         del self.ops[index]
diff --git a/python/paddle/base/trainer_desc.py b/python/paddle/base/trainer_desc.py
index 255ddf05a580a..3d6c947db484e 100644
--- a/python/paddle/base/trainer_desc.py
+++ b/python/paddle/base/trainer_desc.py
@@ -112,7 +112,7 @@ def _set_infer(self, infer):

     def _set_fleet_desc(self, fleet_desc):
         self._fleet_desc = fleet_desc
-        ## serialize fleet_desc
+        # serialize fleet_desc
         from google.protobuf import text_format

         fleet_desc_str = text_format.MessageToString(fleet_desc)
diff --git a/python/paddle/base/trainer_factory.py b/python/paddle/base/trainer_factory.py
index c5743ca22a29e..c8b61fdf7c112 100644
--- a/python/paddle/base/trainer_factory.py
+++ b/python/paddle/base/trainer_factory.py
@@ -186,7 +186,7 @@ def handler_launch_func(self, scope, handler):
             elapsed_secs = 0
             while True:
                 self.running_lock.acquire()
-                if self.running == False:
+                if self.running is False:
                     break
                 if elapsed_secs < period_secs:
                     # TODO(guru4elephant): needs customized condition
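
Note on the lint rules this patch addresses (the rule descriptions are standard pycodestyle behavior; the variable names in the sketch below are made up for illustration and do not come from the patch): E712 flags equality comparisons against True/False, E714 flags "not x is None" in favor of "x is not None", and E266 flags block comments that start with "##". One caveat when rewriting "== True" as "is True": the two are not equivalent for truthy non-bool values, so the mechanical rewrite is only safe where the value is known to be an actual bool, as with the flags touched above. A minimal sketch:

    # E712: equality vs. identity against the bool singletons.
    flag = 1
    print(flag == True)   # True  -- equality; pycodestyle E712 flags this spelling
    print(flag is True)   # False -- identity; the int 1 is not the object True

    # E714: prefer "is not" over "not ... is".
    opt = None
    print(not opt is None)  # legal but flagged by E714
    print(opt is not None)  # equivalent, preferred spelling

    # E266: block comments should start with a single "#".
    ## flagged: too many leading '#' characters
    # fine: one leading '#'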