[CodeStyle][ruff] update ruff v0.0.287 and fix F401 #57124

Closed · wants to merge 4 commits
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -66,7 +66,7 @@ repos:
- id: flake8
args: ["--config=.flake8"]
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.0.272
rev: v0.0.287
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix, --no-cache]
26 changes: 26 additions & 0 deletions pyproject.toml
@@ -40,11 +40,14 @@ select = [

# NumPy-specific rules
"NPY001",
# "NPY003",

# Bugbear
"B002",
"B003",
"B004",
# "B006", # To be confirmed
# "B007",
Comment on lines +49 to +50
Aren't both of these the rules you already investigated and found unsuitable to introduce? #51729

"B009",
"B010",
"B011",
@@ -69,6 +72,10 @@ select = [
"PLC3002",
"PLR0206",
"PLR0402",
# "PLR1701",
# "PLR1711",
# "PLR1722",
# "PLW3301",
Comment on lines +75 to +78
PLR1701 and PLR1722 should already be fixed; it looks like the config was simply not updated.

PLR1711 and PLW3301 were added recently, right?

]
unfixable = [
"NPY001"
@@ -82,6 +89,25 @@ ignore = [
"UP015",
# It will cause the performance regression on python3.10
"UP038",

"F403", # Open Later
"F522", # Open Later
"F811", # Open Later
"F821", # Open Later
"C408", # Open Later
"C403", # Open Later
"C405", # Open Later
"C417", # Open Later
"C411", # Open Later
"C416", # Open Later
"UP032", # Open Later
"UP015", # Open Later
"UP018", # Open Later
"UP031", # Open Later
"UP030", # Open Later
"UP028", # Open Later
"UP034", # Open Later
"UP004", # Open Later
Comment on lines +93 to +110
A global ignore? Why ignore these rules globally? That is risky.

]

[tool.ruff.per-file-ignores]
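
For context on the review comment above about PLR1701, PLR1711, PLR1722 and PLW3301: these are ruff's ports of Pylint refactor/warning rules. The sketch below is purely illustrative (hypothetical functions, rule meanings as documented by ruff) and shows the kind of code each rule flags.

def classify(value):
    # PLR1701: repeated isinstance calls on the same object can be
    # merged into one check, e.g. isinstance(value, (int, float)).
    if isinstance(value, int) or isinstance(value, float):
        return "number"
    return "other"


def smallest(a, b, c):
    # PLW3301: nested min()/max() calls can be flattened to min(a, b, c).
    return min(a, min(b, c))


def log_and_return(message):
    print(message)
    # PLR1711: a trailing bare `return` (or `return None`) is redundant.
    return


def abort():
    # PLR1722: prefer sys.exit() over the site-provided exit()/quit().
    exit(1)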
4 changes: 1 addition & 3 deletions python/paddle/base/compiler.py
@@ -12,12 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import multiprocessing
import os
import sys
import warnings
from . import framework
from .framework import _get_paddle_place, _get_paddle_place_list
from .framework import _get_paddle_place, _get_paddle_place_list # noqa: F401
from .framework import cuda_places, cpu_places, xpu_places
from . import core
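
The compiler.py hunk above shows the pattern applied throughout this PR: imports that exist only as re-exports are kept and annotated with # noqa: F401 so that ruff's unused-import rule (and the --fix autofix enabled in the pre-commit hook) leaves them in place, while genuinely dead imports such as multiprocessing are deleted. A minimal, self-contained sketch of the idea (the module name is hypothetical):

# utilities.py (hypothetical): `json` is imported only so that callers
# can write `from utilities import json`.  Inside this module it is
# never used, so ruff reports F401 and `ruff --fix` would remove the
# line; the noqa marker documents the intentional re-export and keeps it.
import json  # noqa: F401

Presumably the re-exports are suppressed rather than deleted so that downstream code importing these names from their old locations keeps working.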

104 changes: 58 additions & 46 deletions python/paddle/base/core.py
@@ -17,7 +17,6 @@
import os
import warnings
import platform
import logging

has_paddle_dy_lib = False

@@ -280,34 +279,45 @@ def to_list(s):
libpaddle.LoDTensor = libpaddle.Tensor

from .libpaddle import *
from .libpaddle import __doc__, __file__, __name__, __package__
from .libpaddle import __unittest_throw_exception__
from .libpaddle import _append_python_callable_object_and_return_id
from .libpaddle import _cleanup, _Scope
from .libpaddle import _get_use_default_grad_op_desc_maker_ops
from .libpaddle import _get_all_register_op_kernels
from .libpaddle import _get_registered_phi_kernels
from .libpaddle import _is_program_version_supported
from .libpaddle import _set_eager_deletion_mode
from .libpaddle import _get_eager_deletion_vars
from .libpaddle import _set_fuse_parameter_group_size
from .libpaddle import _set_fuse_parameter_memory_size
from .libpaddle import _is_dygraph_debug_enabled
from .libpaddle import _dygraph_debug_level
from .libpaddle import _switch_tracer
from .libpaddle import (
__doc__,
__file__,
__name__,
__package__,
) # noqa: F401
from .libpaddle import __unittest_throw_exception__ # noqa: F401
from .libpaddle import (
_append_python_callable_object_and_return_id,
) # noqa: F401
from .libpaddle import _cleanup, _Scope # noqa: F401
from .libpaddle import _get_use_default_grad_op_desc_maker_ops # noqa: F401
from .libpaddle import _get_all_register_op_kernels # noqa: F401
from .libpaddle import _get_registered_phi_kernels # noqa: F401
from .libpaddle import _is_program_version_supported # noqa: F401
from .libpaddle import _set_eager_deletion_mode # noqa: F401
from .libpaddle import _get_eager_deletion_vars # noqa: F401
from .libpaddle import _set_fuse_parameter_group_size # noqa: F401
from .libpaddle import _set_fuse_parameter_memory_size # noqa: F401
from .libpaddle import _is_dygraph_debug_enabled # noqa: F401
from .libpaddle import _dygraph_debug_level # noqa: F401
from .libpaddle import _switch_tracer # noqa: F401
from .libpaddle import _set_paddle_lib_path
from .libpaddle import _create_loaded_parameter
from .libpaddle import _cuda_synchronize
from .libpaddle import _test_enforce_gpu_success
from .libpaddle import _is_compiled_with_heterps
from .libpaddle import _promote_types_if_complex_exists
from .libpaddle import _set_cached_executor_build_strategy
from .libpaddle import _device_synchronize
from .libpaddle import _xpu_device_synchronize
from .libpaddle import _get_current_stream
from .libpaddle import _Profiler, _ProfilerResult, _RecordEvent
from .libpaddle import _set_current_stream
from .libpaddle import _get_phi_kernel_name
from .libpaddle import _create_loaded_parameter # noqa: F401
from .libpaddle import _cuda_synchronize # noqa: F401
from .libpaddle import _test_enforce_gpu_success # noqa: F401
from .libpaddle import _is_compiled_with_heterps # noqa: F401
from .libpaddle import _promote_types_if_complex_exists # noqa: F401
from .libpaddle import _set_cached_executor_build_strategy # noqa: F401
from .libpaddle import _device_synchronize # noqa: F401
from .libpaddle import _xpu_device_synchronize # noqa: F401
from .libpaddle import _get_current_stream # noqa: F401
from .libpaddle import (
_Profiler,
_ProfilerResult,
_RecordEvent,
) # noqa: F401
from .libpaddle import _set_current_stream # noqa: F401
from .libpaddle import _get_phi_kernel_name # noqa: F401

# prim controller flags
from .libpaddle import __set_bwd_prim_enabled
@@ -317,31 +327,33 @@ def to_list(s):
from .libpaddle import __set_all_prim_enabled
from .libpaddle import _is_eager_prim_enabled
from .libpaddle import __set_eager_prim_enabled
from .libpaddle import _set_prim_target_grad_name
from .libpaddle import _add_skip_comp_ops
from .libpaddle import _set_prim_target_grad_name # noqa: F401
from .libpaddle import _add_skip_comp_ops # noqa: F401
from .libpaddle import _set_bwd_prim_blacklist
from .libpaddle import _remove_skip_comp_ops
from .libpaddle import _remove_skip_comp_ops # noqa: F401

# custom device
from .libpaddle import _get_current_custom_device_stream
from .libpaddle import _set_current_custom_device_stream
from .libpaddle import _synchronize_custom_device
from .libpaddle import CustomDeviceStream
from .libpaddle import CustomDeviceEvent
from .libpaddle import _get_current_custom_device_stream # noqa: F401
from .libpaddle import _set_current_custom_device_stream # noqa: F401
from .libpaddle import _synchronize_custom_device # noqa: F401
from .libpaddle import CustomDeviceStream # noqa: F401
from .libpaddle import CustomDeviceEvent # noqa: F401

if sys.platform != 'win32':
from .libpaddle import _set_process_pids
from .libpaddle import _erase_process_pids
from .libpaddle import _set_process_signal_handler
from .libpaddle import _throw_error_if_process_failed
from .libpaddle import _convert_to_tensor_list
from .libpaddle import _array_to_share_memory_tensor
from .libpaddle import _cleanup_mmap_fds
from .libpaddle import _remove_tensor_list_mmap_fds
from .libpaddle import _set_max_memory_map_allocation_pool_size
from .libpaddle import _set_process_pids # noqa: F401
from .libpaddle import _erase_process_pids # noqa: F401
from .libpaddle import _set_process_signal_handler # noqa: F401
from .libpaddle import _throw_error_if_process_failed # noqa: F401
from .libpaddle import _convert_to_tensor_list # noqa: F401
from .libpaddle import _array_to_share_memory_tensor # noqa: F401
from .libpaddle import _cleanup_mmap_fds # noqa: F401
from .libpaddle import _remove_tensor_list_mmap_fds # noqa: F401
from .libpaddle import (
_set_max_memory_map_allocation_pool_size,
) # noqa: F401

# CINN
from .libpaddle import is_run_with_cinn
from .libpaddle import is_run_with_cinn # noqa: F401

except Exception as e:
if has_paddle_dy_lib:
4 changes: 1 addition & 3 deletions python/paddle/base/data_feeder.py
@@ -14,16 +14,14 @@

from . import core
import numpy as np
import os
import multiprocessing
import warnings
import struct

from .framework import (
Variable,
default_main_program,
in_dygraph_mode,
_current_expected_place,
_current_expected_place, # noqa: F401
)
from .framework import _cpu_num, _cuda_ids

3 changes: 0 additions & 3 deletions python/paddle/base/device_worker.py
@@ -623,7 +623,6 @@ def _gen_worker_desc(self, trainer_desc):
Args:
trainer_desc(TrainerDesc): a TrainerDesc object
"""
from google.protobuf import text_format
from . import core

trainer_desc.device_worker_name = "SectionWorker"
@@ -671,8 +670,6 @@ def _gen_worker_desc(self, trainer_desc):
Args:
trainer_desc(TrainerDesc): a TrainerDesc object
"""
from google.protobuf import text_format
from . import core

trainer_desc.device_worker_name = "HeterSectionWorker"
heter_pipeline_opt = self._program._heter_pipeline_opt
11 changes: 5 additions & 6 deletions python/paddle/base/dygraph/math_op_patch.py
@@ -13,18 +13,17 @@
# limitations under the License.

from .. import core
from ..framework import (
from ..framework import ( # noqa: F401
Variable,
convert_np_dtype_to_dtype_,
in_dygraph_mode,
)
from ..framework import _create_tensor as framework_create_tensor
from ..layers.layer_function_generator import OpProtoHolder
from . import no_grad
from .. import framework
from ..framework import _create_tensor as framework_create_tensor # noqa: F401
from ..layers.layer_function_generator import OpProtoHolder # noqa: F401
from . import no_grad # noqa: F401
from .. import framework # noqa: F401

import numpy as np
import warnings
from paddle import _C_ops, _legacy_C_ops

_supported_int_dtype_ = [
13 changes: 6 additions & 7 deletions python/paddle/base/dygraph/tensor_patch_methods.py
@@ -15,15 +15,14 @@
import inspect
import numpy as np
import warnings
import weakref
import sys

import paddle
from .. import framework
from ..framework import convert_np_dtype_to_dtype_
from .. import core
from .. import unique_name
from ..framework import (
from ..framework import ( # noqa: F401
Variable,
Parameter,
_getitem_static,
@@ -32,18 +31,18 @@
EagerParamBase,
in_dygraph_mode,
)
from .base import switch_to_static_graph
from .math_op_patch import monkey_patch_math_tensor
from .base import switch_to_static_graph # noqa: F401
from .math_op_patch import monkey_patch_math_tensor # noqa: F401
from paddle.base.data_feeder import (
convert_uint16_to_float,
_PADDLE_DTYPE_2_NUMPY_DTYPE,
)
import paddle.utils.deprecated as deprecated
import paddle.profiler as profiler
from paddle.profiler.utils import in_profiler_mode
from paddle import _C_ops, _legacy_C_ops
from paddle.device import get_all_custom_device_type
from paddle.base.framework import _global_flags
from paddle import _C_ops, _legacy_C_ops # noqa: F401
from paddle.device import get_all_custom_device_type # noqa: F401
from paddle.base.framework import _global_flags # noqa: F401

_grad_scalar = None

3 changes: 0 additions & 3 deletions python/paddle/base/dygraph/tracer.py
@@ -12,9 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import defaultdict

import numpy as np

from paddle.base import core
from paddle.base import framework
3 changes: 1 addition & 2 deletions python/paddle/base/executor.py
@@ -14,7 +14,6 @@

import logging
import os
import multiprocessing
import sys
import warnings
import numpy as np
@@ -42,7 +41,7 @@
import copy
from . import framework
from .incubate.checkpoint import auto_checkpoint as acp
from .compiler import _prune_feed_ops
from .compiler import _prune_feed_ops # noqa: F401

from functools import lru_cache

6 changes: 2 additions & 4 deletions python/paddle/base/framework.py
@@ -14,9 +14,7 @@

import textwrap
import collections
from collections import defaultdict
from collections.abc import Iterable
import contextlib
from .wrapped_decorator import signature_safe_contextmanager, wrap_decorator
import os
import re
@@ -28,9 +26,9 @@
import subprocess
import multiprocessing
import sys
import logging
import logging # noqa: F401

from .proto import framework_pb2, data_feed_pb2
from .proto import framework_pb2

from . import core
from . import unique_name
5 changes: 1 addition & 4 deletions python/paddle/base/incubate/checkpoint/auto_checkpoint.py
@@ -14,13 +14,10 @@

import sys
import logging
import hashlib
import json
import os
import time
import collections
from threading import Thread, current_thread
from contextlib import contextmanager
from threading import current_thread

from paddle.base import unique_name, compiler
from .checkpoint_saver import SerializableBase, CheckpointSaver, PaddleModel