
[CodeStyle][py2][U004] unnecessary explicit object inheritance in class definition #47642

Merged · 3 commits · Nov 8, 2022
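
For context, the whole PR is one mechanical substitution: on Python 3 every class is a new-style class that implicitly inherits from `object`, so spelling the base out adds noise without changing behavior. Below is a minimal sketch (illustrative only, not taken from the diff) of the equivalence the U004 rule relies on:

# Hypothetical demonstration, not part of the PR: both spellings
# produce identical classes on Python 3.
class WithExplicitBase(object):  # redundant py2-era spelling
    pass


class WithImplicitBase:  # py3 spelling this PR switches to
    pass


# The MRO is (cls, object) either way, so removing the explicit
# base changes nothing about inheritance or behavior.
assert WithExplicitBase.__mro__[1:] == (object,)
assert WithImplicitBase.__mro__[1:] == (object,)

A rewrite of this breadth can be applied mechanically, presumably with a tool such as pyupgrade (`pyupgrade --py3-plus`).
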
2 changes: 1 addition & 1 deletion paddle/infrt/tests/models/efficientnet-b4/net/utils.py
@@ -239,7 +239,7 @@ def efficientnet_params(model_name):
     return params_dict[model_name]


-class BlockDecoder(object):
+class BlockDecoder:
     """Block Decoder for readability, straight from the official TensorFlow repository"""

     @staticmethod
2 changes: 1 addition & 1 deletion paddle/phi/api/yaml/generator/api_base.py
@@ -19,7 +19,7 @@
 PREFIX_META_TENSOR_NAME = 'meta_'


-class BaseAPI(object):
+class BaseAPI:
     def __init__(self, api_item_yaml):
         self.api = self.get_api_name(api_item_yaml)
2 changes: 1 addition & 1 deletion python/paddle/audio/functional/window.py
@@ -19,7 +19,7 @@
 from paddle import Tensor


-class WindowFunctionRegister(object):
+class WindowFunctionRegister:
     def __init__(self):
         self._functions_dict = dict()
6 changes: 3 additions & 3 deletions python/paddle/autograd/py_layer.py
@@ -21,7 +21,7 @@
 __all__ = []


-class LegacyPyLayerContext(object):
+class LegacyPyLayerContext:
     """
     The object of this class is a context that is used in PyLayer to enhance the function.

@@ -131,7 +131,7 @@ def __new__(cls, name, temp_bases, attrs):
         return type.__new__(impl, "impl", (), {})


-class CPyLayer(object):
+class CPyLayer:
     @classmethod
     @dygraph_only
     def apply(cls, *args, **kwargs):
@@ -336,7 +336,7 @@ def backward(ctx, dy):
         )


-class EagerPyLayerContext(object):
+class EagerPyLayerContext:
     def save_for_backward(self, *tensors):
         """
         Saves given tensors that backward need. Use ``saved_tensor`` in the `backward` to get the saved tensors.
2 changes: 1 addition & 1 deletion python/paddle/dataset/imikolov.py
@@ -31,7 +31,7 @@
 MD5 = '30177ea32e27c525793142b6bf2c8e2d'


-class DataType(object):
+class DataType:
     NGRAM = 1
     SEQ = 2
4 changes: 2 additions & 2 deletions python/paddle/dataset/movielens.py
@@ -38,7 +38,7 @@
 MD5 = 'c4d9eecfca2ab87c1945afe126590906'


-class MovieInfo(object):
+class MovieInfo:
     """
     Movie id, title and categories information are stored in MovieInfo.
     """
@@ -69,7 +69,7 @@ def __repr__(self):
         return self.__str__()


-class UserInfo(object):
+class UserInfo:
     """
     User id, gender, age, and job information are stored in UserInfo.
     """
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/cluster_v2.py
@@ -116,7 +116,7 @@ def mesh(self):
         return self._mesh


-# class Cluster(object):
+# class Cluster:
 #     """
 #     The cluster represents the hardware resource.
 #     """
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/converter.py
@@ -19,7 +19,7 @@
 from ..utils.log_utils import get_logger


-class Converter(object):
+class Converter:
     """
     Converter is a class object for auto parallel to convert tensors from
     one parallel strategy to another one. Tensors will merge and slice value
8 changes: 4 additions & 4 deletions python/paddle/distributed/auto_parallel/cost_model.py
@@ -35,7 +35,7 @@ class CostNodeType(Enum):
     NOP = 5


-class Cost(object):
+class Cost:
     def __init__(self):
         self.runtime = None
         self.static_mem = None
@@ -49,7 +49,7 @@ class CostModelMode(Enum):
     MIXED = 3


-class CostNode(object):
+class CostNode:
     def __init__(self, node, node_type, id=None):
         self.id = id
         self.node = node
@@ -172,7 +172,7 @@ def init_comp_cost(self, cost_data):
         self.cost = 0.0


-class PipeEvent(object):
+class PipeEvent:
     def __init__(self, stage_id, event_name, duration, start_time=-1):
         self.stage_id = stage_id
         self.name = event_name
@@ -181,7 +181,7 @@ def __init__(self, stage_id, event_name, duration, start_time=-1):
         self.e_time = -1


-class CostModel(object):
+class CostModel:
     def __init__(
         self,
         mode=CostModelMode.BENCHMARKING,
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/dist_context.py
@@ -1146,7 +1146,7 @@ def prepare_context(self, src_op):
         return kinputs, koutputs


-class BlockState(object):
+class BlockState:
     def __init__(self):
         self.nblock = 0
         self.forward_indices = []
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/helper.py
@@ -192,7 +192,7 @@ def clear(self):
         self.states = defaultdict(bool)


-class ProgramHelper(object):
+class ProgramHelper:
     """
     A Helper class for Engine to provides different Program IR according specified 'mode'.
     """
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/interface.py
@@ -220,7 +220,7 @@ def __call__(self, *args, **kwargs):
 _g_collections = {}


-class CollectionNames(object):
+class CollectionNames:
     FETCHES = "fetches"
     LOGGING = "logging"
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/partitioner.py
@@ -32,7 +32,7 @@
 ]


-class Partitioner(object):
+class Partitioner:
     """
     warning:: Partitioner is experimental and subject to change.
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/process_mesh.py
@@ -39,7 +39,7 @@ def reset_current_process_mesh():
     _g_current_process_mesh = _g_previous_process_mesh


-class ProcessMesh(object):
+class ProcessMesh:
     """
     The `Processmesh` object describes the topology of the used processes.
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/strategy.py
@@ -16,7 +16,7 @@
 from . import constants


-class BaseConfig(object):
+class BaseConfig:
     def __init__(self, category, config_dict=None):
         self._category = category
         self._config_dict = None
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/tuner/config.py
@@ -25,7 +25,7 @@ def _get_pass_config(strategy, pass_name):
     return config


-class TuningConfig(object):
+class TuningConfig:
     """
     A uniform config wrap:
     distributed strategy: the user defined configuration for optimization pass
6 changes: 3 additions & 3 deletions python/paddle/distributed/auto_parallel/tuner/recorder.py
@@ -18,7 +18,7 @@
 import numpy as np


-class MetricRecord(object):
+class MetricRecord:
     """
     One record for a single metric at a given execution step.
     """
@@ -62,7 +62,7 @@ def __repr__(self):
         return "MetricRecord(value={}, step={})".format(self.value, self.step)


-class MetricRecords(object):
+class MetricRecords:
     """
     Records of a single metric across different executions.
     """
@@ -143,7 +143,7 @@ def from_state(cls, state):
         return records


-class MetricsRecorder(object):
+class MetricsRecorder:
     """
     Record the values for all metrics.
     """
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/tuner/storable.py
@@ -18,7 +18,7 @@
 import json


-class Storable(object):
+class Storable:
     def get_state(self):
         raise NotImplementedError
python/paddle/distributed/auto_parallel/tuner/tunable_space.py
@@ -22,7 +22,7 @@
 from .tunable_variable import FloatRange


-class TunableSpace(object):
+class TunableSpace:
     """
     A TunableSpace is constructed by the tunable variables.
     """
python/paddle/distributed/auto_parallel/tuner/tunable_variable.py
@@ -18,7 +18,7 @@
 import numpy as np


-class TunableVariable(object):
+class TunableVariable:
     """
     Tunablevariable base class.
     """
2 changes: 1 addition & 1 deletion python/paddle/distributed/collective.py
@@ -1702,7 +1702,7 @@ def irecv(tensor, src=None, group=None):
     raise RuntimeError("Only support eager dygraph mode.")


-class P2POp(object):
+class P2POp:
     """
     A class that makes point-to-point operations for "batch_isend_irecv".
2 changes: 1 addition & 1 deletion python/paddle/distributed/elastic.py
@@ -16,7 +16,7 @@
 import os


-class Command(object):
+class Command:
     def __init__(self, server, name):
         import etcd3
2 changes: 1 addition & 1 deletion python/paddle/distributed/entry_attr.py
@@ -15,7 +15,7 @@
 __all__ = []


-class EntryAttr(object):
+class EntryAttr:
     """
     Entry Config for paddle.static.nn.sparse_embedding with Parameter Server.
4 changes: 2 additions & 2 deletions python/paddle/distributed/fleet/base/distributed_strategy.py
@@ -65,7 +65,7 @@ def check_configs_key(msg, config, field_name):
         assert key in key_list, "key:{} not in {}".format(key, field_name)


-class DistributedJobInfo(object):
+class DistributedJobInfo:
     """
     DistributedJobInfo will serialize all distributed training information
     Just for inner use: 1) debug 2) replicate experiments
@@ -106,7 +106,7 @@ def _set_distributed_strategy(self, dist_strategy):
 ReduceStrategyFleet = int


-class DistributedStrategy(object):
+class DistributedStrategy:
     __lock_attr = False

     def __init__(self):
python/paddle/distributed/fleet/base/meta_optimizer_factory.py
@@ -26,7 +26,7 @@
 meta_optimizer_names.remove("HeterParallelOptimizer")


-class MetaOptimizerFactory(object):
+class MetaOptimizerFactory:
     def __init__(self):
         pass
4 changes: 2 additions & 2 deletions python/paddle/distributed/fleet/base/role_maker.py
@@ -35,7 +35,7 @@ class Role:
     COORDINATOR = 5


-class Gloo(object):
+class Gloo:
     """
     Gloo is a universal class for barrier and collective communication
     """
@@ -383,7 +383,7 @@ def all_gather(self, input, comm_world="worker"):
         return output


-class RoleMakerBase(object):
+class RoleMakerBase:
     """
     RoleMakerBase is a base class for assigning a role to current process
     in distributed training.
2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/base/runtime_factory.py
@@ -17,7 +17,7 @@
 __all__ = []


-class RuntimeFactory(object):
+class RuntimeFactory:
     def __init__(self):
         pass
2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/base/strategy_compiler.py
@@ -106,7 +106,7 @@ def maximum_path_len_algo(optimizer_list):
     return candidate


-class StrategyCompilerBase(object):
+class StrategyCompilerBase:
     def __init__(self):
         pass
8 changes: 4 additions & 4 deletions python/paddle/distributed/fleet/base/topology.py
@@ -23,7 +23,7 @@
 _HYBRID_PARALLEL_GROUP = None


-class ParallelMode(object):
+class ParallelMode:
     """
     There are all the parallel modes currently supported:
     - DATA_PARALLEL: Distribute input data to different devices.
@@ -47,7 +47,7 @@ class ParallelMode(object):
     SHARDING_PARALLEL = 3


-class CommunicateTopology(object):
+class CommunicateTopology:
     def __init__(
         self,
         hybrid_group_names=["data", "pipe", "sharding", "model"],
@@ -133,7 +133,7 @@ def get_rank_from_stage(self, global_rank, **kwargs):
         return self.get_rank(**tf)


-class HybridCommunicateGroup(object):
+class HybridCommunicateGroup:
     def __init__(self, topology):
         self.nranks = paddle.distributed.get_world_size()
         self.global_rank = paddle.distributed.get_rank()
@@ -410,7 +410,7 @@ def get_rank_from_stage(self, stage_id, **kwargs):
         )


-class _CommunicateGroup(object):
+class _CommunicateGroup:
     """tmp for static"""

     def __init__(self):
4 changes: 2 additions & 2 deletions python/paddle/distributed/fleet/base/util_factory.py
@@ -31,7 +31,7 @@
 __all__ = []


-class UtilFactory(object):
+class UtilFactory:
     def _create_util(self, context=None):
         util = UtilBase()
         if context is not None and "valid_strategy" in context:
@@ -41,7 +41,7 @@ def _create_util(self, context=None):
         return util


-class UtilBase(object):
+class UtilBase:
     def __init__(self):
         self.role_maker = None
         self.dist_strategy = None