diff --git a/paddle/infrt/tests/models/efficientnet-b4/net/utils.py b/paddle/infrt/tests/models/efficientnet-b4/net/utils.py
index 9a060306995bba614548d29d18e72eca6d42b834..0617870bf019097ca6e0acc68eb95e08c67e2da3 100644
--- a/paddle/infrt/tests/models/efficientnet-b4/net/utils.py
+++ b/paddle/infrt/tests/models/efficientnet-b4/net/utils.py
@@ -239,7 +239,7 @@ def efficientnet_params(model_name):
     return params_dict[model_name]


-class BlockDecoder(object):
+class BlockDecoder:
    """Block Decoder for readability, straight from the official TensorFlow repository"""

    @staticmethod
diff --git a/paddle/phi/api/yaml/generator/api_base.py b/paddle/phi/api/yaml/generator/api_base.py
index 53b950b63f05231b0e8d7067b79407aea6158ca1..696ad8736b90e4224c0bb770f3b7702da90d432f 100644
--- a/paddle/phi/api/yaml/generator/api_base.py
+++ b/paddle/phi/api/yaml/generator/api_base.py
@@ -19,7 +19,7 @@
 PREFIX_TENSOR_NAME = 'input_'
 PREFIX_META_TENSOR_NAME = 'meta_'


-class BaseAPI(object):
+class BaseAPI:
    def __init__(self, api_item_yaml):
        self.api = self.get_api_name(api_item_yaml)
diff --git a/python/paddle/audio/functional/window.py b/python/paddle/audio/functional/window.py
index 472c56b87acf95db0dc0c04f487af90caa5a3bb4..4836afbb61d03f81422b8102efbd63a00b7b794e 100644
--- a/python/paddle/audio/functional/window.py
+++ b/python/paddle/audio/functional/window.py
@@ -19,7 +19,7 @@
 import paddle
 from paddle import Tensor


-class WindowFunctionRegister(object):
+class WindowFunctionRegister:
    def __init__(self):
        self._functions_dict = dict()
diff --git a/python/paddle/autograd/py_layer.py b/python/paddle/autograd/py_layer.py
index 2efb1ece83e4366a9b65663f51e1ba73bfbe64dc..252cfd5d91decc73ee6c9b40f4fa542d70e47e67 100644
--- a/python/paddle/autograd/py_layer.py
+++ b/python/paddle/autograd/py_layer.py
@@ -21,7 +21,7 @@ from paddle.fluid import core
 __all__ = []


-class LegacyPyLayerContext(object):
+class LegacyPyLayerContext:
    """
    The object of this class is a context that is used in PyLayer to enhance the function.
@@ -131,7 +131,7 @@ def with_mateclass(meta, *bases):
     return type.__new__(impl, "impl", (), {})


-class CPyLayer(object):
+class CPyLayer:
    @classmethod
    @dygraph_only
    def apply(cls, *args, **kwargs):
@@ -336,7 +336,7 @@ class LegacyPyLayer(with_mateclass(LayerMeta, CPyLayer)):
         )


-class EagerPyLayerContext(object):
+class EagerPyLayerContext:
    def save_for_backward(self, *tensors):
        """
        Saves given tensors that backward need. Use ``saved_tensor`` in the `backward` to get the saved tensors.
diff --git a/python/paddle/dataset/imikolov.py b/python/paddle/dataset/imikolov.py
index 4630d88e21a7cb8873df699c982d36e24c3a72bf..f618bbc2c4867c2e99b3813df987d4a96739abce 100644
--- a/python/paddle/dataset/imikolov.py
+++ b/python/paddle/dataset/imikolov.py
@@ -31,7 +31,7 @@
 URL = 'https://dataset.bj.bcebos.com/imikolov%2Fsimple-examples.tgz'
 MD5 = '30177ea32e27c525793142b6bf2c8e2d'


-class DataType(object):
+class DataType:
    NGRAM = 1
    SEQ = 2
diff --git a/python/paddle/dataset/movielens.py b/python/paddle/dataset/movielens.py
index fd57ad8edf758ffab672296507177d337815fc75..7a47293c3c7e0421e03e66c387d2c85c5a4da7c3 100644
--- a/python/paddle/dataset/movielens.py
+++ b/python/paddle/dataset/movielens.py
@@ -38,7 +38,7 @@ URL = 'https://dataset.bj.bcebos.com/movielens%2Fml-1m.zip'
 MD5 = 'c4d9eecfca2ab87c1945afe126590906'


-class MovieInfo(object):
+class MovieInfo:
    """
    Movie id, title and categories information are stored in MovieInfo.
    """
@@ -69,7 +69,7 @@ class MovieInfo(object):
         return self.__str__()


-class UserInfo(object):
+class UserInfo:
    """
    User id, gender, age, and job information are stored in UserInfo.
    """
diff --git a/python/paddle/distributed/auto_parallel/cluster_v2.py b/python/paddle/distributed/auto_parallel/cluster_v2.py
index debcb078f6eb444809c4c621ca8dd413902ab216..1ec2332ad4003bd2d1dad8c19203f8fbac80741f 100644
--- a/python/paddle/distributed/auto_parallel/cluster_v2.py
+++ b/python/paddle/distributed/auto_parallel/cluster_v2.py
@@ -116,7 +116,7 @@ class DeviceMesh(core.DeviceMesh):
         return self._mesh


-# class Cluster(object):
+# class Cluster:
 #     """
 #     The cluster represents the hardware resource.
 #     """
diff --git a/python/paddle/distributed/auto_parallel/converter.py b/python/paddle/distributed/auto_parallel/converter.py
index d0fae414b1981d291c8b31f99dfb232e289b31e0..cc0966be4aba4c06b0288b409ad35c649e0bad6e 100644
--- a/python/paddle/distributed/auto_parallel/converter.py
+++ b/python/paddle/distributed/auto_parallel/converter.py
@@ -19,7 +19,7 @@ import numpy as np
 from ..utils.log_utils import get_logger


-class Converter(object):
+class Converter:
    """
    Converter is a class object for auto parallel to convert tensors from
    one parallel strategy to another one. Tensors will merge and slice value
diff --git a/python/paddle/distributed/auto_parallel/cost_model.py b/python/paddle/distributed/auto_parallel/cost_model.py
index f335244e36073ecbd945a104b28fa2ce1ca85d3e..73e899614d433201be7b1d741658fdd0a2b5f8f2 100644
--- a/python/paddle/distributed/auto_parallel/cost_model.py
+++ b/python/paddle/distributed/auto_parallel/cost_model.py
@@ -35,7 +35,7 @@ class CostNodeType(Enum):
     NOP = 5


-class Cost(object):
+class Cost:
    def __init__(self):
        self.runtime = None
        self.static_mem = None
@@ -49,7 +49,7 @@ class CostModelMode(Enum):
     MIXED = 3


-class CostNode(object):
+class CostNode:
    def __init__(self, node, node_type, id=None):
        self.id = id
        self.node = node
@@ -172,7 +172,7 @@ class CompOpCostNode(CostNode):
         self.cost = 0.0


-class PipeEvent(object):
+class PipeEvent:
    def __init__(self, stage_id, event_name, duration, start_time=-1):
        self.stage_id = stage_id
        self.name = event_name
@@ -181,7 +181,7 @@
         self.e_time = -1


-class CostModel(object):
+class CostModel:
    def __init__(
        self,
        mode=CostModelMode.BENCHMARKING,
diff --git a/python/paddle/distributed/auto_parallel/dist_context.py b/python/paddle/distributed/auto_parallel/dist_context.py
index 4b4fca8730cd9a13b0f4ba2b6f90e8b287e0fcb9..f410468f45b8584c8152c317cdaf4977a93df939 100644
--- a/python/paddle/distributed/auto_parallel/dist_context.py
+++ b/python/paddle/distributed/auto_parallel/dist_context.py
@@ -1146,7 +1146,7 @@ class DistributedOperatorContext:
         return kinputs, koutputs


-class BlockState(object):
+class BlockState:
    def __init__(self):
        self.nblock = 0
        self.forward_indices = []
diff --git a/python/paddle/distributed/auto_parallel/helper.py b/python/paddle/distributed/auto_parallel/helper.py
index 3c13f3d9abc69ee7959a143109de94eb0577c34b..31deaea4275eb969a5e9bed344663cd78b054236 100644
--- a/python/paddle/distributed/auto_parallel/helper.py
+++ b/python/paddle/distributed/auto_parallel/helper.py
@@ -192,7 +192,7 @@ class BuildInfo:
         self.states = defaultdict(bool)


-class ProgramHelper(object):
+class ProgramHelper:
    """
    A Helper class for Engine to provides different Program IR according specified 'mode'.
    """
diff --git a/python/paddle/distributed/auto_parallel/interface.py b/python/paddle/distributed/auto_parallel/interface.py
index 124f622d40f049f224d3405379b76d625e664f33..d2f7e894149c79ba2a9061e021be3bacf6491dd7 100644
--- a/python/paddle/distributed/auto_parallel/interface.py
+++ b/python/paddle/distributed/auto_parallel/interface.py
@@ -220,7 +220,7 @@ def recompute(op):
 _g_collections = {}


-class CollectionNames(object):
+class CollectionNames:
    FETCHES = "fetches"
    LOGGING = "logging"
diff --git a/python/paddle/distributed/auto_parallel/partitioner.py b/python/paddle/distributed/auto_parallel/partitioner.py
index 2a7b7f3e67daabfd017ca7563294cccc23c0598a..6ec52ff69796fe183eccfb12e7b9040df194f16e 100644
--- a/python/paddle/distributed/auto_parallel/partitioner.py
+++ b/python/paddle/distributed/auto_parallel/partitioner.py
@@ -32,7 +32,7 @@ __not_shape_var_type__ = [
 ]


-class Partitioner(object):
+class Partitioner:
    """
    warning:: Partitioner is experimental and subject to change.
diff --git a/python/paddle/distributed/auto_parallel/process_mesh.py b/python/paddle/distributed/auto_parallel/process_mesh.py
index 9d8a44b92d2e2b964c0c1f76faff865b211c0785..1630289dde8aabaafa05125c2df0d39544a023fd 100644
--- a/python/paddle/distributed/auto_parallel/process_mesh.py
+++ b/python/paddle/distributed/auto_parallel/process_mesh.py
@@ -39,7 +39,7 @@ def reset_current_process_mesh():
     _g_current_process_mesh = _g_previous_process_mesh


-class ProcessMesh(object):
+class ProcessMesh:
    """
    The `Processmesh` object describes the topology of the used processes.
diff --git a/python/paddle/distributed/auto_parallel/strategy.py b/python/paddle/distributed/auto_parallel/strategy.py
index 12b6a9b78a3cdbba13647ccc7aa1b7e826e0df90..dcfd453f63a33a42a090e596f9283e6bfbf0538b 100644
--- a/python/paddle/distributed/auto_parallel/strategy.py
+++ b/python/paddle/distributed/auto_parallel/strategy.py
@@ -16,7 +16,7 @@ import copy
 from . import constants


-class BaseConfig(object):
+class BaseConfig:
    def __init__(self, category, config_dict=None):
        self._category = category
        self._config_dict = None
diff --git a/python/paddle/distributed/auto_parallel/tuner/config.py b/python/paddle/distributed/auto_parallel/tuner/config.py
index 4ed6340eccb6f25e6c5e6c0921e517c7adc4d336..7bb9d4f18bcef00748f591513e00415f88172cef 100644
--- a/python/paddle/distributed/auto_parallel/tuner/config.py
+++ b/python/paddle/distributed/auto_parallel/tuner/config.py
@@ -25,7 +25,7 @@ def _get_pass_config(strategy, pass_name):
     return config


-class TuningConfig(object):
+class TuningConfig:
    """
    A uniform config wrap:
    distributed strategy: the user defined configuration for optimization pass
diff --git a/python/paddle/distributed/auto_parallel/tuner/recorder.py b/python/paddle/distributed/auto_parallel/tuner/recorder.py
index 2c838cfb1492164fbb0e27f6085238f8271dfdcb..8174ba22cf78bfc24ab8fcc9987f30f041477d2e 100644
--- a/python/paddle/distributed/auto_parallel/tuner/recorder.py
+++ b/python/paddle/distributed/auto_parallel/tuner/recorder.py
@@ -18,7 +18,7 @@
 import numpy as np


-class MetricRecord(object):
+class MetricRecord:
    """
    One record for a single metric at a given execution step.
    """
@@ -62,7 +62,7 @@ class MetricRecord(object):
         return "MetricRecord(value={}, step={})".format(self.value, self.step)


-class MetricRecords(object):
+class MetricRecords:
    """
    Records of a single metric across different executions.
    """
@@ -143,7 +143,7 @@ class MetricRecords(object):
         return records


-class MetricsRecorder(object):
+class MetricsRecorder:
    """
    Record the values for all metrics.
    """
diff --git a/python/paddle/distributed/auto_parallel/tuner/storable.py b/python/paddle/distributed/auto_parallel/tuner/storable.py
index fb03070ad099a7b122f514c3c56386a35f3cbe92..01e10b4a3b4965261ace9918c350faa96525758a 100644
--- a/python/paddle/distributed/auto_parallel/tuner/storable.py
+++ b/python/paddle/distributed/auto_parallel/tuner/storable.py
@@ -18,7 +18,7 @@
 import json


-class Storable(object):
+class Storable:
    def get_state(self):
        raise NotImplementedError
diff --git a/python/paddle/distributed/auto_parallel/tuner/tunable_space.py b/python/paddle/distributed/auto_parallel/tuner/tunable_space.py
index e3e503401b45371d9372aec6b6ff4ee9d42940df..a4383f5385dee34918684dcfc17b5a2dc91ef90d 100644
--- a/python/paddle/distributed/auto_parallel/tuner/tunable_space.py
+++ b/python/paddle/distributed/auto_parallel/tuner/tunable_space.py
@@ -22,7 +22,7 @@ from .tunable_variable import IntRange
 from .tunable_variable import FloatRange


-class TunableSpace(object):
+class TunableSpace:
    """
    A TunableSpace is constructed by the tunable variables.
    """
diff --git a/python/paddle/distributed/auto_parallel/tuner/tunable_variable.py b/python/paddle/distributed/auto_parallel/tuner/tunable_variable.py
index 74594a8b4ad6dcbeafc0905b202b64dd2858fa83..3f45c68c1de6db8cc4d9bc7f9f7a79ce1f6e9037 100644
--- a/python/paddle/distributed/auto_parallel/tuner/tunable_variable.py
+++ b/python/paddle/distributed/auto_parallel/tuner/tunable_variable.py
@@ -18,7 +18,7 @@
 import numpy as np


-class TunableVariable(object):
+class TunableVariable:
    """
    Tunablevariable base class.
    """
diff --git a/python/paddle/distributed/communication/batch_isend_irecv.py b/python/paddle/distributed/communication/batch_isend_irecv.py
index 073ccb0b41e131f6ac74176c38eae4d9fa56614f..d3f0372b6853327d9573584b6c0047eb203db55d 100644
--- a/python/paddle/distributed/communication/batch_isend_irecv.py
+++ b/python/paddle/distributed/communication/batch_isend_irecv.py
@@ -22,7 +22,7 @@ from paddle.distributed.communication.group import (
 )


-class P2POp(object):
+class P2POp:
    """
    A class that makes point-to-point operations for "batch_isend_irecv".
diff --git a/python/paddle/distributed/elastic.py b/python/paddle/distributed/elastic.py
index 55b73ab315bb4f3e96045e6de6e310c3960bf931..082fdd3c07bea54ef9e9f7831856e7cf11d14d76 100644
--- a/python/paddle/distributed/elastic.py
+++ b/python/paddle/distributed/elastic.py
@@ -16,7 +16,7 @@ import argparse
 import os


-class Command(object):
+class Command:
    def __init__(self, server, name):
        import etcd3
diff --git a/python/paddle/distributed/entry_attr.py b/python/paddle/distributed/entry_attr.py
index be54d4ab7b17ab3bb2bf442cf9c5782a3bf8c8eb..dcd5153bb5f1ea85533595838e5ed8912f53aeaf 100644
--- a/python/paddle/distributed/entry_attr.py
+++ b/python/paddle/distributed/entry_attr.py
@@ -15,7 +15,7 @@
 __all__ = []


-class EntryAttr(object):
+class EntryAttr:
    """
    Entry Config for paddle.static.nn.sparse_embedding with Parameter Server.
diff --git a/python/paddle/distributed/fleet/base/distributed_strategy.py b/python/paddle/distributed/fleet/base/distributed_strategy.py
index c32b1f2d68c59383db82bd908ca9ef507b0664fc..5b3b599513d22d191c75d76cf5f52044b5228060 100755
--- a/python/paddle/distributed/fleet/base/distributed_strategy.py
+++ b/python/paddle/distributed/fleet/base/distributed_strategy.py
@@ -65,7 +65,7 @@ def check_configs_key(msg, config, field_name):
         assert key in key_list, "key:{} not in {}".format(key, field_name)


-class DistributedJobInfo(object):
+class DistributedJobInfo:
    """
    DistributedJobInfo will serialize all distributed training information
    Just for inner use: 1) debug 2) replicate experiments
@@ -106,7 +106,7 @@ ReduceStrategyFluid = paddle.fluid.BuildStrategy.ReduceStrategy
 ReduceStrategyFleet = int


-class DistributedStrategy(object):
+class DistributedStrategy:
    __lock_attr = False

    def __init__(self):
diff --git a/python/paddle/distributed/fleet/base/meta_optimizer_factory.py b/python/paddle/distributed/fleet/base/meta_optimizer_factory.py
index 380a9b8c177af3115b61dda74efc1089611af975..dd4611fc0a8c1daf052ed55b8361a3eadc44d559 100755
--- a/python/paddle/distributed/fleet/base/meta_optimizer_factory.py
+++ b/python/paddle/distributed/fleet/base/meta_optimizer_factory.py
@@ -26,7 +26,7 @@
 meta_optimizer_names.remove("HybridParallelOptimizer")
 meta_optimizer_names.remove("HeterParallelOptimizer")


-class MetaOptimizerFactory(object):
+class MetaOptimizerFactory:
    def __init__(self):
        pass
diff --git a/python/paddle/distributed/fleet/base/role_maker.py b/python/paddle/distributed/fleet/base/role_maker.py
index 1fcd18789c0f7d38d9ccc396bf67ecbe4ad2e663..b001c5482fdfc7af804f89728f9e644db05bd233 100755
--- a/python/paddle/distributed/fleet/base/role_maker.py
+++ b/python/paddle/distributed/fleet/base/role_maker.py
@@ -35,7 +35,7 @@ class Role:
     COORDINATOR = 5


-class Gloo(object):
+class Gloo:
    """
    Gloo is a universal class for barrier and collective communication
    """
@@ -383,7 +383,7 @@ class Gloo(object):
         return output


-class RoleMakerBase(object):
+class RoleMakerBase:
    """
    RoleMakerBase is a base class for assigning a role to current process
    in distributed training.
diff --git a/python/paddle/distributed/fleet/base/runtime_factory.py b/python/paddle/distributed/fleet/base/runtime_factory.py
index e9006409674a724727098c5ef04af016e741d376..51758859035d49e1e5077f38d6987733c1cba2d9 100644
--- a/python/paddle/distributed/fleet/base/runtime_factory.py
+++ b/python/paddle/distributed/fleet/base/runtime_factory.py
@@ -17,7 +17,7 @@ from ...ps.the_one_ps import TheOnePSRuntime
 __all__ = []


-class RuntimeFactory(object):
+class RuntimeFactory:
    def __init__(self):
        pass
diff --git a/python/paddle/distributed/fleet/base/strategy_compiler.py b/python/paddle/distributed/fleet/base/strategy_compiler.py
index 348a79b9d442aa17604ef87883f8380e8b54fdd8..f1844a112398387dff06dcc340e8524584a8c913 100644
--- a/python/paddle/distributed/fleet/base/strategy_compiler.py
+++ b/python/paddle/distributed/fleet/base/strategy_compiler.py
@@ -106,7 +106,7 @@ def maximum_path_len_algo(optimizer_list):
     return candidate


-class StrategyCompilerBase(object):
+class StrategyCompilerBase:
    def __init__(self):
        pass
diff --git a/python/paddle/distributed/fleet/base/topology.py b/python/paddle/distributed/fleet/base/topology.py
index 6fa1521d58c46427d7ae8691656b5f935a4d49f5..335125123c4d6689619579a2639336cb0504a8c1 100644
--- a/python/paddle/distributed/fleet/base/topology.py
+++ b/python/paddle/distributed/fleet/base/topology.py
@@ -23,7 +23,7 @@ __all__ = ['CommunicateTopology', 'HybridCommunicateGroup']
 _HYBRID_PARALLEL_GROUP = None


-class ParallelMode(object):
+class ParallelMode:
    """
    There are all the parallel modes currently supported:
    - DATA_PARALLEL: Distribute input data to different devices.
@@ -47,7 +47,7 @@
     SHARDING_PARALLEL = 3


-class CommunicateTopology(object):
+class CommunicateTopology:
    def __init__(
        self,
        hybrid_group_names=["data", "pipe", "sharding", "model"],
@@ -133,7 +133,7 @@ class CommunicateTopology(object):
         return self.get_rank(**tf)


-class HybridCommunicateGroup(object):
+class HybridCommunicateGroup:
    def __init__(self, topology):
        self.nranks = paddle.distributed.get_world_size()
        self.global_rank = paddle.distributed.get_rank()
@@ -410,7 +410,7 @@ class HybridCommunicateGroup(object):
         )


-class _CommunicateGroup(object):
+class _CommunicateGroup:
    """tmp for static"""

    def __init__(self):
diff --git a/python/paddle/distributed/fleet/base/util_factory.py b/python/paddle/distributed/fleet/base/util_factory.py
index dcaa256a26d11b0720f3a99a571a0bff91656c6d..8717619eafe357252fb737a3b693ce32a62ab1a7 100755
--- a/python/paddle/distributed/fleet/base/util_factory.py
+++ b/python/paddle/distributed/fleet/base/util_factory.py
@@ -31,7 +31,7 @@ import numpy as np
 __all__ = []


-class UtilFactory(object):
+class UtilFactory:
    def _create_util(self, context=None):
        util = UtilBase()
        if context is not None and "valid_strategy" in context:
@@ -41,7 +41,7 @@ class UtilFactory(object):
         return util


-class UtilBase(object):
+class UtilBase:
    def __init__(self):
        self.role_maker = None
        self.dist_strategy = None
diff --git a/python/paddle/distributed/fleet/data_generator/data_generator.py b/python/paddle/distributed/fleet/data_generator/data_generator.py
index ec6114dd21f3a5c6cab612b030866e6960616770..abf8f5f49fa1f7b911a7d377627dda0b06b2ceeb 100644
--- a/python/paddle/distributed/fleet/data_generator/data_generator.py
+++ b/python/paddle/distributed/fleet/data_generator/data_generator.py
@@ -17,7 +17,7 @@ import sys
 __all__ = []


-class DataGenerator(object):
+class DataGenerator:
    """
    DataGenerator is a general Base class for user to inherit
    A user who wants to define his/her own python processing logic
diff --git a/python/paddle/distributed/fleet/dataset/dataset.py b/python/paddle/distributed/fleet/dataset/dataset.py
index f5b3140064593adfcd50e4f38209f324eb1e1753..870c936e543a67aadcf20a4df596daf9368d2413 100755
--- a/python/paddle/distributed/fleet/dataset/dataset.py
+++ b/python/paddle/distributed/fleet/dataset/dataset.py
@@ -20,7 +20,7 @@ import paddle.fluid.core as core
 __all__ = []


-class DatasetBase(object):
+class DatasetBase:
    """Base dataset class."""

    def __init__(self):
diff --git a/python/paddle/distributed/fleet/dataset/index_dataset.py b/python/paddle/distributed/fleet/dataset/index_dataset.py
index 87bf2bc738dedbc5b90ed933b50b24d0b1636ac8..7df2931b5d87db69433c520a74a29af4f8b3c251 100644
--- a/python/paddle/distributed/fleet/dataset/index_dataset.py
+++ b/python/paddle/distributed/fleet/dataset/index_dataset.py
@@ -16,7 +16,7 @@ from paddle.fluid import core
 __all__ = []


-class Index(object):
+class Index:
    def __init__(self, name):
        self._name = name
diff --git a/python/paddle/distributed/fleet/elastic/manager.py b/python/paddle/distributed/fleet/elastic/manager.py
index b23a11642259073245148a198fe99ce63484ef2d..0344c1d43741b830300c8f889a4d8da0a7f238f0 100644
--- a/python/paddle/distributed/fleet/elastic/manager.py
+++ b/python/paddle/distributed/fleet/elastic/manager.py
@@ -52,7 +52,7 @@ class ElasticStatus:
     EXIT = "exit"


-class LauncherInterface(object):
+class LauncherInterface:
    def __init__(self, args):
        self.args = args
        self.procs = []
@@ -124,7 +124,7 @@ class LauncherInterface(object):
         raise NotImplementedError


-class ElasticManager(object):
+class ElasticManager:
    def __init__(self, args, etcd_client):

        self.args = args
diff --git a/python/paddle/distributed/fleet/fleet.py b/python/paddle/distributed/fleet/fleet.py
index 695f03fe1f2a06fb955f3add9cb1bfaaaf319275..617eb5729aef6451b8adb17396b3817b899f5cb6 100644
--- a/python/paddle/distributed/fleet/fleet.py
+++ b/python/paddle/distributed/fleet/fleet.py
@@ -95,7 +95,7 @@ inited_runtime_handler = wrap_decorator(_inited_runtime_handler_)
 is_non_distributed_check = wrap_decorator(_is_non_distributed_check_)


-class Fleet(object):
+class Fleet:
    """
    Unified API for distributed training of PaddlePaddle
    Please reference the https://github.com/PaddlePaddle/PaddleFleetX for details
diff --git a/python/paddle/distributed/fleet/launch_utils.py b/python/paddle/distributed/fleet/launch_utils.py
index 64795140cd7c6c52a2d1b0a85972b6b30adc6d5f..b676eee5bfb1892d66183b62f48b55d89b52c6b7 100755
--- a/python/paddle/distributed/fleet/launch_utils.py
+++ b/python/paddle/distributed/fleet/launch_utils.py
@@ -60,7 +60,7 @@ class DeviceMode:
     MLU = 4


-class Cluster(object):
+class Cluster:
    def __init__(self, hdfs):
        self.job_server = None
        self.pods = []
@@ -133,7 +133,7 @@ class Cluster(object):
         return None


-class JobServer(object):
+class JobServer:
    def __init__(self):
        self.endpoint = None
@@ -147,7 +147,7 @@ class JobServer(object):
         return not self == j


-class Trainer(object):
+class Trainer:
    def __init__(self):
        self.accelerators = []
        self.endpoint = None
@@ -179,7 +179,7 @@ class Trainer(object):
         return self.rank


-class Pod(object):
+class Pod:
    def __init__(self):
        self.rank = None
        self.id = None
@@ -483,7 +483,7 @@ def pretty_print_envs(envs, header=None):
     return _str


-class TrainerProc(object):
+class TrainerProc:
    def __init__(self):
        self.proc = None
        self.log_fn = None
@@ -1278,7 +1278,7 @@ def get_mapped_cluster_from_args_with_rank_mapping(args, device_mode):
     )


-class ParameterServerLauncher(object):
+class ParameterServerLauncher:
    def __init__(self, args, distribute_mode):
        self.args = args
        self.distribute_mode = distribute_mode
diff --git a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py
index ee3f96e243fe2a1ccd88879ff25aa6017ca23a34..b0495e13b21c83a205c4e79012a9d70f245f0332 100644
--- a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py
@@ -24,7 +24,7 @@ HcomGroupConfig = namedtuple('HcomGroupConfig', ['name', 'nranks', 'rank_ids'])
 __all__ = []


-class AscendIRParser(object):
+class AscendIRParser:
    def __init__(self, auto_dp=False, world_rank_size=1):
        self.graph_idx = 0
        self.hcom_endpoints = {}
diff --git a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py
index b24e51896f622f5355b2de0e54ed6f47f26d6117..6158b4a7d4108f8528f63524a84bc84362df78f2 100644
--- a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py
@@ -101,7 +101,7 @@ global_cnt = -1
 global_input_cnt = -1


-class AscendHelper(object):
+class AscendHelper:
    def __init__(self):
        self.dtype2ge_map = {
            0: core.GEDataType.DT_BOOL,
@@ -136,7 +136,7 @@ class AscendHelper(object):
         return self.dtype2np_map[index]


-class AscendParserFactory(object):
+class AscendParserFactory:
    def __init__(self, graph, var2geop):
        self.graph = graph
        self.var2geop = var2geop
@@ -149,7 +149,7 @@ class AscendParserFactory(object):
         raise ValueError("parser class %s does not exist" % parser_class)


-class AscendParserBase(object):
+class AscendParserBase:
    def __init__(self, graph, var2geop):
        self.graph = graph
        self.var2geop = var2geop
diff --git a/python/paddle/distributed/fleet/meta_optimizers/common.py b/python/paddle/distributed/fleet/meta_optimizers/common.py
index 7a3c89f1e9dea2b584f56f8461b2f8624d47d0cf..03ed84563b6283256ea0ac25e3f3cc861eac809b 100644
--- a/python/paddle/distributed/fleet/meta_optimizers/common.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/common.py
@@ -53,7 +53,7 @@ def is_optimizer_op(op):
     ) & int(OpRole.Optimize)


-class CollectiveHelper(object):
+class CollectiveHelper:
    def __init__(self, role_maker, nrings=1, wait_port=True):
        self.nrings = nrings
        self.wait_port = wait_port
diff --git a/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py
index 7e5f6983867e19a9a4cd0ff3f9313f135fe93423..63037dc6f616825cded87026bf446a419ee014e3 100755
--- a/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py
@@ -25,7 +25,7 @@ def _is_trainable(param):
     return not param.stop_gradient


-class DygraphShardingOptimizer(object):
+class DygraphShardingOptimizer:
    """
    A wrapper for Sharding Optimizer in Dygraph.
diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py
index e4db252cf7c95132660555ea93c389fecb2e67b7..1c500ea56b5cb6242f9251960ff48f7a9d2a13df 100755
--- a/python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py
@@ -23,7 +23,7 @@ from paddle.fluid import core
 __all__ = []


-class FP16Utils(object):
+class FP16Utils:
    def __init__(self):
        pass
diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/gradient_clip_helper.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/gradient_clip_helper.py
index 563757d35f4057a5b2f1b444fe09677388ebda56..288e9d7d8a4d96065a487f06e88d5f45ed908e60 100755
--- a/python/paddle/distributed/fleet/meta_optimizers/sharding/gradient_clip_helper.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/gradient_clip_helper.py
@@ -17,7 +17,7 @@ from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
 __all__ = []


-class GradientClipHelper(object):
+class GradientClipHelper:
    def __init__(self, mp_ring_id):
        self.mp_ring_id = mp_ring_id
diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/offload_helper.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/offload_helper.py
index ac10bb42383d1598c51485972bc1e723f907ed48..c1951299c2cdc4311f676aba3912cb673cb74946 100755
--- a/python/paddle/distributed/fleet/meta_optimizers/sharding/offload_helper.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/offload_helper.py
@@ -44,7 +44,7 @@ class PlaceType:
         return PlaceType.CPU


-class OffloadHelper(object):
+class OffloadHelper:
    cpu_place_type = 0
    cuda_place_type = PlaceType.default_device()
    cuda_pinned_place_type = PlaceType.default_pinned()
diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/prune.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/prune.py
index 895fd2f7acb2712afef26b12cb07661a18869a3d..9a264a7dd1ba0c419218513c80110380853bcb7d 100755
--- a/python/paddle/distributed/fleet/meta_optimizers/sharding/prune.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/prune.py
@@ -15,7 +15,7 @@
 __all__ = []


-class ProgramDeps(object):
+class ProgramDeps:
    def __init__(self, block, start_vars, end_vars):
        self._block = block
        # vars where to start to build the deps
diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/shard.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/shard.py
index d33d04098d051a73aff63bcd268d14382d761bc7..82a7a7494d5e66476726bfdbcfdade2f73c2c3e7 100644
--- a/python/paddle/distributed/fleet/meta_optimizers/sharding/shard.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/shard.py
@@ -22,7 +22,7 @@ from paddle.distributed.fleet.meta_optimizers.sharding.fp16_helper import (
 __all__ = []


-class Shard(object):
+class Shard:
    def __init__(
        self,
    ):
@@ -155,7 +155,7 @@ class Shard(object):
         return grads_in_shard


-class ProgramSegment(object):
+class ProgramSegment:
    def __init__(self, block):
        self._block = block
        self._allreduce_vars = []
diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py
index ea42130300f11208f0f75917e3615b9d9fb86dd7..9feed7b1e5aad83f6ae67319347d791b7422bd3e 100755
--- a/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py
@@ -408,7 +408,7 @@ def insert_allreduce_ops(
     return


-class FuseHelper(object):
+class FuseHelper:
    @staticmethod
    def sort_vars_by_dtype(block, vars_name):
        fp32_vars = []
diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/weight_decay_helper.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/weight_decay_helper.py
index 3d5d8aa2a3851bc1ece59ae8443a5f260d91e3b7..0a841cf243d14bf7a1cdb7135938699e14074c05 100644
--- a/python/paddle/distributed/fleet/meta_optimizers/sharding/weight_decay_helper.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/weight_decay_helper.py
@@ -17,7 +17,7 @@ from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_VAR_KEY
 __all__ = []


-class WeightDecayHelper(object):
+class WeightDecayHelper:
    def __init__(self):
        pass
diff --git a/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py b/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py
index 663cd7d28140abe77b0a9c61ee515ecc58131948..7ddbb64883d9144f9fdf6a8658220c54ccd42ff3 100755
--- a/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py
+++ b/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py
@@ -53,7 +53,7 @@ from paddle.incubate.distributed.fleet import recompute_hybrid
 __all__ = []


-class LayerDesc(object):
+class LayerDesc:
    def __init__(self, layer_func, *inputs, **kwargs):
        self.layer_func = layer_func
        self.inputs = inputs
@@ -89,7 +89,7 @@ class SharedLayerDesc(LayerDesc):
         self.shared_weight_attr = shared_weight_attr


-class SegmentLayers(object):
+class SegmentLayers:
    def __init__(
        self,
        layers_desc,
diff --git a/python/paddle/distributed/fleet/runtime/runtime_base.py b/python/paddle/distributed/fleet/runtime/runtime_base.py
index 2e8bacfbc3b1ded58e63e8d9e93764a0c0090b91..192754d6638b9d09a1882c3650eab22d7ca2a9ef 100644
--- a/python/paddle/distributed/fleet/runtime/runtime_base.py
+++ b/python/paddle/distributed/fleet/runtime/runtime_base.py
@@ -15,7 +15,7 @@
 __all__ = []


-class RuntimeBase(object):
+class RuntimeBase:
    def __init__(self):
        pass
diff --git a/python/paddle/distributed/fleet/utils/fs.py b/python/paddle/distributed/fleet/utils/fs.py
index 8a67301e174618832215e564d553a771b50d3a2e..667752e668a96f753a81013128344b77e101cbb2 100644
--- a/python/paddle/distributed/fleet/utils/fs.py
+++ b/python/paddle/distributed/fleet/utils/fs.py
@@ -46,7 +46,7 @@ class FSShellCmdAborted(ExecuteError):
     pass


-class FS(object):
+class FS:
    @abc.abstractmethod
    def ls_dir(self, fs_path):
        raise NotImplementedError
diff --git a/python/paddle/distributed/fleet/utils/http_server.py b/python/paddle/distributed/fleet/utils/http_server.py
index 5602c5f01afc86137c337ba9b7c4cc895191dde5..2828b9e5ddfecf9b92540f14281431b3ef258e12 100644
--- a/python/paddle/distributed/fleet/utils/http_server.py
+++ b/python/paddle/distributed/fleet/utils/http_server.py
@@ -128,7 +128,7 @@ class KVHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
         self.end_headers()


-class KVHTTPServer(HTTPServer, object):
+class KVHTTPServer(HTTPServer):
    """
    it is a http server storing kv pairs.
    """
diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py
index a56c71fc40b5d0ec6855863061ef7c56c82adee4..5ba7c9c29762adfb5a83be356ff9817e77852011 100644
--- a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py
+++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py
@@ -20,7 +20,7 @@ import paddle.distributed.fleet as fleet
 import numpy as np


-class HybridParallelInferenceHelper(object):
+class HybridParallelInferenceHelper:
    """
    A helper class to split program for inference with hybrid parallelism.
diff --git a/python/paddle/distributed/launch/context/__init__.py b/python/paddle/distributed/launch/context/__init__.py
index 037fc0efbc51be8c732b757d9570b869f0c11125..0d3410e368e7093cc57714be69483a111f3cb6d5 100644
--- a/python/paddle/distributed/launch/context/__init__.py
+++ b/python/paddle/distributed/launch/context/__init__.py
@@ -21,7 +21,7 @@ from .args_envs import parse_args, fetch_envs, env_args_mapping
 import logging


-class Context(object):
+class Context:
    def __init__(self, enable_plugin=True):
        self.args, self.unknown_args = parse_args()
        self.envs = fetch_envs()
diff --git a/python/paddle/distributed/launch/context/device.py b/python/paddle/distributed/launch/context/device.py
index f05bc1b776869675915464da95ad483798c89543..2708755c2eb7a29aa26c0ab0d7d7e0d799e41546 100644
--- a/python/paddle/distributed/launch/context/device.py
+++ b/python/paddle/distributed/launch/context/device.py
@@ -27,7 +27,7 @@ class DeviceType:
     CUSTOM_DEVICE = 'custom_device'


-class Device(object):
+class Device:
    def __init__(self, dtype=None, memory="", labels=""):
        self._dtype = dtype
        self._memory = memory
diff --git a/python/paddle/distributed/launch/context/event.py b/python/paddle/distributed/launch/context/event.py
index 3859edb83f29c1320ff90a91253d56d1f0a04a2a..7792907e47b0af9ebbd70804c6ce2276800aff7c 100644
--- a/python/paddle/distributed/launch/context/event.py
+++ b/python/paddle/distributed/launch/context/event.py
@@ -13,7 +13,7 @@
 # limitations under the License.


-class Event(object):
+class Event:
    def __init__(self, kind="status", message="", fatal=False):
        self.kind = kind
        self.message = message
diff --git a/python/paddle/distributed/launch/context/node.py b/python/paddle/distributed/launch/context/node.py
index b8c1a2a14f5ce64e1eb05464012404d75a50b8d3..04748d2935bcc968be50dec9378a841d75803657 100644
--- a/python/paddle/distributed/launch/context/node.py
+++ b/python/paddle/distributed/launch/context/node.py
@@ -21,7 +21,7 @@ import struct
 from contextlib import closing


-class Node(object):
+class Node:
    def __init__(self):
        # self.device = Device.detect_device()
        self.device = Device.parse_device()
diff --git a/python/paddle/distributed/launch/context/resource.py b/python/paddle/distributed/launch/context/resource.py
index b7baf6fd7b621c647c6708843c10fc24a32e1b5e..3b4d16e7b592047bb6f606b0f6cedffcdb013717 100644
--- a/python/paddle/distributed/launch/context/resource.py
+++ b/python/paddle/distributed/launch/context/resource.py
@@ -13,6 +13,6 @@
 # limitations under the License.


-class Resource(object):
+class Resource:
    def __init__(self):
        self.devices = []
diff --git a/python/paddle/distributed/launch/context/status.py b/python/paddle/distributed/launch/context/status.py
index b87b7b3fb82d8ea76e63d27ad83a7edbe4a611b0..460f14f0cbfe1419419548eb0d4cd3bde9b719af 100644
--- a/python/paddle/distributed/launch/context/status.py
+++ b/python/paddle/distributed/launch/context/status.py
@@ -13,7 +13,7 @@
 # limitations under the License.


-class Status(object):
+class Status:
    UNINIT = "uninit"
    READY = "ready"
    RUNNING = "running"
diff --git a/python/paddle/distributed/launch/controllers/controller.py b/python/paddle/distributed/launch/controllers/controller.py
index 9ff18d5b5d511f88ea45a443d0a8f070ea61caea..73954adaab741351d4df2ff22c7732a66fc010b4 100644
--- a/python/paddle/distributed/launch/controllers/controller.py
+++ b/python/paddle/distributed/launch/controllers/controller.py
@@ -31,7 +31,7 @@ class ControleMode:
     RPC = "rpc"


-class ControllerBase(object):
+class ControllerBase:
    def __init__(self, ctx):
        signal.signal(signal.SIGTERM, self.signal_handler)
        signal.signal(signal.SIGABRT, self.signal_handler)
diff --git a/python/paddle/distributed/launch/controllers/master.py b/python/paddle/distributed/launch/controllers/master.py
index 9c3f0a8501a3ac2334452ad5152bb58986ecfa33..37c6f7a50872be2a442e2a2fcb7ce34d50d6d3de 100644
--- a/python/paddle/distributed/launch/controllers/master.py
+++ b/python/paddle/distributed/launch/controllers/master.py
@@ -24,7 +24,7 @@ import random
 ETCD_PROTOCAL = 'etcd://'


-class Master(object):
+class Master:
    '''
    Master is a distributed store design to exchange info among nodes
    '''
diff --git a/python/paddle/distributed/launch/controllers/watcher.py b/python/paddle/distributed/launch/controllers/watcher.py
index a9c1f509666875bcac3f43fb455f6d97b49af6fa..c76a428d7489a7dc6358db6ab67a2195b47e7b97 100644
--- a/python/paddle/distributed/launch/controllers/watcher.py
+++ b/python/paddle/distributed/launch/controllers/watcher.py
@@ -19,7 +19,7 @@ import os
 from threading import Thread


-class Watcher(object):
+class Watcher:
    def __init__(self, ctx):
        self.ctx = ctx
diff --git a/python/paddle/distributed/launch/job/container.py b/python/paddle/distributed/launch/job/container.py
index 6eb313ea579657abed1dff9d79df433b8d8c3247..c78c3323a8597e0fe0a9fa484b76442672aa045f 100644
--- a/python/paddle/distributed/launch/job/container.py
+++ b/python/paddle/distributed/launch/job/container.py
@@ -20,7 +20,7 @@ import os
 import sys


-class Container(object):
+class Container:
    '''
    TODO(kuizhiqing) A container can be run by process/thread or just a callable function
    '''
diff --git a/python/paddle/distributed/launch/job/job.py b/python/paddle/distributed/launch/job/job.py
index f5c805e31bf63d00d24b459b18ccc952df5970e3..261e6ee7f292c7277b5dc3dc7715fb6552a27820 100644
--- a/python/paddle/distributed/launch/job/job.py
+++ b/python/paddle/distributed/launch/job/job.py
@@ -19,7 +19,7 @@ class JobMode:
     HETER = 'heter'


-class Job(object):
+class Job:
    def __init__(self, jid='default', mode=JobMode.COLLECTIVE, nnodes="1"):
        self._mode = mode
        self._id = jid
diff --git a/python/paddle/distributed/launch/job/pod.py b/python/paddle/distributed/launch/job/pod.py
index b65aad6e0fcb43a869d5038489db927e13261216..a322bcdccfe6d7428d1719daebde031e3850183f 100644
--- a/python/paddle/distributed/launch/job/pod.py
+++ b/python/paddle/distributed/launch/job/pod.py
@@ -20,7 +20,7 @@ import random
 import time


-class PodSepc(object):
+class PodSepc:
    def __init__(self):
        self._name = ''.join(
            random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(6)
diff --git a/python/paddle/distributed/launch/job/status.py b/python/paddle/distributed/launch/job/status.py
index 88fd09bbf2267a8f6aa69857d982ba25d787259b..d8a346ea292a4601daf3fab4d4ec402b9864c643 100644
--- a/python/paddle/distributed/launch/job/status.py
+++ b/python/paddle/distributed/launch/job/status.py
@@ -13,7 +13,7 @@
 # limitations under the License.


-class Status(object):
+class Status:
    UNINIT = "uninit"
    READY = "ready"
    RUNNING = "running"
diff --git a/python/paddle/distributed/launch/utils/kv_client.py b/python/paddle/distributed/launch/utils/kv_client.py
index 8ed46053de0d32eaa6b432254b455594b133d850..b60970382aee807bb5a26dab1f762e13bd9c0a53 100644
--- a/python/paddle/distributed/launch/utils/kv_client.py
+++ b/python/paddle/distributed/launch/utils/kv_client.py
@@ -16,7 +16,7 @@ import requests
 import time


-class KVClient(object):
+class KVClient:
    def __init__(self, endpoint='localhost:2379'):
        self.endpoint = (
            endpoint
diff --git a/python/paddle/distributed/launch/utils/kv_server.py b/python/paddle/distributed/launch/utils/kv_server.py
index d27836f1bfe6f327e50723e1a0aec073aab1c905..90dcbcf9375230efca0d7b8bd6f60af5792bf2fb 100644
--- a/python/paddle/distributed/launch/utils/kv_server.py
+++ b/python/paddle/distributed/launch/utils/kv_server.py
@@ -67,7 +67,7 @@ class KVHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
         return


-class KVServer(HTTPServer, object):
+class KVServer(HTTPServer):
    def __init__(self, port):
        super().__init__(('', port), KVHandler)
        self.kv_lock = threading.Lock()
diff --git a/python/paddle/distributed/launch/utils/nvsmi.py b/python/paddle/distributed/launch/utils/nvsmi.py
index d1a14f11666d2521438d84b8132c830e39ef1270..762870baa15a1272fe036273020b4c1cfb78c1dc 100644
--- a/python/paddle/distributed/launch/utils/nvsmi.py
+++ b/python/paddle/distributed/launch/utils/nvsmi.py
@@ -18,7 +18,7 @@ import json
 import shutil


-class Info(object):
+class Info:
    def __repr__(self):
        return str(self.__dict__)
diff --git a/python/paddle/distributed/launch/utils/process_context.py b/python/paddle/distributed/launch/utils/process_context.py
index 682a857f2ee6c3bfbe2e1f89f68fb13da4598852..3a8c52851778c5917fe9f88ab3d7dd004bbb1841 100644
--- a/python/paddle/distributed/launch/utils/process_context.py
+++ b/python/paddle/distributed/launch/utils/process_context.py
@@ -16,7 +16,7 @@ import subprocess
 import os, sys, signal, time


-class ProcessContext(object):
+class ProcessContext:
    def __init__(
        self,
        cmd,
diff --git a/python/paddle/distributed/passes/auto_parallel_amp.py b/python/paddle/distributed/passes/auto_parallel_amp.py
index 85f8ec2e1fd96134c2f5cd7cec8ee16a5c9d2e57..d15d38abee9133049ba63634575bc286391fde6e 100644
--- a/python/paddle/distributed/passes/auto_parallel_amp.py
+++ b/python/paddle/distributed/passes/auto_parallel_amp.py
@@ -54,7 +54,7 @@ from ..auto_parallel.utils import is_forward_op, is_backward_op, is_loss_op
 world_process_group = get_world_process_group()


-class AMPState(object):
+class AMPState:
    def __init__(self, block):
        self._block = block
        self._op_fp16_dict = (
diff --git a/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py b/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py
index c2779d3557199ade3749278a918ea15b34e1b389..ec3d799ee84484aa65b7d162986f059be784ccd3 100644
--- a/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py
+++ b/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py
@@ -572,7 +572,7 @@ class DataParallelOptimizationPass(PassBase):
         self._logger.info("individual gradient {}".format(individual_grads))


-class GradientsGroup(object):
+class GradientsGroup:
    def __init__(self, ops, max_group_size):
        self.max_group_size = max_group_size
        self.ops = ops
diff --git a/python/paddle/distributed/passes/auto_parallel_fp16.py b/python/paddle/distributed/passes/auto_parallel_fp16.py
index cdb5f4221237f720809fe200e38fd1d7d568109a..a952986c21dca717a9ac12e099fe276db06df771 100644
--- a/python/paddle/distributed/passes/auto_parallel_fp16.py
+++ b/python/paddle/distributed/passes/auto_parallel_fp16.py
@@ -126,7 +126,7 @@ def _keep_fp32_output(op, out_name):
     return False


-class FP16State(object):
+class FP16State:
    def __init__(
        self,
        program,
diff --git a/python/paddle/distributed/passes/auto_parallel_grad_clip.py b/python/paddle/distributed/passes/auto_parallel_grad_clip.py
index d570bf9c3fc1b53a74d953ad18fd2e3787fb7e8a..73432baa1d3c3d17d7aa8f9f22b3b9a9b3cb5309 100644
--- a/python/paddle/distributed/passes/auto_parallel_grad_clip.py
+++ b/python/paddle/distributed/passes/auto_parallel_grad_clip.py
@@ -143,7 +143,7 @@ def _is_about_global_norm(
     return rank_id in complete_param_ranks


-class ClipHelper(object):
+class ClipHelper:
    def __init__(self, params_grads, rank_id, block, dist_context):
        params, _ = zip(*params_grads)
        self.params = list(params)
diff --git a/python/paddle/distributed/passes/auto_parallel_sharding.py b/python/paddle/distributed/passes/auto_parallel_sharding.py
index 290a1741caed32c6811285c1c7aab55bbc7354c5..a80af73c2bced73f9de8ece59e041cafbe6db4fc 100644
--- a/python/paddle/distributed/passes/auto_parallel_sharding.py
+++ b/python/paddle/distributed/passes/auto_parallel_sharding.py
@@ -850,7 +850,7 @@ def shard_parameters(params, group_size):
     return mapping


-class ShardingInfo(object):
+class ShardingInfo:
    def __init__(self, group, rank, params_grads):
        self.group = group
        self.params_grads = dict([(p.name, (p, g)) for p, g in params_grads])
diff --git a/python/paddle/distributed/ps/coordinator.py b/python/paddle/distributed/ps/coordinator.py
index d73193845e95f90273ea124038e9418d038060b8..612502edad9d34b96140e922db1f33b2619c3240 100755
--- a/python/paddle/distributed/ps/coordinator.py
+++ b/python/paddle/distributed/ps/coordinator.py
@@ -353,7 +353,7 @@ class FLClient(FLClientBase):
             f.write(str(self.train_statical_info))


-class Coordinator(object):
+class Coordinator:
    def __init__(self, ps_hosts):
        self._communicator = FLCommunicator(ps_hosts)
        self._client_selector = None
diff --git a/python/paddle/distributed/ps/the_one_ps.py b/python/paddle/distributed/ps/the_one_ps.py
index b4881959ee5733ff3913ee6c16e3c37bea3a9178..3dbbae864789a739433a5a349ca1067c2dada90c 100755
--- a/python/paddle/distributed/ps/the_one_ps.py
+++ b/python/paddle/distributed/ps/the_one_ps.py
@@ -914,7 +914,7 @@ class fsClient:
         proto.hadoop_bin = self.fs_client_param.hadoop_bin


-class PsDescBuilder(object):
+class PsDescBuilder:
    def __init__(self, context):
        self.context = context
        self.is_sync = context['is_sync']
diff --git a/python/paddle/distributed/ps/utils/ps_factory.py b/python/paddle/distributed/ps/utils/ps_factory.py
index 0726fe15dc4cd437810b22b64ae5cfdb6dd04540..0161352ec95dc9bc912b961c76d46c4b32ddf92e 100755
--- a/python/paddle/distributed/ps/utils/ps_factory.py
+++ b/python/paddle/distributed/ps/utils/ps_factory.py
@@ -27,7 +27,7 @@ __all__ = [
 ]


-class PsProgramBuilderFactory(object):
+class PsProgramBuilderFactory:
    def __init__(self):
        pass
diff --git a/python/paddle/distributed/ps/utils/ps_program_builder.py b/python/paddle/distributed/ps/utils/ps_program_builder.py
index a07e2ebe1408a7515d1fdded23aebedf77fa8559..1831f7061b6e0c2badbdec2b4ec0585862f7d644 100755
--- a/python/paddle/distributed/ps/utils/ps_program_builder.py
+++ b/python/paddle/distributed/ps/utils/ps_program_builder.py
@@ -19,7 +19,7 @@ from paddle.distributed.fleet.base.private_helper_function import (
 from paddle.distributed.passes import new_pass


-class PsProgramBuilder(object):
+class PsProgramBuilder:
    def __init__(self, pass_ctx):
        self.pass_ctx = pass_ctx
        self.attrs = self.pass_ctx._attrs
diff --git a/python/paddle/distributed/ps/utils/public.py b/python/paddle/distributed/ps/utils/public.py
index 578d664dc4137bdc79da8af539178a3ae20503ed..3b3a44d49890eb02ccb80fe624e64d99a0fc0a73 100755
--- a/python/paddle/distributed/ps/utils/public.py
+++ b/python/paddle/distributed/ps/utils/public.py
@@ -88,7 +88,7 @@ class DistributedMode:
     NU = 5


-class TrainerRuntimeConfig(object):
+class TrainerRuntimeConfig:
    def __init__(self, valid_strategy):
        self.mode = None
        num_threads = os.getenv("CPU_NUM", "1")
diff --git a/python/paddle/distributed/spawn.py b/python/paddle/distributed/spawn.py
index 2df9118ac4e870a7ff2ae5d24c099eeaf4c4bc83..a371f5d559b597ea06171eefc6fc69dcbe56e6bf 100644
--- a/python/paddle/distributed/spawn.py
+++ b/python/paddle/distributed/spawn.py
@@ -43,7 +43,7 @@ from paddle.fluid.framework import set_flags
 __all__ = []


-class ParallelEnvArgs(object):
+class ParallelEnvArgs:
    def __init__(self):
        # Paddle cluster nodes ips, such as 192.168.0.16,192.168.0.17..
        self.cluster_node_ips = None
@@ -412,7 +412,7 @@ def _func_wrapper(func, args, error_queue, return_queue, env_dict, backend):
         sys.exit(1)


-class MultiprocessContext(object):
+class MultiprocessContext:
    def __init__(self, processes, error_queues, return_queues):
        _py_supported_check()
        self.error_queues = error_queues
diff --git a/python/paddle/distributed/utils/launch_utils.py b/python/paddle/distributed/utils/launch_utils.py
index 975f5d4935644d0fe3f3abc7c10519517b2f234f..0c1ab76e5506d1b3896bb57f8d731acd3787d390 100644
--- a/python/paddle/distributed/utils/launch_utils.py
+++ b/python/paddle/distributed/utils/launch_utils.py
@@ -99,7 +99,7 @@ def get_gpus(selected_gpus):
     return gpus


-class Hdfs(object):
+class Hdfs:
    def __init__(self):
        self.hdfs_ugi = None
        self.hdfs_name = None
@@ -128,7 +128,7 @@ class Hdfs(object):
         return not self == n


-class Cluster(object):
+class Cluster:
    def __init__(self, hdfs):
        self.job_server = None
        self.pods = []
@@ -194,7 +194,7 @@ class Cluster(object):
         return None


-class JobServer(object):
+class JobServer:
    def __init__(self):
        self.endpoint = None
@@ -208,7 +208,7 @@ class JobServer(object):
         return not self == j


-class Trainer(object):
+class Trainer:
    def __init__(self):
        self.gpus = []
        self.endpoint = None
@@ -239,7 +239,7 @@ class Trainer(object):
         return self.rank


-class Pod(object):
+class Pod:
    def __init__(self):
        self.rank = None
        self.id = None
@@ -454,7 +454,7 @@ def _prepare_trainer_env(cluster, trainer, backend=None):
     return proc_env


-class TrainerProc(object):
+class TrainerProc:
    def __init__(self):
        self.proc = None
        self.log_fn = None
diff --git a/python/paddle/distribution/constraint.py b/python/paddle/distribution/constraint.py
index 46f919f3858592ce9152dc1fa426bc863a9214c2..092faf16937b4e2a25ccc9b309d64a302932537f 100644
--- a/python/paddle/distribution/constraint.py
+++ b/python/paddle/distribution/constraint.py
@@ -14,7 +14,7 @@
 import paddle


-class Constraint(object):
+class Constraint:
    """Constraint condition for random variable."""

    def __call__(self, value):
diff --git a/python/paddle/distribution/distribution.py b/python/paddle/distribution/distribution.py
index 92dd306ce022f68dc95c09a8b21ed2297ab47482..27febe07a3810c1aec990c6a0b8abe20a0e68775 100644
--- a/python/paddle/distribution/distribution.py
+++ b/python/paddle/distribution/distribution.py
@@ -33,7 +33,7 @@ from paddle.fluid.framework import (
 from paddle.fluid.layers import tensor


-class Distribution(object):
+class Distribution:
    """
    The abstract base class for probability distributions. Functions are
    implemented in specific distributions.
diff --git a/python/paddle/distribution/kl.py b/python/paddle/distribution/kl.py
index 4f95366391a155eb3365002529e594bf9fed3492..cf8857629893becfaee8cb69136cdd8142fb3132 100644
--- a/python/paddle/distribution/kl.py
+++ b/python/paddle/distribution/kl.py
@@ -127,7 +127,7 @@ def _dispatch(cls_p, cls_q):


 @functools.total_ordering
-class _Compare(object):
+class _Compare:
    def __init__(self, *classes):
        self.classes = classes
diff --git a/python/paddle/distribution/transform.py b/python/paddle/distribution/transform.py
index f13c224691593a132a0f97e07640b84bc626145c..db55eca2d1a71c837ec3e94bf4eb4145d693c96e 100644
--- a/python/paddle/distribution/transform.py
+++ b/python/paddle/distribution/transform.py
@@ -58,7 +58,7 @@ class Type(enum.Enum):
         return _type in (cls.BIJECTION, cls.INJECTION)


-class Transform(object):
+class Transform:
    r"""Base class for the transformations of random variables.

    ``Transform`` can be used to represent any differentiable and injective
diff --git a/python/paddle/distribution/variable.py b/python/paddle/distribution/variable.py
index e7aa1e1a680a6a58bf73a9e3bed605119743cd42..99cafc5ea788ec29a3d09ba0d7ffba8bc89fe2e7 100644
--- a/python/paddle/distribution/variable.py
+++ b/python/paddle/distribution/variable.py
@@ -15,7 +15,7 @@
 from paddle.distribution import constraint


-class Variable(object):
+class Variable:
    """Random variable of probability distribution.

    Args:
diff --git a/python/paddle/fluid/average.py b/python/paddle/fluid/average.py
index 2d83246df7f636036a1e6d886bb349c130da291e..32db4ba0a9093b5376861e016c891acd545bcc44 100644
--- a/python/paddle/fluid/average.py
+++ b/python/paddle/fluid/average.py
@@ -39,7 +39,7 @@ def _is_number_or_matrix_(var):
     return _is_number_(var) or isinstance(var, np.ndarray)


-class WeightedAverage(object):
+class WeightedAverage:
    """
    Calculate weighted average.
diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py
index e73e2fe1ab10b1cef7d35541c7e0eb0203d97012..279ac480453dd7b2ced706e5936fb438198d5f66 100755
--- a/python/paddle/fluid/backward.py
+++ b/python/paddle/fluid/backward.py
@@ -41,7 +41,7 @@ _logger = log_helper.get_logger(
 )


-class ProgramStats(object):
+class ProgramStats:
    def __init__(self, block, ops):
        self.block = block
        self.ops = ops
@@ -789,7 +789,7 @@ def _find_not_need_ops(grad_op_descs, forward_ops, input_grad_names_set):
         (set[core.OpDesc]): A set of OpDescs which should be pruned.
    """

-    class Var(object):
+    class Var:
        def __init__(self, var_name):
            self.var_name = var_name
            self.gen_op = None
@@ -804,7 +804,7 @@ def _find_not_need_ops(grad_op_descs, forward_ops, input_grad_names_set):
             assert isinstance(op, Op)
            self.pendding_ops.append(op)

-    class Op(object):
+    class Op:
        def __init__(self, op_desc):
            self.op_desc = op_desc
            self.inputs = []
diff --git a/python/paddle/fluid/clip.py b/python/paddle/fluid/clip.py
index 1ad98da957213735b6ebea3eab2334745fd5951e..68a2f8a0deea36274a1ab2db2796cdcb122fe706 100644
--- a/python/paddle/fluid/clip.py
+++ b/python/paddle/fluid/clip.py
@@ -92,7 +92,7 @@ def _squared_l2_norm(x):
     return out


-class BaseErrorClipAttr(object):
+class BaseErrorClipAttr:
    def __str__(self):
        raise NotImplementedError()
@@ -177,7 +177,7 @@ def error_clip_callback(block, context):
         error_clip._append_clip_op(block, grad_n)


-class ClipGradBase(object):
+class ClipGradBase:
    def __init__(self):
        super().__init__()
diff --git a/python/paddle/fluid/communicator.py b/python/paddle/fluid/communicator.py
index bd43dce83711188deb44e158c9fa9bcf3633911e..55733c87f67c9b557bc156fefcb08aed27b8975e 100755
--- a/python/paddle/fluid/communicator.py
+++ b/python/paddle/fluid/communicator.py
@@ -38,7 +38,7 @@ from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode
 __all__ = ['Communicator', 'FLCommunicator', 'LargeScaleKV']


-class Communicator(object):
+class Communicator:
    def __init__(self, mode, kwargs=None, envs=None):
        """
        Communicator is used for async distribute training in distribute_transpiler mode.
@@ -246,7 +246,7 @@ class FLCommunicator(Communicator):  ## only for coordinator
         return info_mp


-class LargeScaleKV(object):
+class LargeScaleKV:
    def __init__(self):
        self.scale_kv = core.LargeScaleKV()
@@ -260,7 +260,7 @@ class LargeScaleKV(object):
         return self.scale_kv.size(varname)


-class HeterClient(object):
+class HeterClient:
    def __init__(self, endpoint, previous_endpoint, trainer_id):
        self.heter_client_ = core.HeterClient(
            endpoint, previous_endpoint, trainer_id
diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py
index 1adef41f86a35065bf6f44b48d50875ff9aa71ed..47110b1e0bf7e58d503011be28e6cbcc9ae3a673 100644
--- a/python/paddle/fluid/compiler.py
+++ b/python/paddle/fluid/compiler.py
@@ -105,7 +105,7 @@ def _should_broadcast_or_not_exists(program, var_name):
     return not is_distributed


-class CompiledProgram(object):
+class CompiledProgram:
    """
    :api_attr: Static Graph
@@ -567,7 +567,7 @@ class CompiledProgram(object):
         return place_list


-class IpuDynamicPatcher(object):
+class IpuDynamicPatcher:
    """
    Patcher for IPU dynamic2static support.
    """
@@ -777,7 +777,7 @@ class IpuDynamicPatcher(object):
             setattr(module, key, attr)


-class IpuStrategy(object):
+class IpuStrategy:
    """
    Help users precisely control the graph building in :code:`paddle.static.IpuCompiledProgram` .
@@ -1237,7 +1237,7 @@ class IpuStrategy(object):
         return self.get_option('enable_fp16')


-class IpuCompiledProgram(object):
+class IpuCompiledProgram:
    """
    The IpuCompiledProgram is used to transform a program to a ipu-target program,
    such as forward graph extraction, computing graph transformation, useless scale Ops clean, etc.
diff --git a/python/paddle/fluid/contrib/decoder/beam_search_decoder.py b/python/paddle/fluid/contrib/decoder/beam_search_decoder.py
index 429feda47bb53d9c750d1ff7de69ff4ed7fbba6f..717d31c2fe1b9766db0ab90544e19abd531e586b 100644
--- a/python/paddle/fluid/contrib/decoder/beam_search_decoder.py
+++ b/python/paddle/fluid/contrib/decoder/beam_search_decoder.py
@@ -37,7 +37,7 @@ class _DecoderType:
     BEAM_SEARCH = 2


-class InitState(object):
+class InitState:
    """
    The initial hidden state object. The state objects holds a variable, and may
    use it to initialize the hidden state cell of RNN. Usually used as input to
@@ -98,7 +98,7 @@ class InitState(object):
         return self._need_reorder


-class _MemoryState(object):
+class _MemoryState:
    def __init__(self, state_name, rnn_obj, init_state):
        self._state_name = state_name  # each is a rnn.memory
        self._rnn_obj = rnn_obj
@@ -113,7 +113,7 @@ class _MemoryState(object):
         self._rnn_obj.update_memory(self._state_mem, state)


-class _ArrayState(object):
+class _ArrayState:
    def __init__(self, state_name, block, init_state):
        self._state_name = state_name
        self._block = block
@@ -161,7 +161,7 @@ class _ArrayState(object):
         layers.array_write(state, array=self._state_array, i=self._counter)


-class StateCell(object):
+class StateCell:
    """
    The state cell class stores the hidden state of the RNN cell. A typical RNN
    cell has one or more hidden states, and one or more step inputs. This class
@@ -401,7 +401,7 @@ class StateCell(object):
         return self._cur_states[self._out_state]


-class TrainingDecoder(object):
+class TrainingDecoder:
    """
    A decoder that can only be used for training. The decoder could be
    initialized with a `StateCell` object. The computation within the RNN cell
@@ -547,7 +547,7 @@ class TrainingDecoder(object):
         )


-class BeamSearchDecoder(object):
+class BeamSearchDecoder:
    """
    A beam search decoder that can be used for inference. The decoder should be
    initialized with a `StateCell` object. The decode process can be defined
diff --git a/python/paddle/fluid/contrib/extend_optimizer/extend_optimizer_with_weight_decay.py b/python/paddle/fluid/contrib/extend_optimizer/extend_optimizer_with_weight_decay.py
index e85f3e45a24108528d0e622addd7a8c5e32664b5..53a010c23ce9ddb61de407d2b385868e034e71fd 100644
--- a/python/paddle/fluid/contrib/extend_optimizer/extend_optimizer_with_weight_decay.py
+++ b/python/paddle/fluid/contrib/extend_optimizer/extend_optimizer_with_weight_decay.py
@@ -17,7 +17,7 @@ from paddle.fluid import framework as framework
 __all__ = ["extend_with_decoupled_weight_decay"]


-class DecoupledWeightDecay(object):
+class DecoupledWeightDecay:
    def __init__(self, coeff=0.0, apply_decay_param_fun=None, **kwargs):
        if not isinstance(coeff, float) and not isinstance(
            coeff, framework.Variable
diff --git a/python/paddle/fluid/contrib/mixed_precision/bf16/amp_lists.py b/python/paddle/fluid/contrib/mixed_precision/bf16/amp_lists.py
index 33694f4d127eed76fd84ba8b0074d1229f38d9a3..180e28ddab351499f7e3ecf65a8384a32bd29454 100644
--- a/python/paddle/fluid/contrib/mixed_precision/bf16/amp_lists.py
+++ b/python/paddle/fluid/contrib/mixed_precision/bf16/amp_lists.py
@@ -24,7 +24,7 @@ from ..fp16_lists import (
 __all__ = ["AutoMixedPrecisionListsBF16"]


-class AutoMixedPrecisionListsBF16(object):
+class AutoMixedPrecisionListsBF16:
    """
    AutoMixedPrecisionListsBF16 is a class for fp32/bf16 op types list. The lists are used for an
    algorithm which determines op's execution mode (fp32 or bf16).It can update pre-defined
diff --git a/python/paddle/fluid/contrib/mixed_precision/bf16/decorator.py b/python/paddle/fluid/contrib/mixed_precision/bf16/decorator.py
index 9110686582220fe1762e2c52551638f85d5ff828..dd1b07bfff6d5f1886c08260436a45d8a9056f77 100644
--- a/python/paddle/fluid/contrib/mixed_precision/bf16/decorator.py
+++ b/python/paddle/fluid/contrib/mixed_precision/bf16/decorator.py
@@ -31,7 +31,7 @@ import warnings
 __all__ = ["decorate_bf16"]


-class OptimizerWithMixedPrecision(object):
+class OptimizerWithMixedPrecision:
    """
    Optimizer with mixed-precision (MP) training. This is a wrapper of a common
    optimizer, plus the support of mixed-precision pre-training. The object
diff --git a/python/paddle/fluid/contrib/mixed_precision/decorator.py b/python/paddle/fluid/contrib/mixed_precision/decorator.py
index 75554ff9c812307364ae52c118005dd3724d356b..6b9f3f6eaabc51cf32845f6b704d2aa4ed7a9c14 100644
--- a/python/paddle/fluid/contrib/mixed_precision/decorator.py
+++ b/python/paddle/fluid/contrib/mixed_precision/decorator.py
@@ -34,7 +34,7 @@ import paddle
 __all__ = ["decorate"]


-class OptimizerWithMixedPrecision(object):
+class OptimizerWithMixedPrecision:
    """
    Optimizer with mixed-precision (MP) training. This is a wrapper of a common
    optimizer, plus the support of mixed-precision pre-training. The object
diff --git a/python/paddle/fluid/contrib/mixed_precision/fp16_lists.py b/python/paddle/fluid/contrib/mixed_precision/fp16_lists.py
index ef8f222bac23b22244748968385bbfb67898d01c..101af59861880898c4d5bdfb80536659f1648769 100644
--- a/python/paddle/fluid/contrib/mixed_precision/fp16_lists.py
+++ b/python/paddle/fluid/contrib/mixed_precision/fp16_lists.py
@@ -26,7 +26,7 @@ _extra_unsupported_fp16_list = {
 }


-class AutoMixedPrecisionLists(object):
+class AutoMixedPrecisionLists:
    """
    AutoMixedPrecisionLists is a class for black/white list. It can update
    pre-defined black list and white list according to users' custom black
diff --git a/python/paddle/fluid/contrib/quantize/quantize_transpiler.py b/python/paddle/fluid/contrib/quantize/quantize_transpiler.py
index 6e225fdbcc8e6c8731e730d831d6907adc1d7d77..edd07c0ba9f3684087ed7720c35e296c84f39b22 100644
--- a/python/paddle/fluid/contrib/quantize/quantize_transpiler.py
+++ b/python/paddle/fluid/contrib/quantize/quantize_transpiler.py
@@ -81,7 +81,7 @@ def quant(x, scale, num_bits):
     return y


-class QuantizeTranspiler(object):
+class QuantizeTranspiler:
    def __init__(
        self,
        weight_bits=8,
diff --git a/python/paddle/fluid/contrib/slim/quantization/adaround.py b/python/paddle/fluid/contrib/slim/quantization/adaround.py
index 9dd00ddadc6acd0eaf9af25d713ef564250ea960..278994ef318a12ef83c063147a88977b27ee2b96 100644
--- a/python/paddle/fluid/contrib/slim/quantization/adaround.py
+++ b/python/paddle/fluid/contrib/slim/quantization/adaround.py
@@ -51,7 +51,7 @@ def compute_soft_rounding_np(alpha_v):
     )


-class AdaRoundLoss(object):
+class AdaRoundLoss:
    def __init__(self, reg_param=0.01, default_beta_range=(20, 2)):
        self.default_reg_param = reg_param
        self.default_beta_range = default_beta_range
@@ -111,7 +111,7 @@ class AdaRoundLoss(object):
         return beta


-class AdaRound(object):
+class AdaRound:
    def __init__(
        self,
        scale,
diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq.py b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq.py
index 79f0a2bf359d737c8dfca8c77c2d70a32a7b85c7..358e08cf588903887086c0506ef6bc6e36c477bc 100644
--- a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq.py
+++ b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq.py
@@ -36,7 +36,7 @@ _logger = get_logger(
 )


-class ImperativePTQ(object):
+class ImperativePTQ:
    """
    Static post training quantization.
    """
diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_config.py b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_config.py
index b02de6cee4676ef2793a3dd066ead020e64ac704..88eb998c0e071ac7dc6a797e561e3ce349b1b099 100644
--- a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_config.py
+++ b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_config.py
@@ -22,7 +22,7 @@ from .ptq_quantizer import *
 __all__ = ['PTQConfig', 'default_ptq_config']


-class PTQConfig(object):
+class PTQConfig:
    """
    The PTQ config shows how to quantize the inputs and outputs.
    """
diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_registry.py b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_registry.py
index cb56bfcb47acbcef02728d9e77b33aaeffa929ab..d8df91f78fb8baadd9f82080934e4d2b19fc59d2 100644
--- a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_registry.py
+++ b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_registry.py
@@ -17,7 +17,7 @@ import paddle
 __all__ = ['PTQRegistry']


-class LayerInfo(object):
+class LayerInfo:
    """
    Store the argnames of the inputs and outputs.
    """
@@ -63,7 +63,7 @@ QUANT_LAYERS_INFO = [
 SIMULATED_LAYERS = [paddle.nn.Conv2D, paddle.nn.Linear]


-class PTQRegistry(object):
+class PTQRegistry:
    """
    Register the supported layers for PTQ and provide layers info.
""" diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py b/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py index aa768ab9745fe5332cf2ea7c5b1132edc3933763..9e64ecd96b11bd40e96580cd9432e19669b58408 100644 --- a/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py +++ b/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py @@ -57,7 +57,7 @@ def lazy_import_fleet(layer_name_map, fake_quant_input_layers): return layer_name_map, fake_quant_input_layers -class ImperativeQuantAware(object): +class ImperativeQuantAware: """ Applying quantization aware training (QAT) to the dgraph model. """ @@ -304,7 +304,7 @@ class ImperativeQuantAware(object): ) -class ImperativeQuantizeInputs(object): +class ImperativeQuantizeInputs: """ Based on the input params, add the quant_dequant computational logic both for activation inputs and weight inputs. @@ -448,7 +448,7 @@ class ImperativeQuantizeInputs(object): return quant_layers.__dict__[quant_layer_name](layer, **self._kwargs) -class ImperativeQuantizeOutputs(object): +class ImperativeQuantizeOutputs: """ Calculate the output scales for target layers. """ diff --git a/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py b/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py index 3db16060e0e609577f085e1182db69f0b78ca311..68df2a8adcc961c2b34a6cbc92d803fa1bb211c2 100644 --- a/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py +++ b/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py @@ -112,7 +112,7 @@ def _apply_pass( return graph -class PostTrainingQuantization(object): +class PostTrainingQuantization: """ Utilizing post training quantization methon to quantize the FP32 model, and it uses calibrate data to get the quantization information for all @@ -1481,7 +1481,7 @@ class PostTrainingQuantizationProgram(PostTrainingQuantization): self._fetch_list = fetch_list -class WeightQuantization(object): +class WeightQuantization: _supported_quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul'] _supported_weight_quantize_type = ['channel_wise_abs_max', 'abs_max'] diff --git a/python/paddle/fluid/contrib/slim/quantization/quant2_int8_mkldnn_pass.py b/python/paddle/fluid/contrib/slim/quantization/quant2_int8_mkldnn_pass.py index fcc2daff208ef285795ba695610adfcaf6c0e3ef..a617bac359b6f10b1d642a72283c722ce95b3bb8 100644 --- a/python/paddle/fluid/contrib/slim/quantization/quant2_int8_mkldnn_pass.py +++ b/python/paddle/fluid/contrib/slim/quantization/quant2_int8_mkldnn_pass.py @@ -22,7 +22,7 @@ __all__ = ['Quant2Int8MkldnnPass'] OpRole = core.op_proto_and_checker_maker.OpRole -class Quant2Int8MkldnnPass(object): +class Quant2Int8MkldnnPass: """ Transform a quant model IrGraph into MKL-DNN supported INT8 IrGraph. The pass consists of the following transformations: diff --git a/python/paddle/fluid/contrib/slim/quantization/quant_int8_mkldnn_pass.py b/python/paddle/fluid/contrib/slim/quantization/quant_int8_mkldnn_pass.py index 73c611db0120600400a0e4a22118a859fe141032..25278fc6913ee9e0e122cddc65bc37313dcc8ebb 100644 --- a/python/paddle/fluid/contrib/slim/quantization/quant_int8_mkldnn_pass.py +++ b/python/paddle/fluid/contrib/slim/quantization/quant_int8_mkldnn_pass.py @@ -21,7 +21,7 @@ from ....framework import _get_paddle_place __all__ = ['QuantInt8MkldnnPass'] -class QuantInt8MkldnnPass(object): +class QuantInt8MkldnnPass: """ Convert QuantizationFreezePass generated IrGraph to MKL-DNN supported INT8 IrGraph. 
Following transformations did in this pass: diff --git a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py index 020bdcec489d6d28342df5ec468dce8fefdf624c..f0caabd6f4ea19a0ca22e64bd80323281b54aaa5 100644 --- a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py +++ b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py @@ -107,7 +107,7 @@ def _check_grandchild_op_node(op_node, grandchild_op_name): return False -class QuantizationTransformPass(object): +class QuantizationTransformPass: """ Quantize the ops that have weights. Add quant and dequant ops for the quantized ops's inputs. @@ -1068,7 +1068,7 @@ class QuantizationTransformPass(object): return is_skip -class QuantizationFreezePass(object): +class QuantizationFreezePass: def __init__( self, scope, @@ -1444,7 +1444,7 @@ class QuantizationFreezePass(object): ) -class ConvertToInt8Pass(object): +class ConvertToInt8Pass: def __init__(self, scope, place, quantizable_op_type=None): """ Convert the weights into int8_t type. @@ -1537,7 +1537,7 @@ class ConvertToInt8Pass(object): graph.safe_remove_nodes(all_unused_vars) -class TransformForMobilePass(object): +class TransformForMobilePass: def __init__(self): """ This pass is used to convert the frozen graph for paddle-mobile execution. @@ -1579,7 +1579,7 @@ class TransformForMobilePass(object): return graph -class OutScaleForTrainingPass(object): +class OutScaleForTrainingPass: def __init__( self, scope=None, @@ -1745,7 +1745,7 @@ class OutScaleForTrainingPass(object): return "%s@scale" % (var_name) -class OutScaleForInferencePass(object): +class OutScaleForInferencePass: def __init__(self, scope=None): """ This pass is used for setting output scales of some operators. @@ -1815,7 +1815,7 @@ class OutScaleForInferencePass(object): return "%s@scale" % (var_name) -class AddQuantDequantPass(object): +class AddQuantDequantPass: """ Quantize the ops that do not have weights, and add quant_dequant op for the quantized ops's inputs. @@ -2087,7 +2087,7 @@ class AddQuantDequantPass(object): return quant_var_node, scale_out_node -class InsertQuantizeLinear(object): +class InsertQuantizeLinear: """ Insert quantize_linear and dequantize_linear op before ops. @@ -2664,7 +2664,7 @@ class QuantizationTransformPassV2(QuantizationTransformPass): return graph -class AddQuantDequantPassV2(object): +class AddQuantDequantPassV2: """ Quantize the ops that do not have weights, and add quant_linear and dequant_linear op for the quantized ops's inputs. It is used in the new format of quantization. @@ -2850,7 +2850,7 @@ class AddQuantDequantPassV2(object): return graph -class ReplaceFakeQuantDequantPass(object): +class ReplaceFakeQuantDequantPass: """ replace quant-dequant ops with quantize_linear and dequantize_linear ops. """ @@ -2987,7 +2987,7 @@ class ReplaceFakeQuantDequantPass(object): return "%s@zero_point" % (var_name) -class QuantWeightPass(object): +class QuantWeightPass: """ quant weights and remove weights input quantize_linear node. for example: `weight -> quant -> dequant -> conv2d` will be frozen into `weight -> dequant -> conv2d`, @@ -3129,7 +3129,7 @@ class QuantWeightPass(object): tensor.set(array, self._place) -class AddQuantDequantForInferencePass(object): +class AddQuantDequantForInferencePass: """ When export quant model, it will traverse to find the output of each op, and then insert the quant/dequant op after it. 
""" diff --git a/python/paddle/fluid/contrib/slim/quantization/quantize_transpiler_v2.py b/python/paddle/fluid/contrib/slim/quantization/quantize_transpiler_v2.py index dbc6277a3bf10d0a54a13a237636fbab5530a61a..64bb1a6c45a1c846c2b6f669771712710c4af1a5 100644 --- a/python/paddle/fluid/contrib/slim/quantization/quantize_transpiler_v2.py +++ b/python/paddle/fluid/contrib/slim/quantization/quantize_transpiler_v2.py @@ -49,7 +49,7 @@ def load_variable_data(scope, var_name): return np.array(var_node.get_tensor()) -class QuantizeTranspilerV2(object): +class QuantizeTranspilerV2: def __init__( self, weight_bits=8, diff --git a/python/paddle/fluid/contrib/slim/quantization/utils.py b/python/paddle/fluid/contrib/slim/quantization/utils.py index 9862772c64a9881428e0ae1d0fc2e1d7eb099522..5f5fc99b44c7dbd46fc7a5f2c8236c14dbb3d9a6 100644 --- a/python/paddle/fluid/contrib/slim/quantization/utils.py +++ b/python/paddle/fluid/contrib/slim/quantization/utils.py @@ -472,7 +472,7 @@ def l2_loss(gt, pred): return ((gt - pred) ** 2).mean() -class tqdm(object): +class tqdm: def __init__(self, total, bar_format='Loading|{bar}', ncols=80): self.total = total self.bar_format = bar_format diff --git a/python/paddle/fluid/contrib/sparsity/asp.py b/python/paddle/fluid/contrib/sparsity/asp.py index 11fc3ca259f5fc25ff86a00406857428ca2caeec..d2165def6f0cbf6d8268b49386933df8074c82c1 100644 --- a/python/paddle/fluid/contrib/sparsity/asp.py +++ b/python/paddle/fluid/contrib/sparsity/asp.py @@ -478,7 +478,7 @@ def prune_model(model, n=2, m=4, mask_algo='mask_1d', with_mask=True): ) -class ProgramASPInfo(object): +class ProgramASPInfo: r""" ProgramASPInfo is a container to keep ASP relevant information of Pragrom. It contains three inner-variables: 1. __mask_vars (Dictionary): Key is parameter's name and vaule is its corresponding sparse mask Variable object, which is created by `ASPHelper.create_mask_variables`. @@ -516,7 +516,7 @@ class ProgramASPInfo(object): return self.__excluded_layers -class ASPHelper(object): +class ASPHelper: r""" ASPHelper is a collection of Auto SParsity (ASP) functions to enable @@ -917,7 +917,7 @@ class ASPHelper(object): ) -class OptimizerWithSparsityGuarantee(object): +class OptimizerWithSparsityGuarantee: r""" OptimizerWithSparsityGuarantee is a wrapper to decorate `minimize` function of given optimizer by `_minimize` of ASPHelper. 
The decorated `minimize` function would do three things (exactly same as `ASPHelper._minimize`): diff --git a/python/paddle/fluid/data_feed_desc.py b/python/paddle/fluid/data_feed_desc.py index 7a58c5ef5e1b41da615d77d3f2851d5c501cdb2a..9e6257d96ef6afbbf8226f49a483bb0e9429d406 100644 --- a/python/paddle/fluid/data_feed_desc.py +++ b/python/paddle/fluid/data_feed_desc.py @@ -18,7 +18,7 @@ from google.protobuf import text_format __all__ = ['DataFeedDesc'] -class DataFeedDesc(object): +class DataFeedDesc: """ :api_attr: Static Graph diff --git a/python/paddle/fluid/data_feeder.py b/python/paddle/fluid/data_feeder.py index e84d41e56204c22e50625c83e79177d6f4ecb1e8..1f900eff0df87f7636bc5667b547ad61fa9525e7 100644 --- a/python/paddle/fluid/data_feeder.py +++ b/python/paddle/fluid/data_feeder.py @@ -221,7 +221,7 @@ def check_shape( check_dtype(shape.dtype, 'shape', expected_tensor_dtype, op_name) -class DataToLoDTensorConverter(object): +class DataToLoDTensorConverter: def __init__(self, place, lod_level, shape, dtype): self.place = place self.lod_level = lod_level @@ -280,7 +280,7 @@ class DataToLoDTensorConverter(object): return t -class BatchedTensorProvider(object): +class BatchedTensorProvider: def __init__(self, feed_list, place, batch_size, generator, drop_last): self.place = place self.batch_size = batch_size @@ -319,7 +319,7 @@ class BatchedTensorProvider(object): [c._reset() for c in self.converters] -class DataFeeder(object): +class DataFeeder: """ :api_attr: Static Graph diff --git a/python/paddle/fluid/dataloader/batch_sampler.py b/python/paddle/fluid/dataloader/batch_sampler.py index 624754ae286ad3d84473e8f64bd8df9160169d56..5ac1c79d0cdf70c68e045b96826fc36dc8cd7be3 100644 --- a/python/paddle/fluid/dataloader/batch_sampler.py +++ b/python/paddle/fluid/dataloader/batch_sampler.py @@ -159,7 +159,7 @@ class BatchSampler(Sampler): return num_samples // self.batch_size -class _InfiniteIterableSampler(object): +class _InfiniteIterableSampler: def __init__(self, dataset, batch_size=1): assert isinstance( dataset, IterableDataset diff --git a/python/paddle/fluid/dataloader/dataloader_iter.py b/python/paddle/fluid/dataloader/dataloader_iter.py index 06464d528df9e60bf71fc861c0ab1e9f54d9c9db..c89c3e079f62401567fd37a12816af8803273b94 100644 --- a/python/paddle/fluid/dataloader/dataloader_iter.py +++ b/python/paddle/fluid/dataloader/dataloader_iter.py @@ -90,7 +90,7 @@ def _clear_loader(): CleanupFuncRegistrar.register(_clear_loader) -class _DataLoaderIterBase(object): +class _DataLoaderIterBase: """ Iterator implement of DataLoader, will load and feed mini-batch data by setting in given dataloader. diff --git a/python/paddle/fluid/dataloader/dataset.py b/python/paddle/fluid/dataloader/dataset.py index 5fe52196cd88f2c3a4c5df10f6487973e38bc991..04e03ec844aacf37e4f6e02ee9c2590cb97e7d56 100755 --- a/python/paddle/fluid/dataloader/dataset.py +++ b/python/paddle/fluid/dataloader/dataset.py @@ -26,7 +26,7 @@ __all__ = [ ] -class Dataset(object): +class Dataset: """ An abstract class to encapsulate methods and behaviors of datasets. 
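
A map-style `Dataset` like the one patched above only has to expose `__getitem__` and `__len__`. A minimal concrete subclass, written against the public `paddle.io.Dataset` alias (illustrative):

    import numpy as np
    from paddle.io import Dataset


    class RandomDataset(Dataset):
        def __init__(self, num_samples):
            self.num_samples = num_samples

        def __getitem__(self, idx):
            # Return one (feature, label) sample per index.
            image = np.random.random([32]).astype('float32')
            label = np.random.randint(0, 9, (1,)).astype('int64')
            return image, label

        def __len__(self):
            return self.num_samples


    ds = RandomDataset(10)
    print(len(ds), ds[0][0].shape)  # 10 (32,)
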
diff --git a/python/paddle/fluid/dataloader/fetcher.py b/python/paddle/fluid/dataloader/fetcher.py index 8d5a908729a6824fc963457f1be85823d8d6b78a..b097a315c0c7353631fac76e3bdaca52d6ea2435 100644 --- a/python/paddle/fluid/dataloader/fetcher.py +++ b/python/paddle/fluid/dataloader/fetcher.py @@ -19,7 +19,7 @@ from collections.abc import Sequence, Mapping _WARNING_TO_LOG = True -class _DatasetFetcher(object): +class _DatasetFetcher: def __init__(self, dataset, auto_collate_batch, collate_fn, drop_last): self.dataset = dataset self.auto_collate_batch = auto_collate_batch diff --git a/python/paddle/fluid/dataloader/sampler.py b/python/paddle/fluid/dataloader/sampler.py index 3626ed63e521c12f8bdfd0ef3001823c4a0acac6..afd8fa7da025705ff83fe6d3da20ffa483406f3a 100644 --- a/python/paddle/fluid/dataloader/sampler.py +++ b/python/paddle/fluid/dataloader/sampler.py @@ -23,7 +23,7 @@ __all__ = [ ] -class Sampler(object): +class Sampler: """ An abstract class to encapsulate methods and behaviors of samplers. diff --git a/python/paddle/fluid/dataloader/worker.py b/python/paddle/fluid/dataloader/worker.py index 6016c04054d6fc0bbc1ce80bb77881959044c8a1..155208791eacd2f828dbaf1d314617bf7accd8cf 100644 --- a/python/paddle/fluid/dataloader/worker.py +++ b/python/paddle/fluid/dataloader/worker.py @@ -34,16 +34,16 @@ import queue __all__ = ['get_worker_info'] -class _IterableDatasetStopIteration(object): +class _IterableDatasetStopIteration: def __init__(self, worker_id): self.worker_id = worker_id -class _ResumeIteration(object): +class _ResumeIteration: pass -class _DatasetKind(object): +class _DatasetKind: MAP = 0 ITER = 1 @@ -63,7 +63,7 @@ class _DatasetKind(object): raise NotImplementedError("unknown Dataset kind {}".format(kind)) -class ParentWatchDog(object): +class ParentWatchDog: def __init__(self): self._parent_pid = os.getppid() self._parent_alive = True @@ -145,7 +145,7 @@ def get_worker_info(): return _worker_info -class WorkerInfo(object): +class WorkerInfo: __initialized = False def __init__(self, **kwargs): @@ -163,7 +163,7 @@ class WorkerInfo(object): return super().__setattr__(key, val) -class _WorkerException(object): +class _WorkerException: def __init__(self, worker_id, exc_info=None): self.worker_id = worker_id exc_info = exc_info or sys.exc_info() diff --git a/python/paddle/fluid/dataset.py b/python/paddle/fluid/dataset.py index 330ab0635d949ab37c31912a1c77e8642ecf7d28..b21550bcc3ad328e5497100bf5f89ced11c63e42 100644 --- a/python/paddle/fluid/dataset.py +++ b/python/paddle/fluid/dataset.py @@ -21,7 +21,7 @@ from ..utils import deprecated __all__ = ['DatasetFactory', 'InMemoryDataset', 'QueueDataset'] -class DatasetFactory(object): +class DatasetFactory: """ DatasetFactory is a factory which create dataset by its name, you can create "QueueDataset" or "InMemoryDataset", or "FileInstantDataset", @@ -64,7 +64,7 @@ class DatasetFactory(object): ) -class DatasetBase(object): +class DatasetBase: """Base dataset class.""" def __init__(self): diff --git a/python/paddle/fluid/device_worker.py b/python/paddle/fluid/device_worker.py index 3cf41b3597fd10a04b82de1f215a968b773b631a..6b3359ac6131157bb24eab69c565587b0dc156f4 100644 --- a/python/paddle/fluid/device_worker.py +++ b/python/paddle/fluid/device_worker.py @@ -23,7 +23,7 @@ __all__ = [ ] -class DeviceWorker(object): +class DeviceWorker: """ DeviceWorker is an abstract class, which generates worker desc. 
This class is an inner class that we do computation logics within @@ -695,7 +695,7 @@ class HeterSection(DeviceWorker): ) -class DeviceWorkerFactory(object): +class DeviceWorkerFactory: def _create_device_worker(self, worker_type): classname = worker_type.capitalize() return globals()[classname]() diff --git a/python/paddle/fluid/distributed/downpour.py b/python/paddle/fluid/distributed/downpour.py index c710b7337ccb0a51c0d29c5da85ff851a0f71812..1f7e146c3f35b6abf4bb57255096fc81a7fd01b9 100644 --- a/python/paddle/fluid/distributed/downpour.py +++ b/python/paddle/fluid/distributed/downpour.py @@ -25,7 +25,7 @@ from paddle.fluid.distribute_lookup_table import ( from google.protobuf import text_format -class DownpourSGD(object): +class DownpourSGD: r""" Distributed optimizer of downpour stochastic gradient descent Standard implementation of Google's Downpour SGD diff --git a/python/paddle/fluid/distributed/fleet.py b/python/paddle/fluid/distributed/fleet.py index 08b46ce04733ab3c82e3d2bc33ec99656fb95c0f..552dc91598ba470699a9513d879f44b760bd2f8e 100644 --- a/python/paddle/fluid/distributed/fleet.py +++ b/python/paddle/fluid/distributed/fleet.py @@ -18,7 +18,7 @@ from google.protobuf import text_format __all__ = ['Fleet'] -class Fleet(object): +class Fleet: """ """ def __init__(self): diff --git a/python/paddle/fluid/distributed/helper.py b/python/paddle/fluid/distributed/helper.py index 4a1643733393a3e808625c887c5cae7c82e41a6f..93cec48408367b436ca8398c4d5573f8ca3af7ca 100644 --- a/python/paddle/fluid/distributed/helper.py +++ b/python/paddle/fluid/distributed/helper.py @@ -13,7 +13,7 @@ # limitations under the License. -class FileSystem(object): +class FileSystem: """ A file system that support hadoop client desc. @@ -54,7 +54,7 @@ class FileSystem(object): return self.fs_client -class MPIHelper(object): +class MPIHelper: """ MPIHelper is a wrapper of mpi4py, support get_rank get_size etc. Args: diff --git a/python/paddle/fluid/distributed/node.py b/python/paddle/fluid/distributed/node.py index 793787d0fd7b681bbe88956cbffc07fdfb22f6bf..19fa5ec20b96dc229e43a8dedef27087fb797512 100644 --- a/python/paddle/fluid/distributed/node.py +++ b/python/paddle/fluid/distributed/node.py @@ -17,7 +17,7 @@ import ps_pb2 as pslib from functools import reduce -class Server(object): +class Server: """ A Server basic class. """ @@ -26,7 +26,7 @@ class Server(object): pass -class Worker(object): +class Worker: """ A Worker basic class. 
""" diff --git a/python/paddle/fluid/distributed/ps_instance.py b/python/paddle/fluid/distributed/ps_instance.py index 370e1b19647245f56b4b1177781eebace060f263..91388b97b01aa8472eb5460c8b14ad55e0d60437 100644 --- a/python/paddle/fluid/distributed/ps_instance.py +++ b/python/paddle/fluid/distributed/ps_instance.py @@ -14,7 +14,7 @@ from .helper import MPIHelper -class PaddlePSInstance(object): +class PaddlePSInstance: """ PaddlePSInstance class is used to generate A instance of server or worker Args: diff --git a/python/paddle/fluid/dygraph/amp/auto_cast.py b/python/paddle/fluid/dygraph/amp/auto_cast.py index bb1acc7c09bfcc7610bb622c14cf0fb21dfa875f..721d10d76b128639fa4d1b124bc002f8df5138af 100644 --- a/python/paddle/fluid/dygraph/amp/auto_cast.py +++ b/python/paddle/fluid/dygraph/amp/auto_cast.py @@ -463,7 +463,7 @@ def amp_guard( tracer._amp_dtype = original_amp_dtype -class StateDictHook(object): +class StateDictHook: def __init__(self, save_dtype): self._save_dtype = save_dtype diff --git a/python/paddle/fluid/dygraph/amp/loss_scaler.py b/python/paddle/fluid/dygraph/amp/loss_scaler.py index c59588e9d03b611fe3240f6edca52b42df9d4e3f..0985237f516dac03d843cee687fbf8dfceb536c1 100644 --- a/python/paddle/fluid/dygraph/amp/loss_scaler.py +++ b/python/paddle/fluid/dygraph/amp/loss_scaler.py @@ -41,7 +41,7 @@ def _refresh_optimizer_state(): return {"state": OptimizerState.INIT} -class AmpScaler(object): +class AmpScaler: """ :api_attr: imperative diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/base_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/base_transformer.py index 044f9055e7a391227815229fc2adae873e31aab7..5f082acca5d401e02ecdfb6a722b1ee23693a80b 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/base_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/base_transformer.py @@ -200,7 +200,7 @@ class ForLoopTuplePreTransformer(BaseTransformer): return [assign_node] -class ForNodeVisitor(object): +class ForNodeVisitor: """ This class parses python for statement, get transformed 3 statement components of for node three key statements: diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/convert_call_func.py b/python/paddle/fluid/dygraph/dygraph_to_static/convert_call_func.py index 2ac1d6403c0f9b220c1196de18e6704dde1e9b0b..72b6cc98839d69c83e04af3c11852b826b84ae6f 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/convert_call_func.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/convert_call_func.py @@ -60,7 +60,7 @@ translator_logger = TranslatorLogger() CONVERSION_OPTIONS = "An attribute for a function that indicates conversion flags of the function in dynamic-to-static." -class ConversionOptions(object): +class ConversionOptions: """ A container for conversion flags of a function in dynamic-to-static. 
diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/error.py b/python/paddle/fluid/dygraph/dygraph_to_static/error.py index 7e05b5db891f3dd118398e05b304f21a74afdf80..f4a66100807fda87f46084c6d2b63e7f9ae41906 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/error.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/error.py @@ -153,7 +153,7 @@ class TraceBackFrameRange(OriginInfo): return msg + '\n'.join(self.source_code) -class SuggestionDict(object): +class SuggestionDict: def __init__(self): # {(keywords): (suggestions)} self.suggestion_dict = { @@ -174,7 +174,7 @@ class Dy2StKeyError(Exception): pass -class ErrorData(object): +class ErrorData: """ Error data attached to an exception which is raised in un-transformed code. """ diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/function_spec.py b/python/paddle/fluid/dygraph/dygraph_to_static/function_spec.py index 039df7e84e433a3ac4c60c7558ebbac4d82f055f..cc77e05dad1174f43ee59af9f8d010d117d893be 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/function_spec.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/function_spec.py @@ -30,7 +30,7 @@ from paddle.fluid.dygraph.dygraph_to_static.utils import func_to_source_code from paddle.fluid.dygraph.io import TranslatedLayer -class FunctionSpec(object): +class FunctionSpec: """ Wrapper class for a function for class method. """ diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/logging_utils.py b/python/paddle/fluid/dygraph/dygraph_to_static/logging_utils.py index 6f73a23316d272aaf34ad3116133d139d1dae22b..7b004964a472ddf65e13b3fb3e06044202c2a9a5 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/logging_utils.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/logging_utils.py @@ -36,7 +36,7 @@ def synchronized(func): return wrapper -class TranslatorLogger(object): +class TranslatorLogger: """ class for Logging and debugging during the tranformation from dygraph to static graph. The object of this class is a singleton. diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/origin_info.py b/python/paddle/fluid/dygraph/dygraph_to_static/origin_info.py index 4bfc73a32565fa5a3bfc2fd04ee58c70a850bc24..d6ff463a70d7cd546d16c3defcfa1be82f2ce18f 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/origin_info.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/origin_info.py @@ -27,7 +27,7 @@ except: from collections import Sequence -class Location(object): +class Location: """ Location information of source code. """ @@ -53,7 +53,7 @@ class Location(object): return (self.filepath, self.lineno) -class OriginInfo(object): +class OriginInfo: """ Original information of source code. """ diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py b/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py index 6eb38302a5841454449c19a7a083772341e29fc0..bc371cc99c9174df08278cd66d7069af486a1ce9 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py @@ -45,7 +45,7 @@ from paddle.fluid.dygraph.amp.auto_cast import ( from paddle import _legacy_C_ops -class NestSequence(object): +class NestSequence: """ A wrapper class that easily to flatten and restore the nest structure of given sequence. @@ -108,7 +108,7 @@ class NestSequence(object): return self.__input_list[item] -class LazyInitialized(object): +class LazyInitialized: """ Descriptor to implement lazy initialization of property. 
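
`LazyInitialized` above is the classic lazy-property descriptor: run the initializer on first attribute access, then cache the result on the instance so later accesses are plain lookups. A self-contained sketch of how such a descriptor can work (illustrative, not the patched class):

    class lazy_initialized:
        def __init__(self, function):
            self.function = function
            self.name = function.__name__

        def __get__(self, instance, owner):
            if instance is None:
                return self
            value = self.function(instance)       # run the initializer once
            instance.__dict__[self.name] = value  # cache; shadows the descriptor
            return value


    class Holder:
        @lazy_initialized
        def resource(self):
            print("initializing...")
            return [1, 2, 3]


    h = Holder()
    print(h.resource)  # prints "initializing..." then [1, 2, 3]
    print(h.resource)  # cached: no second initialization
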
""" diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py b/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py index be7dc08403a8341d59a4c9bfc852bb8d1b9e7416..443242563132760b0135e8c8da09edffe31f71d0 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py @@ -68,7 +68,7 @@ __all__ = ['ProgramTranslator', 'convert_to_static'] MAX_TRACED_PROGRAM_COUNT = 10 -class FunctionCache(object): +class FunctionCache: """ Caches the transformed functions to avoid redundant conversions of the same function. """ @@ -158,7 +158,7 @@ def convert_to_static(function): return static_func -class CacheKey(object): +class CacheKey: """ Cached key for ProgramCache. """ @@ -280,7 +280,7 @@ def unwrap_decorators(func): return decorators, cur -class StaticFunction(object): +class StaticFunction: """ Wrapper class to Manage program conversion of decorated function. @@ -847,7 +847,7 @@ def _verify_init_in_dynamic_mode(class_instance): ) -class HookHelper(object): +class HookHelper: """ Only For converting pre/post hooks operation in outermost layer while jit.save. Because hooks in sublayer have been processed automatically. @@ -901,7 +901,7 @@ class HookHelper(object): return outputs -class ConcreteProgram(object): +class ConcreteProgram: __slots__ = [ 'inputs', @@ -1037,7 +1037,7 @@ def _extract_indeed_params_buffers(class_instance): return params + buffers -class ProgramCache(object): +class ProgramCache: """ Wrapper class for the program functions defined by dygraph function. """ @@ -1119,7 +1119,7 @@ def synchronized(func): return lock_func -class ProgramTranslator(object): +class ProgramTranslator: """ Class to translate dygraph function into static graph function. The object of this class is a singleton. diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/static_analysis.py b/python/paddle/fluid/dygraph/dygraph_to_static/static_analysis.py index bf07523e12dba245fd0a821d19e02c6f17d1a30f..5b6c3d1261c840100667a109f235f386529d1811 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/static_analysis.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/static_analysis.py @@ -25,7 +25,7 @@ from .utils import ( __all__ = ['AstNodeWrapper', 'NodeVarType', 'StaticAnalysisVisitor'] -class NodeVarType(object): +class NodeVarType: """ Enum class of python variable types. We have to know some variable types during compile time to transfer AST. For example, a string variable and a @@ -112,7 +112,7 @@ class NodeVarType(object): return NodeVarType.UNKNOWN -class AstNodeWrapper(object): +class AstNodeWrapper: """ Wrapper for python gast.node. We need a node wrapper because gast.node doesn't store all required information when we are transforming AST. @@ -127,7 +127,7 @@ class AstNodeWrapper(object): self.node_var_type = {NodeVarType.UNKNOWN} -class AstVarScope(object): +class AstVarScope: """ AstVarScope is a class holding the map from current scope variable to its type. @@ -181,7 +181,7 @@ class AstVarScope(object): return self.parent_scope.get_var_type(var_name) -class AstVarEnv(object): +class AstVarEnv: """ A class maintains scopes and mapping from name strings to type. 
""" @@ -231,7 +231,7 @@ class AstVarEnv(object): return cur_scope_dict -class StaticAnalysisVisitor(object): +class StaticAnalysisVisitor: """ A class that does static analysis """ diff --git a/python/paddle/fluid/dygraph/io.py b/python/paddle/fluid/dygraph/io.py index 383fb1eff5d4509ff566099ed71b88c03ab38481..eca171cacd3300e1e8863d507f7bab1913d18b41 100644 --- a/python/paddle/fluid/dygraph/io.py +++ b/python/paddle/fluid/dygraph/io.py @@ -325,7 +325,7 @@ def _change_is_test_status(program_desc, is_test): op._set_attr('is_test', is_test) -class _ProgramHolder(object): +class _ProgramHolder: """ Holds the execution information of a Program. diff --git a/python/paddle/fluid/dygraph/jit.py b/python/paddle/fluid/dygraph/jit.py index 9936b4a23ac92e87402ebe721296570154773f38..c359c6d152f9efbe46c06b312a85e6e6eb3d4f21 100644 --- a/python/paddle/fluid/dygraph/jit.py +++ b/python/paddle/fluid/dygraph/jit.py @@ -334,7 +334,7 @@ def not_to_static(func=None): return func -class _SaveLoadConfig(object): +class _SaveLoadConfig: def __init__(self): self._output_spec = None self._model_filename = None @@ -621,7 +621,7 @@ _save_pre_hooks_lock = threading.Lock() _save_pre_hooks = [] -class HookRemoveHelper(object): +class HookRemoveHelper: """A HookRemoveHelper that can be used to remove hook.""" def __init__(self, hook): @@ -1496,7 +1496,7 @@ def _trace( return original_outputs, program, feed_names, fetch_names, parameters -class TracedLayer(object): +class TracedLayer: """ :api_attr: imperative diff --git a/python/paddle/fluid/dygraph/layers.py b/python/paddle/fluid/dygraph/layers.py index 5187f9ae7229528f974a3d25758f3eee73480ffb..752694b614af70a9317fa9cdbdb66002264cb0c5 100644 --- a/python/paddle/fluid/dygraph/layers.py +++ b/python/paddle/fluid/dygraph/layers.py @@ -89,7 +89,7 @@ def _addindent(string, indent): return s1[0] + '\n' + '\n'.join(s2) -class HookRemoveHelper(object): +class HookRemoveHelper: """A HookRemoveHelper that can be used to remove hook.""" next_hook_id = 0 @@ -105,7 +105,7 @@ class HookRemoveHelper(object): del hooks[self._hook_id] -class Layer(object): +class Layer: """ Dynamic graph Layer based on OOD, includes the parameters of the layer, the structure of the forward graph and so on. diff --git a/python/paddle/fluid/dygraph/learning_rate_scheduler.py b/python/paddle/fluid/dygraph/learning_rate_scheduler.py index 3ca63f505cb9a8b6538ee6c18836fd096a1ef411..3afe92cbc62342aa9e34d96fdcb53e22afbc413b 100644 --- a/python/paddle/fluid/dygraph/learning_rate_scheduler.py +++ b/python/paddle/fluid/dygraph/learning_rate_scheduler.py @@ -35,7 +35,7 @@ __all__ = [ ] -class LearningRateDecay(object): +class LearningRateDecay: """ Base class of learning rate decay diff --git a/python/paddle/fluid/dygraph/parallel.py b/python/paddle/fluid/dygraph/parallel.py index a317cf676e4aeddbd454fa941f24d524f4d0ff77..85c95c6b2b3e68b60cd1cc01ecaba5fa42964616 100644 --- a/python/paddle/fluid/dygraph/parallel.py +++ b/python/paddle/fluid/dygraph/parallel.py @@ -82,7 +82,7 @@ def prepare_context(strategy=None): return strategy -class ParallelEnv(object): +class ParallelEnv: """ .. 
note:: This API is not recommended, if you need to get rank and world_size, diff --git a/python/paddle/fluid/dygraph/static_runner.py b/python/paddle/fluid/dygraph/static_runner.py index afa2091caf7b03fd5b95ff6ab1fb52f883fb5569..c0a61fb3c2419e3ad10cf1a738bb081d21ff7c13 100644 --- a/python/paddle/fluid/dygraph/static_runner.py +++ b/python/paddle/fluid/dygraph/static_runner.py @@ -18,7 +18,7 @@ from paddle.fluid.dygraph.io import TranslatedLayer # NOTE: This class will be deprecated later. # It is kept here because PaddleHub is already using this API. -class StaticModelRunner(object): +class StaticModelRunner: """ A Dynamic graph Layer for loading inference program and related parameters, and then performing fine-tune training or inference. diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py index 51fb34a24e657dc830c5c3c3b41e566489611fac..7c7aa964cf84d5b2e1ee62a52cc04c0d8e0feda5 100644 --- a/python/paddle/fluid/dygraph/varbase_patch_methods.py +++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py @@ -44,7 +44,7 @@ from paddle import _C_ops, _legacy_C_ops _grad_scalar = None -class TensorHookRemoveHelper(object): +class TensorHookRemoveHelper: """ A helper class that for removing Tensor gradient's hook. NOTE(wuweilong):the operation weakref.ref(tensor) will cause some unexpected errors in eager mode. diff --git a/python/paddle/fluid/entry_attr.py b/python/paddle/fluid/entry_attr.py index a86f81e6dd997be68399f2186587b225306c4d78..29ab37a3f3f4cf5c57e4dbb8b96e973efea7a382 100644 --- a/python/paddle/fluid/entry_attr.py +++ b/python/paddle/fluid/entry_attr.py @@ -15,7 +15,7 @@ __all__ = ['ProbabilityEntry', 'CountFilterEntry'] -class EntryAttr(object): +class EntryAttr: """ Examples: .. 
code-block:: python diff --git a/python/paddle/fluid/evaluator.py b/python/paddle/fluid/evaluator.py index a9d0c3c0594aec052cb41b72f0f9f1ed0ef11d8e..007337b9d0404fa264f7627ced35b5a3980a4701 100644 --- a/python/paddle/fluid/evaluator.py +++ b/python/paddle/fluid/evaluator.py @@ -41,7 +41,7 @@ def _clone_var_(block, var): ) -class Evaluator(object): +class Evaluator: """ Warning: better to use the fluid.metrics.* things, more flexible support via pure Python and Operator, and decoupled diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py index 2dbf2d5cedafcb440bf275c616614fe85883686e..41440cfdb0e1a9408a03902a1bce1d29b4670c48 100755 --- a/python/paddle/fluid/executor.py +++ b/python/paddle/fluid/executor.py @@ -620,7 +620,7 @@ def _as_lodtensor(data, place, dtype=None): return tensor -class FetchHandler(object): +class FetchHandler: def __init__(self, var_dict=None, period_secs=60): assert var_dict is not None self.var_dict = var_dict @@ -648,7 +648,7 @@ handler = FetchHandlerExample(var_dict=var_dict) ) -class _StandaloneExecutor(object): +class _StandaloneExecutor: def __init__(self, place, main_program, scope): self._place = core.Place() self._place.set_place(place) @@ -736,8 +736,8 @@ class _StandaloneExecutor(object): return res -class _ExecutorCache(object): - class _CachedData(object): +class _ExecutorCache: + class _CachedData: def __init__( self, program, @@ -908,7 +908,7 @@ class _ExecutorCache(object): return new_program, new_exe -class Executor(object): +class Executor: """ :api_attr: Static Graph diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index e68286844ed6769464a6b740fec5920c55d76603..9f30a4e08a31fcf0b2167af577b20d9a99c5da7f 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -1077,7 +1077,7 @@ def mlu_places(device_ids=None): return [core.MLUPlace(dev_id) for dev_id in device_ids] -class NameScope(object): +class NameScope: def __init__(self, name="", parent=None): self._children = dict() self._name = name @@ -2655,7 +2655,7 @@ def get_all_op_protos(): return ret_values -class OpProtoHolder(object): +class OpProtoHolder: """ A global variable to hold all OpProtos from C++ as a map """ @@ -2709,7 +2709,7 @@ class OpProtoHolder(object): } -class Operator(object): +class Operator: """ In Fluid, all the operation are represented by Operator, and Operator is regarded as a build in an instruction of a Block. Users can use the @@ -3550,7 +3550,7 @@ class Operator(object): self.desc.dist_attr = dist_attr -class Block(object): +class Block: """ In Fluid, a Program is consistence of multi-Block, and Block stores VarDesc and OpDesc. In a specific Block, a VarDesc have a unique name. @@ -4342,7 +4342,7 @@ def _apply_pass( return attrs -class IrNode(object): +class IrNode: """ Python IrNode. Beneath it is a core.Node, which is used for Ir Pass. """ @@ -4783,7 +4783,7 @@ class IrOpNode(IrNode): return [IrVarNode(n) for n in self.node.outputs] -class IrGraph(object): +class IrGraph: """ Python IrGraph. Beneath it is a core.Graph, which is used for creating a c++ Ir Pass Graph. An IrGraph is just a graph view of @@ -5218,7 +5218,7 @@ class IrGraph(object): desc._set_attr(name, val) -class Program(object): +class Program: """ Create Python Program. 
It has at least one :ref:`api_guide_Block_en`, when the control flow op like conditional_block, while :ref:`api_paddle_fluid_layers_While` is included, diff --git a/python/paddle/fluid/graphviz.py b/python/paddle/fluid/graphviz.py index b7ef1c7531fc00fd47167303500e64adf997f395..0f9bc17edfbf3ef863070593a6e3eadb168fa510 100644 --- a/python/paddle/fluid/graphviz.py +++ b/python/paddle/fluid/graphviz.py @@ -25,7 +25,7 @@ def crepr(v): return str(v) -class Rank(object): +class Rank: def __init__(self, kind, name, priority): ''' kind: str @@ -49,7 +49,7 @@ class Rank(object): ) -class Graph(object): +class Graph: rank_counter = 0 def __init__(self, title, **attrs): @@ -145,7 +145,7 @@ class Graph(object): return '\n'.join(reprs) -class Node(object): +class Node: counter = 1 def __init__(self, label, prefix, description="", **attrs): @@ -170,7 +170,7 @@ class Node(object): return reprs -class Edge(object): +class Edge: def __init__(self, source, target, **attrs): ''' Link source to target. @@ -199,7 +199,7 @@ class Edge(object): return repr -class GraphPreviewGenerator(object): +class GraphPreviewGenerator: ''' Generate a graph image for ONNX proto. ''' diff --git a/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py b/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py index 5fa0ed085b1870c1cc6c6d62d8eedb5b753aa7bd..33c95f03b1f5a981848166a0140b50bb79cb5e83 100644 --- a/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py +++ b/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py @@ -69,7 +69,7 @@ def _thread_checker(): ), "auto checkpoint must run under main thread" -class AutoCheckpointChecker(object): +class AutoCheckpointChecker: def __init__(self): self._run_env = None self._platform = None diff --git a/python/paddle/fluid/incubate/checkpoint/checkpoint_saver.py b/python/paddle/fluid/incubate/checkpoint/checkpoint_saver.py index 21e305afc16bca0dba630113f17ff17fcd6577ee..79161fe0fa0203165b8611c59493765ac988e53e 100644 --- a/python/paddle/fluid/incubate/checkpoint/checkpoint_saver.py +++ b/python/paddle/fluid/incubate/checkpoint/checkpoint_saver.py @@ -15,7 +15,7 @@ from ...compiler import CompiledProgram -class SerializableBase(object): +class SerializableBase: def serialize(self, path): raise NotImplementedError @@ -54,7 +54,7 @@ class PaddleModel(SerializableBase): ) -class CheckpointSaver(object): +class CheckpointSaver: def __init__(self, fs): self._fs = fs self._checkpoint_prefix = "__paddle_checkpoint__" diff --git a/python/paddle/fluid/incubate/data_generator/__init__.py b/python/paddle/fluid/incubate/data_generator/__init__.py index 4729f44f2b12c65d16da3b69dd205a65adda3fcb..3e66b75e28faf82607d3baa6477c9364d2e6c096 100644 --- a/python/paddle/fluid/incubate/data_generator/__init__.py +++ b/python/paddle/fluid/incubate/data_generator/__init__.py @@ -18,7 +18,7 @@ import sys __all__ = ['MultiSlotDataGenerator', 'MultiSlotStringDataGenerator'] -class DataGenerator(object): +class DataGenerator: """ DataGenerator is a general Base class for user to inherit A user who wants to define his/her own python processing logic diff --git a/python/paddle/fluid/incubate/fleet/base/fleet_base.py b/python/paddle/fluid/incubate/fleet/base/fleet_base.py index ea03e9305605a19d8ad792180ef297f66784bba0..61b7fe7de50625fabbf1848fec2e96072d8d417e 100644 --- a/python/paddle/fluid/incubate/fleet/base/fleet_base.py +++ b/python/paddle/fluid/incubate/fleet/base/fleet_base.py @@ -30,7 +30,7 @@ __all__ = ['Fleet', 'DistributedOptimizer'] __all__ += mode.__all__ -class Fleet(object): +class 
Fleet:
     """
     Fleet is the base class, transpiler and pslib are implementation of Fleet.
@@ -268,7 +268,7 @@ class Fleet(object):
         pass


-class DistributedOptimizer(object):
+class DistributedOptimizer:
     """
     DistributedOptimizer is a wrapper for paddle.fluid.optimizer
     A user should pass a paddle.fluid.optimizer to DistributedOptimizer
diff --git a/python/paddle/fluid/incubate/fleet/base/role_maker.py b/python/paddle/fluid/incubate/fleet/base/role_maker.py
index d0e27dc8bdc83c36c39c89c116319a72ae9aacec..3f639d7753b13ac2b0e8e1a3aca8f68120d49d88 100644
--- a/python/paddle/fluid/incubate/fleet/base/role_maker.py
+++ b/python/paddle/fluid/incubate/fleet/base/role_maker.py
@@ -35,7 +35,7 @@ class Role:
     XPU = 3


-class MockBarrier(object):
+class MockBarrier:
     """
     MockBarrier is an empty implementation for barrier
     mock as a real barrier for never-barrier in a specific scenario
@@ -70,7 +70,7 @@ class MockBarrier(object):
         return [obj]


-class RoleMakerBase(object):
+class RoleMakerBase:
     """
     RoleMakerBase is a base class for assigning a role to current process
     in distributed training.
diff --git a/python/paddle/fluid/incubate/fleet/collective/__init__.py b/python/paddle/fluid/incubate/fleet/collective/__init__.py
index 51900ccbd90dd023114de50325ce4585e5030457..949ef93a472a30a0c85a38e46edbbaa05befdc81 100644
--- a/python/paddle/fluid/incubate/fleet/collective/__init__.py
+++ b/python/paddle/fluid/incubate/fleet/collective/__init__.py
@@ -40,12 +40,12 @@ import re
 import shutil


-class LambConfig(object):
+class LambConfig:
     def __init__(self):
         pass


-class DistFCConfig(object):
+class DistFCConfig:
     def __init__(self):
         pass
diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/distributed_strategy.py b/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/distributed_strategy.py
index d2d32d4529efba95c92b0dc31e7ba38c6cfb2bbf..0958dafd3c2f88ce37049cfbe61624688d79922f 100644
--- a/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/distributed_strategy.py
+++ b/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/distributed_strategy.py
@@ -31,7 +31,7 @@ from paddle.fluid.transpiler.distribute_transpiler import (
 from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode


-class TrainerRuntimeConfig(object):
+class TrainerRuntimeConfig:
     def __init__(self):
         self.mode = None
         num_threads = os.getenv("CPU_NUM", "1")
@@ -160,7 +160,7 @@ class TrainerRuntimeConfig(object):
         return self.display(self.get_communicator_flags())


-class PSLibRuntimeConfig(object):
+class PSLibRuntimeConfig:
     def __init__(self):
         self.runtime_configs = {}
@@ -168,7 +168,7 @@ class PSLibRuntimeConfig(object):
         return self.runtime_configs


-class DistributedStrategy(object):
+class DistributedStrategy:
     def __init__(self):
         self._program_config = DistributeTranspilerConfig()
         self._trainer_runtime_config = TrainerRuntimeConfig()
@@ -456,7 +456,7 @@ class GeoStrategy(DistributedStrategy):
         self._build_strategy.async_mode = True


-class StrategyFactory(object):
+class StrategyFactory:
     def __init_(self):
         pass
diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/ir/ps_dispatcher.py b/python/paddle/fluid/incubate/fleet/parameter_server/ir/ps_dispatcher.py
index 441246879dfcf23c83badda6c347092fe4d7a3ca..4155413cb59f9b4745522ef86fe56aa83e7ebb75 100644
--- a/python/paddle/fluid/incubate/fleet/parameter_server/ir/ps_dispatcher.py
+++ b/python/paddle/fluid/incubate/fleet/parameter_server/ir/ps_dispatcher.py
@@ -13,7 +13,7 @@
 # limitations under
the License. -class PSDispatcher(object): +class PSDispatcher: """ PSDispatcher is the base class for dispatching vars into different pserver instance. diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/ir/public.py b/python/paddle/fluid/incubate/fleet/parameter_server/ir/public.py index 865ffd2e0fd0c4c1184b56b844c4afbb25292d6c..53460cf036ff217892a504f00831ce2b2686a56f 100755 --- a/python/paddle/fluid/incubate/fleet/parameter_server/ir/public.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/ir/public.py @@ -135,7 +135,7 @@ def Singleton(cls): @Singleton -class CompileTimeStrategy(object): +class CompileTimeStrategy: def __init__(self, main_program, startup_program, strategy, role_maker): self.min_block_size = 81920 diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/ir/ufind.py b/python/paddle/fluid/incubate/fleet/parameter_server/ir/ufind.py index fef6f24570c17bfc28dc87d699891d84c292a59e..f6a05cbf86c6b938e83dd4d4f7c7476fdcb5c1e6 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/ir/ufind.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/ir/ufind.py @@ -13,7 +13,7 @@ # limitations under the License. -class UnionFind(object): +class UnionFind: """Union-find data structure. Union-find is a data structure that keeps track of a set of elements partitioned diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/ir/vars_metatools.py b/python/paddle/fluid/incubate/fleet/parameter_server/ir/vars_metatools.py index 745c05d986a6e66dbf4dce6089dbbe44d05cc84c..e32b1253d4f33c16105653cf4f4188549b4c361f 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/ir/vars_metatools.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/ir/vars_metatools.py @@ -52,7 +52,7 @@ def create_var_struct(var): ) -class VarStruct(object): +class VarStruct: """ record part properties of a Variable in python. """ @@ -80,7 +80,7 @@ class VarStruct(object): ) -class VarDistributed(object): +class VarDistributed: """ a class to record the var distributed on parameter servers. the class will record the relationship between origin var and slice var. @@ -187,7 +187,7 @@ class VarDistributed(object): ) -class VarsDistributed(object): +class VarsDistributed: """ a gather about VarDistributed with many methods to find distributed vars. through the class, we can get overview about the distributed parameters on parameter servers. diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py index c9942581e011305acfad9650044c3350dd504348..8e5f39415feeedbaa50bdff8590e9e30f1d5e22b 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py @@ -1027,7 +1027,7 @@ def _fleet_embedding_v2( ) -class fleet_embedding(object): +class fleet_embedding: """ fleet embedding class, it is used as a wrapper Example: diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/node.py b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/node.py index d1c9fae5ccbeb368de74a6dd788995b2af0e3a28..73fcd18bdb7b732d280bd98087d65a5b44716c48 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/node.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/node.py @@ -18,7 +18,7 @@ from . 
import ps_pb2 as pslib from functools import reduce -class Server(object): +class Server: """ A Server basic class it's a base class, does not have implementation @@ -28,7 +28,7 @@ class Server(object): pass -class Worker(object): +class Worker: """ A Worker basic class. it's a base class, does not have implementation diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py index b55431ba0f8a6923179c19ee32cb87738044a6bd..db5b90374ca324cc91c4dd1e0b3763141d18a519 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py @@ -54,7 +54,7 @@ ch.setFormatter(formatter) logger.addHandler(ch) -class DistributedOptimizerImplBase(object): +class DistributedOptimizerImplBase: """ DistributedOptimizerImplBase base class of optimizers diff --git a/python/paddle/fluid/incubate/fleet/utils/fleet_util.py b/python/paddle/fluid/incubate/fleet/utils/fleet_util.py index 8218d941729ee535a6bd9a05d7f78f6fddf525d5..8d5203f201d084960e2c5098b97fef0b631c01b2 100644 --- a/python/paddle/fluid/incubate/fleet/utils/fleet_util.py +++ b/python/paddle/fluid/incubate/fleet/utils/fleet_util.py @@ -39,7 +39,7 @@ _logger = get_logger( fleet = None -class FleetUtil(object): +class FleetUtil: """ FleetUtil provides some common functions for users' convenience. diff --git a/python/paddle/fluid/incubate/fleet/utils/http_server.py b/python/paddle/fluid/incubate/fleet/utils/http_server.py index 10e15dad0d804384321b20553724005906989bdf..79b436ff40c8323de4bfe29324ab3b7de94a25ff 100644 --- a/python/paddle/fluid/incubate/fleet/utils/http_server.py +++ b/python/paddle/fluid/incubate/fleet/utils/http_server.py @@ -126,7 +126,7 @@ class KVHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): self.end_headers() -class KVHTTPServer(HTTPServer, object): +class KVHTTPServer(HTTPServer): """ it is a http server storing kv pairs. """ diff --git a/python/paddle/fluid/initializer.py b/python/paddle/fluid/initializer.py index bb162573e3a80d6b1be6d8db3f791c6ed10ddd8a..b0c573936b8160fdf0446c6235f87666a8a6bb09 100644 --- a/python/paddle/fluid/initializer.py +++ b/python/paddle/fluid/initializer.py @@ -55,7 +55,7 @@ _global_weight_initializer_ = None _global_bias_initializer_ = None -class Initializer(object): +class Initializer: """Base class for variable initializers Defines the common interface of variable initializers. 
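
One hunk above, `KVHTTPServer(HTTPServer, object)` becoming `KVHTTPServer(HTTPServer)`, is the multiple-inheritance variant of the same cleanup: a trailing `object` base adds nothing in Python 3, since `object` already terminates every MRO. A tiny sketch (illustrative):

    class Base:
        pass


    class WithTrailingObject(Base, object):
        pass


    class WithoutTrailingObject(Base):
        pass


    # object already terminates every MRO, so the two forms are identical.
    assert WithTrailingObject.__mro__ == (WithTrailingObject, Base, object)
    assert WithoutTrailingObject.__mro__ == (WithoutTrailingObject, Base, object)
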
diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py index 789c9d0e3187be14a91df262c99b49f15f4d21dd..a308d5e261393424b71b1979769e58ea80ffb2f2 100644 --- a/python/paddle/fluid/io.py +++ b/python/paddle/fluid/io.py @@ -87,7 +87,7 @@ _logger = get_logger( ) -class _open_buffer(object): +class _open_buffer: def __init__(self, buffer): self.buffer = buffer diff --git a/python/paddle/fluid/ir.py b/python/paddle/fluid/ir.py index 7dbe815b5e0e01e987eea838302212818e1b1cb6..fb077ed8b5f0dd46a7c3f8d3a97fa97a621c751e 100644 --- a/python/paddle/fluid/ir.py +++ b/python/paddle/fluid/ir.py @@ -140,7 +140,7 @@ def apply_build_strategy( return build_strategy -class RegisterPassHelper(object): +class RegisterPassHelper: _register_helpers = list() def __init__(self, pass_pairs, pass_type=str(), input_specs=dict()): @@ -286,8 +286,8 @@ class RegisterPassHelper(object): return multi_pass_desc.SerializeToString() -class PassDesc(object): - class AttrHelper(object): +class PassDesc: + class AttrHelper: def __init__(self, obj, name, element_index=None): self._obj = obj self._name = name @@ -422,7 +422,7 @@ class PassDesc(object): self._attrs[name] = attr return attr - class OpHelper(object): + class OpHelper: def __init__(self, type=None): self._type = type diff --git a/python/paddle/fluid/layer_helper_base.py b/python/paddle/fluid/layer_helper_base.py index 1ea8d504add89abd78ba385cd161474ac3bc7bb7..91ec751cc282cc0341e22150103f5710dc320658 100644 --- a/python/paddle/fluid/layer_helper_base.py +++ b/python/paddle/fluid/layer_helper_base.py @@ -31,7 +31,7 @@ from .initializer import _global_weight_initializer, _global_bias_initializer __all__ = ['LayerHelperBase'] -class LayerHelperBase(object): +class LayerHelperBase: # global dtype __dtype = "float32" diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 13934b76afd307a0c9aee669cd8628e7df81ea2b..91065258a13ad89808c607928fac6911b36946b6 100755 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -536,7 +536,7 @@ def Assert(cond, data=None, summarize=20, name=None): return op -class BlockGuard(object): +class BlockGuard: """ BlockGuard class. @@ -584,7 +584,7 @@ class BlockGuardWithCompletion(BlockGuard): return super().__exit__(exc_type, exc_val, exc_tb) -class StaticRNNMemoryLink(object): +class StaticRNNMemoryLink: """ StaticRNNMemoryLink class. 
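
The `BlockGuard` family above follows the guard pattern: `__enter__` creates and enters a new block in the program, `__exit__` rolls back to the enclosing one, so a `with` statement delimits a graph region. A generic sketch of the pattern using a hypothetical `Program` stand-in, not fluid's classes:

    class Program:
        def __init__(self):
            self.blocks = [[]]      # block 0 is the global block
            self.current = 0

        def create_block(self):
            self.blocks.append([])
            self.current = len(self.blocks) - 1
            return self.current

        def rollback(self):
            self.current = 0        # return to the enclosing block


    class BlockGuard:
        def __init__(self, program):
            self.program = program

        def __enter__(self):
            self.program.create_block()
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            self.program.rollback()
            return False            # never swallow exceptions


    prog = Program()
    with BlockGuard(prog):
        prog.blocks[prog.current].append("some_op")
    print(prog.current, prog.blocks[1])  # 0 ['some_op']
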
@@ -607,7 +607,7 @@ class StaticRNNMemoryLink(object): self.mem = mem -class StaticRNN(object): +class StaticRNN: """ :api_attr: Static Graph @@ -1179,7 +1179,7 @@ def get_inputs_outputs_in_block( return inner_inputs, inner_outputs -class While(object): +class While: """ :api_attr: Static Graph @@ -2463,7 +2463,7 @@ class ConditionalBlockGuard(BlockGuard): return super().__exit__(exc_type, exc_val, exc_tb) -class ConditionalBlock(object): +class ConditionalBlock: ''' **ConditionalBlock** @@ -3173,7 +3173,7 @@ def case(pred_fn_pairs, default=None, name=None): return final_fn() -class Switch(object): +class Switch: """ :api_attr: Static Graph @@ -3303,7 +3303,7 @@ class Switch(object): return True -class IfElseBlockGuard(object): +class IfElseBlockGuard: def __init__(self, is_true, ifelse): if not isinstance(ifelse, IfElse): raise TypeError("ifelse must be an instance of IfElse class") @@ -3340,7 +3340,7 @@ class IfElseBlockGuard(object): self.ie.status = IfElse.OUT_IF_ELSE_BLOCKS -class IfElse(object): +class IfElse: """ :api_attr: Static Graph @@ -3530,7 +3530,7 @@ class IfElse(object): return rlist -class DynamicRNN(object): +class DynamicRNN: """ :api_attr: Static Graph diff --git a/python/paddle/fluid/layers/distributions.py b/python/paddle/fluid/layers/distributions.py index 174b2f8e36c08dc65f8641f27dd403661a4318b5..bd0b39caf0d9e5094129319c224c39ad254138b0 100644 --- a/python/paddle/fluid/layers/distributions.py +++ b/python/paddle/fluid/layers/distributions.py @@ -31,7 +31,7 @@ from ..data_feeder import ( __all__ = ['Uniform', 'Normal', 'Categorical', 'MultivariateNormalDiag'] -class Distribution(object): +class Distribution: """ Distribution is the abstract base class for probability distributions. """ diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index e4f765dcc663056cd7b59907aaa116ebd2e5f4c1..1dcc07a20d06c0beab05d8d0d1316543e50f3269 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -172,7 +172,7 @@ class BlockGuardServ(BlockGuard): return super().__exit__(exc_type, exc_val, exc_tb) -class ListenAndServ(object): +class ListenAndServ: """ **ListenAndServ Layer** diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 5623413af01fedd994438094c44932931b67a644..57e8a24e0dc75311703d417ad6c247f870679629 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -14771,7 +14771,7 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"): ) -class PyFuncRegistry(object): +class PyFuncRegistry: _register_funcs = [] def __init__(self, func): diff --git a/python/paddle/fluid/layers/rnn.py b/python/paddle/fluid/layers/rnn.py index 797272510f0c128409e51c77489e8a7ca09c6f77..5cfc6e37ce1cc5a8f2b7b6140dda43efa190f1bf 100644 --- a/python/paddle/fluid/layers/rnn.py +++ b/python/paddle/fluid/layers/rnn.py @@ -62,7 +62,7 @@ __all__ = [ ] -class RNNCell(object): +class RNNCell: """ :api_attr: Static Graph @@ -160,7 +160,7 @@ class RNNCell(object): return True return isinstance(seq, Sequence) and not isinstance(seq, str) - class Shape(object): + class Shape: def __init__(self, shape): self.shape = shape if shape[0] == -1 else ([-1] + list(shape)) @@ -544,7 +544,7 @@ def rnn( ) -class ArrayWrapper(object): +class ArrayWrapper: def __init__(self, x): self.array = [x] @@ -823,7 +823,7 @@ def birnn( return outputs, final_states -class Decoder(object): +class Decoder: """ :api_attr: Static Graph @@ -1869,7 +1869,7 @@ def dynamic_decode( ) -class 
DecodeHelper(object): +class DecodeHelper: """ DecodeHelper is the base class for any helper instance used in `BasicDecoder`. It provides interface to implement sampling and produce inputs for the next diff --git a/python/paddle/fluid/lazy_init.py b/python/paddle/fluid/lazy_init.py index d2118259d03cf902d687199ba272b0501533507a..6242ad2c4eded0de908913630a11d6351a7c65a0 100644 --- a/python/paddle/fluid/lazy_init.py +++ b/python/paddle/fluid/lazy_init.py @@ -17,7 +17,7 @@ from . import framework __all__ = ["LazyGuard"] -class LazyInitHelper(object): +class LazyInitHelper: """ A Helper Context to trigger switching mode between dygraph and static mode, and holds the startup program resource. @@ -88,7 +88,7 @@ def lazy_init_helper(): return _lazy_init_helper -class LazyGuard(object): +class LazyGuard: """ LazyGuard is a wrapper interface for nn.Layer, it forwards the construct process of user defined Layer. Meanwhile, it provides necessary API to diff --git a/python/paddle/fluid/metrics.py b/python/paddle/fluid/metrics.py index 1f5d941dc0f66ba2f061dd4b2cd3c32492a7059e..5776b4efdf3b905bcc8a5d5bc1ec7d819d14ceea 100644 --- a/python/paddle/fluid/metrics.py +++ b/python/paddle/fluid/metrics.py @@ -55,7 +55,7 @@ def _is_number_or_matrix_(var): return _is_number_(var) or isinstance(var, np.ndarray) -class MetricBase(object): +class MetricBase: """ In many cases, we usually have to split the test data into mini-batches for evaluating deep neural networks, therefore we need to collect the evaluation results of each @@ -818,7 +818,7 @@ class Auc(MetricBase): ) -class DetectionMAP(object): +class DetectionMAP: """ Calculate the detection mean average precision (mAP). diff --git a/python/paddle/fluid/op.py b/python/paddle/fluid/op.py index c8b118127cc13b6a3b017716151f68c4b36a82ed..515a5f8e770a14a04d3c165990e34c86c9fe3f1c 100644 --- a/python/paddle/fluid/op.py +++ b/python/paddle/fluid/op.py @@ -35,7 +35,7 @@ def is_str(s): return isinstance(s, str) -class OpDescCreationMethod(object): +class OpDescCreationMethod: """ Convert the user's input(only keyword arguments are supported) to OpDesc based on the OpProto. @@ -181,7 +181,7 @@ class OpDescCreationMethod(object): return False -class OpInfo(object): +class OpInfo: def __init__(self, name, method, inputs, outputs, attrs, extra_attrs): self.name = name self.method = method @@ -213,7 +213,7 @@ def create_op_creation_method(op_proto): ) -class OperatorFactory(object): +class OperatorFactory: def __init__(self): self.op_methods = dict() @@ -266,7 +266,7 @@ class OperatorFactory(object): return self.get_op_info(type).extra_attrs -class __RecurrentOp__(object): +class __RecurrentOp__: __proto__ = None type = "recurrent" @@ -287,7 +287,7 @@ class __RecurrentOp__(object): return core.RecurrentOp.create(proto.SerializeToString()) -class __DynamicRecurrentOp__(object): +class __DynamicRecurrentOp__: __proto__ = None type = "dynamic_recurrent" @@ -308,7 +308,7 @@ class __DynamicRecurrentOp__(object): return core.DynamicRecurrentOp.create(proto.SerializeToString()) -class __CondOp__(object): +class __CondOp__: __proto__ = None type = "cond" diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 9f361827f06036b83cefcda737cd82e28f98a199..7eaa38636c2529fb2c0098443bb24c6ebf7dd9b6 100755 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -99,7 +99,7 @@ __all__ = [ ] -class Optimizer(object): +class Optimizer: """Optimizer Base class. Define the common interface of an optimizer. 
@@ -4617,7 +4617,7 @@ class ModelAverage(Optimizer): executor.run(self.restore_program) -class ExponentialMovingAverage(object): +class ExponentialMovingAverage: r""" :api_attr: Static Graph @@ -4877,7 +4877,7 @@ class ExponentialMovingAverage(object): executor.run(self.restore_program) -class PipelineOptimizer(object): +class PipelineOptimizer: """ :api_attr: Static Graph @@ -7600,7 +7600,7 @@ class RecomputeOptimizer(Optimizer): return optimize_ops, params_grads -class LookaheadOptimizer(object): +class LookaheadOptimizer: r""" :api_attr: Static Graph @@ -7780,7 +7780,7 @@ class LookaheadOptimizer(object): return mini_out -class GradientMergeOptimizer(object): +class GradientMergeOptimizer: """ Gradient Merge, also called as Gradient Accumulation, is a training strategy for larger batches. With this strategy, diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py index c0a45b041e6a5d65c1deb18567d928dd72902236..269172ea994e22ff6a6d34da7d97bfd22f113311 100644 --- a/python/paddle/fluid/parallel_executor.py +++ b/python/paddle/fluid/parallel_executor.py @@ -25,7 +25,7 @@ ExecutionStrategy = core.ParallelExecutor.ExecutionStrategy BuildStrategy = core.ParallelExecutor.BuildStrategy -class ParallelExecutor(object): +class ParallelExecutor: """ :api_attr: Static Graph diff --git a/python/paddle/fluid/param_attr.py b/python/paddle/fluid/param_attr.py index e1c8568d93a1eee655a4981ee58850bc21f53bc8..f251a654a992b58fab2beec6bb6ee0ba1817db37 100644 --- a/python/paddle/fluid/param_attr.py +++ b/python/paddle/fluid/param_attr.py @@ -22,7 +22,7 @@ __all__ = [ ] -class ParamAttr(object): +class ParamAttr: """ Note: diff --git a/python/paddle/fluid/reader.py b/python/paddle/fluid/reader.py index 83aedd052a838e2de8c80a3d973d02333ba29d8e..2ad3e5903aab55e99c337bc234e2a869c87b9ca1 100644 --- a/python/paddle/fluid/reader.py +++ b/python/paddle/fluid/reader.py @@ -145,7 +145,7 @@ def _reader_process_loop(batch_reader, data_queue): raise -class DataLoaderBase(object): +class DataLoaderBase: def __init__(self): self._places = None @@ -181,7 +181,7 @@ class DataLoaderBase(object): return arr -class AuToTune(object): +class AuToTune: def __init__(self, loader): self.loader = loader self.max_num_worker = multiprocessing.cpu_count() / 2 @@ -318,7 +318,7 @@ class AuToTune(object): return best_workers -class DataLoader(object): +class DataLoader: """ DataLoader prodives an iterator which iterates given dataset once by the batch_sampler. diff --git a/python/paddle/fluid/regularizer.py b/python/paddle/fluid/regularizer.py index 60112d182e24f5b9338f1760979b365a6555aaf9..1152f0bbd86b6aafeda6ed4f0def651d702ba455 100644 --- a/python/paddle/fluid/regularizer.py +++ b/python/paddle/fluid/regularizer.py @@ -22,7 +22,7 @@ from paddle import _C_ops, _legacy_C_ops __all__ = ['L1Decay', 'L2Decay', 'L1DecayRegularizer', 'L2DecayRegularizer'] -class WeightDecayRegularizer(object): +class WeightDecayRegularizer: """Base class for weight decay regularizers Defines the common interface of weight-decay regularizers. 
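
Editor's note on the pattern above: every hunk in this patch is the same mechanical edit, dropping the explicit `object` base that Python 2 needed to get new-style classes. On Python 3 the base is implicit, so the edit is behavior-preserving. A quick self-contained check of that equivalence (a sketch for illustration, not part of the patch):

```python
# Verify that `class C:` and `class C(object):` are the same thing on Python 3.
class WithBase(object):
    pass


class WithoutBase:
    pass


# Both are new-style classes with `object` at the root of the MRO.
assert type(WithBase) is type(WithoutBase) is type
assert WithBase.__mro__[1:] == (object,) == WithoutBase.__mro__[1:]
assert issubclass(WithoutBase, object)
print("equivalent on Python 3")
```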
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_communicate_group.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_communicate_group.py index 0c6bf6e9deccef5df5341d260c1cc7afbab3f886..20e4b8312d08df22af03a7d795170411d47c07e3 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_communicate_group.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_communicate_group.py @@ -17,7 +17,7 @@ import paddle from paddle.distributed import fleet -class TestNewGroupAPI(object): +class TestNewGroupAPI: def __init__(self): paddle.distributed.init_parallel_env() topo = fleet.CommunicateTopology( diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/new_group.py b/python/paddle/fluid/tests/unittests/collective/fleet/new_group.py index 9fa469ea5e234aff2e60b3d5019ac544a9e82aad..40924c66bb4279981442f98024beebcb5e700100 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/new_group.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/new_group.py @@ -16,7 +16,7 @@ import numpy as np import paddle -class TestNewGroupAPI(object): +class TestNewGroupAPI: def __init__(self): paddle.distributed.init_parallel_env() d1 = np.array([1, 2, 3]) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_transformer.py b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_transformer.py index c4c10f75a610a0bbf29b8ecb61f7757eb0b83fb9..f5b5903831b24170fe9674c3e905d29db086b3f9 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_transformer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_transformer.py @@ -36,7 +36,7 @@ Note(chenweihang): To compare loss of single-card and multi-card """ -class TrainTaskConfig(object): +class TrainTaskConfig: """ TrainTaskConfig """ @@ -61,7 +61,7 @@ class TrainTaskConfig(object): label_smooth_eps = 0.1 -class ModelHyperParams(object): +class ModelHyperParams: # These following five vocabularies related configurations will be set # automatically according to the passed vocabulary path and special tokens. # size of source word dictionary. 
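
`TrainTaskConfig` and `ModelHyperParams` above are plain classes used as read-only attribute namespaces: the test never instantiates them and reads hyperparameters straight off the class. A minimal sketch of that idiom (names and values hypothetical), which is untouched by removing the `object` base:

```python
class HyperParams:
    # Class attributes act as a lightweight, importable config namespace.
    batch_size = 4
    d_model = 512
    dropout = 0.1


def feed_shape(cfg=HyperParams):
    # Read straight off the class; no instance is ever constructed.
    return [cfg.batch_size, cfg.d_model]


print(feed_shape())  # [4, 512]
```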
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py index 798847ed3e4a11d609b3542d03433ec44424fd70..c2ac727156420a93809edf36a29d67acdd55b6eb 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py @@ -612,7 +612,7 @@ class TestAmpDecorator(unittest.TestCase): def test_input_type_exception(self): def test_error_model(): - class MyModel(object): + class MyModel: def __init__(self): print("A fake Model") @@ -631,7 +631,7 @@ class TestAmpDecorator(unittest.TestCase): self.assertRaises(RuntimeError, test_error_distributed_model) def test_error_optimizer(): - class MyOptimizer(object): + class MyOptimizer: def __init__(self): print("A fake Optimizer") diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py index 7957cceb7b8a4c58080940782505829d5707ee78..e2955483935afc71496c1350cd6a415a0a1dfe69 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py @@ -611,7 +611,7 @@ class TestAmpDecorator(unittest.TestCase): def test_input_type_exception(self): def test_error_model(): - class MyModel(object): + class MyModel: def __init__(self): print("A fake Model") @@ -630,7 +630,7 @@ class TestAmpDecorator(unittest.TestCase): self.assertRaises(RuntimeError, test_error_distributed_model) def test_error_optimizer(): - class MyOptimizer(object): + class MyOptimizer: def __init__(self): print("A fake Optimizer") diff --git a/python/paddle/fluid/tests/unittests/collective/multinode/test_collective_multi_nodes.py b/python/paddle/fluid/tests/unittests/collective/multinode/test_collective_multi_nodes.py index 868585f81a0bede0809ac4a13336be3146b17dc7..786b81a06e9b58c4169a421fb0d54169eb91e755 100644 --- a/python/paddle/fluid/tests/unittests/collective/multinode/test_collective_multi_nodes.py +++ b/python/paddle/fluid/tests/unittests/collective/multinode/test_collective_multi_nodes.py @@ -19,7 +19,7 @@ import subprocess import tempfile -class TestCollectiveAPIRunnerBase(object): +class TestCollectiveAPIRunnerBase: def check_pass(self, *args, **kwargs): raise NotImplementedError( "get model should be implemented by child class." 
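
`TestCollectiveAPIRunnerBase` above, like the other runner bases in this patch, marks its required override with a plain `NotImplementedError` stub rather than `abc.ABC`: the base stays instantiable and only fails when the stub is actually called. A stand-alone sketch of the pattern (subclass name hypothetical):

```python
class RunnerBase:
    def check_pass(self, *args, **kwargs):
        raise NotImplementedError(
            "get model should be implemented by child class."
        )


class AllReduceRunner(RunnerBase):
    def check_pass(self, *args, **kwargs):
        return "ok"


print(AllReduceRunner().check_pass())  # ok
try:
    RunnerBase().check_pass()  # fails at call time, not at instantiation
except NotImplementedError as exc:
    print("base is abstract by convention:", exc)
```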
diff --git a/python/paddle/fluid/tests/unittests/ctr_dataset_reader.py b/python/paddle/fluid/tests/unittests/ctr_dataset_reader.py index 5cc96cf1c4e58cc757b9baf61f14e012eb4ea2f0..88d1327eaf390e57af1763355af2ae37dd963c64 100644 --- a/python/paddle/fluid/tests/unittests/ctr_dataset_reader.py +++ b/python/paddle/fluid/tests/unittests/ctr_dataset_reader.py @@ -60,7 +60,7 @@ def load_lr_input_record(sent): return res -class CtrReader(object): +class CtrReader: def __init__(self): pass diff --git a/python/paddle/fluid/tests/unittests/dist_ctr_reader.py b/python/paddle/fluid/tests/unittests/dist_ctr_reader.py index fafb5f19c5f7d062759a84e3cebed4b5e5a4bcbd..2c6fa74b363f37cab557de76776013928b9148f4 100644 --- a/python/paddle/fluid/tests/unittests/dist_ctr_reader.py +++ b/python/paddle/fluid/tests/unittests/dist_ctr_reader.py @@ -108,7 +108,7 @@ def load_lr_input_record(sent): feeding_index = {'dnn_input': 0, 'lr_input': 1, 'click': 2} -class Dataset(object): +class Dataset: def train(self): ''' Load trainset. diff --git a/python/paddle/fluid/tests/unittests/dist_transformer.py b/python/paddle/fluid/tests/unittests/dist_transformer.py index 4765ee7d82bf53c09f80c9087cc9700801484b87..514fcf4b869be2e3a92cc4e6eb80bdd7d28d70b9 100644 --- a/python/paddle/fluid/tests/unittests/dist_transformer.py +++ b/python/paddle/fluid/tests/unittests/dist_transformer.py @@ -36,7 +36,7 @@ fluid.default_main_program().random_seed = 1 # from transformer_config import ModelHyperParams, TrainTaskConfig, merge_cfg_from_list -class TrainTaskConfig(object): +class TrainTaskConfig: # only support GPU currently use_gpu = True # the epoch number to train. @@ -88,7 +88,7 @@ class TrainTaskConfig(object): use_token_batch = False -class InferTaskConfig(object): +class InferTaskConfig: use_gpu = True # the number of examples in one run for sequence generation. batch_size = 10 @@ -105,7 +105,7 @@ class InferTaskConfig(object): model_path = "trained_models/pass_1.infer.model" -class ModelHyperParams(object): +class ModelHyperParams: # These following five vocabularies related configurations will be set # automatically according to the passed vocabulary path and special tokens. # size of source word dictionary. @@ -268,7 +268,7 @@ fast_decoder_data_input_fields = ( # from optim import LearningRateScheduler -class LearningRateScheduler(object): +class LearningRateScheduler: """ Wrapper for learning rate scheduling as described in the Transformer paper. 
LearningRateScheduler adapts the learning rate externally and the adapted @@ -714,13 +714,13 @@ def train_loop( # import transformer_reader as reader -class SortType(object): +class SortType: GLOBAL = 'global' POOL = 'pool' NONE = "none" -class Converter(object): +class Converter: def __init__(self, vocab, beg, end, unk, delimiter): self._vocab = vocab self._beg = beg @@ -739,7 +739,7 @@ class Converter(object): ) -class ComposedConverter(object): +class ComposedConverter: def __init__(self, converters): self._converters = converters @@ -750,7 +750,7 @@ class ComposedConverter(object): ] -class SentenceBatchCreator(object): +class SentenceBatchCreator: def __init__(self, batch_size): self.batch = [] self._batch_size = batch_size @@ -763,7 +763,7 @@ class SentenceBatchCreator(object): return tmp -class TokenBatchCreator(object): +class TokenBatchCreator: def __init__(self, batch_size): self.batch = [] self.max_len = -1 @@ -782,14 +782,14 @@ class TokenBatchCreator(object): self.batch.append(info) -class SampleInfo(object): +class SampleInfo: def __init__(self, i, max_len, min_len): self.i = i self.min_len = min_len self.max_len = max_len -class MinMaxFilter(object): +class MinMaxFilter: def __init__(self, max_len, min_len, underlying_creator): self._min_len = min_len self._max_len = max_len @@ -806,7 +806,7 @@ class MinMaxFilter(object): return self._creator.batch -class DataReader(object): +class DataReader: """ The data reader loads all data from files and produces batches of data in the way corresponding to settings. diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/bert_utils.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/bert_utils.py index 0cc22162d202c10b5cc4d889a017cf2e69c0953c..79c911c82e4601b21b9488e562c65e3d305acaf8 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/bert_utils.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/bert_utils.py @@ -223,7 +223,7 @@ def prepare_batch_data( return res -class DataReader(object): +class DataReader: def __init__( self, batch_size=4096, @@ -339,7 +339,7 @@ class DataReader(object): return wrapper -class ModelHyperParams(object): +class ModelHyperParams: generate_neg_sample = False epoch = 100 max_seq_len = 512 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py index a931276c56e9609737754e46585ffa2ec31ce097..2604bdd3a690d42918007a936813291cc224c76c 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py @@ -355,7 +355,7 @@ def if_with_and_or_4(x, y=None): def if_with_class_var(x, y=None): - class Foo(object): + class Foo: def __init__(self): self.a = 1 self.b = 2 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/predictor_utils.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/predictor_utils.py index 554cf95e82483ac4b8bdc56d0284f3e6a0171ded..2e49f3778ac27cce38d962103c32aeade1935a2c 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/predictor_utils.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/predictor_utils.py @@ -21,7 +21,7 @@ from paddle.fluid.core import AnalysisConfig from paddle.fluid.core import create_paddle_predictor -class PredictorTools(object): +class PredictorTools: ''' Paddle-Inference predictor ''' diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_utils.py 
b/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_utils.py index 44a55c79c92e33ead837a5c1eae771213587596b..e93405ebd399f1d48dff187718aeea32dce66b0c 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_utils.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_utils.py @@ -91,7 +91,7 @@ def get_data_iter(batch_size, mode='train', cache_num=20): yield (src_ids, src_mask, tar_ids, tar_mask) -class Seq2SeqModelHyperParams(object): +class Seq2SeqModelHyperParams: # Whether use attention model attention = False diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py index 7ea2a97cb0aa3cfc2ac3d645a8dbab1a3735bf9f..0bb08405141c3fc5b328bf31d828b03e23d4f877 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py @@ -21,7 +21,7 @@ from paddle.fluid.dygraph import Embedding, Layer, Linear from paddle.static import Variable -class EmbeddingLayer(object): +class EmbeddingLayer: """ Embedding Layer class """ @@ -53,7 +53,7 @@ class EmbeddingLayer(object): return emb -class FCLayer(object): +class FCLayer: """ Fully Connect Layer class """ @@ -79,7 +79,7 @@ class FCLayer(object): return fc -class ConcatLayer(object): +class ConcatLayer: """ Connection Layer class """ @@ -98,7 +98,7 @@ class ConcatLayer(object): return concat -class ReduceMeanLayer(object): +class ReduceMeanLayer: """ Reduce Mean Layer class """ @@ -117,7 +117,7 @@ class ReduceMeanLayer(object): return mean -class CosSimLayer(object): +class CosSimLayer: """ Cos Similarly Calculate Layer """ @@ -136,7 +136,7 @@ class CosSimLayer(object): return sim -class ElementwiseMaxLayer(object): +class ElementwiseMaxLayer: """ Elementwise Max Layer class """ @@ -155,7 +155,7 @@ class ElementwiseMaxLayer(object): return max -class ElementwiseAddLayer(object): +class ElementwiseAddLayer: """ Elementwise Add Layer class """ @@ -174,7 +174,7 @@ class ElementwiseAddLayer(object): return add -class ElementwiseSubLayer(object): +class ElementwiseSubLayer: """ Elementwise Add Layer class """ @@ -193,7 +193,7 @@ class ElementwiseSubLayer(object): return sub -class ConstantLayer(object): +class ConstantLayer: """ Generate A Constant Layer class """ @@ -215,7 +215,7 @@ class ConstantLayer(object): return constant -class SoftsignLayer(object): +class SoftsignLayer: """ Softsign Layer class """ @@ -439,7 +439,7 @@ class FC(Layer): return self._helper.append_activation(pre_activation, act=self._act) -class HingeLoss(object): +class HingeLoss: """ Hing Loss Calculate class """ diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py index ce4ba45f964e88d330040caffc678ab2a435183e..c4739b2dc37d4b38055f6adfea258a7a50b5eaee 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py @@ -17,7 +17,7 @@ import paddle from paddle.static import Variable -class EmbeddingLayer(object): +class EmbeddingLayer: """ Embedding Layer class """ @@ -50,7 +50,7 @@ class EmbeddingLayer(object): return emb -class FCLayer(object): +class FCLayer: """ Fully Connect Layer class """ @@ -76,7 +76,7 @@ class FCLayer(object): return fc -class ConcatLayer(object): +class 
ConcatLayer: """ Connection Layer class """ @@ -95,7 +95,7 @@ class ConcatLayer(object): return concat -class ReduceMeanLayer(object): +class ReduceMeanLayer: """ Reduce Mean Layer class """ @@ -114,7 +114,7 @@ class ReduceMeanLayer(object): return mean -class CosSimLayer(object): +class CosSimLayer: """ Cos Similarly Calculate Layer """ @@ -133,7 +133,7 @@ class CosSimLayer(object): return sim -class ElementwiseMaxLayer(object): +class ElementwiseMaxLayer: """ Elementwise Max Layer class """ @@ -152,7 +152,7 @@ class ElementwiseMaxLayer(object): return max -class ElementwiseAddLayer(object): +class ElementwiseAddLayer: """ Elementwise Add Layer class """ @@ -171,7 +171,7 @@ class ElementwiseAddLayer(object): return add -class ElementwiseSubLayer(object): +class ElementwiseSubLayer: """ Elementwise Add Layer class """ @@ -190,7 +190,7 @@ class ElementwiseSubLayer(object): return sub -class ConstantLayer(object): +class ConstantLayer: """ Generate A Constant Layer class """ @@ -212,7 +212,7 @@ class ConstantLayer(object): return constant -class SoftsignLayer(object): +class SoftsignLayer: """ Softsign Layer class """ @@ -425,7 +425,7 @@ class FC(paddle.nn.Layer): return self._helper.append_activation(pre_activation, act=self._act) -class HingeLoss(object): +class HingeLoss: """ Hing Loss Calculate class """ diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py index 7e6dad60b29941516be8f72b806ac2231902ed3c..9ce37b565b906aabd811a08d6ef31a1bd2a5a306 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py @@ -447,7 +447,7 @@ def bmn_loss_func( return loss, tem_loss, pem_reg_loss, pem_cls_loss -class Args(object): +class Args: epoch = 1 batch_size = 4 learning_rate = 0.1 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py index 45b149617d2bfc614d0ad9466a375c73e5e5d2f7..143b14f7ed641693d2dd1c53bb9c8de224b8e4e7 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py @@ -160,7 +160,7 @@ def test_for_in_else(x): def while_loop_class_var(x): - class Foo(object): + class Foo: def __init__(self): self.a = 3 self.b = 4 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py index 756da0243b9b122309ad57a180e1c067b1e8b5b2..12a4f48f6445240d012cf2a2332fa3009cfea29b 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py @@ -480,7 +480,7 @@ class DeConv2D(fluid.dygraph.Layer): return conv -class ImagePool(object): +class ImagePool: def __init__(self, pool_size=50): self.pool = [] self.count = 0 @@ -530,7 +530,7 @@ def reader_creater(): return reader -class Args(object): +class Args: epoch = 1 batch_size = 4 image_shape = [3, IMAGE_SIZE, IMAGE_SIZE] diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_full_name_usage.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_full_name_usage.py index a245b607f881dd1e3e5b5853d0e1bb1617cb95e9..f16d2410cbecc147c722b93f5138333c37366267 100644 --- 
a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_full_name_usage.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_full_name_usage.py @@ -44,7 +44,7 @@ def decorated_call_decorated(x): return jit_decorated_func(x) -class DoubleDecorated(object): +class DoubleDecorated: @classmethod @declarative def double_decorated_func1(self, x): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py index 3026d7e5675d8baf2b115509e86f46d7c31f2560..9d00db1caa660a8673eb842594114cede1ff36f4 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py @@ -472,7 +472,7 @@ class LexNet(fluid.dygraph.Layer): return avg_cost, crf_decode -class Args(object): +class Args: epoch = 1 batch_size = 4 vocab_size = 100 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py index 6bc237f3781b62b952e9818aa376337f698bba49..75c59897d8825de30d8ef31857173b9742294225 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py @@ -153,7 +153,7 @@ def while_loop_bool_op2(x): def while_loop_class_var(x): - class Foo(object): + class Foo: def __init__(self): self.a = 3 self.b = 4 @@ -179,7 +179,7 @@ def loop_var_contains_property(x): def for_loop_class_var(max_len): - class Foo(object): + class Foo: def __init__(self): self.a = 3 self.b = 4 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py index 2cb5451bfa8d003ac709335288c62d39d514e0ca..083345d9db99a4fbdfae516b9a41fb0660b86ec7 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py @@ -470,7 +470,7 @@ def fake_data_reader(batch_size, label_size): return reader -class Args(object): +class Args: batch_size = 4 model = "MobileNetV1" lr = 0.001 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py index 9e79d40df80b23f07ceb688a0ddbdd3ad9133f72..c111e5c4820d09313a44ab1f0d6edb22d10d53e5 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py @@ -52,7 +52,7 @@ class Policy(Layer): return log_prob -class Args(object): +class Args: gamma = 0.99 log_interval = 1 train_step = 10 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py index 5c52fade779ebcb210c38f6d95e4f6dc71ddef45..25ca7e08472a0cfedea787f534c54308e3d05fa1 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py @@ -293,7 +293,7 @@ def fake_data_reader(class_num, vocab_size, batch_size, padding_size): return reader -class Args(object): +class Args: epoch = 1 batch_size = 4 class_num = 2 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py index 
466c6affcb846fcbc82422d44d822eca2b7d17c7..3e70147d302963161e28a1683eb8ee7ed40a05ed 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py @@ -73,7 +73,7 @@ def fake_vocabulary(): vocab = fake_vocabulary() -class FakeReaderProcessor(object): +class FakeReaderProcessor: def __init__(self, args, vocab): self.vocab = vocab self.seq_len = args.seq_len diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet_v2.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet_v2.py index bbb408e48c3281fdca73cfdab24d2fdcb42e9945..b1ed858f85a125fd857a2f92b61b618b218c0096 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet_v2.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet_v2.py @@ -71,7 +71,7 @@ def fake_vocabulary(): vocab = fake_vocabulary() -class FakeReaderProcessor(object): +class FakeReaderProcessor: def __init__(self, args, vocab): self.vocab = vocab self.seq_len = args.seq_len diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py index 5923618ca3bcf3382826b59198d090779dab3067..cc307e5a7bb16b8a80de3cadcb65d934d568344c 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py @@ -214,7 +214,7 @@ class TSM_ResNet(fluid.dygraph.Layer): return y -class FakeDataReader(object): +class FakeDataReader: def __init__(self, mode, cfg): self.format = cfg.MODEL.format self.num_classes = cfg.MODEL.num_classes diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_yolov3.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_yolov3.py index eecd6806fa540b2616dd4624c3e69a3277f33f5d..086e56828a3c6b4b4be73122ee34a957b50b3ce4 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_yolov3.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_yolov3.py @@ -29,7 +29,7 @@ random.seed(0) np.random.seed(0) -class SmoothedValue(object): +class SmoothedValue: """Track a series of values and provide access to smoothed values over a window or the global series average. 
""" @@ -46,7 +46,7 @@ class SmoothedValue(object): return self.loss_sum / self.iter_cnt -class FakeDataReader(object): +class FakeDataReader: def __init__(self): self.generator_out = [] self.total_iter = cfg.max_iter diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py index b27a35ae7ec1abf283ea61032096390f601d6f09..ee32e0640ca98527abf03a50079765e20e4c939f 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py @@ -565,7 +565,7 @@ class WrapDecoder(Layer): return logits -class CrossEntropyCriterion(object): +class CrossEntropyCriterion: def __init__(self, label_smooth_eps): self.label_smooth_eps = label_smooth_eps diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_util.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_util.py index 7e77f1ad033db1ef6a5d18ac3db95c1421cc7395..bb0011de6bc9ed60efdcdab477cba7e079737863 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_util.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_util.py @@ -97,7 +97,7 @@ fast_decoder_data_input_fields = ( ) -class ModelHyperParams(object): +class ModelHyperParams: print_step = 2 save_dygraph_model_path = "dygraph_trained_models" save_static_model_path = "static_trained_models" @@ -290,7 +290,7 @@ def get_feed_data_reader(args, mode='train'): return __for_train__ if mode == 'train' else __for_test__ -class InputField(object): +class InputField: def __init__(self, input_slots): self.feed_list = [] for slot in input_slots: diff --git a/python/paddle/fluid/tests/unittests/feed_data_reader.py b/python/paddle/fluid/tests/unittests/feed_data_reader.py index 9a1f15ddff5ec7dea2f1cae7b9160e705d71fec0..ef2e18a42966d840d65844b2134cd7842d410ff4 100644 --- a/python/paddle/fluid/tests/unittests/feed_data_reader.py +++ b/python/paddle/fluid/tests/unittests/feed_data_reader.py @@ -25,7 +25,7 @@ def cyclic_reader(reader): return __reader__ -class FeedDataReader(object): +class FeedDataReader: def __init__(self, feed_list, reader): self._feed_list = [] for var in feed_list: diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py index 5adcf7a29875af054fad9e7b20633de9462cfae7..d15cbfc56f1dcda389ef25ab956f4bcc57da6977 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py @@ -39,7 +39,7 @@ def DataTypeCast(date_type): return np_data_type -class TestCollectiveAPIRunnerBase(object): +class TestCollectiveAPIRunnerBase: def get_model(self, train_prog, startup_prog, rank, indata=None): raise NotImplementedError( "get model should be implemented by child class." 
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py index 3e005bafb4131bc073daea78f9539a82215b9629..f4d41818b3325485a53eeb9d670fa9abf94c8a4b 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py @@ -51,7 +51,7 @@ def DataTypeCast(date_type): return np_data_type -class TestCollectiveRunnerBase(object): +class TestCollectiveRunnerBase: def get_model(self, train_prog, startup_prog, col_type): raise NotImplementedError( "get model should be implemented by child class." diff --git a/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py index 57202e62a43330917f26bdc8e4f2382f2753708c..1a7a2f2255145e0963d49c6e257c39796287f3ed 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py @@ -174,7 +174,7 @@ def pool2d_backward_navie( return x_grad -class TestPool2D_Op_Mixin(object): +class TestPool2D_Op_Mixin: def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True diff --git a/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_base_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_base_mlu.py index cb7a890adb22271d993c05323c45ae9bd407e1e7..281d2d9de027cfa21796f271deb3ff24af89502d 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_base_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_base_mlu.py @@ -40,7 +40,7 @@ paddle.enable_static() SEED = 10 -class TestSyncBatchNormRunnerBase(object): +class TestSyncBatchNormRunnerBase: def get_model( self, main, diff --git a/python/paddle/fluid/tests/unittests/npu/test_collective_base_npu.py b/python/paddle/fluid/tests/unittests/npu/test_collective_base_npu.py index 55bc9dce1863ef15ffd1bd58939e38393270c8ad..3e497ced85d618f7645fce8ae782f654adb468a9 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_collective_base_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_collective_base_npu.py @@ -28,7 +28,7 @@ import paddle.fluid.unique_name as nameGen from paddle.fluid import core -class TestCollectiveRunnerBase(object): +class TestCollectiveRunnerBase: def get_model(self, train_prog, startup_prog): raise NotImplementedError( "get model should be implemented by child class." diff --git a/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_base_npu.py b/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_base_npu.py index 101a749f424913d3e57a138b6b308cb8383319e4..0003d4c44ca6f762bbea3a2383bd4b0f37d1a7c2 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_base_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_base_npu.py @@ -45,7 +45,7 @@ paddle.enable_static() SEED = 10 -class TestSyncBatchNormRunnerBase(object): +class TestSyncBatchNormRunnerBase: def get_model( self, main, diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index baa4f26feb81c974115f21ed0912b9491b83a751..d359f3459fadb7b9aed877aeb573a72b12aeddc8 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -1566,7 +1566,7 @@ class OpTest(unittest.TestCase): ) return found[0] - class Checker(object): + class Checker: """base class for check with self.outputs. 
currently don't support check between checkers. """ diff --git a/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py index 1fa54bccd2414933a7548d2f495a6563257e0289..5b8c2e6e6734c5dd33ec470285a2eafb0abbe0b0 100755 --- a/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py +++ b/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py @@ -46,7 +46,7 @@ def is_distributed_env(): return True -class YamlHelper(object): +class YamlHelper: def load_yaml(self, yaml_file, other_part=None): part_list = ["runner", "hyper_parameters"] if other_part: @@ -317,7 +317,7 @@ def bf16_to_fp32(val): return np.float32(struct.unpack('<f', struct.pack('<I', np.uint32(val) << np.uint32(16)))[0]) -class DnnTrainer(object): +class DnnTrainer: # ... in which <bos>, <eos> and <unk> token has # alreay been added, but the <pad> token is not added. Transformer requires diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_op.py index b2ae6318cc5deb28d39f79e78c06f016c4243ef8..3692ef86279ef08f7f5792f4301716293971b295 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_op.py @@ -286,7 +286,7 @@ def pool2D_forward_naive( return out -class TestPool2D_Op_Mixin(object): +class TestPool2D_Op_Mixin: def setUp(self): self.op_type = "pool2d" self.use_cudnn = False diff --git a/python/paddle/fluid/tests/unittests/test_recurrent_op.py index 67149212a8ce6a7e364168f9aa1126ddad3d8402..29c12daf55ff3d7b0496273ce057758f7259421f 100644 --- a/python/paddle/fluid/tests/unittests/test_recurrent_op.py +++ b/python/paddle/fluid/tests/unittests/test_recurrent_op.py @@ -27,7 +27,7 @@ from paddle.fluid.backward import append_backward np.random.seed(123) -class PyRNNBase(object): +class PyRNNBase: def __init__(self, input_shape, output_shape): self.x = np.ones(shape=input_shape).astype("float32") self.y = np.zeros(shape=output_shape).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py index 00bf9735fa513616e5d7548164a475b7299d3a27..41e894badd164fb4bef3f53e8b0013a73515e576 100644 --- a/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py +++ b/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py @@ -105,7 +105,7 @@ class DecoderCell(layers.RNNCell): return out, [new_lstm_states, out] -class Encoder(object): +class Encoder: def __init__(self, num_layers, hidden_size, dropout_prob=0.0): self.encoder_cell = EncoderCell(num_layers, hidden_size, dropout_prob) @@ -119,7 +119,7 @@ class Encoder(object): return encoder_output, encoder_final_state -class Decoder(object): +class Decoder: def __init__( self, num_layers, @@ -191,7 +191,7 @@ class Decoder(object): return decoder_output, decoder_final_state, dec_seq_lengths -class Seq2SeqModel(object): +class Seq2SeqModel: """Seq2Seq model: RNN encoder-decoder with attention""" def __init__( @@ -302,7 +302,7 @@ class Seq2SeqModel(object): return probs, samples, sample_length -class PolicyGradient(object): +class PolicyGradient: """policy gradient""" def __init__(self, lr=None): @@ -395,7 +395,7 @@ def reward_func(samples, sample_length): ) -class MLE(object): +class MLE: """teacher-forcing MLE training""" def __init__(self, lr=None): @@ -413,7 +413,7 @@ class MLE(object): return loss -class SeqPGAgent(object): +class SeqPGAgent: def __init__( self, model_cls, diff --git a/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py
b/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py index 624f2f5e6165b646d19f83a8ef3bc3b979b34f6d..b86be49eaed9dc4b1502ed7de61b9eb9cc020482 100644 --- a/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py +++ b/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py @@ -18,7 +18,7 @@ import paddle paddle.enable_static() -class Decoder(object): +class Decoder: def __init__(self, transitions, use_tag=True): self.transitions = transitions self.use_tag = use_tag diff --git a/python/paddle/fluid/tests/unittests/test_warpctc_op.py b/python/paddle/fluid/tests/unittests/test_warpctc_op.py index 91bebab2f6c346198c995990e05d26b46ce2106e..b3febb9b40daa1cf75deb90196d0caee90a0779b 100644 --- a/python/paddle/fluid/tests/unittests/test_warpctc_op.py +++ b/python/paddle/fluid/tests/unittests/test_warpctc_op.py @@ -28,7 +28,7 @@ paddle.enable_static() CUDA_BLOCK_SIZE = 32 -class CTCForward(object): +class CTCForward: def __init__( self, softmax, diff --git a/python/paddle/fluid/tests/unittests/tokenizer/bert_tokenizer.py b/python/paddle/fluid/tests/unittests/tokenizer/bert_tokenizer.py index cd3546ebe3cd07fe6904e10acfc8bef1be7f0e50..59c530b7d8aa932dd864fc8d55a46c8ff4d0f2a5 100755 --- a/python/paddle/fluid/tests/unittests/tokenizer/bert_tokenizer.py +++ b/python/paddle/fluid/tests/unittests/tokenizer/bert_tokenizer.py @@ -26,7 +26,7 @@ from tokenizer_utils import ( ) -class BasicTokenizer(object): +class BasicTokenizer: """ Runs basic tokenization (punctuation splitting, lower casing, etc.). Args: @@ -165,7 +165,7 @@ class BasicTokenizer(object): return "".join(output) -class WordpieceTokenizer(object): +class WordpieceTokenizer: """ Runs WordPiece tokenization. Args: diff --git a/python/paddle/fluid/tests/unittests/tokenizer/tokenizer_utils.py b/python/paddle/fluid/tests/unittests/tokenizer/tokenizer_utils.py index 9e60b29ffb7abf34d5a1cd30b5ff5aee75cdfa35..2280292670316b46bee91fe8a9f34758197b8edf 100644 --- a/python/paddle/fluid/tests/unittests/tokenizer/tokenizer_utils.py +++ b/python/paddle/fluid/tests/unittests/tokenizer/tokenizer_utils.py @@ -147,7 +147,7 @@ def tokenize_chinese_chars(text): return output -class PretrainedTokenizer(object): +class PretrainedTokenizer: """ The base class for all pretrained tokenizers. It mainly provides common methods for loading (construction and loading) and saving pretrained tokenizers. 
Loading diff --git a/python/paddle/fluid/tests/unittests/utils.py b/python/paddle/fluid/tests/unittests/utils.py index 03993c2355eea3931d8333c14f581f47f03683e4..04c3085c3dffc105e982856ecd382c5c520e4d99 100644 --- a/python/paddle/fluid/tests/unittests/utils.py +++ b/python/paddle/fluid/tests/unittests/utils.py @@ -101,7 +101,7 @@ def load_dygraph_vars_to_scope(model_path, scope, place): load_dict_to_scope(scope, opti_dict) -class DyGraphProgramDescTracerTestHelper(object): +class DyGraphProgramDescTracerTestHelper: def __init__(self, unittest_obj): self.unittest_obj = unittest_obj diff --git a/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py b/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py index f1276f765a87e644f029a90c2d473d565f127c97..d645462c7dc98b5e464d37060aaecc94def68a80 100644 --- a/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py +++ b/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py @@ -98,7 +98,7 @@ xpu_test_device_op_white_list = [] xpu_test_device_op_type_white_list = [] -class XPUOpTestWrapper(object): +class XPUOpTestWrapper: def create_classes(self): base_class = None classes = [] diff --git a/python/paddle/fluid/tests/unittests/xpu/test_collective_base_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_collective_base_xpu.py index 1c00f3b94b8605469cebceed67ad9a2298a75225..b9be6077d4b4ffdae4e628cb4f9bf30ff95a9466 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_collective_base_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_collective_base_xpu.py @@ -49,7 +49,7 @@ def DataTypeCast(date_type): return np_data_type -class TestCollectiveRunnerBase(object): +class TestCollectiveRunnerBase: def get_model(self, train_prog, startup_prog): raise NotImplementedError( "get model should be implemented by child class." diff --git a/python/paddle/fluid/trainer_desc.py b/python/paddle/fluid/trainer_desc.py index 484042c53a8ce866103b72de75ff3533fe8a588f..96204a2399df63b5b6ee53799c06ee04f4dbf0ab 100644 --- a/python/paddle/fluid/trainer_desc.py +++ b/python/paddle/fluid/trainer_desc.py @@ -26,7 +26,7 @@ __all__ = [ ] -class TrainerDesc(object): +class TrainerDesc: ''' Set proto from python to c++. Can be initialized from train_desc. diff --git a/python/paddle/fluid/trainer_factory.py b/python/paddle/fluid/trainer_factory.py index 761895fe3044fe5684240338c341f1622a2f6a7c..281fbd8693c2d38f4337b01bbb7cc52dec23772f 100644 --- a/python/paddle/fluid/trainer_factory.py +++ b/python/paddle/fluid/trainer_factory.py @@ -45,7 +45,7 @@ from multiprocessing import Process, Manager __all__ = ["TrainerFactory", "FetchHandlerMonitor"] -class TrainerFactory(object): +class TrainerFactory: """ Create trainer and device worker. If opt_info is not None, it will get configs from opt_info, @@ -151,7 +151,7 @@ class TrainerFactory(object): return trainer -class FetchHandlerMonitor(object): +class FetchHandlerMonitor: """ Defination of FetchHandlerMonitor class, it's for fetch handler. 
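
`TrainerFactory` above resolves which trainer (and device worker) to build from the strings carried in `opt_info`. A self-contained sketch of that create-by-name shape (trainer class names borrowed for illustration; the lookup code is an assumption, not Paddle's actual implementation):

```python
class MultiTrainer:
    pass


class DistMultiTrainer:
    pass


class TrainerFactory:
    _trainers = {cls.__name__: cls for cls in (MultiTrainer, DistMultiTrainer)}

    def create_trainer(self, opt_info=None):
        # Fall back to the default trainer when no config is supplied.
        name = (opt_info or {}).get("trainer", "MultiTrainer")
        try:
            return self._trainers[name]()
        except KeyError:
            raise ValueError("unknown trainer: %r" % name)


trainer = TrainerFactory().create_trainer({"trainer": "DistMultiTrainer"})
print(type(trainer).__name__)  # DistMultiTrainer
```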
diff --git a/python/paddle/fluid/transpiler/collective.py b/python/paddle/fluid/transpiler/collective.py index aedbaeb43cde7274a1adf9f1bdb292a5d7564a89..c22b1746966f141c2eeba08e56e01b0d641f4ddc 100644 --- a/python/paddle/fluid/transpiler/collective.py +++ b/python/paddle/fluid/transpiler/collective.py @@ -31,7 +31,7 @@ __all__ = ['GradAllReduce', 'LocalSGD', 'MultiThread'] OpRole = core.op_proto_and_checker_maker.OpRole -class Collective(object): +class Collective: ''' ''' def __init__(self, nrings): diff --git a/python/paddle/fluid/transpiler/details/ufind.py b/python/paddle/fluid/transpiler/details/ufind.py index fef6f24570c17bfc28dc87d699891d84c292a59e..f6a05cbf86c6b938e83dd4d4f7c7476fdcb5c1e6 100644 --- a/python/paddle/fluid/transpiler/details/ufind.py +++ b/python/paddle/fluid/transpiler/details/ufind.py @@ -13,7 +13,7 @@ # limitations under the License. -class UnionFind(object): +class UnionFind: """Union-find data structure. Union-find is a data structure that keeps track of a set of elements partitioned diff --git a/python/paddle/fluid/transpiler/details/vars_distributed.py b/python/paddle/fluid/transpiler/details/vars_distributed.py index 32a27ed33b0b013166c8810aced35f8779a4e9be..cdd488bfa1d6e00cb4cc01b20047d95dfb1301ec 100644 --- a/python/paddle/fluid/transpiler/details/vars_distributed.py +++ b/python/paddle/fluid/transpiler/details/vars_distributed.py @@ -14,7 +14,7 @@ from paddle.fluid.framework import Variable -class VarStruct(object): +class VarStruct: """ record part properties of a Variable in python. """ @@ -28,7 +28,7 @@ class VarStruct(object): self.persistable = persistable -class VarDistributed(object): +class VarDistributed: """ a class to record the var distributed on parameter servers. the class will record the relationship between origin var and slice var. @@ -146,7 +146,7 @@ class VarDistributed(object): ) -class VarsDistributed(object): +class VarsDistributed: """ a gather about VarDistributed with many methods to find distributed vars. through the class, we can get overview about the distributed parameters on parameter servers. diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py index 6bdddddd93fe36b24c5d40208ce012e4030f5426..ee98dc94a6f1ef5b036b7203a9e50bb8785c7c3c 100644 --- a/python/paddle/fluid/transpiler/distribute_transpiler.py +++ b/python/paddle/fluid/transpiler/distribute_transpiler.py @@ -143,7 +143,7 @@ def slice_variable(var_list, slice_count, min_block_size): return blocks -class DistributeTranspilerConfig(object): +class DistributeTranspilerConfig: """ :api_attr: Static Graph @@ -248,7 +248,7 @@ class DistributeTranspilerConfig(object): self.__sync_mode = value -class ServerRuntimeConfig(object): +class ServerRuntimeConfig: def __init__(self): self._rpc_send_thread_num = int( os.getenv("FLAGS_rpc_send_thread_num", "12") @@ -261,7 +261,7 @@ class ServerRuntimeConfig(object): ) -class DistributeTranspiler(object): +class DistributeTranspiler: """ :api_attr: Static Graph diff --git a/python/paddle/fluid/transpiler/ps_dispatcher.py b/python/paddle/fluid/transpiler/ps_dispatcher.py index 589f2cb26e22f3bd2be92d8732ac82410c2b3a74..c42472f5a15c57957decff2d59ab67e7b36d7e12 100644 --- a/python/paddle/fluid/transpiler/ps_dispatcher.py +++ b/python/paddle/fluid/transpiler/ps_dispatcher.py @@ -13,7 +13,7 @@ # limitations under the License. -class PSDispatcher(object): +class PSDispatcher: """ PSDispatcher is the base class for dispatching vars into different pserver instance. 
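
The `ufind.py` hunk above touches `UnionFind`, the classic disjoint-set structure the transpiler uses to group related variables into one partition. A compact reference implementation with path compression and union by size (a sketch of the data structure, not the transpiler's exact code):

```python
class UnionFind:
    def __init__(self, elems):
        self.parent = {e: e for e in elems}
        self.size = {e: 1 for e in elems}

    def find(self, x):
        while self.parent[x] != x:
            # Path compression: point x at its grandparent as we walk up.
            self.parent[x] = self.parent[self.parent[x]]
            x = self.parent[x]
        return x

    def union(self, a, b):
        ra, rb = self.find(a), self.find(b)
        if ra == rb:
            return
        if self.size[ra] < self.size[rb]:  # union by size keeps trees shallow
            ra, rb = rb, ra
        self.parent[rb] = ra
        self.size[ra] += self.size[rb]

    def is_connected(self, a, b):
        return self.find(a) == self.find(b)


uf = UnionFind(["w1", "w2", "w3"])
uf.union("w1", "w2")
print(uf.is_connected("w1", "w2"), uf.is_connected("w1", "w3"))  # True False
```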
diff --git a/python/paddle/fluid/unique_name.py b/python/paddle/fluid/unique_name.py index 1c0c28dae8bda2e6e1c8d0d5336c7f3c820f7e45..c881196ef56a348345c7776977225cc0bc14deae 100644 --- a/python/paddle/fluid/unique_name.py +++ b/python/paddle/fluid/unique_name.py @@ -18,7 +18,7 @@ from .wrapped_decorator import signature_safe_contextmanager __all__ = ['generate', 'switch', 'guard'] -class UniqueNameGenerator(object): +class UniqueNameGenerator: """ Generate unique name with prefix. @@ -47,7 +47,7 @@ class UniqueNameGenerator(object): return self.prefix + "_".join([key, str(tmp)]) -class DygraphParameterNameChecker(object): +class DygraphParameterNameChecker: """ Check whether the name of parameter is used. """ diff --git a/python/paddle/hapi/callbacks.py b/python/paddle/hapi/callbacks.py index c4ccb0341ebd7fb4cb543693b7f114f6f505aa45..f9d280d76dc54eb3dd89888ea06a7400436ca735 100644 --- a/python/paddle/hapi/callbacks.py +++ b/python/paddle/hapi/callbacks.py @@ -69,7 +69,7 @@ def config_callbacks( return cbk_list -class CallbackList(object): +class CallbackList: def __init__(self, callbacks=None): # copy self.callbacks = [c for c in callbacks] @@ -129,7 +129,7 @@ class CallbackList(object): self._call(name, step, logs) -class Callback(object): +class Callback: """ Base class used to build new callbacks. And new callbacks could also terminate training by setting `model.stop_training=True`. diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py index 257024d048ae1e3ff16e49f3a2aecbfcbb31c86b..b7813932d86f662fa19da0d802c537392f5d2f35 100644 --- a/python/paddle/hapi/model.py +++ b/python/paddle/hapi/model.py @@ -259,7 +259,7 @@ def _update_input_info(inputs): return shapes, dtypes -class StaticGraphAdapter(object): +class StaticGraphAdapter: """ Model traning/inference with a static graph. """ @@ -734,7 +734,7 @@ class StaticGraphAdapter(object): self._compiled_progs[mode] = compiled_prog -class DynamicGraphAdapter(object): +class DynamicGraphAdapter: def __init__(self, model): super().__init__() self.model = model @@ -1006,7 +1006,7 @@ class DynamicGraphAdapter(object): self.model._scaler = None -class Model(object): +class Model: """ An Model object is network with training and inference features. 
Dynamic graph and static graph are supported at the same time, diff --git a/python/paddle/hapi/progressbar.py b/python/paddle/hapi/progressbar.py index 43b913a6fc120da524288d44562b8423c2c85ac0..77f090471dfb91b788c50b2d6da13b25e22056d6 100644 --- a/python/paddle/hapi/progressbar.py +++ b/python/paddle/hapi/progressbar.py @@ -22,7 +22,7 @@ from collections import namedtuple __all__ = [] -class ProgressBar(object): +class ProgressBar: """progress bar""" def __init__( diff --git a/python/paddle/hapi/static_flops.py b/python/paddle/hapi/static_flops.py index a3b787962fcc4cdb950d2ada35db735eb21f7edc..c72bcc4273c52818935c027dc652c728ac24b547 100644 --- a/python/paddle/hapi/static_flops.py +++ b/python/paddle/hapi/static_flops.py @@ -19,7 +19,7 @@ from paddle.static import Program, Variable __all__ = [] -class VarWrapper(object): +class VarWrapper: def __init__(self, var, graph): assert isinstance(var, Variable) assert isinstance(graph, GraphWrapper) @@ -39,7 +39,7 @@ class VarWrapper(object): return self._var.shape -class OpWrapper(object): +class OpWrapper: def __init__(self, op, graph): assert isinstance(graph, GraphWrapper) self._op = op @@ -69,7 +69,7 @@ class OpWrapper(object): return [self._graph.var(var_name) for var_name in self._op.output(name)] -class GraphWrapper(object): +class GraphWrapper: """ It is a wrapper of paddle.fluid.framework.IrGraph with some special functions for paddle slim framework. @@ -208,7 +208,7 @@ def static_flops(program, print_detail=False): return _graph_flops(graph, detail=print_detail) -class Table(object): +class Table: def __init__(self, table_heads): self.table_heads = table_heads self.table_len = [] diff --git a/python/paddle/incubate/autograd/functional.py b/python/paddle/incubate/autograd/functional.py index 6f34be56e71fe19e72dba8a26291242c407234b8..218c125a92e1a07717bdc8881ff6add051cc6ad0 100644 --- a/python/paddle/incubate/autograd/functional.py +++ b/python/paddle/incubate/autograd/functional.py @@ -168,7 +168,7 @@ def _zeros_like_with_grad(xs): return ys -class Jacobian(object): +class Jacobian: r""" Computes the Jacobian matrix of a given function. @@ -257,7 +257,7 @@ class Jacobian(object): return self._jacobian.shape -class Hessian(object): +class Hessian: """ Computes the Hessian matrix with a given ``func`` with respect to ``xs`` . @@ -328,7 +328,7 @@ class Hessian(object): return self.symbolic.shape -class _Jacobian(object): +class _Jacobian: """The base class for computing Jacobian matrix. ``_Jacobian`` implementes the core logic of multidimensional index and lazy diff --git a/python/paddle/incubate/autograd/primreg.py b/python/paddle/incubate/autograd/primreg.py index 7d81847f9052c640379dff08f31464dda165a446..cce8c49eb4f51facc7e3625b05d32efc88b8cebd 100644 --- a/python/paddle/incubate/autograd/primreg.py +++ b/python/paddle/incubate/autograd/primreg.py @@ -13,7 +13,7 @@ # limitations under the License. -class Registry(object): +class Registry: """A general registry object.""" __slots__ = ['name', 'tab'] diff --git a/python/paddle/incubate/autograd/primx.py b/python/paddle/incubate/autograd/primx.py index cdf16e77b1fbc2b60e95a75d94b16cdfae2eb6af..601b486d354c790b5e1cd6d1d23750fe098f87eb 100644 --- a/python/paddle/incubate/autograd/primx.py +++ b/python/paddle/incubate/autograd/primx.py @@ -115,7 +115,7 @@ def output_vars_on_path(path): return vars -class VarMap(object): +class VarMap: """A general map data structure for linking variables to variables. An example is linking variables to their gradients. 
@@ -180,7 +180,7 @@ class VarMap(object): # TODO(lml): supporting control flow, nested blocks, and block other than current block of main program. -class Transform(object): +class Transform: """An object that maintains the state of transformations applied to a primitve program.""" diff --git a/python/paddle/incubate/autograd/utils.py b/python/paddle/incubate/autograd/utils.py index 2b8082bf48de7a83b585e38418ca8951fab548ae..5437401aecaab96495961c61f7006cb9b18933b7 100644 --- a/python/paddle/incubate/autograd/utils.py +++ b/python/paddle/incubate/autograd/utils.py @@ -17,7 +17,7 @@ import paddle from paddle.fluid import framework as framework -class PrimOption(object): +class PrimOption: def __init__(self): self.enable_prim = False diff --git a/python/paddle/jit/layer.py b/python/paddle/jit/layer.py index 25d9ca5dbccebe7486e1759905aa48021a646a80..e3204ab65df022ce1527366e8d22b40244746153 100644 --- a/python/paddle/jit/layer.py +++ b/python/paddle/jit/layer.py @@ -17,7 +17,7 @@ from paddle.fluid import core from paddle.fluid.core import Load -class Layer(object): +class Layer: def __init__(self): self.cpp_layer = None # {name: Function} diff --git a/python/paddle/nn/layer/rnn.py b/python/paddle/nn/layer/rnn.py index bbada10aaf0a2523f918e8f46a23c9a304b9ba68..dba69b9848a289c54df74cc6b563f9df18afbb5f 100644 --- a/python/paddle/nn/layer/rnn.py +++ b/python/paddle/nn/layer/rnn.py @@ -188,7 +188,7 @@ class RNNCellBase(Layer): return True return isinstance(seq, Sequence) and not isinstance(seq, str) - class Shape(object): + class Shape: def __init__(self, shape): self.shape = shape if shape[0] == -1 else ([-1] + list(shape)) diff --git a/python/paddle/nn/utils/spectral_norm_hook.py b/python/paddle/nn/utils/spectral_norm_hook.py index 288e5ea59c91a8f841d39f24e2f10600365202cb..f035d0b443116059f726f6c6d5fa7f1ca79e0a22 100644 --- a/python/paddle/nn/utils/spectral_norm_hook.py +++ b/python/paddle/nn/utils/spectral_norm_hook.py @@ -26,7 +26,7 @@ def normal_(x, mean=0.0, std=1.0): return x -class SpectralNorm(object): +class SpectralNorm: def __init__(self, name='weight', n_power_iterations=1, dim=0, eps=1e-12): self.name = name self.dim = dim diff --git a/python/paddle/nn/utils/weight_norm_hook.py b/python/paddle/nn/utils/weight_norm_hook.py index cdcb97aa9a0dc25b6b15e9c46463ffccf63e6e4d..2a4d07647929b41624de85bbb82e9e29b8233417 100755 --- a/python/paddle/nn/utils/weight_norm_hook.py +++ b/python/paddle/nn/utils/weight_norm_hook.py @@ -95,7 +95,7 @@ def _weight_norm(v, g, dim): return weight -class WeightNorm(object): +class WeightNorm: def __init__(self, name, dim): if dim is None: dim = -1 diff --git a/python/paddle/optimizer/lr.py b/python/paddle/optimizer/lr.py index 3309bb4f8c7817fea737599a7a893c6e4bff1274..6f96e12f995d8852850b1a4e94bf1d522973409a 100644 --- a/python/paddle/optimizer/lr.py +++ b/python/paddle/optimizer/lr.py @@ -39,7 +39,7 @@ __all__ = [ # noqa ] -class LRScheduler(object): +class LRScheduler: """ LRScheduler Base class. Define the common interface of a learning rate scheduler. diff --git a/python/paddle/optimizer/optimizer.py b/python/paddle/optimizer/optimizer.py index 783b11b90864137d3b76f602125a90c8fceaa40f..36aa9c151d7d3b7ebc96878b0e860647f3650bbe 100644 --- a/python/paddle/optimizer/optimizer.py +++ b/python/paddle/optimizer/optimizer.py @@ -98,7 +98,7 @@ def append_backward_new( return params_and_grads -class Optimizer(object): +class Optimizer: r"""Optimizer Base class. Define the common interface of an optimizer. 
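
`LRScheduler` in `optimizer/lr.py` above fixes the scheduler contract: subclasses implement `get_lr()`, and the training loop advances the schedule with `step()`. A simplified sketch of that contract with a step-decay subclass (reduced signature; not the full Paddle API):

```python
class LRScheduler:
    def __init__(self, learning_rate=0.1):
        self.base_lr = learning_rate
        self.last_epoch = -1
        self.step()  # prime last_lr for epoch 0

    def get_lr(self):
        raise NotImplementedError("subclasses must implement get_lr().")

    def step(self):
        self.last_epoch += 1
        self.last_lr = self.get_lr()


class StepDecay(LRScheduler):
    def __init__(self, learning_rate, step_size=2, gamma=0.5):
        self.step_size, self.gamma = step_size, gamma
        super().__init__(learning_rate)

    def get_lr(self):
        return self.base_lr * self.gamma ** (self.last_epoch // self.step_size)


sched = StepDecay(0.1)
for _ in range(4):
    print(round(sched.last_lr, 4))  # 0.1, 0.1, 0.05, 0.05
    sched.step()
```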
diff --git a/python/paddle/profiler/timer.py b/python/paddle/profiler/timer.py
index 311fc373c5f7ded55d188343ed181ce94adf127d..8bb49a2f37e8ee999409b708e4ff8bd2b48c70f9 100644
--- a/python/paddle/profiler/timer.py
+++ b/python/paddle/profiler/timer.py
@@ -16,7 +16,7 @@ import timeit
 from collections import OrderedDict
 
 
-class Stack(object):
+class Stack:
     """
     The stack in a Last-In/First-Out (LIFO) manner. New element is
     added at the end and an element is removed from that end.
@@ -41,7 +41,7 @@
         return None
 
 
-class Event(object):
+class Event:
     """
     A Event is used to record the cost of every step and the cost of
     the total steps except skipped steps.
@@ -152,7 +152,7 @@ class Event(object):
         return summary
 
 
-class Hook(object):
+class Hook:
     """
     As the base class. All types of hooks should inherit from it.
     """
@@ -299,7 +299,7 @@ class TimerHook(Hook):
         )
 
 
-class TimeAverager(object):
+class TimeAverager:
     """
     Record the cost of every step and count the average.
     """
@@ -346,7 +346,7 @@ class TimeAverager(object):
         return float(self._total_iters) / self._total_time
 
 
-class Benchmark(object):
+class Benchmark:
     """
     A tool for the statistics of model performance. The `before_reader`
     and `after_reader` are called in the DataLoader to count the cost
diff --git a/python/paddle/static/input.py b/python/paddle/static/input.py
index 62083769e56ca6af267832e5c65007c7d75267ee..f6e979dbcbf72f94ffa346054c33f9271fced3e8 100644
--- a/python/paddle/static/input.py
+++ b/python/paddle/static/input.py
@@ -120,7 +120,7 @@ def data(name, shape, dtype=None, lod_level=0):
     )
 
 
-class InputSpec(object):
+class InputSpec:
     """
     InputSpec describes the signature information of the model input, such as
     ``shape`` , ``dtype`` , ``name`` .
diff --git a/python/paddle/tensor/to_string.py b/python/paddle/tensor/to_string.py
index 4739628f7176b4023abb618759dce57d8d929f98..bb38d152b56082181b27ffa9277b9f19f47812fe 100644
--- a/python/paddle/tensor/to_string.py
+++ b/python/paddle/tensor/to_string.py
@@ -19,7 +19,7 @@ from paddle.fluid.data_feeder import check_type, convert_dtype
 __all__ = []
 
 
-class PrintOptions(object):
+class PrintOptions:
     precision = 8
     threshold = 1000
     edgeitems = 3
diff --git a/python/paddle/text/datasets/movielens.py b/python/paddle/text/datasets/movielens.py
index b01d8e94bb3194701eb9dd2368c9550e6a437a69..c4d0681f42a4ce8f412a9f623451926d701b9366 100644
--- a/python/paddle/text/datasets/movielens.py
+++ b/python/paddle/text/datasets/movielens.py
@@ -27,7 +27,7 @@ URL = 'https://dataset.bj.bcebos.com/movielens%2Fml-1m.zip'
 MD5 = 'c4d9eecfca2ab87c1945afe126590906'
 
 
-class MovieInfo(object):
+class MovieInfo:
     """
     Movie id, title and categories information are stored in MovieInfo.
     """
@@ -58,7 +58,7 @@ class MovieInfo(object):
         return self.__str__()
 
 
-class UserInfo(object):
+class UserInfo:
     """
     User id, gender, age, and job information are stored in UserInfo.
     """
diff --git a/python/paddle/utils/cpp_extension/cpp_extension.py b/python/paddle/utils/cpp_extension/cpp_extension.py
index 3cbfb6d732fd8ec2b015a07f57ccc4d3f60898a0..c05be5f2a4947257efad46d71411e0d28d0064c4 100644
--- a/python/paddle/utils/cpp_extension/cpp_extension.py
+++ b/python/paddle/utils/cpp_extension/cpp_extension.py
@@ -353,7 +353,7 @@ def _generate_extension_name(sources):
     return '_'.join(file_prefix)
 
 
-class BuildExtension(build_ext, object):
+class BuildExtension(build_ext):
     """
     Inherited from setuptools.command.build_ext to customize how to apply
     compilation process with share library.
@@ -724,7 +724,7 @@ class BuildExtension(build_ext, object):
         )
 
 
-class EasyInstallCommand(easy_install, object):
+class EasyInstallCommand(easy_install):
     """
     Extend easy_intall Command to control the behavior of
     naming shared library file.
@@ -759,7 +759,7 @@ class EasyInstallCommand(easy_install, object):
         assert os.path.exists(new_so_path)
 
 
-class BuildCommand(build, object):
+class BuildCommand(build):
     """
     Extend build Command to control the behavior of
     specifying `build_base` root directory.
diff --git a/python/paddle/utils/download.py b/python/paddle/utils/download.py
index a77d7b60dea5074d166302f50b6ac4c0a2d157d2..660e09e8668d5de2b398749872a40d70200b3037 100644
--- a/python/paddle/utils/download.py
+++ b/python/paddle/utils/download.py
@@ -27,7 +27,7 @@ try:
     from tqdm import tqdm
 except:
 
-    class tqdm(object):
+    class tqdm:
         def __init__(self, total=None):
             self.total = total
             self.n = 0
diff --git a/python/paddle/utils/op_version.py b/python/paddle/utils/op_version.py
index 9f9ae4d73c7bb0f0d223e3f8ee7ed96686b0d4a6..793e0b621990c35c5e6cb993bfb50ec6ed7a59e4 100644
--- a/python/paddle/utils/op_version.py
+++ b/python/paddle/utils/op_version.py
@@ -28,7 +28,7 @@ def Singleton(cls):
     return _singleton
 
 
-class OpUpdateInfoHelper(object):
+class OpUpdateInfoHelper:
     def __init__(self, info):
         self._info = info
 
@@ -47,7 +47,7 @@ class OpUpdateInfoHelper(object):
 
 
 @Singleton
-class OpLastCheckpointChecker(object):
+class OpLastCheckpointChecker:
     def __init__(self):
         self.raw_version_map = core.get_op_version_map()
         self.checkpoints_map = {}
diff --git a/python/paddle/utils/profiler.py b/python/paddle/utils/profiler.py
index 27803cfa442dd6effc0afe2a7121c2ed780b9200..625900e87e86849aacf41f876919bc444fc6541a 100644
--- a/python/paddle/utils/profiler.py
+++ b/python/paddle/utils/profiler.py
@@ -34,7 +34,7 @@ __all__ = [ # noqa
 ]
 
 
-class ProfilerOptions(object):
+class ProfilerOptions:
     def __init__(self, options=None):
         self.options = {
             'state': 'All',
@@ -74,7 +74,7 @@ class ProfilerOptions(object):
 _current_profiler = None
 
 
-class Profiler(object):
+class Profiler:
     def __init__(self, enabled=True, options=None):
         if options is not None:
             self.profiler_options = options
diff --git a/python/paddle/vision/transforms/transforms.py b/python/paddle/vision/transforms/transforms.py
index 78413da4f208554a446ba4b5d2e72fadeef549f2..f5cbd90ffc7b0d8a5023f42122f4bd463f552c7e 100644
--- a/python/paddle/vision/transforms/transforms.py
+++ b/python/paddle/vision/transforms/transforms.py
@@ -84,7 +84,7 @@ def _check_input(
     return value
 
 
-class Compose(object):
+class Compose:
     """
     Composes several transforms together use for composing list of transforms
     together for a dataset transform.
@@ -137,7 +137,7 @@ class Compose(object):
         return format_string
 
 
-class BaseTransform(object):
+class BaseTransform:
     """
     Base class of all transforms used in computer vision.
 
diff --git a/tools/CrossStackProfiler/CspChromeTraceFormatter.py b/tools/CrossStackProfiler/CspChromeTraceFormatter.py
index 1fa8efe9880508dbbedca02541337d7f795a8afa..fb24e0634e546c587fb582637908eafce5970c49 100755
--- a/tools/CrossStackProfiler/CspChromeTraceFormatter.py
+++ b/tools/CrossStackProfiler/CspChromeTraceFormatter.py
@@ -15,7 +15,7 @@
 import json
 
 
-class ChromeTraceFormatter(object):
+class ChromeTraceFormatter:
     def __init__(self):
         self._events = []
         self._metadata = []
diff --git a/tools/CrossStackProfiler/CspFileReader.py b/tools/CrossStackProfiler/CspFileReader.py
index 11dd052283c1564f0cb116787f268bd69f12fce8..55a1722be66b7cc7683cbda06d783cc2e1787002 100755
--- a/tools/CrossStackProfiler/CspFileReader.py
+++ b/tools/CrossStackProfiler/CspFileReader.py
@@ -73,7 +73,7 @@ FILEORGANIZEFORM = [
 ]
 
 
-class FileReader(object):
+class FileReader:
     def __init__(self, logger, args):
         self._logger = logger
         self._args = args
diff --git a/tools/CrossStackProfiler/CspReporter.py b/tools/CrossStackProfiler/CspReporter.py
index 052ffd6fca19f4b54a5c9c797703f2045275f6a6..999ba4fb3e23368151732e7d249c76bbac223f59 100755
--- a/tools/CrossStackProfiler/CspReporter.py
+++ b/tools/CrossStackProfiler/CspReporter.py
@@ -64,7 +64,7 @@ def get_argparse():
     return parser.parse_args()
 
 
-class CspReporter(object):
+class CspReporter:
     def __init__(self, args):
         self._args = args
         print(self._args)
diff --git a/tools/check_ut.py b/tools/check_ut.py
index e08c358531f7fd17a27de70fae4260d0c82b92fe..6f1a8ab02ba61e53e2c671bd42294842ddb8db71 100644
--- a/tools/check_ut.py
+++ b/tools/check_ut.py
@@ -19,7 +19,7 @@ import os.path
 from github import Github
 
 
-class PRChecker(object):
+class PRChecker:
     """PR Checker."""
 
     def __init__(self):
diff --git a/tools/codestyle/docstring_checker.py b/tools/codestyle/docstring_checker.py
index 0d163c20bbfa15474e568b2251a216370659da58..8deeff77348f476e7e5d29216ae0de298beba212 100644
--- a/tools/codestyle/docstring_checker.py
+++ b/tools/codestyle/docstring_checker.py
@@ -27,7 +27,7 @@ def register(linter):
     linter.register_checker(DocstringChecker(linter))
 
 
-class Docstring(object):
+class Docstring:
     """Docstring class holds the parsed doc string elements."""
 
     def __init__(self):
diff --git a/tools/get_pr_ut.py b/tools/get_pr_ut.py
index d3a1c41682bd9b2a54fdd36682f9e50765e07c5b..02aaf13a17d31f1d0a7e6dfdfa4c90b53c35e8cb 100644
--- a/tools/get_pr_ut.py
+++ b/tools/get_pr_ut.py
@@ -30,7 +30,7 @@ PADDLE_ROOT = PADDLE_ROOT.replace('//', '/')
 ssl._create_default_https_context = ssl._create_unverified_context
 
 
-class PRChecker(object):
+class PRChecker:
     """PR Checker."""
 
     def __init__(self):
diff --git a/tools/timeline.py b/tools/timeline.py
index 5323ea0f907ec603a310538306229fc7db1b1606..a8c6699b270d049f4844cd98a0da60377d6e814e 100644
--- a/tools/timeline.py
+++ b/tools/timeline.py
@@ -31,7 +31,7 @@ parser.add_argument(
 args = parser.parse_args()
 
 
-class _ChromeTraceFormatter(object):
+class _ChromeTraceFormatter:
     def __init__(self):
         self._events = []
         self._metadata = []
@@ -126,7 +126,7 @@ class _ChromeTraceFormatter(object):
         return json.dumps(trace, separators=(',', ':'))
 
 
-class Timeline(object):
+class Timeline:
     def __init__(self, profile_dict):
         self._profile_dict = profile_dict
         self._pid = 0
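Note on the pattern applied throughout this patch: in Python 3 every class is a new-style class that implicitly inherits from object, so the explicit (object) base, and the redundant trailing object in multiple-inheritance lists such as BuildExtension(build_ext, object), can be dropped without changing behavior. Below is a minimal sketch of that equivalence; the class names are illustrative only and do not come from this patch.

# Minimal sketch (Python 3): both declarations produce the same new-style class.
class WithExplicitBase(object):
    pass

class WithImplicitBase:
    pass

# object sits at the root of the MRO either way.
assert WithExplicitBase.__mro__ == (WithExplicitBase, object)
assert WithImplicitBase.__mro__ == (WithImplicitBase, object)

# A trailing `object` in a multiple-inheritance list is likewise a no-op:
class Base:
    pass

class Redundant(Base, object):  # the form this patch removes
    pass

class Clean(Base):  # the form this patch keeps
    pass

assert Redundant.__mro__ == (Redundant, Base, object)
assert Clean.__mro__ == (Clean, Base, object)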