From 888272b54742c4ab70eb4152f357c9658fde92f9 Mon Sep 17 00:00:00 2001
From: Nyakku Shigure
Date: Tue, 8 Nov 2022 11:29:41 +0800
Subject: [PATCH] [CodeStyle][py2][U004] unnecessary explicit `object`
 inheritance in class definition (#47642)

* [CodeStyle][py2][U004] unnecessary explicit `object` inheritance in class definition
* fix an increment

---
 .../tests/models/efficientnet-b4/net/utils.py | 2 +-
 paddle/phi/api/yaml/generator/api_base.py | 2 +-
 python/paddle/audio/functional/window.py | 2 +-
 python/paddle/autograd/py_layer.py | 6 ++---
 python/paddle/dataset/imikolov.py | 2 +-
 python/paddle/dataset/movielens.py | 4 ++--
 .../distributed/auto_parallel/cluster_v2.py | 2 +-
 .../distributed/auto_parallel/converter.py | 2 +-
 .../distributed/auto_parallel/cost_model.py | 8 +++----
 .../distributed/auto_parallel/dist_context.py | 2 +-
 .../distributed/auto_parallel/helper.py | 2 +-
 .../distributed/auto_parallel/interface.py | 2 +-
 .../distributed/auto_parallel/partitioner.py | 2 +-
 .../distributed/auto_parallel/process_mesh.py | 2 +-
 .../distributed/auto_parallel/strategy.py | 2 +-
 .../distributed/auto_parallel/tuner/config.py | 2 +-
 .../auto_parallel/tuner/recorder.py | 6 ++---
 .../auto_parallel/tuner/storable.py | 2 +-
 .../auto_parallel/tuner/tunable_space.py | 2 +-
 .../auto_parallel/tuner/tunable_variable.py | 2 +-
 .../communication/batch_isend_irecv.py | 2 +-
 python/paddle/distributed/elastic.py | 2 +-
 python/paddle/distributed/entry_attr.py | 2 +-
 .../fleet/base/distributed_strategy.py | 4 ++--
 .../fleet/base/meta_optimizer_factory.py | 2 +-
 .../distributed/fleet/base/role_maker.py | 4 ++--
 .../distributed/fleet/base/runtime_factory.py | 2 +-
 .../fleet/base/strategy_compiler.py | 2 +-
 .../paddle/distributed/fleet/base/topology.py | 8 +++----
 .../distributed/fleet/base/util_factory.py | 4 ++--
 .../fleet/data_generator/data_generator.py | 2 +-
 .../distributed/fleet/dataset/dataset.py | 2 +-
 .../fleet/dataset/index_dataset.py | 2 +-
 .../distributed/fleet/elastic/manager.py | 4 ++--
 python/paddle/distributed/fleet/fleet.py | 2 +-
 .../paddle/distributed/fleet/launch_utils.py | 12 +++++-----
 .../ascend/ascend_optimizer.py | 2 +-
 .../meta_optimizers/ascend/ascend_parser.py | 6 ++---
 .../fleet/meta_optimizers/common.py | 2 +-
 .../dygraph_sharding_optimizer.py | 2 +-
 .../meta_optimizers/sharding/fp16_helper.py | 2 +-
 .../sharding/gradient_clip_helper.py | 2 +-
 .../sharding/offload_helper.py | 2 +-
 .../fleet/meta_optimizers/sharding/prune.py | 2 +-
 .../fleet/meta_optimizers/sharding/shard.py | 4 ++--
 .../fleet/meta_optimizers/sharding/utils.py | 2 +-
 .../sharding/weight_decay_helper.py | 2 +-
 .../parallel_layers/pp_layers.py | 4 ++--
 .../distributed/fleet/runtime/runtime_base.py | 2 +-
 python/paddle/distributed/fleet/utils/fs.py | 2 +-
 .../distributed/fleet/utils/http_server.py | 2 +-
 .../fleet/utils/hybrid_parallel_inference.py | 2 +-
 .../distributed/launch/context/__init__.py | 2 +-
 .../distributed/launch/context/device.py | 2 +-
 .../distributed/launch/context/event.py | 2 +-
 .../paddle/distributed/launch/context/node.py | 2 +-
 .../distributed/launch/context/resource.py | 2 +-
 .../distributed/launch/context/status.py | 2 +-
 .../launch/controllers/controller.py | 2 +-
 .../distributed/launch/controllers/master.py | 2 +-
 .../distributed/launch/controllers/watcher.py | 2 +-
 .../distributed/launch/job/container.py | 2 +-
 python/paddle/distributed/launch/job/job.py | 2 +-
 python/paddle/distributed/launch/job/pod.py | 2 +-
 .../paddle/distributed/launch/job/status.py | 2 +-
.../distributed/launch/utils/kv_client.py | 2 +- .../distributed/launch/utils/kv_server.py | 2 +- .../paddle/distributed/launch/utils/nvsmi.py | 2 +- .../launch/utils/process_context.py | 2 +- .../distributed/passes/auto_parallel_amp.py | 2 +- ...uto_parallel_data_parallel_optimization.py | 2 +- .../distributed/passes/auto_parallel_fp16.py | 2 +- .../passes/auto_parallel_grad_clip.py | 2 +- .../passes/auto_parallel_sharding.py | 2 +- python/paddle/distributed/ps/coordinator.py | 2 +- python/paddle/distributed/ps/the_one_ps.py | 2 +- .../paddle/distributed/ps/utils/ps_factory.py | 2 +- .../ps/utils/ps_program_builder.py | 2 +- python/paddle/distributed/ps/utils/public.py | 2 +- python/paddle/distributed/spawn.py | 4 ++-- .../paddle/distributed/utils/launch_utils.py | 12 +++++----- python/paddle/distribution/constraint.py | 2 +- python/paddle/distribution/distribution.py | 2 +- python/paddle/distribution/kl.py | 2 +- python/paddle/distribution/transform.py | 2 +- python/paddle/distribution/variable.py | 2 +- python/paddle/fluid/average.py | 2 +- python/paddle/fluid/backward.py | 6 ++--- python/paddle/fluid/clip.py | 4 ++-- python/paddle/fluid/communicator.py | 6 ++--- python/paddle/fluid/compiler.py | 8 +++---- .../contrib/decoder/beam_search_decoder.py | 12 +++++----- .../extend_optimizer_with_weight_decay.py | 2 +- .../contrib/mixed_precision/bf16/amp_lists.py | 2 +- .../contrib/mixed_precision/bf16/decorator.py | 2 +- .../contrib/mixed_precision/decorator.py | 2 +- .../contrib/mixed_precision/fp16_lists.py | 2 +- .../contrib/quantize/quantize_transpiler.py | 2 +- .../contrib/slim/quantization/adaround.py | 4 ++-- .../slim/quantization/imperative/ptq.py | 2 +- .../quantization/imperative/ptq_config.py | 2 +- .../quantization/imperative/ptq_registry.py | 4 ++-- .../slim/quantization/imperative/qat.py | 6 ++--- .../post_training_quantization.py | 4 ++-- .../quantization/quant2_int8_mkldnn_pass.py | 2 +- .../quantization/quant_int8_mkldnn_pass.py | 2 +- .../slim/quantization/quantization_pass.py | 24 +++++++++---------- .../quantization/quantize_transpiler_v2.py | 2 +- .../fluid/contrib/slim/quantization/utils.py | 2 +- python/paddle/fluid/contrib/sparsity/asp.py | 6 ++--- python/paddle/fluid/data_feed_desc.py | 2 +- python/paddle/fluid/data_feeder.py | 6 ++--- .../paddle/fluid/dataloader/batch_sampler.py | 2 +- .../fluid/dataloader/dataloader_iter.py | 2 +- python/paddle/fluid/dataloader/dataset.py | 2 +- python/paddle/fluid/dataloader/fetcher.py | 2 +- python/paddle/fluid/dataloader/sampler.py | 2 +- python/paddle/fluid/dataloader/worker.py | 12 +++++----- python/paddle/fluid/dataset.py | 4 ++-- python/paddle/fluid/device_worker.py | 4 ++-- python/paddle/fluid/distributed/downpour.py | 2 +- python/paddle/fluid/distributed/fleet.py | 2 +- python/paddle/fluid/distributed/helper.py | 4 ++-- python/paddle/fluid/distributed/node.py | 4 ++-- .../paddle/fluid/distributed/ps_instance.py | 2 +- python/paddle/fluid/dygraph/amp/auto_cast.py | 2 +- .../paddle/fluid/dygraph/amp/loss_scaler.py | 2 +- .../dygraph_to_static/base_transformer.py | 2 +- .../dygraph_to_static/convert_call_func.py | 2 +- .../fluid/dygraph/dygraph_to_static/error.py | 4 ++-- .../dygraph_to_static/function_spec.py | 2 +- .../dygraph_to_static/logging_utils.py | 2 +- .../dygraph/dygraph_to_static/origin_info.py | 4 ++-- .../dygraph_to_static/partial_program.py | 4 ++-- .../dygraph_to_static/program_translator.py | 14 +++++------ .../dygraph_to_static/static_analysis.py | 10 ++++---- python/paddle/fluid/dygraph/io.py | 2 +- 
python/paddle/fluid/dygraph/jit.py | 6 ++--- python/paddle/fluid/dygraph/layers.py | 4 ++-- .../fluid/dygraph/learning_rate_scheduler.py | 2 +- python/paddle/fluid/dygraph/parallel.py | 2 +- python/paddle/fluid/dygraph/static_runner.py | 2 +- .../fluid/dygraph/varbase_patch_methods.py | 2 +- python/paddle/fluid/entry_attr.py | 2 +- python/paddle/fluid/evaluator.py | 2 +- python/paddle/fluid/executor.py | 10 ++++---- python/paddle/fluid/framework.py | 14 +++++------ python/paddle/fluid/graphviz.py | 10 ++++---- .../incubate/checkpoint/auto_checkpoint.py | 2 +- .../incubate/checkpoint/checkpoint_saver.py | 4 ++-- .../fluid/incubate/data_generator/__init__.py | 2 +- .../fluid/incubate/fleet/base/fleet_base.py | 4 ++-- .../fluid/incubate/fleet/base/role_maker.py | 4 ++-- .../incubate/fleet/collective/__init__.py | 4 ++-- .../distributed_strategy.py | 8 +++---- .../parameter_server/ir/ps_dispatcher.py | 2 +- .../fleet/parameter_server/ir/public.py | 2 +- .../fleet/parameter_server/ir/ufind.py | 2 +- .../parameter_server/ir/vars_metatools.py | 6 ++--- .../fleet/parameter_server/pslib/__init__.py | 2 +- .../fleet/parameter_server/pslib/node.py | 4 ++-- .../pslib/optimizer_factory.py | 2 +- .../fluid/incubate/fleet/utils/fleet_util.py | 2 +- .../fluid/incubate/fleet/utils/http_server.py | 2 +- python/paddle/fluid/initializer.py | 2 +- python/paddle/fluid/io.py | 2 +- python/paddle/fluid/ir.py | 8 +++---- python/paddle/fluid/layer_helper_base.py | 2 +- python/paddle/fluid/layers/control_flow.py | 18 +++++++------- python/paddle/fluid/layers/distributions.py | 2 +- python/paddle/fluid/layers/io.py | 2 +- python/paddle/fluid/layers/nn.py | 2 +- python/paddle/fluid/layers/rnn.py | 10 ++++---- python/paddle/fluid/lazy_init.py | 4 ++-- python/paddle/fluid/metrics.py | 4 ++-- python/paddle/fluid/op.py | 12 +++++----- python/paddle/fluid/optimizer.py | 10 ++++---- python/paddle/fluid/parallel_executor.py | 2 +- python/paddle/fluid/param_attr.py | 2 +- python/paddle/fluid/reader.py | 6 ++--- python/paddle/fluid/regularizer.py | 2 +- .../hybrid_parallel_communicate_group.py | 2 +- .../unittests/collective/fleet/new_group.py | 2 +- .../fleet/parallel_dygraph_transformer.py | 4 ++-- .../test_imperative_auto_mixed_precision.py | 4 ++-- ...perative_auto_mixed_precision_for_eager.py | 4 ++-- .../multinode/test_collective_multi_nodes.py | 2 +- .../tests/unittests/ctr_dataset_reader.py | 2 +- .../fluid/tests/unittests/dist_ctr_reader.py | 2 +- .../fluid/tests/unittests/dist_transformer.py | 24 +++++++++---------- .../unittests/dygraph_to_static/bert_utils.py | 4 ++-- .../dygraph_to_static/ifelse_simple_func.py | 2 +- .../dygraph_to_static/predictor_utils.py | 2 +- .../dygraph_to_static/seq2seq_utils.py | 2 +- .../dygraph_to_static/simnet_dygraph_model.py | 22 ++++++++--------- .../simnet_dygraph_model_v2.py | 22 ++++++++--------- .../unittests/dygraph_to_static/test_bmn.py | 2 +- .../dygraph_to_static/test_break_continue.py | 2 +- .../dygraph_to_static/test_cycle_gan.py | 4 ++-- .../dygraph_to_static/test_full_name_usage.py | 2 +- .../unittests/dygraph_to_static/test_lac.py | 2 +- .../unittests/dygraph_to_static/test_loop.py | 4 ++-- .../dygraph_to_static/test_mobile_net.py | 2 +- .../test_reinforcement_learning.py | 2 +- .../dygraph_to_static/test_sentiment.py | 2 +- .../dygraph_to_static/test_simnet.py | 2 +- .../dygraph_to_static/test_simnet_v2.py | 2 +- .../unittests/dygraph_to_static/test_tsm.py | 2 +- .../dygraph_to_static/test_yolov3.py | 4 ++-- .../transformer_dygraph_model.py | 2 +- 
.../dygraph_to_static/transformer_util.py | 4 ++-- .../fluid/tests/unittests/feed_data_reader.py | 2 +- .../mlu/test_collective_api_base_mlu.py | 2 +- .../unittests/mlu/test_collective_base_mlu.py | 2 +- .../tests/unittests/mlu/test_pool2d_op_mlu.py | 2 +- .../mlu/test_sync_batch_norm_base_mlu.py | 2 +- .../unittests/npu/test_collective_base_npu.py | 2 +- .../npu/test_sync_batch_norm_base_npu.py | 2 +- .../paddle/fluid/tests/unittests/op_test.py | 2 +- .../tests/unittests/ps/ps_dnn_trainer.py | 4 ++-- .../tests/unittests/py_precise_roi_pool.py | 2 +- .../fluid/tests/unittests/rnn/rnn_numpy.py | 2 +- .../tests/unittests/spawn_runner_base.py | 2 +- .../tests/unittests/test_activation_op.py | 2 +- .../fluid/tests/unittests/test_argsort_op.py | 2 +- .../test_auto_parallel_partitioner_gpt.py | 4 ++-- .../fluid/tests/unittests/test_backward.py | 2 +- .../tests/unittests/test_chunk_eval_op.py | 2 +- .../unittests/test_collective_api_base.py | 2 +- .../tests/unittests/test_collective_base.py | 2 +- .../tests/unittests/test_crf_decoding_op.py | 2 +- .../fluid/tests/unittests/test_dist_base.py | 4 ++-- .../tests/unittests/test_dist_fleet_base.py | 2 +- .../unittests/test_dist_fleet_heter_base.py | 2 +- .../unittests/test_dynrnn_gradient_check.py | 6 ++--- .../test_eager_deletion_padding_rnn.py | 4 ++-- .../test_eager_deletion_recurrent_op.py | 2 +- .../unittests/test_faster_tokenizer_op.py | 2 +- .../test_global_var_getter_setter.py | 2 +- .../unittests/test_graph_send_ue_recv_op.py | 2 +- .../fluid/tests/unittests/test_hsigmoid_op.py | 4 ++-- .../test_imperative_ocr_attention_model.py | 2 +- ...perative_star_gan_with_gradient_penalty.py | 6 ++--- ..._imperative_transformer_sorted_gradient.py | 6 ++--- .../unittests/test_inference_model_io.py | 2 +- .../unittests/test_linear_chain_crf_op.py | 2 +- .../tests/unittests/test_lr_scheduler.py | 2 +- .../tests/unittests/test_lstm_cudnn_op.py | 2 +- .../fluid/tests/unittests/test_matmul_op.py | 2 +- .../unittests/test_matmul_op_with_head.py | 4 ++-- .../tests/unittests/test_optimizer_grad.py | 2 +- .../unittests/test_paddle_multiprocessing.py | 2 +- .../test_parallel_executor_transformer.py | 2 +- .../fluid/tests/unittests/test_pool2d_op.py | 2 +- .../tests/unittests/test_recurrent_op.py | 2 +- .../tests/unittests/test_rnn_decode_api.py | 12 +++++----- .../tests/unittests/test_viterbi_decode_op.py | 2 +- .../fluid/tests/unittests/test_warpctc_op.py | 2 +- .../unittests/tokenizer/bert_tokenizer.py | 4 ++-- .../unittests/tokenizer/tokenizer_utils.py | 2 +- python/paddle/fluid/tests/unittests/utils.py | 2 +- .../unittests/xpu/get_test_cover_info.py | 2 +- .../unittests/xpu/test_collective_base_xpu.py | 2 +- python/paddle/fluid/trainer_desc.py | 2 +- python/paddle/fluid/trainer_factory.py | 4 ++-- python/paddle/fluid/transpiler/collective.py | 2 +- .../paddle/fluid/transpiler/details/ufind.py | 2 +- .../transpiler/details/vars_distributed.py | 6 ++--- .../fluid/transpiler/distribute_transpiler.py | 6 ++--- .../paddle/fluid/transpiler/ps_dispatcher.py | 2 +- python/paddle/fluid/unique_name.py | 4 ++-- python/paddle/hapi/callbacks.py | 4 ++-- python/paddle/hapi/model.py | 6 ++--- python/paddle/hapi/progressbar.py | 2 +- python/paddle/hapi/static_flops.py | 8 +++---- python/paddle/incubate/autograd/functional.py | 6 ++--- python/paddle/incubate/autograd/primreg.py | 2 +- python/paddle/incubate/autograd/primx.py | 4 ++-- python/paddle/incubate/autograd/utils.py | 2 +- python/paddle/jit/layer.py | 2 +- python/paddle/nn/layer/rnn.py | 2 +- 
python/paddle/nn/utils/spectral_norm_hook.py | 2 +- python/paddle/nn/utils/weight_norm_hook.py | 2 +- python/paddle/optimizer/lr.py | 2 +- python/paddle/optimizer/optimizer.py | 2 +- python/paddle/profiler/timer.py | 10 ++++---- python/paddle/static/input.py | 2 +- python/paddle/tensor/to_string.py | 2 +- python/paddle/text/datasets/movielens.py | 4 ++-- .../utils/cpp_extension/cpp_extension.py | 6 ++--- python/paddle/utils/download.py | 2 +- python/paddle/utils/op_version.py | 4 ++-- python/paddle/utils/profiler.py | 4 ++-- python/paddle/vision/transforms/transforms.py | 4 ++-- .../CspChromeTraceFormatter.py | 2 +- tools/CrossStackProfiler/CspFileReader.py | 2 +- tools/CrossStackProfiler/CspReporter.py | 2 +- tools/check_ut.py | 2 +- tools/codestyle/docstring_checker.py | 2 +- tools/get_pr_ut.py | 2 +- tools/timeline.py | 4 ++-- 301 files changed, 524 insertions(+), 524 deletions(-) diff --git a/paddle/infrt/tests/models/efficientnet-b4/net/utils.py b/paddle/infrt/tests/models/efficientnet-b4/net/utils.py index 9a06030699..0617870bf0 100644 --- a/paddle/infrt/tests/models/efficientnet-b4/net/utils.py +++ b/paddle/infrt/tests/models/efficientnet-b4/net/utils.py @@ -239,7 +239,7 @@ def efficientnet_params(model_name): return params_dict[model_name] -class BlockDecoder(object): +class BlockDecoder: """Block Decoder for readability, straight from the official TensorFlow repository""" @staticmethod diff --git a/paddle/phi/api/yaml/generator/api_base.py b/paddle/phi/api/yaml/generator/api_base.py index 53b950b63f..696ad8736b 100644 --- a/paddle/phi/api/yaml/generator/api_base.py +++ b/paddle/phi/api/yaml/generator/api_base.py @@ -19,7 +19,7 @@ PREFIX_TENSOR_NAME = 'input_' PREFIX_META_TENSOR_NAME = 'meta_' -class BaseAPI(object): +class BaseAPI: def __init__(self, api_item_yaml): self.api = self.get_api_name(api_item_yaml) diff --git a/python/paddle/audio/functional/window.py b/python/paddle/audio/functional/window.py index 472c56b87a..4836afbb61 100644 --- a/python/paddle/audio/functional/window.py +++ b/python/paddle/audio/functional/window.py @@ -19,7 +19,7 @@ import paddle from paddle import Tensor -class WindowFunctionRegister(object): +class WindowFunctionRegister: def __init__(self): self._functions_dict = dict() diff --git a/python/paddle/autograd/py_layer.py b/python/paddle/autograd/py_layer.py index 2efb1ece83..252cfd5d91 100644 --- a/python/paddle/autograd/py_layer.py +++ b/python/paddle/autograd/py_layer.py @@ -21,7 +21,7 @@ from paddle.fluid import core __all__ = [] -class LegacyPyLayerContext(object): +class LegacyPyLayerContext: """ The object of this class is a context that is used in PyLayer to enhance the function. @@ -131,7 +131,7 @@ def with_mateclass(meta, *bases): return type.__new__(impl, "impl", (), {}) -class CPyLayer(object): +class CPyLayer: @classmethod @dygraph_only def apply(cls, *args, **kwargs): @@ -336,7 +336,7 @@ class LegacyPyLayer(with_mateclass(LayerMeta, CPyLayer)): ) -class EagerPyLayerContext(object): +class EagerPyLayerContext: def save_for_backward(self, *tensors): """ Saves given tensors that backward need. Use ``saved_tensor`` in the `backward` to get the saved tensors. 
diff --git a/python/paddle/dataset/imikolov.py b/python/paddle/dataset/imikolov.py index 4630d88e21..f618bbc2c4 100644 --- a/python/paddle/dataset/imikolov.py +++ b/python/paddle/dataset/imikolov.py @@ -31,7 +31,7 @@ URL = 'https://dataset.bj.bcebos.com/imikolov%2Fsimple-examples.tgz' MD5 = '30177ea32e27c525793142b6bf2c8e2d' -class DataType(object): +class DataType: NGRAM = 1 SEQ = 2 diff --git a/python/paddle/dataset/movielens.py b/python/paddle/dataset/movielens.py index fd57ad8edf..7a47293c3c 100644 --- a/python/paddle/dataset/movielens.py +++ b/python/paddle/dataset/movielens.py @@ -38,7 +38,7 @@ URL = 'https://dataset.bj.bcebos.com/movielens%2Fml-1m.zip' MD5 = 'c4d9eecfca2ab87c1945afe126590906' -class MovieInfo(object): +class MovieInfo: """ Movie id, title and categories information are stored in MovieInfo. """ @@ -69,7 +69,7 @@ class MovieInfo(object): return self.__str__() -class UserInfo(object): +class UserInfo: """ User id, gender, age, and job information are stored in UserInfo. """ diff --git a/python/paddle/distributed/auto_parallel/cluster_v2.py b/python/paddle/distributed/auto_parallel/cluster_v2.py index debcb078f6..1ec2332ad4 100644 --- a/python/paddle/distributed/auto_parallel/cluster_v2.py +++ b/python/paddle/distributed/auto_parallel/cluster_v2.py @@ -116,7 +116,7 @@ class DeviceMesh(core.DeviceMesh): return self._mesh -# class Cluster(object): +# class Cluster: # """ # The cluster represents the hardware resource. # """ diff --git a/python/paddle/distributed/auto_parallel/converter.py b/python/paddle/distributed/auto_parallel/converter.py index d0fae414b1..cc0966be4a 100644 --- a/python/paddle/distributed/auto_parallel/converter.py +++ b/python/paddle/distributed/auto_parallel/converter.py @@ -19,7 +19,7 @@ import numpy as np from ..utils.log_utils import get_logger -class Converter(object): +class Converter: """ Converter is a class object for auto parallel to convert tensors from one parallel strategy to another one. 
Tensors will merge and slice value diff --git a/python/paddle/distributed/auto_parallel/cost_model.py b/python/paddle/distributed/auto_parallel/cost_model.py index f335244e36..73e899614d 100644 --- a/python/paddle/distributed/auto_parallel/cost_model.py +++ b/python/paddle/distributed/auto_parallel/cost_model.py @@ -35,7 +35,7 @@ class CostNodeType(Enum): NOP = 5 -class Cost(object): +class Cost: def __init__(self): self.runtime = None self.static_mem = None @@ -49,7 +49,7 @@ class CostModelMode(Enum): MIXED = 3 -class CostNode(object): +class CostNode: def __init__(self, node, node_type, id=None): self.id = id self.node = node @@ -172,7 +172,7 @@ class CompOpCostNode(CostNode): self.cost = 0.0 -class PipeEvent(object): +class PipeEvent: def __init__(self, stage_id, event_name, duration, start_time=-1): self.stage_id = stage_id self.name = event_name @@ -181,7 +181,7 @@ class PipeEvent(object): self.e_time = -1 -class CostModel(object): +class CostModel: def __init__( self, mode=CostModelMode.BENCHMARKING, diff --git a/python/paddle/distributed/auto_parallel/dist_context.py b/python/paddle/distributed/auto_parallel/dist_context.py index 4b4fca8730..f410468f45 100644 --- a/python/paddle/distributed/auto_parallel/dist_context.py +++ b/python/paddle/distributed/auto_parallel/dist_context.py @@ -1146,7 +1146,7 @@ class DistributedOperatorContext: return kinputs, koutputs -class BlockState(object): +class BlockState: def __init__(self): self.nblock = 0 self.forward_indices = [] diff --git a/python/paddle/distributed/auto_parallel/helper.py b/python/paddle/distributed/auto_parallel/helper.py index 3c13f3d9ab..31deaea427 100644 --- a/python/paddle/distributed/auto_parallel/helper.py +++ b/python/paddle/distributed/auto_parallel/helper.py @@ -192,7 +192,7 @@ class BuildInfo: self.states = defaultdict(bool) -class ProgramHelper(object): +class ProgramHelper: """ A Helper class for Engine to provides different Program IR according specified 'mode'. """ diff --git a/python/paddle/distributed/auto_parallel/interface.py b/python/paddle/distributed/auto_parallel/interface.py index 124f622d40..d2f7e89414 100644 --- a/python/paddle/distributed/auto_parallel/interface.py +++ b/python/paddle/distributed/auto_parallel/interface.py @@ -220,7 +220,7 @@ def recompute(op): _g_collections = {} -class CollectionNames(object): +class CollectionNames: FETCHES = "fetches" LOGGING = "logging" diff --git a/python/paddle/distributed/auto_parallel/partitioner.py b/python/paddle/distributed/auto_parallel/partitioner.py index 2a7b7f3e67..6ec52ff697 100644 --- a/python/paddle/distributed/auto_parallel/partitioner.py +++ b/python/paddle/distributed/auto_parallel/partitioner.py @@ -32,7 +32,7 @@ __not_shape_var_type__ = [ ] -class Partitioner(object): +class Partitioner: """ warning:: Partitioner is experimental and subject to change. diff --git a/python/paddle/distributed/auto_parallel/process_mesh.py b/python/paddle/distributed/auto_parallel/process_mesh.py index 9d8a44b92d..1630289dde 100644 --- a/python/paddle/distributed/auto_parallel/process_mesh.py +++ b/python/paddle/distributed/auto_parallel/process_mesh.py @@ -39,7 +39,7 @@ def reset_current_process_mesh(): _g_current_process_mesh = _g_previous_process_mesh -class ProcessMesh(object): +class ProcessMesh: """ The `Processmesh` object describes the topology of the used processes. 
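The earlier `py_layer.py` hunks sit next to `with_mateclass`, a Python 2/3-compatible metaclass helper; note that dropping the explicit `object` base does not interfere with metaclasses, because the implicit base is filled in before the metaclass constructs the class. A toy sketch under that assumption (`Meta` and `WithMeta` are hypothetical names):

    class Meta(type):
        pass

    class WithMeta(metaclass=Meta):
        pass

    # The metaclass still applies, and `object` is still the implicit base.
    assert isinstance(WithMeta, Meta)
    assert WithMeta.__bases__ == (object,)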
diff --git a/python/paddle/distributed/auto_parallel/strategy.py b/python/paddle/distributed/auto_parallel/strategy.py index 12b6a9b78a..dcfd453f63 100644 --- a/python/paddle/distributed/auto_parallel/strategy.py +++ b/python/paddle/distributed/auto_parallel/strategy.py @@ -16,7 +16,7 @@ import copy from . import constants -class BaseConfig(object): +class BaseConfig: def __init__(self, category, config_dict=None): self._category = category self._config_dict = None diff --git a/python/paddle/distributed/auto_parallel/tuner/config.py b/python/paddle/distributed/auto_parallel/tuner/config.py index 4ed6340ecc..7bb9d4f18b 100644 --- a/python/paddle/distributed/auto_parallel/tuner/config.py +++ b/python/paddle/distributed/auto_parallel/tuner/config.py @@ -25,7 +25,7 @@ def _get_pass_config(strategy, pass_name): return config -class TuningConfig(object): +class TuningConfig: """ A uniform config wrap: distributed strategy: the user defined configuration for optimization pass diff --git a/python/paddle/distributed/auto_parallel/tuner/recorder.py b/python/paddle/distributed/auto_parallel/tuner/recorder.py index 2c838cfb14..8174ba22cf 100644 --- a/python/paddle/distributed/auto_parallel/tuner/recorder.py +++ b/python/paddle/distributed/auto_parallel/tuner/recorder.py @@ -18,7 +18,7 @@ import numpy as np -class MetricRecord(object): +class MetricRecord: """ One record for a single metric at a given execution step. """ @@ -62,7 +62,7 @@ class MetricRecord(object): return "MetricRecord(value={}, step={})".format(self.value, self.step) -class MetricRecords(object): +class MetricRecords: """ Records of a single metric across different executions. """ @@ -143,7 +143,7 @@ class MetricRecords(object): return records -class MetricsRecorder(object): +class MetricsRecorder: """ Record the values for all metrics. """ diff --git a/python/paddle/distributed/auto_parallel/tuner/storable.py b/python/paddle/distributed/auto_parallel/tuner/storable.py index fb03070ad0..01e10b4a3b 100644 --- a/python/paddle/distributed/auto_parallel/tuner/storable.py +++ b/python/paddle/distributed/auto_parallel/tuner/storable.py @@ -18,7 +18,7 @@ import json -class Storable(object): +class Storable: def get_state(self): raise NotImplementedError diff --git a/python/paddle/distributed/auto_parallel/tuner/tunable_space.py b/python/paddle/distributed/auto_parallel/tuner/tunable_space.py index e3e503401b..a4383f5385 100644 --- a/python/paddle/distributed/auto_parallel/tuner/tunable_space.py +++ b/python/paddle/distributed/auto_parallel/tuner/tunable_space.py @@ -22,7 +22,7 @@ from .tunable_variable import IntRange from .tunable_variable import FloatRange -class TunableSpace(object): +class TunableSpace: """ A TunableSpace is constructed by the tunable variables. """ diff --git a/python/paddle/distributed/auto_parallel/tuner/tunable_variable.py b/python/paddle/distributed/auto_parallel/tuner/tunable_variable.py index 74594a8b4a..3f45c68c1d 100644 --- a/python/paddle/distributed/auto_parallel/tuner/tunable_variable.py +++ b/python/paddle/distributed/auto_parallel/tuner/tunable_variable.py @@ -18,7 +18,7 @@ import numpy as np -class TunableVariable(object): +class TunableVariable: """ Tunablevariable base class. 
""" diff --git a/python/paddle/distributed/communication/batch_isend_irecv.py b/python/paddle/distributed/communication/batch_isend_irecv.py index 073ccb0b41..d3f0372b68 100644 --- a/python/paddle/distributed/communication/batch_isend_irecv.py +++ b/python/paddle/distributed/communication/batch_isend_irecv.py @@ -22,7 +22,7 @@ from paddle.distributed.communication.group import ( ) -class P2POp(object): +class P2POp: """ A class that makes point-to-point operations for "batch_isend_irecv". diff --git a/python/paddle/distributed/elastic.py b/python/paddle/distributed/elastic.py index 55b73ab315..082fdd3c07 100644 --- a/python/paddle/distributed/elastic.py +++ b/python/paddle/distributed/elastic.py @@ -16,7 +16,7 @@ import argparse import os -class Command(object): +class Command: def __init__(self, server, name): import etcd3 diff --git a/python/paddle/distributed/entry_attr.py b/python/paddle/distributed/entry_attr.py index be54d4ab7b..dcd5153bb5 100644 --- a/python/paddle/distributed/entry_attr.py +++ b/python/paddle/distributed/entry_attr.py @@ -15,7 +15,7 @@ __all__ = [] -class EntryAttr(object): +class EntryAttr: """ Entry Config for paddle.static.nn.sparse_embedding with Parameter Server. diff --git a/python/paddle/distributed/fleet/base/distributed_strategy.py b/python/paddle/distributed/fleet/base/distributed_strategy.py index c32b1f2d68..5b3b599513 100755 --- a/python/paddle/distributed/fleet/base/distributed_strategy.py +++ b/python/paddle/distributed/fleet/base/distributed_strategy.py @@ -65,7 +65,7 @@ def check_configs_key(msg, config, field_name): assert key in key_list, "key:{} not in {}".format(key, field_name) -class DistributedJobInfo(object): +class DistributedJobInfo: """ DistributedJobInfo will serialize all distributed training information Just for inner use: 1) debug 2) replicate experiments @@ -106,7 +106,7 @@ ReduceStrategyFluid = paddle.fluid.BuildStrategy.ReduceStrategy ReduceStrategyFleet = int -class DistributedStrategy(object): +class DistributedStrategy: __lock_attr = False def __init__(self): diff --git a/python/paddle/distributed/fleet/base/meta_optimizer_factory.py b/python/paddle/distributed/fleet/base/meta_optimizer_factory.py index 380a9b8c17..dd4611fc0a 100755 --- a/python/paddle/distributed/fleet/base/meta_optimizer_factory.py +++ b/python/paddle/distributed/fleet/base/meta_optimizer_factory.py @@ -26,7 +26,7 @@ meta_optimizer_names.remove("HybridParallelOptimizer") meta_optimizer_names.remove("HeterParallelOptimizer") -class MetaOptimizerFactory(object): +class MetaOptimizerFactory: def __init__(self): pass diff --git a/python/paddle/distributed/fleet/base/role_maker.py b/python/paddle/distributed/fleet/base/role_maker.py index 1fcd18789c..b001c5482f 100755 --- a/python/paddle/distributed/fleet/base/role_maker.py +++ b/python/paddle/distributed/fleet/base/role_maker.py @@ -35,7 +35,7 @@ class Role: COORDINATOR = 5 -class Gloo(object): +class Gloo: """ Gloo is a universal class for barrier and collective communication """ @@ -383,7 +383,7 @@ class Gloo(object): return output -class RoleMakerBase(object): +class RoleMakerBase: """ RoleMakerBase is a base class for assigning a role to current process in distributed training. 
diff --git a/python/paddle/distributed/fleet/base/runtime_factory.py b/python/paddle/distributed/fleet/base/runtime_factory.py index e900640967..5175885903 100644 --- a/python/paddle/distributed/fleet/base/runtime_factory.py +++ b/python/paddle/distributed/fleet/base/runtime_factory.py @@ -17,7 +17,7 @@ from ...ps.the_one_ps import TheOnePSRuntime __all__ = [] -class RuntimeFactory(object): +class RuntimeFactory: def __init__(self): pass diff --git a/python/paddle/distributed/fleet/base/strategy_compiler.py b/python/paddle/distributed/fleet/base/strategy_compiler.py index 348a79b9d4..f1844a1123 100644 --- a/python/paddle/distributed/fleet/base/strategy_compiler.py +++ b/python/paddle/distributed/fleet/base/strategy_compiler.py @@ -106,7 +106,7 @@ def maximum_path_len_algo(optimizer_list): return candidate -class StrategyCompilerBase(object): +class StrategyCompilerBase: def __init__(self): pass diff --git a/python/paddle/distributed/fleet/base/topology.py b/python/paddle/distributed/fleet/base/topology.py index 6fa1521d58..335125123c 100644 --- a/python/paddle/distributed/fleet/base/topology.py +++ b/python/paddle/distributed/fleet/base/topology.py @@ -23,7 +23,7 @@ __all__ = ['CommunicateTopology', 'HybridCommunicateGroup'] _HYBRID_PARALLEL_GROUP = None -class ParallelMode(object): +class ParallelMode: """ There are all the parallel modes currently supported: - DATA_PARALLEL: Distribute input data to different devices. @@ -47,7 +47,7 @@ class ParallelMode(object): SHARDING_PARALLEL = 3 -class CommunicateTopology(object): +class CommunicateTopology: def __init__( self, hybrid_group_names=["data", "pipe", "sharding", "model"], @@ -133,7 +133,7 @@ class CommunicateTopology(object): return self.get_rank(**tf) -class HybridCommunicateGroup(object): +class HybridCommunicateGroup: def __init__(self, topology): self.nranks = paddle.distributed.get_world_size() self.global_rank = paddle.distributed.get_rank() @@ -410,7 +410,7 @@ class HybridCommunicateGroup(object): ) -class _CommunicateGroup(object): +class _CommunicateGroup: """tmp for static""" def __init__(self): diff --git a/python/paddle/distributed/fleet/base/util_factory.py b/python/paddle/distributed/fleet/base/util_factory.py index dcaa256a26..8717619eaf 100755 --- a/python/paddle/distributed/fleet/base/util_factory.py +++ b/python/paddle/distributed/fleet/base/util_factory.py @@ -31,7 +31,7 @@ import numpy as np __all__ = [] -class UtilFactory(object): +class UtilFactory: def _create_util(self, context=None): util = UtilBase() if context is not None and "valid_strategy" in context: @@ -41,7 +41,7 @@ class UtilFactory(object): return util -class UtilBase(object): +class UtilBase: def __init__(self): self.role_maker = None self.dist_strategy = None diff --git a/python/paddle/distributed/fleet/data_generator/data_generator.py b/python/paddle/distributed/fleet/data_generator/data_generator.py index ec6114dd21..abf8f5f49f 100644 --- a/python/paddle/distributed/fleet/data_generator/data_generator.py +++ b/python/paddle/distributed/fleet/data_generator/data_generator.py @@ -17,7 +17,7 @@ import sys __all__ = [] -class DataGenerator(object): +class DataGenerator: """ DataGenerator is a general Base class for user to inherit A user who wants to define his/her own python processing logic diff --git a/python/paddle/distributed/fleet/dataset/dataset.py b/python/paddle/distributed/fleet/dataset/dataset.py index f5b3140064..870c936e54 100755 --- a/python/paddle/distributed/fleet/dataset/dataset.py +++ 
b/python/paddle/distributed/fleet/dataset/dataset.py @@ -20,7 +20,7 @@ import paddle.fluid.core as core __all__ = [] -class DatasetBase(object): +class DatasetBase: """Base dataset class.""" def __init__(self): diff --git a/python/paddle/distributed/fleet/dataset/index_dataset.py b/python/paddle/distributed/fleet/dataset/index_dataset.py index 87bf2bc738..7df2931b5d 100644 --- a/python/paddle/distributed/fleet/dataset/index_dataset.py +++ b/python/paddle/distributed/fleet/dataset/index_dataset.py @@ -16,7 +16,7 @@ from paddle.fluid import core __all__ = [] -class Index(object): +class Index: def __init__(self, name): self._name = name diff --git a/python/paddle/distributed/fleet/elastic/manager.py b/python/paddle/distributed/fleet/elastic/manager.py index b23a116422..0344c1d437 100644 --- a/python/paddle/distributed/fleet/elastic/manager.py +++ b/python/paddle/distributed/fleet/elastic/manager.py @@ -52,7 +52,7 @@ class ElasticStatus: EXIT = "exit" -class LauncherInterface(object): +class LauncherInterface: def __init__(self, args): self.args = args self.procs = [] @@ -124,7 +124,7 @@ class LauncherInterface(object): raise NotImplementedError -class ElasticManager(object): +class ElasticManager: def __init__(self, args, etcd_client): self.args = args diff --git a/python/paddle/distributed/fleet/fleet.py b/python/paddle/distributed/fleet/fleet.py index 695f03fe1f..617eb5729a 100644 --- a/python/paddle/distributed/fleet/fleet.py +++ b/python/paddle/distributed/fleet/fleet.py @@ -95,7 +95,7 @@ inited_runtime_handler = wrap_decorator(_inited_runtime_handler_) is_non_distributed_check = wrap_decorator(_is_non_distributed_check_) -class Fleet(object): +class Fleet: """ Unified API for distributed training of PaddlePaddle Please reference the https://github.com/PaddlePaddle/PaddleFleetX for details diff --git a/python/paddle/distributed/fleet/launch_utils.py b/python/paddle/distributed/fleet/launch_utils.py index 64795140cd..b676eee5bf 100755 --- a/python/paddle/distributed/fleet/launch_utils.py +++ b/python/paddle/distributed/fleet/launch_utils.py @@ -60,7 +60,7 @@ class DeviceMode: MLU = 4 -class Cluster(object): +class Cluster: def __init__(self, hdfs): self.job_server = None self.pods = [] @@ -133,7 +133,7 @@ class Cluster(object): return None -class JobServer(object): +class JobServer: def __init__(self): self.endpoint = None @@ -147,7 +147,7 @@ class JobServer(object): return not self == j -class Trainer(object): +class Trainer: def __init__(self): self.accelerators = [] self.endpoint = None @@ -179,7 +179,7 @@ class Trainer(object): return self.rank -class Pod(object): +class Pod: def __init__(self): self.rank = None self.id = None @@ -483,7 +483,7 @@ def pretty_print_envs(envs, header=None): return _str -class TrainerProc(object): +class TrainerProc: def __init__(self): self.proc = None self.log_fn = None @@ -1278,7 +1278,7 @@ def get_mapped_cluster_from_args_with_rank_mapping(args, device_mode): ) -class ParameterServerLauncher(object): +class ParameterServerLauncher: def __init__(self, args, distribute_mode): self.args = args self.distribute_mode = distribute_mode diff --git a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py index ee3f96e243..b0495e13b2 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py @@ -24,7 +24,7 @@ HcomGroupConfig = namedtuple('HcomGroupConfig', 
['name', 'nranks', 'rank_ids']) __all__ = [] -class AscendIRParser(object): +class AscendIRParser: def __init__(self, auto_dp=False, world_rank_size=1): self.graph_idx = 0 self.hcom_endpoints = {} diff --git a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py index b24e51896f..6158b4a7d4 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py +++ b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py @@ -101,7 +101,7 @@ global_cnt = -1 global_input_cnt = -1 -class AscendHelper(object): +class AscendHelper: def __init__(self): self.dtype2ge_map = { 0: core.GEDataType.DT_BOOL, @@ -136,7 +136,7 @@ class AscendHelper(object): return self.dtype2np_map[index] -class AscendParserFactory(object): +class AscendParserFactory: def __init__(self, graph, var2geop): self.graph = graph self.var2geop = var2geop @@ -149,7 +149,7 @@ class AscendParserFactory(object): raise ValueError("parser class %s does not exist" % parser_class) -class AscendParserBase(object): +class AscendParserBase: def __init__(self, graph, var2geop): self.graph = graph self.var2geop = var2geop diff --git a/python/paddle/distributed/fleet/meta_optimizers/common.py b/python/paddle/distributed/fleet/meta_optimizers/common.py index 7a3c89f1e9..03ed84563b 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/common.py +++ b/python/paddle/distributed/fleet/meta_optimizers/common.py @@ -53,7 +53,7 @@ def is_optimizer_op(op): ) & int(OpRole.Optimize) -class CollectiveHelper(object): +class CollectiveHelper: def __init__(self, role_maker, nrings=1, wait_port=True): self.nrings = nrings self.wait_port = wait_port diff --git a/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py index 7e5f698386..63037dc6f6 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py @@ -25,7 +25,7 @@ def _is_trainable(param): return not param.stop_gradient -class DygraphShardingOptimizer(object): +class DygraphShardingOptimizer: """ A wrapper for Sharding Optimizer in Dygraph. 
diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py index e4db252cf7..1c500ea56b 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py @@ -23,7 +23,7 @@ from paddle.fluid import core __all__ = [] -class FP16Utils(object): +class FP16Utils: def __init__(self): pass diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/gradient_clip_helper.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/gradient_clip_helper.py index 563757d35f..288e9d7d8a 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/gradient_clip_helper.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/gradient_clip_helper.py @@ -17,7 +17,7 @@ from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole __all__ = [] -class GradientClipHelper(object): +class GradientClipHelper: def __init__(self, mp_ring_id): self.mp_ring_id = mp_ring_id diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/offload_helper.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/offload_helper.py index ac10bb4238..c1951299c2 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/offload_helper.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/offload_helper.py @@ -44,7 +44,7 @@ class PlaceType: return PlaceType.CPU -class OffloadHelper(object): +class OffloadHelper: cpu_place_type = 0 cuda_place_type = PlaceType.default_device() cuda_pinned_place_type = PlaceType.default_pinned() diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/prune.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/prune.py index 895fd2f7ac..9a264a7dd1 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/prune.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/prune.py @@ -15,7 +15,7 @@ __all__ = [] -class ProgramDeps(object): +class ProgramDeps: def __init__(self, block, start_vars, end_vars): self._block = block # vars where to start to build the deps diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/shard.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/shard.py index d33d04098d..82a7a7494d 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/shard.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/shard.py @@ -22,7 +22,7 @@ from paddle.distributed.fleet.meta_optimizers.sharding.fp16_helper import ( __all__ = [] -class Shard(object): +class Shard: def __init__( self, ): @@ -155,7 +155,7 @@ class Shard(object): return grads_in_shard -class ProgramSegment(object): +class ProgramSegment: def __init__(self, block): self._block = block self._allreduce_vars = [] diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py index ea42130300..9feed7b1e5 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py @@ -408,7 +408,7 @@ def insert_allreduce_ops( return -class FuseHelper(object): +class FuseHelper: @staticmethod def sort_vars_by_dtype(block, vars_name): fp32_vars = [] diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/weight_decay_helper.py 
b/python/paddle/distributed/fleet/meta_optimizers/sharding/weight_decay_helper.py index 3d5d8aa2a3..0a841cf243 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/weight_decay_helper.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/weight_decay_helper.py @@ -17,7 +17,7 @@ from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_VAR_KEY __all__ = [] -class WeightDecayHelper(object): +class WeightDecayHelper: def __init__(self): pass diff --git a/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py b/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py index 663cd7d281..7ddbb64883 100755 --- a/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py +++ b/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py @@ -53,7 +53,7 @@ from paddle.incubate.distributed.fleet import recompute_hybrid __all__ = [] -class LayerDesc(object): +class LayerDesc: def __init__(self, layer_func, *inputs, **kwargs): self.layer_func = layer_func self.inputs = inputs @@ -89,7 +89,7 @@ class SharedLayerDesc(LayerDesc): self.shared_weight_attr = shared_weight_attr -class SegmentLayers(object): +class SegmentLayers: def __init__( self, layers_desc, diff --git a/python/paddle/distributed/fleet/runtime/runtime_base.py b/python/paddle/distributed/fleet/runtime/runtime_base.py index 2e8bacfbc3..192754d663 100644 --- a/python/paddle/distributed/fleet/runtime/runtime_base.py +++ b/python/paddle/distributed/fleet/runtime/runtime_base.py @@ -15,7 +15,7 @@ __all__ = [] -class RuntimeBase(object): +class RuntimeBase: def __init__(self): pass diff --git a/python/paddle/distributed/fleet/utils/fs.py b/python/paddle/distributed/fleet/utils/fs.py index 8a67301e17..667752e668 100644 --- a/python/paddle/distributed/fleet/utils/fs.py +++ b/python/paddle/distributed/fleet/utils/fs.py @@ -46,7 +46,7 @@ class FSShellCmdAborted(ExecuteError): pass -class FS(object): +class FS: @abc.abstractmethod def ls_dir(self, fs_path): raise NotImplementedError diff --git a/python/paddle/distributed/fleet/utils/http_server.py b/python/paddle/distributed/fleet/utils/http_server.py index 5602c5f01a..2828b9e5dd 100644 --- a/python/paddle/distributed/fleet/utils/http_server.py +++ b/python/paddle/distributed/fleet/utils/http_server.py @@ -128,7 +128,7 @@ class KVHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): self.end_headers() -class KVHTTPServer(HTTPServer, object): +class KVHTTPServer(HTTPServer): """ it is a http server storing kv pairs. """ diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py index a56c71fc40..5ba7c9c297 100644 --- a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py +++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py @@ -20,7 +20,7 @@ import paddle.distributed.fleet as fleet import numpy as np -class HybridParallelInferenceHelper(object): +class HybridParallelInferenceHelper: """ A helper class to split program for inference with hybrid parallelism. 
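The `KVHTTPServer(HTTPServer, object)` hunk above is the one pattern in this patch where the extra base once did real work: on Python 2, `HTTPServer` descended from old-style classes, and appending `object` forced new-style semantics (a proper `__mro__`, descriptors, cooperative `super()`). On Python 3 every class already derives from `object`, so the second base is redundant. A small sketch of that claim (`DemoServer` is a hypothetical stand-in, not part of the patch):

    from http.server import HTTPServer

    class DemoServer(HTTPServer):
        pass

    # `object` closes the MRO even though it is never named as a base.
    assert DemoServer.__mro__[-1] is object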
diff --git a/python/paddle/distributed/launch/context/__init__.py b/python/paddle/distributed/launch/context/__init__.py index 037fc0efbc..0d3410e368 100644 --- a/python/paddle/distributed/launch/context/__init__.py +++ b/python/paddle/distributed/launch/context/__init__.py @@ -21,7 +21,7 @@ from .args_envs import parse_args, fetch_envs, env_args_mapping import logging -class Context(object): +class Context: def __init__(self, enable_plugin=True): self.args, self.unknown_args = parse_args() self.envs = fetch_envs() diff --git a/python/paddle/distributed/launch/context/device.py b/python/paddle/distributed/launch/context/device.py index f05bc1b776..2708755c2e 100644 --- a/python/paddle/distributed/launch/context/device.py +++ b/python/paddle/distributed/launch/context/device.py @@ -27,7 +27,7 @@ class DeviceType: CUSTOM_DEVICE = 'custom_device' -class Device(object): +class Device: def __init__(self, dtype=None, memory="", labels=""): self._dtype = dtype self._memory = memory diff --git a/python/paddle/distributed/launch/context/event.py b/python/paddle/distributed/launch/context/event.py index 3859edb83f..7792907e47 100644 --- a/python/paddle/distributed/launch/context/event.py +++ b/python/paddle/distributed/launch/context/event.py @@ -13,7 +13,7 @@ # limitations under the License. -class Event(object): +class Event: def __init__(self, kind="status", message="", fatal=False): self.kind = kind self.message = message diff --git a/python/paddle/distributed/launch/context/node.py b/python/paddle/distributed/launch/context/node.py index b8c1a2a14f..04748d2935 100644 --- a/python/paddle/distributed/launch/context/node.py +++ b/python/paddle/distributed/launch/context/node.py @@ -21,7 +21,7 @@ import struct from contextlib import closing -class Node(object): +class Node: def __init__(self): # self.device = Device.detect_device() self.device = Device.parse_device() diff --git a/python/paddle/distributed/launch/context/resource.py b/python/paddle/distributed/launch/context/resource.py index b7baf6fd7b..3b4d16e7b5 100644 --- a/python/paddle/distributed/launch/context/resource.py +++ b/python/paddle/distributed/launch/context/resource.py @@ -13,6 +13,6 @@ # limitations under the License. -class Resource(object): +class Resource: def __init__(self): self.devices = [] diff --git a/python/paddle/distributed/launch/context/status.py b/python/paddle/distributed/launch/context/status.py index b87b7b3fb8..460f14f0cb 100644 --- a/python/paddle/distributed/launch/context/status.py +++ b/python/paddle/distributed/launch/context/status.py @@ -13,7 +13,7 @@ # limitations under the License. 
-class Status(object): +class Status: UNINIT = "uninit" READY = "ready" RUNNING = "running" diff --git a/python/paddle/distributed/launch/controllers/controller.py b/python/paddle/distributed/launch/controllers/controller.py index 9ff18d5b5d..73954adaab 100644 --- a/python/paddle/distributed/launch/controllers/controller.py +++ b/python/paddle/distributed/launch/controllers/controller.py @@ -31,7 +31,7 @@ class ControleMode: RPC = "rpc" -class ControllerBase(object): +class ControllerBase: def __init__(self, ctx): signal.signal(signal.SIGTERM, self.signal_handler) signal.signal(signal.SIGABRT, self.signal_handler) diff --git a/python/paddle/distributed/launch/controllers/master.py b/python/paddle/distributed/launch/controllers/master.py index 9c3f0a8501..37c6f7a508 100644 --- a/python/paddle/distributed/launch/controllers/master.py +++ b/python/paddle/distributed/launch/controllers/master.py @@ -24,7 +24,7 @@ import random ETCD_PROTOCAL = 'etcd://' -class Master(object): +class Master: ''' Master is a distributed store design to exchange info among nodes ''' diff --git a/python/paddle/distributed/launch/controllers/watcher.py b/python/paddle/distributed/launch/controllers/watcher.py index a9c1f50966..c76a428d74 100644 --- a/python/paddle/distributed/launch/controllers/watcher.py +++ b/python/paddle/distributed/launch/controllers/watcher.py @@ -19,7 +19,7 @@ import os from threading import Thread -class Watcher(object): +class Watcher: def __init__(self, ctx): self.ctx = ctx diff --git a/python/paddle/distributed/launch/job/container.py b/python/paddle/distributed/launch/job/container.py index 6eb313ea57..c78c3323a8 100644 --- a/python/paddle/distributed/launch/job/container.py +++ b/python/paddle/distributed/launch/job/container.py @@ -20,7 +20,7 @@ import os import sys -class Container(object): +class Container: ''' TODO(kuizhiqing) A container can be run by process/thread or just a callable function ''' diff --git a/python/paddle/distributed/launch/job/job.py b/python/paddle/distributed/launch/job/job.py index f5c805e31b..261e6ee7f2 100644 --- a/python/paddle/distributed/launch/job/job.py +++ b/python/paddle/distributed/launch/job/job.py @@ -19,7 +19,7 @@ class JobMode: HETER = 'heter' -class Job(object): +class Job: def __init__(self, jid='default', mode=JobMode.COLLECTIVE, nnodes="1"): self._mode = mode self._id = jid diff --git a/python/paddle/distributed/launch/job/pod.py b/python/paddle/distributed/launch/job/pod.py index b65aad6e0f..a322bcdccf 100644 --- a/python/paddle/distributed/launch/job/pod.py +++ b/python/paddle/distributed/launch/job/pod.py @@ -20,7 +20,7 @@ import random import time -class PodSepc(object): +class PodSepc: def __init__(self): self._name = ''.join( random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(6) diff --git a/python/paddle/distributed/launch/job/status.py b/python/paddle/distributed/launch/job/status.py index 88fd09bbf2..d8a346ea29 100644 --- a/python/paddle/distributed/launch/job/status.py +++ b/python/paddle/distributed/launch/job/status.py @@ -13,7 +13,7 @@ # limitations under the License. 
-class Status(object): +class Status: UNINIT = "uninit" READY = "ready" RUNNING = "running" diff --git a/python/paddle/distributed/launch/utils/kv_client.py b/python/paddle/distributed/launch/utils/kv_client.py index 8ed46053de..b60970382a 100644 --- a/python/paddle/distributed/launch/utils/kv_client.py +++ b/python/paddle/distributed/launch/utils/kv_client.py @@ -16,7 +16,7 @@ import requests import time -class KVClient(object): +class KVClient: def __init__(self, endpoint='localhost:2379'): self.endpoint = ( endpoint diff --git a/python/paddle/distributed/launch/utils/kv_server.py b/python/paddle/distributed/launch/utils/kv_server.py index d27836f1bf..90dcbcf937 100644 --- a/python/paddle/distributed/launch/utils/kv_server.py +++ b/python/paddle/distributed/launch/utils/kv_server.py @@ -67,7 +67,7 @@ class KVHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): return -class KVServer(HTTPServer, object): +class KVServer(HTTPServer): def __init__(self, port): super().__init__(('', port), KVHandler) self.kv_lock = threading.Lock() diff --git a/python/paddle/distributed/launch/utils/nvsmi.py b/python/paddle/distributed/launch/utils/nvsmi.py index d1a14f1166..762870baa1 100644 --- a/python/paddle/distributed/launch/utils/nvsmi.py +++ b/python/paddle/distributed/launch/utils/nvsmi.py @@ -18,7 +18,7 @@ import json import shutil -class Info(object): +class Info: def __repr__(self): return str(self.__dict__) diff --git a/python/paddle/distributed/launch/utils/process_context.py b/python/paddle/distributed/launch/utils/process_context.py index 682a857f2e..3a8c528517 100644 --- a/python/paddle/distributed/launch/utils/process_context.py +++ b/python/paddle/distributed/launch/utils/process_context.py @@ -16,7 +16,7 @@ import subprocess import os, sys, signal, time -class ProcessContext(object): +class ProcessContext: def __init__( self, cmd, diff --git a/python/paddle/distributed/passes/auto_parallel_amp.py b/python/paddle/distributed/passes/auto_parallel_amp.py index 85f8ec2e1f..d15d38abee 100644 --- a/python/paddle/distributed/passes/auto_parallel_amp.py +++ b/python/paddle/distributed/passes/auto_parallel_amp.py @@ -54,7 +54,7 @@ from ..auto_parallel.utils import is_forward_op, is_backward_op, is_loss_op world_process_group = get_world_process_group() -class AMPState(object): +class AMPState: def __init__(self, block): self._block = block self._op_fp16_dict = ( diff --git a/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py b/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py index c2779d3557..ec3d799ee8 100644 --- a/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py +++ b/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py @@ -572,7 +572,7 @@ class DataParallelOptimizationPass(PassBase): self._logger.info("individual gradient {}".format(individual_grads)) -class GradientsGroup(object): +class GradientsGroup: def __init__(self, ops, max_group_size): self.max_group_size = max_group_size self.ops = ops diff --git a/python/paddle/distributed/passes/auto_parallel_fp16.py b/python/paddle/distributed/passes/auto_parallel_fp16.py index cdb5f42212..a952986c21 100644 --- a/python/paddle/distributed/passes/auto_parallel_fp16.py +++ b/python/paddle/distributed/passes/auto_parallel_fp16.py @@ -126,7 +126,7 @@ def _keep_fp32_output(op, out_name): return False -class FP16State(object): +class FP16State: def __init__( self, program, diff --git a/python/paddle/distributed/passes/auto_parallel_grad_clip.py 
b/python/paddle/distributed/passes/auto_parallel_grad_clip.py index d570bf9c3f..73432baa1d 100644 --- a/python/paddle/distributed/passes/auto_parallel_grad_clip.py +++ b/python/paddle/distributed/passes/auto_parallel_grad_clip.py @@ -143,7 +143,7 @@ def _is_about_global_norm( return rank_id in complete_param_ranks -class ClipHelper(object): +class ClipHelper: def __init__(self, params_grads, rank_id, block, dist_context): params, _ = zip(*params_grads) self.params = list(params) diff --git a/python/paddle/distributed/passes/auto_parallel_sharding.py b/python/paddle/distributed/passes/auto_parallel_sharding.py index 290a1741ca..a80af73c2b 100644 --- a/python/paddle/distributed/passes/auto_parallel_sharding.py +++ b/python/paddle/distributed/passes/auto_parallel_sharding.py @@ -850,7 +850,7 @@ def shard_parameters(params, group_size): return mapping -class ShardingInfo(object): +class ShardingInfo: def __init__(self, group, rank, params_grads): self.group = group self.params_grads = dict([(p.name, (p, g)) for p, g in params_grads]) diff --git a/python/paddle/distributed/ps/coordinator.py b/python/paddle/distributed/ps/coordinator.py index d73193845e..612502edad 100755 --- a/python/paddle/distributed/ps/coordinator.py +++ b/python/paddle/distributed/ps/coordinator.py @@ -353,7 +353,7 @@ class FLClient(FLClientBase): f.write(str(self.train_statical_info)) -class Coordinator(object): +class Coordinator: def __init__(self, ps_hosts): self._communicator = FLCommunicator(ps_hosts) self._client_selector = None diff --git a/python/paddle/distributed/ps/the_one_ps.py b/python/paddle/distributed/ps/the_one_ps.py index b4881959ee..3dbbae8647 100755 --- a/python/paddle/distributed/ps/the_one_ps.py +++ b/python/paddle/distributed/ps/the_one_ps.py @@ -914,7 +914,7 @@ class fsClient: proto.hadoop_bin = self.fs_client_param.hadoop_bin -class PsDescBuilder(object): +class PsDescBuilder: def __init__(self, context): self.context = context self.is_sync = context['is_sync'] diff --git a/python/paddle/distributed/ps/utils/ps_factory.py b/python/paddle/distributed/ps/utils/ps_factory.py index 0726fe15dc..0161352ec9 100755 --- a/python/paddle/distributed/ps/utils/ps_factory.py +++ b/python/paddle/distributed/ps/utils/ps_factory.py @@ -27,7 +27,7 @@ __all__ = [ ] -class PsProgramBuilderFactory(object): +class PsProgramBuilderFactory: def __init__(self): pass diff --git a/python/paddle/distributed/ps/utils/ps_program_builder.py b/python/paddle/distributed/ps/utils/ps_program_builder.py index a07e2ebe14..1831f7061b 100755 --- a/python/paddle/distributed/ps/utils/ps_program_builder.py +++ b/python/paddle/distributed/ps/utils/ps_program_builder.py @@ -19,7 +19,7 @@ from paddle.distributed.fleet.base.private_helper_function import ( from paddle.distributed.passes import new_pass -class PsProgramBuilder(object): +class PsProgramBuilder: def __init__(self, pass_ctx): self.pass_ctx = pass_ctx self.attrs = self.pass_ctx._attrs diff --git a/python/paddle/distributed/ps/utils/public.py b/python/paddle/distributed/ps/utils/public.py index 578d664dc4..3b3a44d498 100755 --- a/python/paddle/distributed/ps/utils/public.py +++ b/python/paddle/distributed/ps/utils/public.py @@ -88,7 +88,7 @@ class DistributedMode: NU = 5 -class TrainerRuntimeConfig(object): +class TrainerRuntimeConfig: def __init__(self, valid_strategy): self.mode = None num_threads = os.getenv("CPU_NUM", "1") diff --git a/python/paddle/distributed/spawn.py b/python/paddle/distributed/spawn.py index 2df9118ac4..a371f5d559 100644 --- 
a/python/paddle/distributed/spawn.py +++ b/python/paddle/distributed/spawn.py @@ -43,7 +43,7 @@ from paddle.fluid.framework import set_flags __all__ = [] -class ParallelEnvArgs(object): +class ParallelEnvArgs: def __init__(self): # Paddle cluster nodes ips, such as 192.168.0.16,192.168.0.17.. self.cluster_node_ips = None @@ -412,7 +412,7 @@ def _func_wrapper(func, args, error_queue, return_queue, env_dict, backend): sys.exit(1) -class MultiprocessContext(object): +class MultiprocessContext: def __init__(self, processes, error_queues, return_queues): _py_supported_check() self.error_queues = error_queues diff --git a/python/paddle/distributed/utils/launch_utils.py b/python/paddle/distributed/utils/launch_utils.py index 975f5d4935..0c1ab76e55 100644 --- a/python/paddle/distributed/utils/launch_utils.py +++ b/python/paddle/distributed/utils/launch_utils.py @@ -99,7 +99,7 @@ def get_gpus(selected_gpus): return gpus -class Hdfs(object): +class Hdfs: def __init__(self): self.hdfs_ugi = None self.hdfs_name = None @@ -128,7 +128,7 @@ class Hdfs(object): return not self == n -class Cluster(object): +class Cluster: def __init__(self, hdfs): self.job_server = None self.pods = [] @@ -194,7 +194,7 @@ class Cluster(object): return None -class JobServer(object): +class JobServer: def __init__(self): self.endpoint = None @@ -208,7 +208,7 @@ class JobServer(object): return not self == j -class Trainer(object): +class Trainer: def __init__(self): self.gpus = [] self.endpoint = None @@ -239,7 +239,7 @@ class Trainer(object): return self.rank -class Pod(object): +class Pod: def __init__(self): self.rank = None self.id = None @@ -454,7 +454,7 @@ def _prepare_trainer_env(cluster, trainer, backend=None): return proc_env -class TrainerProc(object): +class TrainerProc: def __init__(self): self.proc = None self.log_fn = None diff --git a/python/paddle/distribution/constraint.py b/python/paddle/distribution/constraint.py index 46f919f385..092faf1693 100644 --- a/python/paddle/distribution/constraint.py +++ b/python/paddle/distribution/constraint.py @@ -14,7 +14,7 @@ import paddle -class Constraint(object): +class Constraint: """Constraint condition for random variable.""" def __call__(self, value): diff --git a/python/paddle/distribution/distribution.py b/python/paddle/distribution/distribution.py index 92dd306ce0..27febe07a3 100644 --- a/python/paddle/distribution/distribution.py +++ b/python/paddle/distribution/distribution.py @@ -33,7 +33,7 @@ from paddle.fluid.framework import ( from paddle.fluid.layers import tensor -class Distribution(object): +class Distribution: """ The abstract base class for probability distributions. Functions are implemented in specific distributions. diff --git a/python/paddle/distribution/kl.py b/python/paddle/distribution/kl.py index 4f95366391..cf88576298 100644 --- a/python/paddle/distribution/kl.py +++ b/python/paddle/distribution/kl.py @@ -127,7 +127,7 @@ def _dispatch(cls_p, cls_q): @functools.total_ordering -class _Compare(object): +class _Compare: def __init__(self, *classes): self.classes = classes diff --git a/python/paddle/distribution/transform.py b/python/paddle/distribution/transform.py index f13c224691..db55eca2d1 100644 --- a/python/paddle/distribution/transform.py +++ b/python/paddle/distribution/transform.py @@ -58,7 +58,7 @@ class Type(enum.Enum): return _type in (cls.BIJECTION, cls.INJECTION) -class Transform(object): +class Transform: r"""Base class for the transformations of random variables. 
``Transform`` can be used to represent any differentiable and injective diff --git a/python/paddle/distribution/variable.py b/python/paddle/distribution/variable.py index e7aa1e1a68..99cafc5ea7 100644 --- a/python/paddle/distribution/variable.py +++ b/python/paddle/distribution/variable.py @@ -15,7 +15,7 @@ from paddle.distribution import constraint -class Variable(object): +class Variable: """Random variable of probability distribution. Args: diff --git a/python/paddle/fluid/average.py b/python/paddle/fluid/average.py index 2d83246df7..32db4ba0a9 100644 --- a/python/paddle/fluid/average.py +++ b/python/paddle/fluid/average.py @@ -39,7 +39,7 @@ def _is_number_or_matrix_(var): return _is_number_(var) or isinstance(var, np.ndarray) -class WeightedAverage(object): +class WeightedAverage: """ Calculate weighted average. diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py index e73e2fe1ab..279ac48045 100755 --- a/python/paddle/fluid/backward.py +++ b/python/paddle/fluid/backward.py @@ -41,7 +41,7 @@ _logger = log_helper.get_logger( ) -class ProgramStats(object): +class ProgramStats: def __init__(self, block, ops): self.block = block self.ops = ops @@ -789,7 +789,7 @@ def _find_not_need_ops(grad_op_descs, forward_ops, input_grad_names_set): (set[core.OpDesc]): A set of OpDescs which should be pruned. """ - class Var(object): + class Var: def __init__(self, var_name): self.var_name = var_name self.gen_op = None @@ -804,7 +804,7 @@ def _find_not_need_ops(grad_op_descs, forward_ops, input_grad_names_set): assert isinstance(op, Op) self.pendding_ops.append(op) - class Op(object): + class Op: def __init__(self, op_desc): self.op_desc = op_desc self.inputs = [] diff --git a/python/paddle/fluid/clip.py b/python/paddle/fluid/clip.py index 1ad98da957..68a2f8a0de 100644 --- a/python/paddle/fluid/clip.py +++ b/python/paddle/fluid/clip.py @@ -92,7 +92,7 @@ def _squared_l2_norm(x): return out -class BaseErrorClipAttr(object): +class BaseErrorClipAttr: def __str__(self): raise NotImplementedError() @@ -177,7 +177,7 @@ def error_clip_callback(block, context): error_clip._append_clip_op(block, grad_n) -class ClipGradBase(object): +class ClipGradBase: def __init__(self): super().__init__() diff --git a/python/paddle/fluid/communicator.py b/python/paddle/fluid/communicator.py index bd43dce837..55733c87f6 100755 --- a/python/paddle/fluid/communicator.py +++ b/python/paddle/fluid/communicator.py @@ -38,7 +38,7 @@ from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode __all__ = ['Communicator', 'FLCommunicator', 'LargeScaleKV'] -class Communicator(object): +class Communicator: def __init__(self, mode, kwargs=None, envs=None): """ Communicator is used for async distribute training in distribute_transpiler mode. 
@@ -246,7 +246,7 @@ class FLCommunicator(Communicator): ## only for coordinator return info_mp -class LargeScaleKV(object): +class LargeScaleKV: def __init__(self): self.scale_kv = core.LargeScaleKV() @@ -260,7 +260,7 @@ class LargeScaleKV(object): return self.scale_kv.size(varname) -class HeterClient(object): +class HeterClient: def __init__(self, endpoint, previous_endpoint, trainer_id): self.heter_client_ = core.HeterClient( endpoint, previous_endpoint, trainer_id diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py index 1adef41f86..47110b1e0b 100644 --- a/python/paddle/fluid/compiler.py +++ b/python/paddle/fluid/compiler.py @@ -105,7 +105,7 @@ def _should_broadcast_or_not_exists(program, var_name): return not is_distributed -class CompiledProgram(object): +class CompiledProgram: """ :api_attr: Static Graph @@ -567,7 +567,7 @@ class CompiledProgram(object): return place_list -class IpuDynamicPatcher(object): +class IpuDynamicPatcher: """ Patcher for IPU dynamic2static support. """ @@ -777,7 +777,7 @@ class IpuDynamicPatcher(object): setattr(module, key, attr) -class IpuStrategy(object): +class IpuStrategy: """ Help users precisely control the graph building in :code:`paddle.static.IpuCompiledProgram` . @@ -1237,7 +1237,7 @@ class IpuStrategy(object): return self.get_option('enable_fp16') -class IpuCompiledProgram(object): +class IpuCompiledProgram: """ The IpuCompiledProgram is used to transform a program to a ipu-target program, such as forward graph extraction, computing graph transformation, useless scale Ops clean, etc. diff --git a/python/paddle/fluid/contrib/decoder/beam_search_decoder.py b/python/paddle/fluid/contrib/decoder/beam_search_decoder.py index 429feda47b..717d31c2fe 100644 --- a/python/paddle/fluid/contrib/decoder/beam_search_decoder.py +++ b/python/paddle/fluid/contrib/decoder/beam_search_decoder.py @@ -37,7 +37,7 @@ class _DecoderType: BEAM_SEARCH = 2 -class InitState(object): +class InitState: """ The initial hidden state object. The state objects holds a variable, and may use it to initialize the hidden state cell of RNN. Usually used as input to @@ -98,7 +98,7 @@ class InitState(object): return self._need_reorder -class _MemoryState(object): +class _MemoryState: def __init__(self, state_name, rnn_obj, init_state): self._state_name = state_name # each is a rnn.memory self._rnn_obj = rnn_obj @@ -113,7 +113,7 @@ class _MemoryState(object): self._rnn_obj.update_memory(self._state_mem, state) -class _ArrayState(object): +class _ArrayState: def __init__(self, state_name, block, init_state): self._state_name = state_name self._block = block @@ -161,7 +161,7 @@ class _ArrayState(object): layers.array_write(state, array=self._state_array, i=self._counter) -class StateCell(object): +class StateCell: """ The state cell class stores the hidden state of the RNN cell. A typical RNN cell has one or more hidden states, and one or more step inputs. This class @@ -401,7 +401,7 @@ class StateCell(object): return self._cur_states[self._out_state] -class TrainingDecoder(object): +class TrainingDecoder: """ A decoder that can only be used for training. The decoder could be initialized with a `StateCell` object. The computation within the RNN cell @@ -547,7 +547,7 @@ class TrainingDecoder(object): ) -class BeamSearchDecoder(object): +class BeamSearchDecoder: """ A beam search decoder that can be used for inference. The decoder should be initialized with a `StateCell` object. 
The decode process can be defined diff --git a/python/paddle/fluid/contrib/extend_optimizer/extend_optimizer_with_weight_decay.py b/python/paddle/fluid/contrib/extend_optimizer/extend_optimizer_with_weight_decay.py index e85f3e45a2..53a010c23c 100644 --- a/python/paddle/fluid/contrib/extend_optimizer/extend_optimizer_with_weight_decay.py +++ b/python/paddle/fluid/contrib/extend_optimizer/extend_optimizer_with_weight_decay.py @@ -17,7 +17,7 @@ from paddle.fluid import framework as framework __all__ = ["extend_with_decoupled_weight_decay"] -class DecoupledWeightDecay(object): +class DecoupledWeightDecay: def __init__(self, coeff=0.0, apply_decay_param_fun=None, **kwargs): if not isinstance(coeff, float) and not isinstance( coeff, framework.Variable diff --git a/python/paddle/fluid/contrib/mixed_precision/bf16/amp_lists.py b/python/paddle/fluid/contrib/mixed_precision/bf16/amp_lists.py index 33694f4d12..180e28ddab 100644 --- a/python/paddle/fluid/contrib/mixed_precision/bf16/amp_lists.py +++ b/python/paddle/fluid/contrib/mixed_precision/bf16/amp_lists.py @@ -24,7 +24,7 @@ from ..fp16_lists import ( __all__ = ["AutoMixedPrecisionListsBF16"] -class AutoMixedPrecisionListsBF16(object): +class AutoMixedPrecisionListsBF16: """ AutoMixedPrecisionListsBF16 is a class for fp32/bf16 op types list. The lists are used for an algorithm which determines op's execution mode (fp32 or bf16).It can update pre-defined diff --git a/python/paddle/fluid/contrib/mixed_precision/bf16/decorator.py b/python/paddle/fluid/contrib/mixed_precision/bf16/decorator.py index 9110686582..dd1b07bfff 100644 --- a/python/paddle/fluid/contrib/mixed_precision/bf16/decorator.py +++ b/python/paddle/fluid/contrib/mixed_precision/bf16/decorator.py @@ -31,7 +31,7 @@ import warnings __all__ = ["decorate_bf16"] -class OptimizerWithMixedPrecision(object): +class OptimizerWithMixedPrecision: """ Optimizer with mixed-precision (MP) training. This is a wrapper of a common optimizer, plus the support of mixed-precision pre-training. The object diff --git a/python/paddle/fluid/contrib/mixed_precision/decorator.py b/python/paddle/fluid/contrib/mixed_precision/decorator.py index 75554ff9c8..6b9f3f6eaa 100644 --- a/python/paddle/fluid/contrib/mixed_precision/decorator.py +++ b/python/paddle/fluid/contrib/mixed_precision/decorator.py @@ -34,7 +34,7 @@ import paddle __all__ = ["decorate"] -class OptimizerWithMixedPrecision(object): +class OptimizerWithMixedPrecision: """ Optimizer with mixed-precision (MP) training. This is a wrapper of a common optimizer, plus the support of mixed-precision pre-training. The object diff --git a/python/paddle/fluid/contrib/mixed_precision/fp16_lists.py b/python/paddle/fluid/contrib/mixed_precision/fp16_lists.py index ef8f222bac..101af59861 100644 --- a/python/paddle/fluid/contrib/mixed_precision/fp16_lists.py +++ b/python/paddle/fluid/contrib/mixed_precision/fp16_lists.py @@ -26,7 +26,7 @@ _extra_unsupported_fp16_list = { } -class AutoMixedPrecisionLists(object): +class AutoMixedPrecisionLists: """ AutoMixedPrecisionLists is a class for black/white list. 
It can update pre-defined black list and white list according to users' custom black diff --git a/python/paddle/fluid/contrib/quantize/quantize_transpiler.py b/python/paddle/fluid/contrib/quantize/quantize_transpiler.py index 6e225fdbcc..edd07c0ba9 100644 --- a/python/paddle/fluid/contrib/quantize/quantize_transpiler.py +++ b/python/paddle/fluid/contrib/quantize/quantize_transpiler.py @@ -81,7 +81,7 @@ def quant(x, scale, num_bits): return y -class QuantizeTranspiler(object): +class QuantizeTranspiler: def __init__( self, weight_bits=8, diff --git a/python/paddle/fluid/contrib/slim/quantization/adaround.py b/python/paddle/fluid/contrib/slim/quantization/adaround.py index 9dd00ddadc..278994ef31 100644 --- a/python/paddle/fluid/contrib/slim/quantization/adaround.py +++ b/python/paddle/fluid/contrib/slim/quantization/adaround.py @@ -51,7 +51,7 @@ def compute_soft_rounding_np(alpha_v): ) -class AdaRoundLoss(object): +class AdaRoundLoss: def __init__(self, reg_param=0.01, default_beta_range=(20, 2)): self.default_reg_param = reg_param self.default_beta_range = default_beta_range @@ -111,7 +111,7 @@ class AdaRoundLoss(object): return beta -class AdaRound(object): +class AdaRound: def __init__( self, scale, diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq.py b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq.py index 79f0a2bf35..358e08cf58 100644 --- a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq.py +++ b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq.py @@ -36,7 +36,7 @@ _logger = get_logger( ) -class ImperativePTQ(object): +class ImperativePTQ: """ Static post training quantization. """ diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_config.py b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_config.py index b02de6cee4..88eb998c0e 100644 --- a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_config.py +++ b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_config.py @@ -22,7 +22,7 @@ from .ptq_quantizer import * __all__ = ['PTQConfig', 'default_ptq_config'] -class PTQConfig(object): +class PTQConfig: """ The PTQ config shows how to quantize the inputs and outputs. """ diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_registry.py b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_registry.py index cb56bfcb47..d8df91f78f 100644 --- a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_registry.py +++ b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_registry.py @@ -17,7 +17,7 @@ import paddle __all__ = ['PTQRegistry'] -class LayerInfo(object): +class LayerInfo: """ Store the argnames of the inputs and outputs. """ @@ -63,7 +63,7 @@ QUANT_LAYERS_INFO = [ SIMULATED_LAYERS = [paddle.nn.Conv2D, paddle.nn.Linear] -class PTQRegistry(object): +class PTQRegistry: """ Register the supported layers for PTQ and provide layers info. """ diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py b/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py index aa768ab974..9e64ecd96b 100644 --- a/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py +++ b/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py @@ -57,7 +57,7 @@ def lazy_import_fleet(layer_name_map, fake_quant_input_layers): return layer_name_map, fake_quant_input_layers -class ImperativeQuantAware(object): +class ImperativeQuantAware: """ Applying quantization aware training (QAT) to the dgraph model. 
""" @@ -304,7 +304,7 @@ class ImperativeQuantAware(object): ) -class ImperativeQuantizeInputs(object): +class ImperativeQuantizeInputs: """ Based on the input params, add the quant_dequant computational logic both for activation inputs and weight inputs. @@ -448,7 +448,7 @@ class ImperativeQuantizeInputs(object): return quant_layers.__dict__[quant_layer_name](layer, **self._kwargs) -class ImperativeQuantizeOutputs(object): +class ImperativeQuantizeOutputs: """ Calculate the output scales for target layers. """ diff --git a/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py b/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py index 3db16060e0..68df2a8adc 100644 --- a/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py +++ b/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py @@ -112,7 +112,7 @@ def _apply_pass( return graph -class PostTrainingQuantization(object): +class PostTrainingQuantization: """ Utilizing post training quantization methon to quantize the FP32 model, and it uses calibrate data to get the quantization information for all @@ -1481,7 +1481,7 @@ class PostTrainingQuantizationProgram(PostTrainingQuantization): self._fetch_list = fetch_list -class WeightQuantization(object): +class WeightQuantization: _supported_quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul'] _supported_weight_quantize_type = ['channel_wise_abs_max', 'abs_max'] diff --git a/python/paddle/fluid/contrib/slim/quantization/quant2_int8_mkldnn_pass.py b/python/paddle/fluid/contrib/slim/quantization/quant2_int8_mkldnn_pass.py index fcc2daff20..a617bac359 100644 --- a/python/paddle/fluid/contrib/slim/quantization/quant2_int8_mkldnn_pass.py +++ b/python/paddle/fluid/contrib/slim/quantization/quant2_int8_mkldnn_pass.py @@ -22,7 +22,7 @@ __all__ = ['Quant2Int8MkldnnPass'] OpRole = core.op_proto_and_checker_maker.OpRole -class Quant2Int8MkldnnPass(object): +class Quant2Int8MkldnnPass: """ Transform a quant model IrGraph into MKL-DNN supported INT8 IrGraph. The pass consists of the following transformations: diff --git a/python/paddle/fluid/contrib/slim/quantization/quant_int8_mkldnn_pass.py b/python/paddle/fluid/contrib/slim/quantization/quant_int8_mkldnn_pass.py index 73c611db01..25278fc691 100644 --- a/python/paddle/fluid/contrib/slim/quantization/quant_int8_mkldnn_pass.py +++ b/python/paddle/fluid/contrib/slim/quantization/quant_int8_mkldnn_pass.py @@ -21,7 +21,7 @@ from ....framework import _get_paddle_place __all__ = ['QuantInt8MkldnnPass'] -class QuantInt8MkldnnPass(object): +class QuantInt8MkldnnPass: """ Convert QuantizationFreezePass generated IrGraph to MKL-DNN supported INT8 IrGraph. Following transformations did in this pass: diff --git a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py index 020bdcec48..f0caabd6f4 100644 --- a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py +++ b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py @@ -107,7 +107,7 @@ def _check_grandchild_op_node(op_node, grandchild_op_name): return False -class QuantizationTransformPass(object): +class QuantizationTransformPass: """ Quantize the ops that have weights. Add quant and dequant ops for the quantized ops's inputs. 
@@ -1068,7 +1068,7 @@ class QuantizationTransformPass(object): return is_skip -class QuantizationFreezePass(object): +class QuantizationFreezePass: def __init__( self, scope, @@ -1444,7 +1444,7 @@ class QuantizationFreezePass(object): ) -class ConvertToInt8Pass(object): +class ConvertToInt8Pass: def __init__(self, scope, place, quantizable_op_type=None): """ Convert the weights into int8_t type. @@ -1537,7 +1537,7 @@ class ConvertToInt8Pass(object): graph.safe_remove_nodes(all_unused_vars) -class TransformForMobilePass(object): +class TransformForMobilePass: def __init__(self): """ This pass is used to convert the frozen graph for paddle-mobile execution. @@ -1579,7 +1579,7 @@ class TransformForMobilePass(object): return graph -class OutScaleForTrainingPass(object): +class OutScaleForTrainingPass: def __init__( self, scope=None, @@ -1745,7 +1745,7 @@ class OutScaleForTrainingPass(object): return "%s@scale" % (var_name) -class OutScaleForInferencePass(object): +class OutScaleForInferencePass: def __init__(self, scope=None): """ This pass is used for setting output scales of some operators. @@ -1815,7 +1815,7 @@ class OutScaleForInferencePass(object): return "%s@scale" % (var_name) -class AddQuantDequantPass(object): +class AddQuantDequantPass: """ Quantize the ops that do not have weights, and add quant_dequant op for the quantized ops's inputs. @@ -2087,7 +2087,7 @@ class AddQuantDequantPass(object): return quant_var_node, scale_out_node -class InsertQuantizeLinear(object): +class InsertQuantizeLinear: """ Insert quantize_linear and dequantize_linear op before ops. @@ -2664,7 +2664,7 @@ class QuantizationTransformPassV2(QuantizationTransformPass): return graph -class AddQuantDequantPassV2(object): +class AddQuantDequantPassV2: """ Quantize the ops that do not have weights, and add quant_linear and dequant_linear op for the quantized ops's inputs. It is used in the new format of quantization. @@ -2850,7 +2850,7 @@ class AddQuantDequantPassV2(object): return graph -class ReplaceFakeQuantDequantPass(object): +class ReplaceFakeQuantDequantPass: """ replace quant-dequant ops with quantize_linear and dequantize_linear ops. """ @@ -2987,7 +2987,7 @@ class ReplaceFakeQuantDequantPass(object): return "%s@zero_point" % (var_name) -class QuantWeightPass(object): +class QuantWeightPass: """ quant weights and remove weights input quantize_linear node. for example: `weight -> quant -> dequant -> conv2d` will be frozen into `weight -> dequant -> conv2d`, @@ -3129,7 +3129,7 @@ class QuantWeightPass(object): tensor.set(array, self._place) -class AddQuantDequantForInferencePass(object): +class AddQuantDequantForInferencePass: """ When export quant model, it will traverse to find the output of each op, and then insert the quant/dequant op after it. 
""" diff --git a/python/paddle/fluid/contrib/slim/quantization/quantize_transpiler_v2.py b/python/paddle/fluid/contrib/slim/quantization/quantize_transpiler_v2.py index dbc6277a3b..64bb1a6c45 100644 --- a/python/paddle/fluid/contrib/slim/quantization/quantize_transpiler_v2.py +++ b/python/paddle/fluid/contrib/slim/quantization/quantize_transpiler_v2.py @@ -49,7 +49,7 @@ def load_variable_data(scope, var_name): return np.array(var_node.get_tensor()) -class QuantizeTranspilerV2(object): +class QuantizeTranspilerV2: def __init__( self, weight_bits=8, diff --git a/python/paddle/fluid/contrib/slim/quantization/utils.py b/python/paddle/fluid/contrib/slim/quantization/utils.py index 9862772c64..5f5fc99b44 100644 --- a/python/paddle/fluid/contrib/slim/quantization/utils.py +++ b/python/paddle/fluid/contrib/slim/quantization/utils.py @@ -472,7 +472,7 @@ def l2_loss(gt, pred): return ((gt - pred) ** 2).mean() -class tqdm(object): +class tqdm: def __init__(self, total, bar_format='Loading|{bar}', ncols=80): self.total = total self.bar_format = bar_format diff --git a/python/paddle/fluid/contrib/sparsity/asp.py b/python/paddle/fluid/contrib/sparsity/asp.py index 11fc3ca259..d2165def6f 100644 --- a/python/paddle/fluid/contrib/sparsity/asp.py +++ b/python/paddle/fluid/contrib/sparsity/asp.py @@ -478,7 +478,7 @@ def prune_model(model, n=2, m=4, mask_algo='mask_1d', with_mask=True): ) -class ProgramASPInfo(object): +class ProgramASPInfo: r""" ProgramASPInfo is a container to keep ASP relevant information of Pragrom. It contains three inner-variables: 1. __mask_vars (Dictionary): Key is parameter's name and vaule is its corresponding sparse mask Variable object, which is created by `ASPHelper.create_mask_variables`. @@ -516,7 +516,7 @@ class ProgramASPInfo(object): return self.__excluded_layers -class ASPHelper(object): +class ASPHelper: r""" ASPHelper is a collection of Auto SParsity (ASP) functions to enable @@ -917,7 +917,7 @@ class ASPHelper(object): ) -class OptimizerWithSparsityGuarantee(object): +class OptimizerWithSparsityGuarantee: r""" OptimizerWithSparsityGuarantee is a wrapper to decorate `minimize` function of given optimizer by `_minimize` of ASPHelper. 
The decorated `minimize` function would do three things (exactly same as `ASPHelper._minimize`): diff --git a/python/paddle/fluid/data_feed_desc.py b/python/paddle/fluid/data_feed_desc.py index 7a58c5ef5e..9e6257d96e 100644 --- a/python/paddle/fluid/data_feed_desc.py +++ b/python/paddle/fluid/data_feed_desc.py @@ -18,7 +18,7 @@ from google.protobuf import text_format __all__ = ['DataFeedDesc'] -class DataFeedDesc(object): +class DataFeedDesc: """ :api_attr: Static Graph diff --git a/python/paddle/fluid/data_feeder.py b/python/paddle/fluid/data_feeder.py index e84d41e562..1f900eff0d 100644 --- a/python/paddle/fluid/data_feeder.py +++ b/python/paddle/fluid/data_feeder.py @@ -221,7 +221,7 @@ def check_shape( check_dtype(shape.dtype, 'shape', expected_tensor_dtype, op_name) -class DataToLoDTensorConverter(object): +class DataToLoDTensorConverter: def __init__(self, place, lod_level, shape, dtype): self.place = place self.lod_level = lod_level @@ -280,7 +280,7 @@ class DataToLoDTensorConverter(object): return t -class BatchedTensorProvider(object): +class BatchedTensorProvider: def __init__(self, feed_list, place, batch_size, generator, drop_last): self.place = place self.batch_size = batch_size @@ -319,7 +319,7 @@ class BatchedTensorProvider(object): [c._reset() for c in self.converters] -class DataFeeder(object): +class DataFeeder: """ :api_attr: Static Graph diff --git a/python/paddle/fluid/dataloader/batch_sampler.py b/python/paddle/fluid/dataloader/batch_sampler.py index 624754ae28..5ac1c79d0c 100644 --- a/python/paddle/fluid/dataloader/batch_sampler.py +++ b/python/paddle/fluid/dataloader/batch_sampler.py @@ -159,7 +159,7 @@ class BatchSampler(Sampler): return num_samples // self.batch_size -class _InfiniteIterableSampler(object): +class _InfiniteIterableSampler: def __init__(self, dataset, batch_size=1): assert isinstance( dataset, IterableDataset diff --git a/python/paddle/fluid/dataloader/dataloader_iter.py b/python/paddle/fluid/dataloader/dataloader_iter.py index 06464d528d..c89c3e079f 100644 --- a/python/paddle/fluid/dataloader/dataloader_iter.py +++ b/python/paddle/fluid/dataloader/dataloader_iter.py @@ -90,7 +90,7 @@ def _clear_loader(): CleanupFuncRegistrar.register(_clear_loader) -class _DataLoaderIterBase(object): +class _DataLoaderIterBase: """ Iterator implement of DataLoader, will load and feed mini-batch data by setting in given dataloader. diff --git a/python/paddle/fluid/dataloader/dataset.py b/python/paddle/fluid/dataloader/dataset.py index 5fe52196cd..04e03ec844 100755 --- a/python/paddle/fluid/dataloader/dataset.py +++ b/python/paddle/fluid/dataloader/dataset.py @@ -26,7 +26,7 @@ __all__ = [ ] -class Dataset(object): +class Dataset: """ An abstract class to encapsulate methods and behaviors of datasets. 
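Review note: every hunk in this series is the same one-line rewrite, so the fact worth verifying once is that the two spellings are equivalent on Python 3, where every class is new-style and `object` is always the implicit root base (on Python 2 the bare form produced an old-style class, which is why the explicit base existed at all). A minimal check, with illustrative names that are not from the patch:

    class WithObject(object):
        pass

    class Bare:
        pass

    # Same MRO, same subclass relationship, same default metaclass.
    assert WithObject.__mro__ == (WithObject, object)
    assert Bare.__mro__ == (Bare, object)
    assert issubclass(Bare, object)
    assert type(Bare) is type(WithObject) is type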
diff --git a/python/paddle/fluid/dataloader/fetcher.py b/python/paddle/fluid/dataloader/fetcher.py index 8d5a908729..b097a315c0 100644 --- a/python/paddle/fluid/dataloader/fetcher.py +++ b/python/paddle/fluid/dataloader/fetcher.py @@ -19,7 +19,7 @@ from collections.abc import Sequence, Mapping _WARNING_TO_LOG = True -class _DatasetFetcher(object): +class _DatasetFetcher: def __init__(self, dataset, auto_collate_batch, collate_fn, drop_last): self.dataset = dataset self.auto_collate_batch = auto_collate_batch diff --git a/python/paddle/fluid/dataloader/sampler.py b/python/paddle/fluid/dataloader/sampler.py index 3626ed63e5..afd8fa7da0 100644 --- a/python/paddle/fluid/dataloader/sampler.py +++ b/python/paddle/fluid/dataloader/sampler.py @@ -23,7 +23,7 @@ __all__ = [ ] -class Sampler(object): +class Sampler: """ An abstract class to encapsulate methods and behaviors of samplers. diff --git a/python/paddle/fluid/dataloader/worker.py b/python/paddle/fluid/dataloader/worker.py index 6016c04054..155208791e 100644 --- a/python/paddle/fluid/dataloader/worker.py +++ b/python/paddle/fluid/dataloader/worker.py @@ -34,16 +34,16 @@ import queue __all__ = ['get_worker_info'] -class _IterableDatasetStopIteration(object): +class _IterableDatasetStopIteration: def __init__(self, worker_id): self.worker_id = worker_id -class _ResumeIteration(object): +class _ResumeIteration: pass -class _DatasetKind(object): +class _DatasetKind: MAP = 0 ITER = 1 @@ -63,7 +63,7 @@ class _DatasetKind(object): raise NotImplementedError("unknown Dataset kind {}".format(kind)) -class ParentWatchDog(object): +class ParentWatchDog: def __init__(self): self._parent_pid = os.getppid() self._parent_alive = True @@ -145,7 +145,7 @@ def get_worker_info(): return _worker_info -class WorkerInfo(object): +class WorkerInfo: __initialized = False def __init__(self, **kwargs): @@ -163,7 +163,7 @@ class WorkerInfo(object): return super().__setattr__(key, val) -class _WorkerException(object): +class _WorkerException: def __init__(self, worker_id, exc_info=None): self.worker_id = worker_id exc_info = exc_info or sys.exc_info() diff --git a/python/paddle/fluid/dataset.py b/python/paddle/fluid/dataset.py index 330ab0635d..b21550bcc3 100644 --- a/python/paddle/fluid/dataset.py +++ b/python/paddle/fluid/dataset.py @@ -21,7 +21,7 @@ from ..utils import deprecated __all__ = ['DatasetFactory', 'InMemoryDataset', 'QueueDataset'] -class DatasetFactory(object): +class DatasetFactory: """ DatasetFactory is a factory which create dataset by its name, you can create "QueueDataset" or "InMemoryDataset", or "FileInstantDataset", @@ -64,7 +64,7 @@ class DatasetFactory(object): ) -class DatasetBase(object): +class DatasetBase: """Base dataset class.""" def __init__(self): diff --git a/python/paddle/fluid/device_worker.py b/python/paddle/fluid/device_worker.py index 3cf41b3597..6b3359ac61 100644 --- a/python/paddle/fluid/device_worker.py +++ b/python/paddle/fluid/device_worker.py @@ -23,7 +23,7 @@ __all__ = [ ] -class DeviceWorker(object): +class DeviceWorker: """ DeviceWorker is an abstract class, which generates worker desc. 
This class is an inner class that we do computation logics within @@ -695,7 +695,7 @@ class HeterSection(DeviceWorker): ) -class DeviceWorkerFactory(object): +class DeviceWorkerFactory: def _create_device_worker(self, worker_type): classname = worker_type.capitalize() return globals()[classname]() diff --git a/python/paddle/fluid/distributed/downpour.py b/python/paddle/fluid/distributed/downpour.py index c710b7337c..1f7e146c3f 100644 --- a/python/paddle/fluid/distributed/downpour.py +++ b/python/paddle/fluid/distributed/downpour.py @@ -25,7 +25,7 @@ from paddle.fluid.distribute_lookup_table import ( from google.protobuf import text_format -class DownpourSGD(object): +class DownpourSGD: r""" Distributed optimizer of downpour stochastic gradient descent Standard implementation of Google's Downpour SGD diff --git a/python/paddle/fluid/distributed/fleet.py b/python/paddle/fluid/distributed/fleet.py index 08b46ce047..552dc91598 100644 --- a/python/paddle/fluid/distributed/fleet.py +++ b/python/paddle/fluid/distributed/fleet.py @@ -18,7 +18,7 @@ from google.protobuf import text_format __all__ = ['Fleet'] -class Fleet(object): +class Fleet: """ """ def __init__(self): diff --git a/python/paddle/fluid/distributed/helper.py b/python/paddle/fluid/distributed/helper.py index 4a16437333..93cec48408 100644 --- a/python/paddle/fluid/distributed/helper.py +++ b/python/paddle/fluid/distributed/helper.py @@ -13,7 +13,7 @@ # limitations under the License. -class FileSystem(object): +class FileSystem: """ A file system that support hadoop client desc. @@ -54,7 +54,7 @@ class FileSystem(object): return self.fs_client -class MPIHelper(object): +class MPIHelper: """ MPIHelper is a wrapper of mpi4py, support get_rank get_size etc. Args: diff --git a/python/paddle/fluid/distributed/node.py b/python/paddle/fluid/distributed/node.py index 793787d0fd..19fa5ec20b 100644 --- a/python/paddle/fluid/distributed/node.py +++ b/python/paddle/fluid/distributed/node.py @@ -17,7 +17,7 @@ import ps_pb2 as pslib from functools import reduce -class Server(object): +class Server: """ A Server basic class. """ @@ -26,7 +26,7 @@ class Server(object): pass -class Worker(object): +class Worker: """ A Worker basic class. 
""" diff --git a/python/paddle/fluid/distributed/ps_instance.py b/python/paddle/fluid/distributed/ps_instance.py index 370e1b1964..91388b97b0 100644 --- a/python/paddle/fluid/distributed/ps_instance.py +++ b/python/paddle/fluid/distributed/ps_instance.py @@ -14,7 +14,7 @@ from .helper import MPIHelper -class PaddlePSInstance(object): +class PaddlePSInstance: """ PaddlePSInstance class is used to generate A instance of server or worker Args: diff --git a/python/paddle/fluid/dygraph/amp/auto_cast.py b/python/paddle/fluid/dygraph/amp/auto_cast.py index bb1acc7c09..721d10d76b 100644 --- a/python/paddle/fluid/dygraph/amp/auto_cast.py +++ b/python/paddle/fluid/dygraph/amp/auto_cast.py @@ -463,7 +463,7 @@ def amp_guard( tracer._amp_dtype = original_amp_dtype -class StateDictHook(object): +class StateDictHook: def __init__(self, save_dtype): self._save_dtype = save_dtype diff --git a/python/paddle/fluid/dygraph/amp/loss_scaler.py b/python/paddle/fluid/dygraph/amp/loss_scaler.py index c59588e9d0..0985237f51 100644 --- a/python/paddle/fluid/dygraph/amp/loss_scaler.py +++ b/python/paddle/fluid/dygraph/amp/loss_scaler.py @@ -41,7 +41,7 @@ def _refresh_optimizer_state(): return {"state": OptimizerState.INIT} -class AmpScaler(object): +class AmpScaler: """ :api_attr: imperative diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/base_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/base_transformer.py index 044f9055e7..5f082acca5 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/base_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/base_transformer.py @@ -200,7 +200,7 @@ class ForLoopTuplePreTransformer(BaseTransformer): return [assign_node] -class ForNodeVisitor(object): +class ForNodeVisitor: """ This class parses python for statement, get transformed 3 statement components of for node three key statements: diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/convert_call_func.py b/python/paddle/fluid/dygraph/dygraph_to_static/convert_call_func.py index 2ac1d6403c..72b6cc9883 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/convert_call_func.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/convert_call_func.py @@ -60,7 +60,7 @@ translator_logger = TranslatorLogger() CONVERSION_OPTIONS = "An attribute for a function that indicates conversion flags of the function in dynamic-to-static." -class ConversionOptions(object): +class ConversionOptions: """ A container for conversion flags of a function in dynamic-to-static. diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/error.py b/python/paddle/fluid/dygraph/dygraph_to_static/error.py index 7e05b5db89..f4a6610080 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/error.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/error.py @@ -153,7 +153,7 @@ class TraceBackFrameRange(OriginInfo): return msg + '\n'.join(self.source_code) -class SuggestionDict(object): +class SuggestionDict: def __init__(self): # {(keywords): (suggestions)} self.suggestion_dict = { @@ -174,7 +174,7 @@ class Dy2StKeyError(Exception): pass -class ErrorData(object): +class ErrorData: """ Error data attached to an exception which is raised in un-transformed code. 
""" diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/function_spec.py b/python/paddle/fluid/dygraph/dygraph_to_static/function_spec.py index 039df7e84e..cc77e05dad 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/function_spec.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/function_spec.py @@ -30,7 +30,7 @@ from paddle.fluid.dygraph.dygraph_to_static.utils import func_to_source_code from paddle.fluid.dygraph.io import TranslatedLayer -class FunctionSpec(object): +class FunctionSpec: """ Wrapper class for a function for class method. """ diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/logging_utils.py b/python/paddle/fluid/dygraph/dygraph_to_static/logging_utils.py index 6f73a23316..7b004964a4 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/logging_utils.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/logging_utils.py @@ -36,7 +36,7 @@ def synchronized(func): return wrapper -class TranslatorLogger(object): +class TranslatorLogger: """ class for Logging and debugging during the tranformation from dygraph to static graph. The object of this class is a singleton. diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/origin_info.py b/python/paddle/fluid/dygraph/dygraph_to_static/origin_info.py index 4bfc73a325..d6ff463a70 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/origin_info.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/origin_info.py @@ -27,7 +27,7 @@ except: from collections import Sequence -class Location(object): +class Location: """ Location information of source code. """ @@ -53,7 +53,7 @@ class Location(object): return (self.filepath, self.lineno) -class OriginInfo(object): +class OriginInfo: """ Original information of source code. """ diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py b/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py index 6eb38302a5..bc371cc99c 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py @@ -45,7 +45,7 @@ from paddle.fluid.dygraph.amp.auto_cast import ( from paddle import _legacy_C_ops -class NestSequence(object): +class NestSequence: """ A wrapper class that easily to flatten and restore the nest structure of given sequence. @@ -108,7 +108,7 @@ class NestSequence(object): return self.__input_list[item] -class LazyInitialized(object): +class LazyInitialized: """ Descriptor to implement lazy initialization of property. """ diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py b/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py index be7dc08403..4432425631 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py @@ -68,7 +68,7 @@ __all__ = ['ProgramTranslator', 'convert_to_static'] MAX_TRACED_PROGRAM_COUNT = 10 -class FunctionCache(object): +class FunctionCache: """ Caches the transformed functions to avoid redundant conversions of the same function. """ @@ -158,7 +158,7 @@ def convert_to_static(function): return static_func -class CacheKey(object): +class CacheKey: """ Cached key for ProgramCache. """ @@ -280,7 +280,7 @@ def unwrap_decorators(func): return decorators, cur -class StaticFunction(object): +class StaticFunction: """ Wrapper class to Manage program conversion of decorated function. 
@@ -847,7 +847,7 @@ def _verify_init_in_dynamic_mode(class_instance): ) -class HookHelper(object): +class HookHelper: """ Only For converting pre/post hooks operation in outermost layer while jit.save. Because hooks in sublayer have been processed automatically. @@ -901,7 +901,7 @@ class HookHelper(object): return outputs -class ConcreteProgram(object): +class ConcreteProgram: __slots__ = [ 'inputs', @@ -1037,7 +1037,7 @@ def _extract_indeed_params_buffers(class_instance): return params + buffers -class ProgramCache(object): +class ProgramCache: """ Wrapper class for the program functions defined by dygraph function. """ @@ -1119,7 +1119,7 @@ def synchronized(func): return lock_func -class ProgramTranslator(object): +class ProgramTranslator: """ Class to translate dygraph function into static graph function. The object of this class is a singleton. diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/static_analysis.py b/python/paddle/fluid/dygraph/dygraph_to_static/static_analysis.py index bf07523e12..5b6c3d1261 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/static_analysis.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/static_analysis.py @@ -25,7 +25,7 @@ from .utils import ( __all__ = ['AstNodeWrapper', 'NodeVarType', 'StaticAnalysisVisitor'] -class NodeVarType(object): +class NodeVarType: """ Enum class of python variable types. We have to know some variable types during compile time to transfer AST. For example, a string variable and a @@ -112,7 +112,7 @@ class NodeVarType(object): return NodeVarType.UNKNOWN -class AstNodeWrapper(object): +class AstNodeWrapper: """ Wrapper for python gast.node. We need a node wrapper because gast.node doesn't store all required information when we are transforming AST. @@ -127,7 +127,7 @@ class AstNodeWrapper(object): self.node_var_type = {NodeVarType.UNKNOWN} -class AstVarScope(object): +class AstVarScope: """ AstVarScope is a class holding the map from current scope variable to its type. @@ -181,7 +181,7 @@ class AstVarScope(object): return self.parent_scope.get_var_type(var_name) -class AstVarEnv(object): +class AstVarEnv: """ A class maintains scopes and mapping from name strings to type. """ @@ -231,7 +231,7 @@ class AstVarEnv(object): return cur_scope_dict -class StaticAnalysisVisitor(object): +class StaticAnalysisVisitor: """ A class that does static analysis """ diff --git a/python/paddle/fluid/dygraph/io.py b/python/paddle/fluid/dygraph/io.py index 383fb1eff5..eca171cacd 100644 --- a/python/paddle/fluid/dygraph/io.py +++ b/python/paddle/fluid/dygraph/io.py @@ -325,7 +325,7 @@ def _change_is_test_status(program_desc, is_test): op._set_attr('is_test', is_test) -class _ProgramHolder(object): +class _ProgramHolder: """ Holds the execution information of a Program. 
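Note on the ConcreteProgram hunk above: it is one of the few touched classes that also declares `__slots__`. Dropping `(object)` keeps `__slots__` fully effective on Python 3; it was only Python 2 old-style classes that silently ignored it, which was one historical reason for spelling the base out. A small sketch with hypothetical names:

    class Compact:
        __slots__ = ("inputs", "outputs")

    c = Compact()
    c.inputs = [1, 2]
    # With __slots__ and no dict-bearing base there is no per-instance
    # __dict__, so unknown attributes are rejected.
    assert not hasattr(c, "__dict__")
    try:
        c.extra = True
    except AttributeError:
        pass
    else:
        raise AssertionError("__slots__ not enforced")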
diff --git a/python/paddle/fluid/dygraph/jit.py b/python/paddle/fluid/dygraph/jit.py index 9936b4a23a..c359c6d152 100644 --- a/python/paddle/fluid/dygraph/jit.py +++ b/python/paddle/fluid/dygraph/jit.py @@ -334,7 +334,7 @@ def not_to_static(func=None): return func -class _SaveLoadConfig(object): +class _SaveLoadConfig: def __init__(self): self._output_spec = None self._model_filename = None @@ -621,7 +621,7 @@ _save_pre_hooks_lock = threading.Lock() _save_pre_hooks = [] -class HookRemoveHelper(object): +class HookRemoveHelper: """A HookRemoveHelper that can be used to remove hook.""" def __init__(self, hook): @@ -1496,7 +1496,7 @@ def _trace( return original_outputs, program, feed_names, fetch_names, parameters -class TracedLayer(object): +class TracedLayer: """ :api_attr: imperative diff --git a/python/paddle/fluid/dygraph/layers.py b/python/paddle/fluid/dygraph/layers.py index 5187f9ae72..752694b614 100644 --- a/python/paddle/fluid/dygraph/layers.py +++ b/python/paddle/fluid/dygraph/layers.py @@ -89,7 +89,7 @@ def _addindent(string, indent): return s1[0] + '\n' + '\n'.join(s2) -class HookRemoveHelper(object): +class HookRemoveHelper: """A HookRemoveHelper that can be used to remove hook.""" next_hook_id = 0 @@ -105,7 +105,7 @@ class HookRemoveHelper(object): del hooks[self._hook_id] -class Layer(object): +class Layer: """ Dynamic graph Layer based on OOD, includes the parameters of the layer, the structure of the forward graph and so on. diff --git a/python/paddle/fluid/dygraph/learning_rate_scheduler.py b/python/paddle/fluid/dygraph/learning_rate_scheduler.py index 3ca63f505c..3afe92cbc6 100644 --- a/python/paddle/fluid/dygraph/learning_rate_scheduler.py +++ b/python/paddle/fluid/dygraph/learning_rate_scheduler.py @@ -35,7 +35,7 @@ __all__ = [ ] -class LearningRateDecay(object): +class LearningRateDecay: """ Base class of learning rate decay diff --git a/python/paddle/fluid/dygraph/parallel.py b/python/paddle/fluid/dygraph/parallel.py index a317cf676e..85c95c6b2b 100644 --- a/python/paddle/fluid/dygraph/parallel.py +++ b/python/paddle/fluid/dygraph/parallel.py @@ -82,7 +82,7 @@ def prepare_context(strategy=None): return strategy -class ParallelEnv(object): +class ParallelEnv: """ .. note:: This API is not recommended, if you need to get rank and world_size, diff --git a/python/paddle/fluid/dygraph/static_runner.py b/python/paddle/fluid/dygraph/static_runner.py index afa2091caf..c0a61fb3c2 100644 --- a/python/paddle/fluid/dygraph/static_runner.py +++ b/python/paddle/fluid/dygraph/static_runner.py @@ -18,7 +18,7 @@ from paddle.fluid.dygraph.io import TranslatedLayer # NOTE: This class will be deprecated later. # It is kept here because PaddleHub is already using this API. -class StaticModelRunner(object): +class StaticModelRunner: """ A Dynamic graph Layer for loading inference program and related parameters, and then performing fine-tune training or inference. diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py index 51fb34a24e..7c7aa964cf 100644 --- a/python/paddle/fluid/dygraph/varbase_patch_methods.py +++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py @@ -44,7 +44,7 @@ from paddle import _C_ops, _legacy_C_ops _grad_scalar = None -class TensorHookRemoveHelper(object): +class TensorHookRemoveHelper: """ A helper class that for removing Tensor gradient's hook. NOTE(wuweilong):the operation weakref.ref(tensor) will cause some unexpected errors in eager mode. 
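Note: several of the rewritten classes (KVServer and ClipGradBase earlier in the patch, among others) call zero-argument `super().__init__()`. That keeps working after this change: on Python 3, zero-arg `super()` is resolved through the compiler-injected `__class__` cell in any class body and does not depend on how the base list is spelled. A sketch with stand-in names:

    class Base:
        def __init__(self):
            self.ready = True

    class Child(Base):
        def __init__(self):
            # Resolved via the __class__ cell, with or without an
            # explicit (object) in either class's base list.
            super().__init__()

    assert Child().ready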
diff --git a/python/paddle/fluid/entry_attr.py b/python/paddle/fluid/entry_attr.py index a86f81e6dd..29ab37a3f3 100644 --- a/python/paddle/fluid/entry_attr.py +++ b/python/paddle/fluid/entry_attr.py @@ -15,7 +15,7 @@ __all__ = ['ProbabilityEntry', 'CountFilterEntry'] -class EntryAttr(object): +class EntryAttr: """ Examples: .. code-block:: python diff --git a/python/paddle/fluid/evaluator.py b/python/paddle/fluid/evaluator.py index a9d0c3c059..007337b9d0 100644 --- a/python/paddle/fluid/evaluator.py +++ b/python/paddle/fluid/evaluator.py @@ -41,7 +41,7 @@ def _clone_var_(block, var): ) -class Evaluator(object): +class Evaluator: """ Warning: better to use the fluid.metrics.* things, more flexible support via pure Python and Operator, and decoupled diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py index 2dbf2d5ced..41440cfdb0 100755 --- a/python/paddle/fluid/executor.py +++ b/python/paddle/fluid/executor.py @@ -620,7 +620,7 @@ def _as_lodtensor(data, place, dtype=None): return tensor -class FetchHandler(object): +class FetchHandler: def __init__(self, var_dict=None, period_secs=60): assert var_dict is not None self.var_dict = var_dict @@ -648,7 +648,7 @@ handler = FetchHandlerExample(var_dict=var_dict) ) -class _StandaloneExecutor(object): +class _StandaloneExecutor: def __init__(self, place, main_program, scope): self._place = core.Place() self._place.set_place(place) @@ -736,8 +736,8 @@ class _StandaloneExecutor(object): return res -class _ExecutorCache(object): - class _CachedData(object): +class _ExecutorCache: + class _CachedData: def __init__( self, program, @@ -908,7 +908,7 @@ class _ExecutorCache(object): return new_program, new_exe -class Executor(object): +class Executor: """ :api_attr: Static Graph diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index e68286844e..9f30a4e08a 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -1077,7 +1077,7 @@ def mlu_places(device_ids=None): return [core.MLUPlace(dev_id) for dev_id in device_ids] -class NameScope(object): +class NameScope: def __init__(self, name="", parent=None): self._children = dict() self._name = name @@ -2655,7 +2655,7 @@ def get_all_op_protos(): return ret_values -class OpProtoHolder(object): +class OpProtoHolder: """ A global variable to hold all OpProtos from C++ as a map """ @@ -2709,7 +2709,7 @@ class OpProtoHolder(object): } -class Operator(object): +class Operator: """ In Fluid, all the operation are represented by Operator, and Operator is regarded as a build in an instruction of a Block. Users can use the @@ -3550,7 +3550,7 @@ class Operator(object): self.desc.dist_attr = dist_attr -class Block(object): +class Block: """ In Fluid, a Program is consistence of multi-Block, and Block stores VarDesc and OpDesc. In a specific Block, a VarDesc have a unique name. @@ -4342,7 +4342,7 @@ def _apply_pass( return attrs -class IrNode(object): +class IrNode: """ Python IrNode. Beneath it is a core.Node, which is used for Ir Pass. """ @@ -4783,7 +4783,7 @@ class IrOpNode(IrNode): return [IrVarNode(n) for n in self.node.outputs] -class IrGraph(object): +class IrGraph: """ Python IrGraph. Beneath it is a core.Graph, which is used for creating a c++ Ir Pass Graph. An IrGraph is just a graph view of @@ -5218,7 +5218,7 @@ class IrGraph(object): desc._set_attr(name, val) -class Program(object): +class Program: """ Create Python Program. 
It has at least one :ref:`api_guide_Block_en`, when the control flow op like conditional_block, while :ref:`api_paddle_fluid_layers_While` is included, diff --git a/python/paddle/fluid/graphviz.py b/python/paddle/fluid/graphviz.py index b7ef1c7531..0f9bc17edf 100644 --- a/python/paddle/fluid/graphviz.py +++ b/python/paddle/fluid/graphviz.py @@ -25,7 +25,7 @@ def crepr(v): return str(v) -class Rank(object): +class Rank: def __init__(self, kind, name, priority): ''' kind: str @@ -49,7 +49,7 @@ class Rank(object): ) -class Graph(object): +class Graph: rank_counter = 0 def __init__(self, title, **attrs): @@ -145,7 +145,7 @@ class Graph(object): return '\n'.join(reprs) -class Node(object): +class Node: counter = 1 def __init__(self, label, prefix, description="", **attrs): @@ -170,7 +170,7 @@ class Node(object): return reprs -class Edge(object): +class Edge: def __init__(self, source, target, **attrs): ''' Link source to target. @@ -199,7 +199,7 @@ class Edge(object): return repr -class GraphPreviewGenerator(object): +class GraphPreviewGenerator: ''' Generate a graph image for ONNX proto. ''' diff --git a/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py b/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py index 5fa0ed085b..33c95f03b1 100644 --- a/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py +++ b/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py @@ -69,7 +69,7 @@ def _thread_checker(): ), "auto checkpoint must run under main thread" -class AutoCheckpointChecker(object): +class AutoCheckpointChecker: def __init__(self): self._run_env = None self._platform = None diff --git a/python/paddle/fluid/incubate/checkpoint/checkpoint_saver.py b/python/paddle/fluid/incubate/checkpoint/checkpoint_saver.py index 21e305afc1..79161fe0fa 100644 --- a/python/paddle/fluid/incubate/checkpoint/checkpoint_saver.py +++ b/python/paddle/fluid/incubate/checkpoint/checkpoint_saver.py @@ -15,7 +15,7 @@ from ...compiler import CompiledProgram -class SerializableBase(object): +class SerializableBase: def serialize(self, path): raise NotImplementedError @@ -54,7 +54,7 @@ class PaddleModel(SerializableBase): ) -class CheckpointSaver(object): +class CheckpointSaver: def __init__(self, fs): self._fs = fs self._checkpoint_prefix = "__paddle_checkpoint__" diff --git a/python/paddle/fluid/incubate/data_generator/__init__.py b/python/paddle/fluid/incubate/data_generator/__init__.py index 4729f44f2b..3e66b75e28 100644 --- a/python/paddle/fluid/incubate/data_generator/__init__.py +++ b/python/paddle/fluid/incubate/data_generator/__init__.py @@ -18,7 +18,7 @@ import sys __all__ = ['MultiSlotDataGenerator', 'MultiSlotStringDataGenerator'] -class DataGenerator(object): +class DataGenerator: """ DataGenerator is a general Base class for user to inherit A user who wants to define his/her own python processing logic diff --git a/python/paddle/fluid/incubate/fleet/base/fleet_base.py b/python/paddle/fluid/incubate/fleet/base/fleet_base.py index ea03e93056..61b7fe7de5 100644 --- a/python/paddle/fluid/incubate/fleet/base/fleet_base.py +++ b/python/paddle/fluid/incubate/fleet/base/fleet_base.py @@ -30,7 +30,7 @@ __all__ = ['Fleet', 'DistributedOptimizer'] __all__ += mode.__all__ -class Fleet(object): +class Fleet: """ Fleet is the base class, transpiler and pslib are implementation of Fleet. 
@@ -268,7 +268,7 @@ class Fleet(object): pass -class DistributedOptimizer(object): +class DistributedOptimizer: """ DistributedOptimizer is a wrapper for paddle.fluid.optimizer A user should pass a paddle.fluid.optimizer to DistributedOptimizer diff --git a/python/paddle/fluid/incubate/fleet/base/role_maker.py b/python/paddle/fluid/incubate/fleet/base/role_maker.py index d0e27dc8bd..3f639d7753 100644 --- a/python/paddle/fluid/incubate/fleet/base/role_maker.py +++ b/python/paddle/fluid/incubate/fleet/base/role_maker.py @@ -35,7 +35,7 @@ class Role: XPU = 3 -class MockBarrier(object): +class MockBarrier: """ MockBarrier is a empty impletation for barrier mock as a real barrier for never-barrier in a specific scenario @@ -70,7 +70,7 @@ class MockBarrier(object): return [obj] -class RoleMakerBase(object): +class RoleMakerBase: """ RoleMakerBase is a base class for assigning a role to current process in distributed training. diff --git a/python/paddle/fluid/incubate/fleet/collective/__init__.py b/python/paddle/fluid/incubate/fleet/collective/__init__.py index 51900ccbd9..949ef93a47 100644 --- a/python/paddle/fluid/incubate/fleet/collective/__init__.py +++ b/python/paddle/fluid/incubate/fleet/collective/__init__.py @@ -40,12 +40,12 @@ import re import shutil -class LambConfig(object): +class LambConfig: def __init__(self): pass -class DistFCConfig(object): +class DistFCConfig: def __init__(self): pass diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/distributed_strategy.py b/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/distributed_strategy.py index d2d32d4529..0958dafd3c 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/distributed_strategy.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/distributed_strategy.py @@ -31,7 +31,7 @@ from paddle.fluid.transpiler.distribute_transpiler import ( from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode -class TrainerRuntimeConfig(object): +class TrainerRuntimeConfig: def __init__(self): self.mode = None num_threads = os.getenv("CPU_NUM", "1") @@ -160,7 +160,7 @@ class TrainerRuntimeConfig(object): return self.display(self.get_communicator_flags()) -class PSLibRuntimeConfig(object): +class PSLibRuntimeConfig: def __init__(self): self.runtime_configs = {} @@ -168,7 +168,7 @@ class PSLibRuntimeConfig(object): return self.runtime_configs -class DistributedStrategy(object): +class DistributedStrategy: def __init__(self): self._program_config = DistributeTranspilerConfig() self._trainer_runtime_config = TrainerRuntimeConfig() @@ -456,7 +456,7 @@ class GeoStrategy(DistributedStrategy): self._build_strategy.async_mode = True -class StrategyFactory(object): +class StrategyFactory: def __init_(self): pass diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/ir/ps_dispatcher.py b/python/paddle/fluid/incubate/fleet/parameter_server/ir/ps_dispatcher.py index 441246879d..4155413cb5 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/ir/ps_dispatcher.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/ir/ps_dispatcher.py @@ -13,7 +13,7 @@ # limitations under the License. -class PSDispatcher(object): +class PSDispatcher: """ PSDispatcher is the base class for dispatching vars into different pserver instance. 
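Note: with this many files touched, it is safer to re-check the tree mechanically than by eye. The U004 tag in the title appears to reference a pyupgrade-style lint rule for redundant `object` inheritance; as a rough stand-alone sweep (a hypothetical helper, with a deliberately simple regex that misses multi-base and multi-line class headers), something like the following could be run from the repository root:

    import re
    from pathlib import Path

    PATTERN = re.compile(r"^\s*class\s+\w+\(\s*object\s*\)\s*:", re.MULTILINE)

    def find_redundant_object_bases(root="python"):
        # Print path:line for every remaining `class X(object):`.
        for path in Path(root).rglob("*.py"):
            text = path.read_text(encoding="utf-8", errors="ignore")
            for m in PATTERN.finditer(text):
                line = text.count("\n", 0, m.start()) + 1
                print(f"{path}:{line}: {m.group().strip()}")

    find_redundant_object_bases()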
diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/ir/public.py b/python/paddle/fluid/incubate/fleet/parameter_server/ir/public.py index 865ffd2e0f..53460cf036 100755 --- a/python/paddle/fluid/incubate/fleet/parameter_server/ir/public.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/ir/public.py @@ -135,7 +135,7 @@ def Singleton(cls): @Singleton -class CompileTimeStrategy(object): +class CompileTimeStrategy: def __init__(self, main_program, startup_program, strategy, role_maker): self.min_block_size = 81920 diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/ir/ufind.py b/python/paddle/fluid/incubate/fleet/parameter_server/ir/ufind.py index fef6f24570..f6a05cbf86 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/ir/ufind.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/ir/ufind.py @@ -13,7 +13,7 @@ # limitations under the License. -class UnionFind(object): +class UnionFind: """Union-find data structure. Union-find is a data structure that keeps track of a set of elements partitioned diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/ir/vars_metatools.py b/python/paddle/fluid/incubate/fleet/parameter_server/ir/vars_metatools.py index 745c05d986..e32b1253d4 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/ir/vars_metatools.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/ir/vars_metatools.py @@ -52,7 +52,7 @@ def create_var_struct(var): ) -class VarStruct(object): +class VarStruct: """ record part properties of a Variable in python. """ @@ -80,7 +80,7 @@ class VarStruct(object): ) -class VarDistributed(object): +class VarDistributed: """ a class to record the var distributed on parameter servers. the class will record the relationship between origin var and slice var. @@ -187,7 +187,7 @@ class VarDistributed(object): ) -class VarsDistributed(object): +class VarsDistributed: """ a gather about VarDistributed with many methods to find distributed vars. through the class, we can get overview about the distributed parameters on parameter servers. diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py index c9942581e0..8e5f39415f 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py @@ -1027,7 +1027,7 @@ def _fleet_embedding_v2( ) -class fleet_embedding(object): +class fleet_embedding: """ fleet embedding class, it is used as a wrapper Example: diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/node.py b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/node.py index d1c9fae5cc..73fcd18bdb 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/node.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/node.py @@ -18,7 +18,7 @@ from . import ps_pb2 as pslib from functools import reduce -class Server(object): +class Server: """ A Server basic class it's a base class, does not have implementation @@ -28,7 +28,7 @@ class Server(object): pass -class Worker(object): +class Worker: """ A Worker basic class. 
it's a base class, does not have implementation diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py index b55431ba0f..db5b90374c 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py @@ -54,7 +54,7 @@ ch.setFormatter(formatter) logger.addHandler(ch) -class DistributedOptimizerImplBase(object): +class DistributedOptimizerImplBase: """ DistributedOptimizerImplBase base class of optimizers diff --git a/python/paddle/fluid/incubate/fleet/utils/fleet_util.py b/python/paddle/fluid/incubate/fleet/utils/fleet_util.py index 8218d94172..8d5203f201 100644 --- a/python/paddle/fluid/incubate/fleet/utils/fleet_util.py +++ b/python/paddle/fluid/incubate/fleet/utils/fleet_util.py @@ -39,7 +39,7 @@ _logger = get_logger( fleet = None -class FleetUtil(object): +class FleetUtil: """ FleetUtil provides some common functions for users' convenience. diff --git a/python/paddle/fluid/incubate/fleet/utils/http_server.py b/python/paddle/fluid/incubate/fleet/utils/http_server.py index 10e15dad0d..79b436ff40 100644 --- a/python/paddle/fluid/incubate/fleet/utils/http_server.py +++ b/python/paddle/fluid/incubate/fleet/utils/http_server.py @@ -126,7 +126,7 @@ class KVHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): self.end_headers() -class KVHTTPServer(HTTPServer, object): +class KVHTTPServer(HTTPServer): """ it is a http server storing kv pairs. """ diff --git a/python/paddle/fluid/initializer.py b/python/paddle/fluid/initializer.py index bb162573e3..b0c573936b 100644 --- a/python/paddle/fluid/initializer.py +++ b/python/paddle/fluid/initializer.py @@ -55,7 +55,7 @@ _global_weight_initializer_ = None _global_bias_initializer_ = None -class Initializer(object): +class Initializer: """Base class for variable initializers Defines the common interface of variable initializers. 
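The KVHTTPServer hunk above shows the same rule applied to a multiple-base list: under Python 2, mixing `object` into the bases forced new-style behavior on old-style stdlib classes, while in Python 3 it is simply redundant. A small check under that assumption (`KVServerSketch` is a hypothetical stand-in, not Paddle's class):

    from http.server import HTTPServer

    # `object` is already at the root of the MRO without being named.
    class KVServerSketch(HTTPServer):
        pass

    assert object in KVServerSketch.__mro__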
diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py index 789c9d0e31..a308d5e261 100644 --- a/python/paddle/fluid/io.py +++ b/python/paddle/fluid/io.py @@ -87,7 +87,7 @@ _logger = get_logger( ) -class _open_buffer(object): +class _open_buffer: def __init__(self, buffer): self.buffer = buffer diff --git a/python/paddle/fluid/ir.py b/python/paddle/fluid/ir.py index 7dbe815b5e..fb077ed8b5 100644 --- a/python/paddle/fluid/ir.py +++ b/python/paddle/fluid/ir.py @@ -140,7 +140,7 @@ def apply_build_strategy( return build_strategy -class RegisterPassHelper(object): +class RegisterPassHelper: _register_helpers = list() def __init__(self, pass_pairs, pass_type=str(), input_specs=dict()): @@ -286,8 +286,8 @@ class RegisterPassHelper(object): return multi_pass_desc.SerializeToString() -class PassDesc(object): - class AttrHelper(object): +class PassDesc: + class AttrHelper: def __init__(self, obj, name, element_index=None): self._obj = obj self._name = name @@ -422,7 +422,7 @@ class PassDesc(object): self._attrs[name] = attr return attr - class OpHelper(object): + class OpHelper: def __init__(self, type=None): self._type = type diff --git a/python/paddle/fluid/layer_helper_base.py b/python/paddle/fluid/layer_helper_base.py index 1ea8d504ad..91ec751cc2 100644 --- a/python/paddle/fluid/layer_helper_base.py +++ b/python/paddle/fluid/layer_helper_base.py @@ -31,7 +31,7 @@ from .initializer import _global_weight_initializer, _global_bias_initializer __all__ = ['LayerHelperBase'] -class LayerHelperBase(object): +class LayerHelperBase: # global dtype __dtype = "float32" diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 13934b76af..91065258a1 100755 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -536,7 +536,7 @@ def Assert(cond, data=None, summarize=20, name=None): return op -class BlockGuard(object): +class BlockGuard: """ BlockGuard class. @@ -584,7 +584,7 @@ class BlockGuardWithCompletion(BlockGuard): return super().__exit__(exc_type, exc_val, exc_tb) -class StaticRNNMemoryLink(object): +class StaticRNNMemoryLink: """ StaticRNNMemoryLink class. 
@@ -607,7 +607,7 @@ class StaticRNNMemoryLink(object): self.mem = mem -class StaticRNN(object): +class StaticRNN: """ :api_attr: Static Graph @@ -1179,7 +1179,7 @@ def get_inputs_outputs_in_block( return inner_inputs, inner_outputs -class While(object): +class While: """ :api_attr: Static Graph @@ -2463,7 +2463,7 @@ class ConditionalBlockGuard(BlockGuard): return super().__exit__(exc_type, exc_val, exc_tb) -class ConditionalBlock(object): +class ConditionalBlock: ''' **ConditionalBlock** @@ -3173,7 +3173,7 @@ def case(pred_fn_pairs, default=None, name=None): return final_fn() -class Switch(object): +class Switch: """ :api_attr: Static Graph @@ -3303,7 +3303,7 @@ class Switch(object): return True -class IfElseBlockGuard(object): +class IfElseBlockGuard: def __init__(self, is_true, ifelse): if not isinstance(ifelse, IfElse): raise TypeError("ifelse must be an instance of IfElse class") @@ -3340,7 +3340,7 @@ class IfElseBlockGuard(object): self.ie.status = IfElse.OUT_IF_ELSE_BLOCKS -class IfElse(object): +class IfElse: """ :api_attr: Static Graph @@ -3530,7 +3530,7 @@ class IfElse(object): return rlist -class DynamicRNN(object): +class DynamicRNN: """ :api_attr: Static Graph diff --git a/python/paddle/fluid/layers/distributions.py b/python/paddle/fluid/layers/distributions.py index 174b2f8e36..bd0b39caf0 100644 --- a/python/paddle/fluid/layers/distributions.py +++ b/python/paddle/fluid/layers/distributions.py @@ -31,7 +31,7 @@ from ..data_feeder import ( __all__ = ['Uniform', 'Normal', 'Categorical', 'MultivariateNormalDiag'] -class Distribution(object): +class Distribution: """ Distribution is the abstract base class for probability distributions. """ diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index e4f765dcc6..1dcc07a20d 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -172,7 +172,7 @@ class BlockGuardServ(BlockGuard): return super().__exit__(exc_type, exc_val, exc_tb) -class ListenAndServ(object): +class ListenAndServ: """ **ListenAndServ Layer** diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 5623413af0..57e8a24e0d 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -14771,7 +14771,7 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"): ) -class PyFuncRegistry(object): +class PyFuncRegistry: _register_funcs = [] def __init__(self, func): diff --git a/python/paddle/fluid/layers/rnn.py b/python/paddle/fluid/layers/rnn.py index 797272510f..5cfc6e37ce 100644 --- a/python/paddle/fluid/layers/rnn.py +++ b/python/paddle/fluid/layers/rnn.py @@ -62,7 +62,7 @@ __all__ = [ ] -class RNNCell(object): +class RNNCell: """ :api_attr: Static Graph @@ -160,7 +160,7 @@ class RNNCell(object): return True return isinstance(seq, Sequence) and not isinstance(seq, str) - class Shape(object): + class Shape: def __init__(self, shape): self.shape = shape if shape[0] == -1 else ([-1] + list(shape)) @@ -544,7 +544,7 @@ def rnn( ) -class ArrayWrapper(object): +class ArrayWrapper: def __init__(self, x): self.array = [x] @@ -823,7 +823,7 @@ def birnn( return outputs, final_states -class Decoder(object): +class Decoder: """ :api_attr: Static Graph @@ -1869,7 +1869,7 @@ def dynamic_decode( ) -class DecodeHelper(object): +class DecodeHelper: """ DecodeHelper is the base class for any helper instance used in `BasicDecoder`. 
It provides interface to implement sampling and produce inputs for the next diff --git a/python/paddle/fluid/lazy_init.py b/python/paddle/fluid/lazy_init.py index d2118259d0..6242ad2c4e 100644 --- a/python/paddle/fluid/lazy_init.py +++ b/python/paddle/fluid/lazy_init.py @@ -17,7 +17,7 @@ from . import framework __all__ = ["LazyGuard"] -class LazyInitHelper(object): +class LazyInitHelper: """ A Helper Context to trigger switching mode between dygraph and static mode, and holds the startup program resource. @@ -88,7 +88,7 @@ def lazy_init_helper(): return _lazy_init_helper -class LazyGuard(object): +class LazyGuard: """ LazyGuard is a wrapper interface for nn.Layer, it forwards the construct process of user defined Layer. Meanwhile, it provides necessary API to diff --git a/python/paddle/fluid/metrics.py b/python/paddle/fluid/metrics.py index 1f5d941dc0..5776b4efdf 100644 --- a/python/paddle/fluid/metrics.py +++ b/python/paddle/fluid/metrics.py @@ -55,7 +55,7 @@ def _is_number_or_matrix_(var): return _is_number_(var) or isinstance(var, np.ndarray) -class MetricBase(object): +class MetricBase: """ In many cases, we usually have to split the test data into mini-batches for evaluating deep neural networks, therefore we need to collect the evaluation results of each @@ -818,7 +818,7 @@ class Auc(MetricBase): ) -class DetectionMAP(object): +class DetectionMAP: """ Calculate the detection mean average precision (mAP). diff --git a/python/paddle/fluid/op.py b/python/paddle/fluid/op.py index c8b118127c..515a5f8e77 100644 --- a/python/paddle/fluid/op.py +++ b/python/paddle/fluid/op.py @@ -35,7 +35,7 @@ def is_str(s): return isinstance(s, str) -class OpDescCreationMethod(object): +class OpDescCreationMethod: """ Convert the user's input(only keyword arguments are supported) to OpDesc based on the OpProto. @@ -181,7 +181,7 @@ class OpDescCreationMethod(object): return False -class OpInfo(object): +class OpInfo: def __init__(self, name, method, inputs, outputs, attrs, extra_attrs): self.name = name self.method = method @@ -213,7 +213,7 @@ def create_op_creation_method(op_proto): ) -class OperatorFactory(object): +class OperatorFactory: def __init__(self): self.op_methods = dict() @@ -266,7 +266,7 @@ class OperatorFactory(object): return self.get_op_info(type).extra_attrs -class __RecurrentOp__(object): +class __RecurrentOp__: __proto__ = None type = "recurrent" @@ -287,7 +287,7 @@ class __RecurrentOp__(object): return core.RecurrentOp.create(proto.SerializeToString()) -class __DynamicRecurrentOp__(object): +class __DynamicRecurrentOp__: __proto__ = None type = "dynamic_recurrent" @@ -308,7 +308,7 @@ class __DynamicRecurrentOp__(object): return core.DynamicRecurrentOp.create(proto.SerializeToString()) -class __CondOp__(object): +class __CondOp__: __proto__ = None type = "cond" diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 9f361827f0..7eaa38636c 100755 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -99,7 +99,7 @@ __all__ = [ ] -class Optimizer(object): +class Optimizer: """Optimizer Base class. Define the common interface of an optimizer. 
@@ -4617,7 +4617,7 @@ class ModelAverage(Optimizer): executor.run(self.restore_program) -class ExponentialMovingAverage(object): +class ExponentialMovingAverage: r""" :api_attr: Static Graph @@ -4877,7 +4877,7 @@ class ExponentialMovingAverage(object): executor.run(self.restore_program) -class PipelineOptimizer(object): +class PipelineOptimizer: """ :api_attr: Static Graph @@ -7600,7 +7600,7 @@ class RecomputeOptimizer(Optimizer): return optimize_ops, params_grads -class LookaheadOptimizer(object): +class LookaheadOptimizer: r""" :api_attr: Static Graph @@ -7780,7 +7780,7 @@ class LookaheadOptimizer(object): return mini_out -class GradientMergeOptimizer(object): +class GradientMergeOptimizer: """ Gradient Merge, also called as Gradient Accumulation, is a training strategy for larger batches. With this strategy, diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py index c0a45b041e..269172ea99 100644 --- a/python/paddle/fluid/parallel_executor.py +++ b/python/paddle/fluid/parallel_executor.py @@ -25,7 +25,7 @@ ExecutionStrategy = core.ParallelExecutor.ExecutionStrategy BuildStrategy = core.ParallelExecutor.BuildStrategy -class ParallelExecutor(object): +class ParallelExecutor: """ :api_attr: Static Graph diff --git a/python/paddle/fluid/param_attr.py b/python/paddle/fluid/param_attr.py index e1c8568d93..f251a654a9 100644 --- a/python/paddle/fluid/param_attr.py +++ b/python/paddle/fluid/param_attr.py @@ -22,7 +22,7 @@ __all__ = [ ] -class ParamAttr(object): +class ParamAttr: """ Note: diff --git a/python/paddle/fluid/reader.py b/python/paddle/fluid/reader.py index 83aedd052a..2ad3e5903a 100644 --- a/python/paddle/fluid/reader.py +++ b/python/paddle/fluid/reader.py @@ -145,7 +145,7 @@ def _reader_process_loop(batch_reader, data_queue): raise -class DataLoaderBase(object): +class DataLoaderBase: def __init__(self): self._places = None @@ -181,7 +181,7 @@ class DataLoaderBase(object): return arr -class AuToTune(object): +class AuToTune: def __init__(self, loader): self.loader = loader self.max_num_worker = multiprocessing.cpu_count() / 2 @@ -318,7 +318,7 @@ class AuToTune(object): return best_workers -class DataLoader(object): +class DataLoader: """ DataLoader prodives an iterator which iterates given dataset once by the batch_sampler. diff --git a/python/paddle/fluid/regularizer.py b/python/paddle/fluid/regularizer.py index 60112d182e..1152f0bbd8 100644 --- a/python/paddle/fluid/regularizer.py +++ b/python/paddle/fluid/regularizer.py @@ -22,7 +22,7 @@ from paddle import _C_ops, _legacy_C_ops __all__ = ['L1Decay', 'L2Decay', 'L1DecayRegularizer', 'L2DecayRegularizer'] -class WeightDecayRegularizer(object): +class WeightDecayRegularizer: """Base class for weight decay regularizers Defines the common interface of weight-decay regularizers. 
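One related py2-to-py3 subtlety visible in the AuToTune hunk above: under Python 3, `/` is true division, so `multiprocessing.cpu_count() / 2` yields a float, and code that wants an integer worker count would use `//`. A short illustration (an observation about the surrounding code, not a change this patch makes):

    import multiprocessing

    cpus = multiprocessing.cpu_count()
    print(cpus / 2)   # true division: a float under Python 3, e.g. 4.0
    print(cpus // 2)  # floor division: an int, e.g. 4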
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_communicate_group.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_communicate_group.py index 0c6bf6e9de..20e4b8312d 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_communicate_group.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_communicate_group.py @@ -17,7 +17,7 @@ import paddle from paddle.distributed import fleet -class TestNewGroupAPI(object): +class TestNewGroupAPI: def __init__(self): paddle.distributed.init_parallel_env() topo = fleet.CommunicateTopology( diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/new_group.py b/python/paddle/fluid/tests/unittests/collective/fleet/new_group.py index 9fa469ea5e..40924c66bb 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/new_group.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/new_group.py @@ -16,7 +16,7 @@ import numpy as np import paddle -class TestNewGroupAPI(object): +class TestNewGroupAPI: def __init__(self): paddle.distributed.init_parallel_env() d1 = np.array([1, 2, 3]) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_transformer.py b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_transformer.py index c4c10f75a6..f5b5903831 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_transformer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_transformer.py @@ -36,7 +36,7 @@ Note(chenweihang): To compare loss of single-card and multi-card """ -class TrainTaskConfig(object): +class TrainTaskConfig: """ TrainTaskConfig """ @@ -61,7 +61,7 @@ class TrainTaskConfig(object): label_smooth_eps = 0.1 -class ModelHyperParams(object): +class ModelHyperParams: # These following five vocabularies related configurations will be set # automatically according to the passed vocabulary path and special tokens. # size of source word dictionary. 
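TrainTaskConfig and ModelHyperParams above use bare classes as read-only configuration namespaces: plain class attributes, never instantiated. After this patch the pattern needs no base-class boilerplate at all. A minimal sketch of the idiom (names illustrative):

    class TrainConfig:
        """A bare class used as a read-only configuration namespace."""

        use_gpu = True
        batch_size = 4
        label_smooth_eps = 0.1

    # attributes are read straight off the class; it is never instantiated
    print(TrainConfig.batch_size)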
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py index 798847ed3e..c2ac727156 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py @@ -612,7 +612,7 @@ class TestAmpDecorator(unittest.TestCase): def test_input_type_exception(self): def test_error_model(): - class MyModel(object): + class MyModel: def __init__(self): print("A fake Model") @@ -631,7 +631,7 @@ class TestAmpDecorator(unittest.TestCase): self.assertRaises(RuntimeError, test_error_distributed_model) def test_error_optimizer(): - class MyOptimizer(object): + class MyOptimizer: def __init__(self): print("A fake Optimizer") diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py index 7957cceb7b..e295548393 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py @@ -611,7 +611,7 @@ class TestAmpDecorator(unittest.TestCase): def test_input_type_exception(self): def test_error_model(): - class MyModel(object): + class MyModel: def __init__(self): print("A fake Model") @@ -630,7 +630,7 @@ class TestAmpDecorator(unittest.TestCase): self.assertRaises(RuntimeError, test_error_distributed_model) def test_error_optimizer(): - class MyOptimizer(object): + class MyOptimizer: def __init__(self): print("A fake Optimizer") diff --git a/python/paddle/fluid/tests/unittests/collective/multinode/test_collective_multi_nodes.py b/python/paddle/fluid/tests/unittests/collective/multinode/test_collective_multi_nodes.py index 868585f81a..786b81a06e 100644 --- a/python/paddle/fluid/tests/unittests/collective/multinode/test_collective_multi_nodes.py +++ b/python/paddle/fluid/tests/unittests/collective/multinode/test_collective_multi_nodes.py @@ -19,7 +19,7 @@ import subprocess import tempfile -class TestCollectiveAPIRunnerBase(object): +class TestCollectiveAPIRunnerBase: def check_pass(self, *args, **kwargs): raise NotImplementedError( "get model should be implemented by child class." diff --git a/python/paddle/fluid/tests/unittests/ctr_dataset_reader.py b/python/paddle/fluid/tests/unittests/ctr_dataset_reader.py index 5cc96cf1c4..88d1327eaf 100644 --- a/python/paddle/fluid/tests/unittests/ctr_dataset_reader.py +++ b/python/paddle/fluid/tests/unittests/ctr_dataset_reader.py @@ -60,7 +60,7 @@ def load_lr_input_record(sent): return res -class CtrReader(object): +class CtrReader: def __init__(self): pass diff --git a/python/paddle/fluid/tests/unittests/dist_ctr_reader.py b/python/paddle/fluid/tests/unittests/dist_ctr_reader.py index fafb5f19c5..2c6fa74b36 100644 --- a/python/paddle/fluid/tests/unittests/dist_ctr_reader.py +++ b/python/paddle/fluid/tests/unittests/dist_ctr_reader.py @@ -108,7 +108,7 @@ def load_lr_input_record(sent): feeding_index = {'dnn_input': 0, 'lr_input': 1, 'click': 2} -class Dataset(object): +class Dataset: def train(self): ''' Load trainset. 
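The collective test runner above is an informal abstract base: the method every child must supply raises NotImplementedError until overridden. A compact sketch of the same contract (illustrative names, simplified from the test files):

    class RunnerBase:
        """Each concrete runner must supply its own model."""

        def get_model(self, train_prog, startup_prog):
            raise NotImplementedError(
                "get model should be implemented by child class."
            )

    class MyRunner(RunnerBase):
        def get_model(self, train_prog, startup_prog):
            return "model"

    print(MyRunner().get_model(None, None))  # the override silences the error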
diff --git a/python/paddle/fluid/tests/unittests/dist_transformer.py b/python/paddle/fluid/tests/unittests/dist_transformer.py index 4765ee7d82..514fcf4b86 100644 --- a/python/paddle/fluid/tests/unittests/dist_transformer.py +++ b/python/paddle/fluid/tests/unittests/dist_transformer.py @@ -36,7 +36,7 @@ fluid.default_main_program().random_seed = 1 # from transformer_config import ModelHyperParams, TrainTaskConfig, merge_cfg_from_list -class TrainTaskConfig(object): +class TrainTaskConfig: # only support GPU currently use_gpu = True # the epoch number to train. @@ -88,7 +88,7 @@ class TrainTaskConfig(object): use_token_batch = False -class InferTaskConfig(object): +class InferTaskConfig: use_gpu = True # the number of examples in one run for sequence generation. batch_size = 10 @@ -105,7 +105,7 @@ class InferTaskConfig(object): model_path = "trained_models/pass_1.infer.model" -class ModelHyperParams(object): +class ModelHyperParams: # These following five vocabularies related configurations will be set # automatically according to the passed vocabulary path and special tokens. # size of source word dictionary. @@ -268,7 +268,7 @@ fast_decoder_data_input_fields = ( # from optim import LearningRateScheduler -class LearningRateScheduler(object): +class LearningRateScheduler: """ Wrapper for learning rate scheduling as described in the Transformer paper. LearningRateScheduler adapts the learning rate externally and the adapted @@ -714,13 +714,13 @@ def train_loop( # import transformer_reader as reader -class SortType(object): +class SortType: GLOBAL = 'global' POOL = 'pool' NONE = "none" -class Converter(object): +class Converter: def __init__(self, vocab, beg, end, unk, delimiter): self._vocab = vocab self._beg = beg @@ -739,7 +739,7 @@ class Converter(object): ) -class ComposedConverter(object): +class ComposedConverter: def __init__(self, converters): self._converters = converters @@ -750,7 +750,7 @@ class ComposedConverter(object): ] -class SentenceBatchCreator(object): +class SentenceBatchCreator: def __init__(self, batch_size): self.batch = [] self._batch_size = batch_size @@ -763,7 +763,7 @@ class SentenceBatchCreator(object): return tmp -class TokenBatchCreator(object): +class TokenBatchCreator: def __init__(self, batch_size): self.batch = [] self.max_len = -1 @@ -782,14 +782,14 @@ class TokenBatchCreator(object): self.batch.append(info) -class SampleInfo(object): +class SampleInfo: def __init__(self, i, max_len, min_len): self.i = i self.min_len = min_len self.max_len = max_len -class MinMaxFilter(object): +class MinMaxFilter: def __init__(self, max_len, min_len, underlying_creator): self._min_len = min_len self._max_len = max_len @@ -806,7 +806,7 @@ class MinMaxFilter(object): return self._creator.batch -class DataReader(object): +class DataReader: """ The data reader loads all data from files and produces batches of data in the way corresponding to settings. 
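TokenBatchCreator in the dist_transformer.py hunks above groups samples into batches bounded by a token budget rather than a fixed sample count. A self-contained sketch of that idea (simplified, not the file's exact logic):

    class TokenBatchCreator:
        """Collect samples until max_len * batch size would exceed the budget."""

        def __init__(self, token_budget):
            self.batch = []
            self.max_len = -1
            self._budget = token_budget

        def append(self, sample_len, sample):
            max_len = max(self.max_len, sample_len)
            if max_len * (len(self.batch) + 1) > self._budget:
                full, self.batch, self.max_len = self.batch, [sample], sample_len
                return full          # emit the finished batch
            self.max_len = max_len
            self.batch.append(sample)
            return None

    creator = TokenBatchCreator(token_budget=8)
    for length, s in [(3, "a"), (4, "b"), (5, "c")]:
        done = creator.append(length, s)
        if done:
            print(done)              # ['a', 'b'] once the budget is hit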
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/bert_utils.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/bert_utils.py index 0cc22162d2..79c911c82e 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/bert_utils.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/bert_utils.py @@ -223,7 +223,7 @@ def prepare_batch_data( return res -class DataReader(object): +class DataReader: def __init__( self, batch_size=4096, @@ -339,7 +339,7 @@ class DataReader(object): return wrapper -class ModelHyperParams(object): +class ModelHyperParams: generate_neg_sample = False epoch = 100 max_seq_len = 512 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py index a931276c56..2604bdd3a6 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py @@ -355,7 +355,7 @@ def if_with_and_or_4(x, y=None): def if_with_class_var(x, y=None): - class Foo(object): + class Foo: def __init__(self): self.a = 1 self.b = 2 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/predictor_utils.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/predictor_utils.py index 554cf95e82..2e49f3778a 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/predictor_utils.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/predictor_utils.py @@ -21,7 +21,7 @@ from paddle.fluid.core import AnalysisConfig from paddle.fluid.core import create_paddle_predictor -class PredictorTools(object): +class PredictorTools: ''' Paddle-Inference predictor ''' diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_utils.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_utils.py index 44a55c79c9..e93405ebd3 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_utils.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_utils.py @@ -91,7 +91,7 @@ def get_data_iter(batch_size, mode='train', cache_num=20): yield (src_ids, src_mask, tar_ids, tar_mask) -class Seq2SeqModelHyperParams(object): +class Seq2SeqModelHyperParams: # Whether use attention model attention = False diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py index 7ea2a97cb0..0bb0840514 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py @@ -21,7 +21,7 @@ from paddle.fluid.dygraph import Embedding, Layer, Linear from paddle.static import Variable -class EmbeddingLayer(object): +class EmbeddingLayer: """ Embedding Layer class """ @@ -53,7 +53,7 @@ class EmbeddingLayer(object): return emb -class FCLayer(object): +class FCLayer: """ Fully Connect Layer class """ @@ -79,7 +79,7 @@ class FCLayer(object): return fc -class ConcatLayer(object): +class ConcatLayer: """ Connection Layer class """ @@ -98,7 +98,7 @@ class ConcatLayer(object): return concat -class ReduceMeanLayer(object): +class ReduceMeanLayer: """ Reduce Mean Layer class """ @@ -117,7 +117,7 @@ class ReduceMeanLayer(object): return mean -class CosSimLayer(object): +class CosSimLayer: """ Cos Similarly Calculate Layer """ @@ -136,7 +136,7 @@ class CosSimLayer(object): return sim -class ElementwiseMaxLayer(object): 
+class ElementwiseMaxLayer: """ Elementwise Max Layer class """ @@ -155,7 +155,7 @@ class ElementwiseMaxLayer(object): return max -class ElementwiseAddLayer(object): +class ElementwiseAddLayer: """ Elementwise Add Layer class """ @@ -174,7 +174,7 @@ class ElementwiseAddLayer(object): return add -class ElementwiseSubLayer(object): +class ElementwiseSubLayer: """ Elementwise Add Layer class """ @@ -193,7 +193,7 @@ class ElementwiseSubLayer(object): return sub -class ConstantLayer(object): +class ConstantLayer: """ Generate A Constant Layer class """ @@ -215,7 +215,7 @@ class ConstantLayer(object): return constant -class SoftsignLayer(object): +class SoftsignLayer: """ Softsign Layer class """ @@ -439,7 +439,7 @@ class FC(Layer): return self._helper.append_activation(pre_activation, act=self._act) -class HingeLoss(object): +class HingeLoss: """ Hing Loss Calculate class """ diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py index ce4ba45f96..c4739b2dc3 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py @@ -17,7 +17,7 @@ import paddle from paddle.static import Variable -class EmbeddingLayer(object): +class EmbeddingLayer: """ Embedding Layer class """ @@ -50,7 +50,7 @@ class EmbeddingLayer(object): return emb -class FCLayer(object): +class FCLayer: """ Fully Connect Layer class """ @@ -76,7 +76,7 @@ class FCLayer(object): return fc -class ConcatLayer(object): +class ConcatLayer: """ Connection Layer class """ @@ -95,7 +95,7 @@ class ConcatLayer(object): return concat -class ReduceMeanLayer(object): +class ReduceMeanLayer: """ Reduce Mean Layer class """ @@ -114,7 +114,7 @@ class ReduceMeanLayer(object): return mean -class CosSimLayer(object): +class CosSimLayer: """ Cos Similarly Calculate Layer """ @@ -133,7 +133,7 @@ class CosSimLayer(object): return sim -class ElementwiseMaxLayer(object): +class ElementwiseMaxLayer: """ Elementwise Max Layer class """ @@ -152,7 +152,7 @@ class ElementwiseMaxLayer(object): return max -class ElementwiseAddLayer(object): +class ElementwiseAddLayer: """ Elementwise Add Layer class """ @@ -171,7 +171,7 @@ class ElementwiseAddLayer(object): return add -class ElementwiseSubLayer(object): +class ElementwiseSubLayer: """ Elementwise Add Layer class """ @@ -190,7 +190,7 @@ class ElementwiseSubLayer(object): return sub -class ConstantLayer(object): +class ConstantLayer: """ Generate A Constant Layer class """ @@ -212,7 +212,7 @@ class ConstantLayer(object): return constant -class SoftsignLayer(object): +class SoftsignLayer: """ Softsign Layer class """ @@ -425,7 +425,7 @@ class FC(paddle.nn.Layer): return self._helper.append_activation(pre_activation, act=self._act) -class HingeLoss(object): +class HingeLoss: """ Hing Loss Calculate class """ diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py index 7e6dad60b2..9ce37b565b 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py @@ -447,7 +447,7 @@ def bmn_loss_func( return loss, tem_loss, pem_reg_loss, pem_cls_loss -class Args(object): +class Args: epoch = 1 batch_size = 4 learning_rate = 0.1 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py 
b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py index 45b149617d..143b14f7ed 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py @@ -160,7 +160,7 @@ def test_for_in_else(x): def while_loop_class_var(x): - class Foo(object): + class Foo: def __init__(self): self.a = 3 self.b = 4 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py index 756da0243b..12a4f48f64 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py @@ -480,7 +480,7 @@ class DeConv2D(fluid.dygraph.Layer): return conv -class ImagePool(object): +class ImagePool: def __init__(self, pool_size=50): self.pool = [] self.count = 0 @@ -530,7 +530,7 @@ def reader_creater(): return reader -class Args(object): +class Args: epoch = 1 batch_size = 4 image_shape = [3, IMAGE_SIZE, IMAGE_SIZE] diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_full_name_usage.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_full_name_usage.py index a245b607f8..f16d2410cb 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_full_name_usage.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_full_name_usage.py @@ -44,7 +44,7 @@ def decorated_call_decorated(x): return jit_decorated_func(x) -class DoubleDecorated(object): +class DoubleDecorated: @classmethod @declarative def double_decorated_func1(self, x): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py index 3026d7e567..9d00db1caa 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py @@ -472,7 +472,7 @@ class LexNet(fluid.dygraph.Layer): return avg_cost, crf_decode -class Args(object): +class Args: epoch = 1 batch_size = 4 vocab_size = 100 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py index 6bc237f378..75c59897d8 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py @@ -153,7 +153,7 @@ def while_loop_bool_op2(x): def while_loop_class_var(x): - class Foo(object): + class Foo: def __init__(self): self.a = 3 self.b = 4 @@ -179,7 +179,7 @@ def loop_var_contains_property(x): def for_loop_class_var(max_len): - class Foo(object): + class Foo: def __init__(self): self.a = 3 self.b = 4 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py index 2cb5451bfa..083345d9db 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py @@ -470,7 +470,7 @@ def fake_data_reader(batch_size, label_size): return reader -class Args(object): +class Args: batch_size = 4 model = "MobileNetV1" lr = 0.001 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py index 9e79d40df8..c111e5c482 
100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py @@ -52,7 +52,7 @@ class Policy(Layer): return log_prob -class Args(object): +class Args: gamma = 0.99 log_interval = 1 train_step = 10 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py index 5c52fade77..25ca7e0847 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py @@ -293,7 +293,7 @@ def fake_data_reader(class_num, vocab_size, batch_size, padding_size): return reader -class Args(object): +class Args: epoch = 1 batch_size = 4 class_num = 2 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py index 466c6affcb..3e70147d30 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py @@ -73,7 +73,7 @@ def fake_vocabulary(): vocab = fake_vocabulary() -class FakeReaderProcessor(object): +class FakeReaderProcessor: def __init__(self, args, vocab): self.vocab = vocab self.seq_len = args.seq_len diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet_v2.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet_v2.py index bbb408e48c..b1ed858f85 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet_v2.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet_v2.py @@ -71,7 +71,7 @@ def fake_vocabulary(): vocab = fake_vocabulary() -class FakeReaderProcessor(object): +class FakeReaderProcessor: def __init__(self, args, vocab): self.vocab = vocab self.seq_len = args.seq_len diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py index 5923618ca3..cc307e5a7b 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py @@ -214,7 +214,7 @@ class TSM_ResNet(fluid.dygraph.Layer): return y -class FakeDataReader(object): +class FakeDataReader: def __init__(self, mode, cfg): self.format = cfg.MODEL.format self.num_classes = cfg.MODEL.num_classes diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_yolov3.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_yolov3.py index eecd6806fa..086e56828a 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_yolov3.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_yolov3.py @@ -29,7 +29,7 @@ random.seed(0) np.random.seed(0) -class SmoothedValue(object): +class SmoothedValue: """Track a series of values and provide access to smoothed values over a window or the global series average. 
""" @@ -46,7 +46,7 @@ class SmoothedValue(object): return self.loss_sum / self.iter_cnt -class FakeDataReader(object): +class FakeDataReader: def __init__(self): self.generator_out = [] self.total_iter = cfg.max_iter diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py index b27a35ae7e..ee32e0640c 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py @@ -565,7 +565,7 @@ class WrapDecoder(Layer): return logits -class CrossEntropyCriterion(object): +class CrossEntropyCriterion: def __init__(self, label_smooth_eps): self.label_smooth_eps = label_smooth_eps diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_util.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_util.py index 7e77f1ad03..bb0011de6b 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_util.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_util.py @@ -97,7 +97,7 @@ fast_decoder_data_input_fields = ( ) -class ModelHyperParams(object): +class ModelHyperParams: print_step = 2 save_dygraph_model_path = "dygraph_trained_models" save_static_model_path = "static_trained_models" @@ -290,7 +290,7 @@ def get_feed_data_reader(args, mode='train'): return __for_train__ if mode == 'train' else __for_test__ -class InputField(object): +class InputField: def __init__(self, input_slots): self.feed_list = [] for slot in input_slots: diff --git a/python/paddle/fluid/tests/unittests/feed_data_reader.py b/python/paddle/fluid/tests/unittests/feed_data_reader.py index 9a1f15ddff..ef2e18a429 100644 --- a/python/paddle/fluid/tests/unittests/feed_data_reader.py +++ b/python/paddle/fluid/tests/unittests/feed_data_reader.py @@ -25,7 +25,7 @@ def cyclic_reader(reader): return __reader__ -class FeedDataReader(object): +class FeedDataReader: def __init__(self, feed_list, reader): self._feed_list = [] for var in feed_list: diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py index 5adcf7a298..d15cbfc56f 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py @@ -39,7 +39,7 @@ def DataTypeCast(date_type): return np_data_type -class TestCollectiveAPIRunnerBase(object): +class TestCollectiveAPIRunnerBase: def get_model(self, train_prog, startup_prog, rank, indata=None): raise NotImplementedError( "get model should be implemented by child class." diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py index 3e005bafb4..f4d41818b3 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py @@ -51,7 +51,7 @@ def DataTypeCast(date_type): return np_data_type -class TestCollectiveRunnerBase(object): +class TestCollectiveRunnerBase: def get_model(self, train_prog, startup_prog, col_type): raise NotImplementedError( "get model should be implemented by child class." 
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py index 57202e62a4..1a7a2f2255 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py @@ -174,7 +174,7 @@ def pool2d_backward_navie( return x_grad -class TestPool2D_Op_Mixin(object): +class TestPool2D_Op_Mixin: def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True diff --git a/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_base_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_base_mlu.py index cb7a890adb..281d2d9de0 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_base_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_base_mlu.py @@ -40,7 +40,7 @@ paddle.enable_static() SEED = 10 -class TestSyncBatchNormRunnerBase(object): +class TestSyncBatchNormRunnerBase: def get_model( self, main, diff --git a/python/paddle/fluid/tests/unittests/npu/test_collective_base_npu.py b/python/paddle/fluid/tests/unittests/npu/test_collective_base_npu.py index 55bc9dce18..3e497ced85 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_collective_base_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_collective_base_npu.py @@ -28,7 +28,7 @@ import paddle.fluid.unique_name as nameGen from paddle.fluid import core -class TestCollectiveRunnerBase(object): +class TestCollectiveRunnerBase: def get_model(self, train_prog, startup_prog): raise NotImplementedError( "get model should be implemented by child class." diff --git a/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_base_npu.py b/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_base_npu.py index 101a749f42..0003d4c44c 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_base_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_base_npu.py @@ -45,7 +45,7 @@ paddle.enable_static() SEED = 10 -class TestSyncBatchNormRunnerBase(object): +class TestSyncBatchNormRunnerBase: def get_model( self, main, diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index baa4f26feb..d359f3459f 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -1566,7 +1566,7 @@ class OpTest(unittest.TestCase): ) return found[0] - class Checker(object): + class Checker: """base class for check with self.outputs. currently don't support check between checkers. """ diff --git a/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py b/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py index 1fa54bccd2..5b8c2e6e67 100755 --- a/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py +++ b/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py @@ -46,7 +46,7 @@ def is_distributed_env(): return True -class YamlHelper(object): +class YamlHelper: def load_yaml(self, yaml_file, other_part=None): part_list = ["runner", "hyper_parameters"] if other_part: @@ -317,7 +317,7 @@ def bf16_to_fp32(val): return np.float32(struct.unpack('<f', struct.pack('<I', val << 16))[0])
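The bf16_to_fp32 helper visible in the ps_dnn_trainer.py hunk above reinterprets a bfloat16 bit pattern as the high 16 bits of a float32. A standalone sketch of the same conversion (illustrative input value, not part of the patch):

    import struct

    def bf16_to_fp32(val):
        # a bfloat16 is the top 16 bits of a float32, so shift it back up
        return struct.unpack('<f', struct.pack('<I', val << 16))[0]

    # 0x3F80 is bfloat16 1.0; shifted up it is float32 0x3F800000 == 1.0
    print(bf16_to_fp32(0x3F80))  # 1.0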
diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_op.py index b2ae6318cc..3692ef8627 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_op.py @@ -286,7 +286,7 @@ def pool2D_forward_naive( return out -class TestPool2D_Op_Mixin(object): +class TestPool2D_Op_Mixin: def setUp(self): self.op_type = "pool2d" self.use_cudnn = False diff --git a/python/paddle/fluid/tests/unittests/test_recurrent_op.py b/python/paddle/fluid/tests/unittests/test_recurrent_op.py index 67149212a8..29c12daf55 100644 --- a/python/paddle/fluid/tests/unittests/test_recurrent_op.py +++ b/python/paddle/fluid/tests/unittests/test_recurrent_op.py @@ -27,7 +27,7 @@ from paddle.fluid.backward import append_backward np.random.seed(123) -class PyRNNBase(object): +class PyRNNBase: def __init__(self, input_shape, output_shape): self.x = np.ones(shape=input_shape).astype("float32") self.y = np.zeros(shape=output_shape).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py b/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py index 00bf9735fa..41e894badd 100644 --- a/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py +++ b/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py @@ -105,7 +105,7 @@ class DecoderCell(layers.RNNCell): return out, [new_lstm_states, out] -class Encoder(object): +class Encoder: def __init__(self, num_layers, hidden_size, dropout_prob=0.0): self.encoder_cell = EncoderCell(num_layers, hidden_size, dropout_prob) @@ -119,7 +119,7 @@ class Encoder(object): return encoder_output, encoder_final_state -class Decoder(object): +class Decoder: def __init__( self, num_layers, @@ -191,7 +191,7 @@ class Decoder(object): return decoder_output, decoder_final_state, dec_seq_lengths -class Seq2SeqModel(object): +class Seq2SeqModel: """Seq2Seq model: RNN encoder-decoder with attention""" def __init__( @@ -302,7 +302,7 @@ class Seq2SeqModel(object): return probs, samples, sample_length -class PolicyGradient(object): +class PolicyGradient: """policy gradient""" def __init__(self, lr=None): @@ -395,7 +395,7 @@ def reward_func(samples, sample_length): ) -class MLE(object): +class MLE: """teacher-forcing MLE training""" def __init__(self, lr=None): @@ -413,7 +413,7 @@ class MLE(object): return loss -class SeqPGAgent(object): +class SeqPGAgent: def __init__( self, model_cls, diff --git a/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py b/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py index 624f2f5e61..b86be49eae 100644 --- a/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py +++ b/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py @@ -18,7 +18,7 @@ import paddle paddle.enable_static() -class Decoder(object): +class Decoder: def __init__(self, transitions, use_tag=True): self.transitions = transitions self.use_tag = use_tag diff --git a/python/paddle/fluid/tests/unittests/test_warpctc_op.py b/python/paddle/fluid/tests/unittests/test_warpctc_op.py index 91bebab2f6..b3febb9b40 100644 --- a/python/paddle/fluid/tests/unittests/test_warpctc_op.py +++ b/python/paddle/fluid/tests/unittests/test_warpctc_op.py @@ -28,7 +28,7 @@ paddle.enable_static() CUDA_BLOCK_SIZE = 32 -class CTCForward(object): +class CTCForward: def __init__( self, softmax, diff --git a/python/paddle/fluid/tests/unittests/tokenizer/bert_tokenizer.py
b/python/paddle/fluid/tests/unittests/tokenizer/bert_tokenizer.py index cd3546ebe3..59c530b7d8 100755 --- a/python/paddle/fluid/tests/unittests/tokenizer/bert_tokenizer.py +++ b/python/paddle/fluid/tests/unittests/tokenizer/bert_tokenizer.py @@ -26,7 +26,7 @@ from tokenizer_utils import ( ) -class BasicTokenizer(object): +class BasicTokenizer: """ Runs basic tokenization (punctuation splitting, lower casing, etc.). Args: @@ -165,7 +165,7 @@ class BasicTokenizer(object): return "".join(output) -class WordpieceTokenizer(object): +class WordpieceTokenizer: """ Runs WordPiece tokenization. Args: diff --git a/python/paddle/fluid/tests/unittests/tokenizer/tokenizer_utils.py b/python/paddle/fluid/tests/unittests/tokenizer/tokenizer_utils.py index 9e60b29ffb..2280292670 100644 --- a/python/paddle/fluid/tests/unittests/tokenizer/tokenizer_utils.py +++ b/python/paddle/fluid/tests/unittests/tokenizer/tokenizer_utils.py @@ -147,7 +147,7 @@ def tokenize_chinese_chars(text): return output -class PretrainedTokenizer(object): +class PretrainedTokenizer: """ The base class for all pretrained tokenizers. It mainly provides common methods for loading (construction and loading) and saving pretrained tokenizers. Loading diff --git a/python/paddle/fluid/tests/unittests/utils.py b/python/paddle/fluid/tests/unittests/utils.py index 03993c2355..04c3085c3d 100644 --- a/python/paddle/fluid/tests/unittests/utils.py +++ b/python/paddle/fluid/tests/unittests/utils.py @@ -101,7 +101,7 @@ def load_dygraph_vars_to_scope(model_path, scope, place): load_dict_to_scope(scope, opti_dict) -class DyGraphProgramDescTracerTestHelper(object): +class DyGraphProgramDescTracerTestHelper: def __init__(self, unittest_obj): self.unittest_obj = unittest_obj diff --git a/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py b/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py index f1276f765a..d645462c7d 100644 --- a/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py +++ b/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py @@ -98,7 +98,7 @@ xpu_test_device_op_white_list = [] xpu_test_device_op_type_white_list = [] -class XPUOpTestWrapper(object): +class XPUOpTestWrapper: def create_classes(self): base_class = None classes = [] diff --git a/python/paddle/fluid/tests/unittests/xpu/test_collective_base_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_collective_base_xpu.py index 1c00f3b94b..b9be6077d4 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_collective_base_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_collective_base_xpu.py @@ -49,7 +49,7 @@ def DataTypeCast(date_type): return np_data_type -class TestCollectiveRunnerBase(object): +class TestCollectiveRunnerBase: def get_model(self, train_prog, startup_prog): raise NotImplementedError( "get model should be implemented by child class." diff --git a/python/paddle/fluid/trainer_desc.py b/python/paddle/fluid/trainer_desc.py index 484042c53a..96204a2399 100644 --- a/python/paddle/fluid/trainer_desc.py +++ b/python/paddle/fluid/trainer_desc.py @@ -26,7 +26,7 @@ __all__ = [ ] -class TrainerDesc(object): +class TrainerDesc: ''' Set proto from python to c++. Can be initialized from train_desc. 
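WordpieceTokenizer in the bert_tokenizer.py hunks above performs greedy longest-match-first subword splitting against a vocabulary. A compact sketch of that algorithm (toy vocabulary; the real class also handles max lengths and other details):

    def wordpiece(token, vocab, unk="[UNK]"):
        """Greedy longest-match split; continuation pieces get a '##' prefix."""
        pieces, start = [], 0
        while start < len(token):
            end = len(token)
            while end > start:
                piece = token[start:end]
                if start > 0:
                    piece = "##" + piece
                if piece in vocab:
                    pieces.append(piece)
                    break
                end -= 1
            else:
                return [unk]  # no piece matched at this position
            start = end
        return pieces

    vocab = {"un", "##aff", "##able"}
    print(wordpiece("unaffable", vocab))  # ['un', '##aff', '##able']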
diff --git a/python/paddle/fluid/trainer_factory.py b/python/paddle/fluid/trainer_factory.py index 761895fe30..281fbd8693 100644 --- a/python/paddle/fluid/trainer_factory.py +++ b/python/paddle/fluid/trainer_factory.py @@ -45,7 +45,7 @@ from multiprocessing import Process, Manager __all__ = ["TrainerFactory", "FetchHandlerMonitor"] -class TrainerFactory(object): +class TrainerFactory: """ Create trainer and device worker. If opt_info is not None, it will get configs from opt_info, @@ -151,7 +151,7 @@ class TrainerFactory(object): return trainer -class FetchHandlerMonitor(object): +class FetchHandlerMonitor: """ Defination of FetchHandlerMonitor class, it's for fetch handler. diff --git a/python/paddle/fluid/transpiler/collective.py b/python/paddle/fluid/transpiler/collective.py index aedbaeb43c..c22b174696 100644 --- a/python/paddle/fluid/transpiler/collective.py +++ b/python/paddle/fluid/transpiler/collective.py @@ -31,7 +31,7 @@ __all__ = ['GradAllReduce', 'LocalSGD', 'MultiThread'] OpRole = core.op_proto_and_checker_maker.OpRole -class Collective(object): +class Collective: ''' ''' def __init__(self, nrings): diff --git a/python/paddle/fluid/transpiler/details/ufind.py b/python/paddle/fluid/transpiler/details/ufind.py index fef6f24570..f6a05cbf86 100644 --- a/python/paddle/fluid/transpiler/details/ufind.py +++ b/python/paddle/fluid/transpiler/details/ufind.py @@ -13,7 +13,7 @@ # limitations under the License. -class UnionFind(object): +class UnionFind: """Union-find data structure. Union-find is a data structure that keeps track of a set of elements partitioned diff --git a/python/paddle/fluid/transpiler/details/vars_distributed.py b/python/paddle/fluid/transpiler/details/vars_distributed.py index 32a27ed33b..cdd488bfa1 100644 --- a/python/paddle/fluid/transpiler/details/vars_distributed.py +++ b/python/paddle/fluid/transpiler/details/vars_distributed.py @@ -14,7 +14,7 @@ from paddle.fluid.framework import Variable -class VarStruct(object): +class VarStruct: """ record part properties of a Variable in python. """ @@ -28,7 +28,7 @@ class VarStruct(object): self.persistable = persistable -class VarDistributed(object): +class VarDistributed: """ a class to record the var distributed on parameter servers. the class will record the relationship between origin var and slice var. @@ -146,7 +146,7 @@ class VarDistributed(object): ) -class VarsDistributed(object): +class VarsDistributed: """ a gather about VarDistributed with many methods to find distributed vars. through the class, we can get overview about the distributed parameters on parameter servers. 
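The UnionFind class touched in the ufind.py hunk above keeps a set of elements partitioned into disjoint subsets. A minimal implementation of the structure its docstring describes (illustrative, with path compression; not necessarily the file's exact methods):

    class UnionFind:
        def __init__(self, elements):
            self._parent = {e: e for e in elements}

        def find(self, x):
            # path compression: point nodes closer to the root as we walk up
            while self._parent[x] != x:
                self._parent[x] = self._parent[self._parent[x]]
                x = self._parent[x]
            return x

        def union(self, a, b):
            self._parent[self.find(a)] = self.find(b)

        def is_connected(self, a, b):
            return self.find(a) == self.find(b)

    uf = UnionFind("abcd")
    uf.union("a", "b")
    uf.union("b", "c")
    print(uf.is_connected("a", "c"), uf.is_connected("a", "d"))  # True False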
diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py index 6bdddddd93..ee98dc94a6 100644 --- a/python/paddle/fluid/transpiler/distribute_transpiler.py +++ b/python/paddle/fluid/transpiler/distribute_transpiler.py @@ -143,7 +143,7 @@ def slice_variable(var_list, slice_count, min_block_size): return blocks -class DistributeTranspilerConfig(object): +class DistributeTranspilerConfig: """ :api_attr: Static Graph @@ -248,7 +248,7 @@ class DistributeTranspilerConfig(object): self.__sync_mode = value -class ServerRuntimeConfig(object): +class ServerRuntimeConfig: def __init__(self): self._rpc_send_thread_num = int( os.getenv("FLAGS_rpc_send_thread_num", "12") @@ -261,7 +261,7 @@ class ServerRuntimeConfig(object): ) -class DistributeTranspiler(object): +class DistributeTranspiler: """ :api_attr: Static Graph diff --git a/python/paddle/fluid/transpiler/ps_dispatcher.py b/python/paddle/fluid/transpiler/ps_dispatcher.py index 589f2cb26e..c42472f5a1 100644 --- a/python/paddle/fluid/transpiler/ps_dispatcher.py +++ b/python/paddle/fluid/transpiler/ps_dispatcher.py @@ -13,7 +13,7 @@ # limitations under the License. -class PSDispatcher(object): +class PSDispatcher: """ PSDispatcher is the base class for dispatching vars into different pserver instance. diff --git a/python/paddle/fluid/unique_name.py b/python/paddle/fluid/unique_name.py index 1c0c28dae8..c881196ef5 100644 --- a/python/paddle/fluid/unique_name.py +++ b/python/paddle/fluid/unique_name.py @@ -18,7 +18,7 @@ from .wrapped_decorator import signature_safe_contextmanager __all__ = ['generate', 'switch', 'guard'] -class UniqueNameGenerator(object): +class UniqueNameGenerator: """ Generate unique name with prefix. @@ -47,7 +47,7 @@ class UniqueNameGenerator(object): return self.prefix + "_".join([key, str(tmp)]) -class DygraphParameterNameChecker(object): +class DygraphParameterNameChecker: """ Check whether the name of parameter is used. """ diff --git a/python/paddle/hapi/callbacks.py b/python/paddle/hapi/callbacks.py index c4ccb0341e..f9d280d76d 100644 --- a/python/paddle/hapi/callbacks.py +++ b/python/paddle/hapi/callbacks.py @@ -69,7 +69,7 @@ def config_callbacks( return cbk_list -class CallbackList(object): +class CallbackList: def __init__(self, callbacks=None): # copy self.callbacks = [c for c in callbacks] @@ -129,7 +129,7 @@ class CallbackList(object): self._call(name, step, logs) -class Callback(object): +class Callback: """ Base class used to build new callbacks. And new callbacks could also terminate training by setting `model.stop_training=True`. diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py index 257024d048..b7813932d8 100644 --- a/python/paddle/hapi/model.py +++ b/python/paddle/hapi/model.py @@ -259,7 +259,7 @@ def _update_input_info(inputs): return shapes, dtypes -class StaticGraphAdapter(object): +class StaticGraphAdapter: """ Model traning/inference with a static graph. """ @@ -734,7 +734,7 @@ class StaticGraphAdapter(object): self._compiled_progs[mode] = compiled_prog -class DynamicGraphAdapter(object): +class DynamicGraphAdapter: def __init__(self, model): super().__init__() self.model = model @@ -1006,7 +1006,7 @@ class DynamicGraphAdapter(object): self.model._scaler = None -class Model(object): +class Model: """ An Model object is network with training and inference features. 
Dynamic graph and static graph are supported at the same time, diff --git a/python/paddle/hapi/progressbar.py b/python/paddle/hapi/progressbar.py index 43b913a6fc..77f090471d 100644 --- a/python/paddle/hapi/progressbar.py +++ b/python/paddle/hapi/progressbar.py @@ -22,7 +22,7 @@ from collections import namedtuple __all__ = [] -class ProgressBar(object): +class ProgressBar: """progress bar""" def __init__( diff --git a/python/paddle/hapi/static_flops.py b/python/paddle/hapi/static_flops.py index a3b787962f..c72bcc4273 100644 --- a/python/paddle/hapi/static_flops.py +++ b/python/paddle/hapi/static_flops.py @@ -19,7 +19,7 @@ from paddle.static import Program, Variable __all__ = [] -class VarWrapper(object): +class VarWrapper: def __init__(self, var, graph): assert isinstance(var, Variable) assert isinstance(graph, GraphWrapper) @@ -39,7 +39,7 @@ class VarWrapper(object): return self._var.shape -class OpWrapper(object): +class OpWrapper: def __init__(self, op, graph): assert isinstance(graph, GraphWrapper) self._op = op @@ -69,7 +69,7 @@ class OpWrapper(object): return [self._graph.var(var_name) for var_name in self._op.output(name)] -class GraphWrapper(object): +class GraphWrapper: """ It is a wrapper of paddle.fluid.framework.IrGraph with some special functions for paddle slim framework. @@ -208,7 +208,7 @@ def static_flops(program, print_detail=False): return _graph_flops(graph, detail=print_detail) -class Table(object): +class Table: def __init__(self, table_heads): self.table_heads = table_heads self.table_len = [] diff --git a/python/paddle/incubate/autograd/functional.py b/python/paddle/incubate/autograd/functional.py index 6f34be56e7..218c125a92 100644 --- a/python/paddle/incubate/autograd/functional.py +++ b/python/paddle/incubate/autograd/functional.py @@ -168,7 +168,7 @@ def _zeros_like_with_grad(xs): return ys -class Jacobian(object): +class Jacobian: r""" Computes the Jacobian matrix of a given function. @@ -257,7 +257,7 @@ class Jacobian(object): return self._jacobian.shape -class Hessian(object): +class Hessian: """ Computes the Hessian matrix with a given ``func`` with respect to ``xs`` . @@ -328,7 +328,7 @@ class Hessian(object): return self.symbolic.shape -class _Jacobian(object): +class _Jacobian: """The base class for computing Jacobian matrix. ``_Jacobian`` implementes the core logic of multidimensional index and lazy diff --git a/python/paddle/incubate/autograd/primreg.py b/python/paddle/incubate/autograd/primreg.py index 7d81847f90..cce8c49eb4 100644 --- a/python/paddle/incubate/autograd/primreg.py +++ b/python/paddle/incubate/autograd/primreg.py @@ -13,7 +13,7 @@ # limitations under the License. -class Registry(object): +class Registry: """A general registry object.""" __slots__ = ['name', 'tab'] diff --git a/python/paddle/incubate/autograd/primx.py b/python/paddle/incubate/autograd/primx.py index cdf16e77b1..601b486d35 100644 --- a/python/paddle/incubate/autograd/primx.py +++ b/python/paddle/incubate/autograd/primx.py @@ -115,7 +115,7 @@ def output_vars_on_path(path): return vars -class VarMap(object): +class VarMap: """A general map data structure for linking variables to variables. An example is linking variables to their gradients. @@ -180,7 +180,7 @@ class VarMap(object): # TODO(lml): supporting control flow, nested blocks, and block other than current block of main program. 
-class Transform(object):
+class Transform:
     """An object that maintains the state of transformations
     applied to a primitive program."""
diff --git a/python/paddle/incubate/autograd/utils.py b/python/paddle/incubate/autograd/utils.py
index 2b8082bf48..5437401aec 100644
--- a/python/paddle/incubate/autograd/utils.py
+++ b/python/paddle/incubate/autograd/utils.py
@@ -17,7 +17,7 @@ import paddle
 from paddle.fluid import framework as framework


-class PrimOption(object):
+class PrimOption:
     def __init__(self):
         self.enable_prim = False
diff --git a/python/paddle/jit/layer.py b/python/paddle/jit/layer.py
index 25d9ca5dbc..e3204ab65d 100644
--- a/python/paddle/jit/layer.py
+++ b/python/paddle/jit/layer.py
@@ -17,7 +17,7 @@ from paddle.fluid import core
 from paddle.fluid.core import Load


-class Layer(object):
+class Layer:
     def __init__(self):
         self.cpp_layer = None
         # {name: Function}
diff --git a/python/paddle/nn/layer/rnn.py b/python/paddle/nn/layer/rnn.py
index bbada10aaf..dba69b9848 100644
--- a/python/paddle/nn/layer/rnn.py
+++ b/python/paddle/nn/layer/rnn.py
@@ -188,7 +188,7 @@ class RNNCellBase(Layer):
                 return True
             return isinstance(seq, Sequence) and not isinstance(seq, str)

-        class Shape(object):
+        class Shape:
             def __init__(self, shape):
                 self.shape = shape if shape[0] == -1 else ([-1] + list(shape))
diff --git a/python/paddle/nn/utils/spectral_norm_hook.py b/python/paddle/nn/utils/spectral_norm_hook.py
index 288e5ea59c..f035d0b443 100644
--- a/python/paddle/nn/utils/spectral_norm_hook.py
+++ b/python/paddle/nn/utils/spectral_norm_hook.py
@@ -26,7 +26,7 @@ def normal_(x, mean=0.0, std=1.0):
     return x


-class SpectralNorm(object):
+class SpectralNorm:
     def __init__(self, name='weight', n_power_iterations=1, dim=0, eps=1e-12):
         self.name = name
         self.dim = dim
diff --git a/python/paddle/nn/utils/weight_norm_hook.py b/python/paddle/nn/utils/weight_norm_hook.py
index cdcb97aa9a..2a4d076479 100755
--- a/python/paddle/nn/utils/weight_norm_hook.py
+++ b/python/paddle/nn/utils/weight_norm_hook.py
@@ -95,7 +95,7 @@ def _weight_norm(v, g, dim):
     return weight


-class WeightNorm(object):
+class WeightNorm:
     def __init__(self, name, dim):
         if dim is None:
             dim = -1
diff --git a/python/paddle/optimizer/lr.py b/python/paddle/optimizer/lr.py
index 3309bb4f8c..6f96e12f99 100644
--- a/python/paddle/optimizer/lr.py
+++ b/python/paddle/optimizer/lr.py
@@ -39,7 +39,7 @@ __all__ = [  # noqa
 ]


-class LRScheduler(object):
+class LRScheduler:
     """
     LRScheduler Base class. Define the common interface of a learning rate scheduler.
diff --git a/python/paddle/optimizer/optimizer.py b/python/paddle/optimizer/optimizer.py
index 783b11b908..36aa9c151d 100644
--- a/python/paddle/optimizer/optimizer.py
+++ b/python/paddle/optimizer/optimizer.py
@@ -98,7 +98,7 @@ def append_backward_new(
     return params_and_grads


-class Optimizer(object):
+class Optimizer:
     r"""Optimizer Base class. Define the common interface of an optimizer.
diff --git a/python/paddle/profiler/timer.py b/python/paddle/profiler/timer.py
index 311fc373c5..8bb49a2f37 100644
--- a/python/paddle/profiler/timer.py
+++ b/python/paddle/profiler/timer.py
@@ -16,7 +16,7 @@ import timeit
 from collections import OrderedDict


-class Stack(object):
+class Stack:
     """
     The stack in a Last-In/First-Out (LIFO) manner. A new element is
     added at the end and an element is removed from that end.
@@ -41,7 +41,7 @@ class Stack(object):
         return None


-class Event(object):
+class Event:
     """
     An Event is used to record the cost of every step and the cost
     of the total steps except skipped steps.
@@ -152,7 +152,7 @@ class Event(object):
         return summary


-class Hook(object):
+class Hook:
     """
     The base class from which all types of hooks should inherit.
     """
@@ -299,7 +299,7 @@ class TimerHook(Hook):
         )


-class TimeAverager(object):
+class TimeAverager:
     """
     Record the cost of every step and count the average.
     """
@@ -346,7 +346,7 @@ class TimeAverager(object):
         return float(self._total_iters) / self._total_time


-class Benchmark(object):
+class Benchmark:
     """
     A tool for collecting statistics on model performance. The `before_reader`
     and `after_reader` are called in the DataLoader to count the cost
diff --git a/python/paddle/static/input.py b/python/paddle/static/input.py
index 62083769e5..f6e979dbcb 100644
--- a/python/paddle/static/input.py
+++ b/python/paddle/static/input.py
@@ -120,7 +120,7 @@ def data(name, shape, dtype=None, lod_level=0):
     )


-class InputSpec(object):
+class InputSpec:
     """
     InputSpec describes the signature information of the model input,
     such as ``shape`` , ``dtype`` , ``name`` .
diff --git a/python/paddle/tensor/to_string.py b/python/paddle/tensor/to_string.py
index 4739628f71..bb38d152b5 100644
--- a/python/paddle/tensor/to_string.py
+++ b/python/paddle/tensor/to_string.py
@@ -19,7 +19,7 @@ from paddle.fluid.data_feeder import check_type, convert_dtype

 __all__ = []


-class PrintOptions(object):
+class PrintOptions:
     precision = 8
     threshold = 1000
     edgeitems = 3
diff --git a/python/paddle/text/datasets/movielens.py b/python/paddle/text/datasets/movielens.py
index b01d8e94bb..c4d0681f42 100644
--- a/python/paddle/text/datasets/movielens.py
+++ b/python/paddle/text/datasets/movielens.py
@@ -27,7 +27,7 @@ URL = 'https://dataset.bj.bcebos.com/movielens%2Fml-1m.zip'
 MD5 = 'c4d9eecfca2ab87c1945afe126590906'


-class MovieInfo(object):
+class MovieInfo:
     """
     Movie id, title and categories information are stored in MovieInfo.
     """
@@ -58,7 +58,7 @@ class MovieInfo(object):
         return self.__str__()


-class UserInfo(object):
+class UserInfo:
     """
     User id, gender, age, and job information are stored in UserInfo.
     """
diff --git a/python/paddle/utils/cpp_extension/cpp_extension.py b/python/paddle/utils/cpp_extension/cpp_extension.py
index 3cbfb6d732..c05be5f2a4 100644
--- a/python/paddle/utils/cpp_extension/cpp_extension.py
+++ b/python/paddle/utils/cpp_extension/cpp_extension.py
@@ -353,7 +353,7 @@ def _generate_extension_name(sources):
     return '_'.join(file_prefix)


-class BuildExtension(build_ext, object):
+class BuildExtension(build_ext):
     """
     Inherited from setuptools.command.build_ext to customize how to apply
     the compilation process with a shared library.
@@ -724,7 +724,7 @@ class BuildExtension(build_ext, object):
         )


-class EasyInstallCommand(easy_install, object):
+class EasyInstallCommand(easy_install):
     """
     Extend the easy_install Command to control the behavior of
     naming the shared library file.
@@ -759,7 +759,7 @@ class EasyInstallCommand(easy_install, object):
         assert os.path.exists(new_so_path)


-class BuildCommand(build, object):
+class BuildCommand(build):
     """
     Extend the build Command to control the behavior of
     specifying the `build_base` root directory.
diff --git a/python/paddle/utils/download.py b/python/paddle/utils/download.py
index a77d7b60de..660e09e866 100644
--- a/python/paddle/utils/download.py
+++ b/python/paddle/utils/download.py
@@ -27,7 +27,7 @@ try:
     from tqdm import tqdm
 except:

-    class tqdm(object):
+    class tqdm:
         def __init__(self, total=None):
             self.total = total
             self.n = 0
diff --git a/python/paddle/utils/op_version.py b/python/paddle/utils/op_version.py
index 9f9ae4d73c..793e0b6219 100644
--- a/python/paddle/utils/op_version.py
+++ b/python/paddle/utils/op_version.py
@@ -28,7 +28,7 @@ def Singleton(cls):
     return _singleton


-class OpUpdateInfoHelper(object):
+class OpUpdateInfoHelper:
     def __init__(self, info):
         self._info = info

@@ -47,7 +47,7 @@ class OpUpdateInfoHelper(object):


 @Singleton
-class OpLastCheckpointChecker(object):
+class OpLastCheckpointChecker:
     def __init__(self):
         self.raw_version_map = core.get_op_version_map()
         self.checkpoints_map = {}
diff --git a/python/paddle/utils/profiler.py b/python/paddle/utils/profiler.py
index 27803cfa44..625900e87e 100644
--- a/python/paddle/utils/profiler.py
+++ b/python/paddle/utils/profiler.py
@@ -34,7 +34,7 @@ __all__ = [  # noqa
 ]


-class ProfilerOptions(object):
+class ProfilerOptions:
     def __init__(self, options=None):
         self.options = {
             'state': 'All',
@@ -74,7 +74,7 @@ class ProfilerOptions(object):
 _current_profiler = None


-class Profiler(object):
+class Profiler:
     def __init__(self, enabled=True, options=None):
         if options is not None:
             self.profiler_options = options
diff --git a/python/paddle/vision/transforms/transforms.py b/python/paddle/vision/transforms/transforms.py
index 78413da4f2..f5cbd90ffc 100644
--- a/python/paddle/vision/transforms/transforms.py
+++ b/python/paddle/vision/transforms/transforms.py
@@ -84,7 +84,7 @@ def _check_input(
     return value


-class Compose(object):
+class Compose:
     """
     Composes several transforms together, used to compose a list of
     transforms to apply to a dataset.
@@ -137,7 +137,7 @@ class Compose(object):
         return format_string


-class BaseTransform(object):
+class BaseTransform:
     """
     Base class of all transforms used in computer vision.
diff --git a/tools/CrossStackProfiler/CspChromeTraceFormatter.py b/tools/CrossStackProfiler/CspChromeTraceFormatter.py
index 1fa8efe988..fb24e0634e 100755
--- a/tools/CrossStackProfiler/CspChromeTraceFormatter.py
+++ b/tools/CrossStackProfiler/CspChromeTraceFormatter.py
@@ -15,7 +15,7 @@
 import json


-class ChromeTraceFormatter(object):
+class ChromeTraceFormatter:
     def __init__(self):
         self._events = []
         self._metadata = []
diff --git a/tools/CrossStackProfiler/CspFileReader.py b/tools/CrossStackProfiler/CspFileReader.py
index 11dd052283..55a1722be6 100755
--- a/tools/CrossStackProfiler/CspFileReader.py
+++ b/tools/CrossStackProfiler/CspFileReader.py
@@ -73,7 +73,7 @@ FILEORGANIZEFORM = [
 ]


-class FileReader(object):
+class FileReader:
     def __init__(self, logger, args):
         self._logger = logger
         self._args = args
diff --git a/tools/CrossStackProfiler/CspReporter.py b/tools/CrossStackProfiler/CspReporter.py
index 052ffd6fca..999ba4fb3e 100755
--- a/tools/CrossStackProfiler/CspReporter.py
+++ b/tools/CrossStackProfiler/CspReporter.py
@@ -64,7 +64,7 @@ def get_argparse():
     return parser.parse_args()


-class CspReporter(object):
+class CspReporter:
     def __init__(self, args):
         self._args = args
         print(self._args)
diff --git a/tools/check_ut.py b/tools/check_ut.py
index e08c358531..6f1a8ab02b 100644
--- a/tools/check_ut.py
+++ b/tools/check_ut.py
@@ -19,7 +19,7 @@ import os.path
 from github import Github


-class PRChecker(object):
+class PRChecker:
     """PR Checker."""

     def __init__(self):
diff --git a/tools/codestyle/docstring_checker.py b/tools/codestyle/docstring_checker.py
index 0d163c20bb..8deeff7734 100644
--- a/tools/codestyle/docstring_checker.py
+++ b/tools/codestyle/docstring_checker.py
@@ -27,7 +27,7 @@ def register(linter):
     linter.register_checker(DocstringChecker(linter))


-class Docstring(object):
+class Docstring:
     """Docstring class holds the parsed doc string elements."""

     def __init__(self):
diff --git a/tools/get_pr_ut.py b/tools/get_pr_ut.py
index d3a1c41682..02aaf13a17 100644
--- a/tools/get_pr_ut.py
+++ b/tools/get_pr_ut.py
@@ -30,7 +30,7 @@ PADDLE_ROOT = PADDLE_ROOT.replace('//', '/')
 ssl._create_default_https_context = ssl._create_unverified_context


-class PRChecker(object):
+class PRChecker:
     """PR Checker."""

     def __init__(self):
diff --git a/tools/timeline.py b/tools/timeline.py
index 5323ea0f90..a8c6699b27 100644
--- a/tools/timeline.py
+++ b/tools/timeline.py
@@ -31,7 +31,7 @@ parser.add_argument(
 )
 args = parser.parse_args()


-class _ChromeTraceFormatter(object):
+class _ChromeTraceFormatter:
     def __init__(self):
         self._events = []
         self._metadata = []
@@ -126,7 +126,7 @@ class _ChromeTraceFormatter(object):
         return json.dumps(trace, separators=(',', ':'))


-class Timeline(object):
+class Timeline:
     def __init__(self, profile_dict):
         self._profile_dict = profile_dict
         self._pid = 0
--
GitLab
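For readers unfamiliar with why the `object` base can be dropped wholesale: in Python 3 every class is a new-style class that implicitly inherits from `object`, so `class Foo(object)` and `class Foo` define identical classes, and listing `object` alongside a real base such as `build_ext` is equally redundant. The sketch below is a minimal illustration of this equivalence; the class names are hypothetical and do not come from the patch.

# Hypothetical classes illustrating the rewrite; in Python 3 all four
# definitions below produce ordinary new-style classes.
class Explicit(object):  # pre-patch spelling
    pass


class Implicit:  # post-patch spelling
    pass


class Base:
    pass


class MultiBase(Base, object):  # `object` is redundant next to a real base
    pass


# The method resolution order is identical either way.
assert Explicit.__mro__ == (Explicit, object)
assert Implicit.__mro__ == (Implicit, object)
assert MultiBase.__mro__ == (MultiBase, Base, object)

Under Python 2, the explicit base was what selected new-style class semantics, which is why hunks such as `class BuildExtension(build_ext, object)` carried the extra base; once the codebase targets Python 3 only, the shorter form behaves identically.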