Unverified commit 888272b5, authored by Nyakku Shigure, committed by GitHub

[CodeStyle][py2][U004] unnecessary explicit `object` inheritance in class definition (#47642)

* [CodeStyle][py2][U004] unnecessary explicit `object` inheritance in class definition

* fix an increment
Parent e5bb8785
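For readers unfamiliar with the rule behind this cleanup: in Python 3 every class is a new-style class and implicitly inherits from `object`, so spelling out the base adds nothing; it mattered only in Python 2, where the bare form created a legacy old-style class. A minimal sketch of the equivalence (class names here are illustrative, not taken from the diff):

```python
# In Python 3 both forms define the same kind of class,
# so the explicit (object) base is redundant.
class WithExplicitBase(object):
    pass


class WithImplicitBase:
    pass


# Both classes have object at the root of their MRO.
assert WithExplicitBase.__mro__ == (WithExplicitBase, object)
assert WithImplicitBase.__mro__ == (WithImplicitBase, object)

# Only in Python 2 did the bare form create a legacy "old-style" class,
# which is why the explicit base used to be written out.
```

Because the two forms produce identical classes, the diff below is purely mechanical: each `class X(object):` becomes `class X:`, and for `KVHTTPServer` and `KVServer` the redundant extra `object` base alongside `HTTPServer` is dropped.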
......@@ -239,7 +239,7 @@ def efficientnet_params(model_name):
return params_dict[model_name]
class BlockDecoder(object):
class BlockDecoder:
"""Block Decoder for readability, straight from the official TensorFlow repository"""
@staticmethod
......
......@@ -19,7 +19,7 @@ PREFIX_TENSOR_NAME = 'input_'
PREFIX_META_TENSOR_NAME = 'meta_'
class BaseAPI(object):
class BaseAPI:
def __init__(self, api_item_yaml):
self.api = self.get_api_name(api_item_yaml)
......
......@@ -19,7 +19,7 @@ import paddle
from paddle import Tensor
class WindowFunctionRegister(object):
class WindowFunctionRegister:
def __init__(self):
self._functions_dict = dict()
......
......@@ -21,7 +21,7 @@ from paddle.fluid import core
__all__ = []
class LegacyPyLayerContext(object):
class LegacyPyLayerContext:
"""
The object of this class is a context that is used in PyLayer to enhance the function.
......@@ -131,7 +131,7 @@ def with_mateclass(meta, *bases):
return type.__new__(impl, "impl", (), {})
class CPyLayer(object):
class CPyLayer:
@classmethod
@dygraph_only
def apply(cls, *args, **kwargs):
......@@ -336,7 +336,7 @@ class LegacyPyLayer(with_mateclass(LayerMeta, CPyLayer)):
)
class EagerPyLayerContext(object):
class EagerPyLayerContext:
def save_for_backward(self, *tensors):
"""
Saves given tensors that backward need. Use ``saved_tensor`` in the `backward` to get the saved tensors.
......
......@@ -31,7 +31,7 @@ URL = 'https://dataset.bj.bcebos.com/imikolov%2Fsimple-examples.tgz'
MD5 = '30177ea32e27c525793142b6bf2c8e2d'
class DataType(object):
class DataType:
NGRAM = 1
SEQ = 2
......
......@@ -38,7 +38,7 @@ URL = 'https://dataset.bj.bcebos.com/movielens%2Fml-1m.zip'
MD5 = 'c4d9eecfca2ab87c1945afe126590906'
class MovieInfo(object):
class MovieInfo:
"""
Movie id, title and categories information are stored in MovieInfo.
"""
......@@ -69,7 +69,7 @@ class MovieInfo(object):
return self.__str__()
class UserInfo(object):
class UserInfo:
"""
User id, gender, age, and job information are stored in UserInfo.
"""
......
......@@ -116,7 +116,7 @@ class DeviceMesh(core.DeviceMesh):
return self._mesh
# class Cluster(object):
# class Cluster:
# """
# The cluster represents the hardware resource.
# """
......
......@@ -19,7 +19,7 @@ import numpy as np
from ..utils.log_utils import get_logger
class Converter(object):
class Converter:
"""
Converter is a class object for auto parallel to convert tensors from
one parallel strategy to another one. Tensors will merge and slice value
......
......@@ -35,7 +35,7 @@ class CostNodeType(Enum):
NOP = 5
class Cost(object):
class Cost:
def __init__(self):
self.runtime = None
self.static_mem = None
......@@ -49,7 +49,7 @@ class CostModelMode(Enum):
MIXED = 3
class CostNode(object):
class CostNode:
def __init__(self, node, node_type, id=None):
self.id = id
self.node = node
......@@ -172,7 +172,7 @@ class CompOpCostNode(CostNode):
self.cost = 0.0
class PipeEvent(object):
class PipeEvent:
def __init__(self, stage_id, event_name, duration, start_time=-1):
self.stage_id = stage_id
self.name = event_name
......@@ -181,7 +181,7 @@ class PipeEvent(object):
self.e_time = -1
class CostModel(object):
class CostModel:
def __init__(
self,
mode=CostModelMode.BENCHMARKING,
......
......@@ -1146,7 +1146,7 @@ class DistributedOperatorContext:
return kinputs, koutputs
class BlockState(object):
class BlockState:
def __init__(self):
self.nblock = 0
self.forward_indices = []
......
......@@ -192,7 +192,7 @@ class BuildInfo:
self.states = defaultdict(bool)
class ProgramHelper(object):
class ProgramHelper:
"""
A Helper class for Engine to provides different Program IR according specified 'mode'.
"""
......
......@@ -220,7 +220,7 @@ def recompute(op):
_g_collections = {}
class CollectionNames(object):
class CollectionNames:
FETCHES = "fetches"
LOGGING = "logging"
......
......@@ -32,7 +32,7 @@ __not_shape_var_type__ = [
]
class Partitioner(object):
class Partitioner:
"""
warning:: Partitioner is experimental and subject to change.
......
......@@ -39,7 +39,7 @@ def reset_current_process_mesh():
_g_current_process_mesh = _g_previous_process_mesh
class ProcessMesh(object):
class ProcessMesh:
"""
The `Processmesh` object describes the topology of the used processes.
......
......@@ -16,7 +16,7 @@ import copy
from . import constants
class BaseConfig(object):
class BaseConfig:
def __init__(self, category, config_dict=None):
self._category = category
self._config_dict = None
......
......@@ -25,7 +25,7 @@ def _get_pass_config(strategy, pass_name):
return config
class TuningConfig(object):
class TuningConfig:
"""
A uniform config wrap:
distributed strategy: the user defined configuration for optimization pass
......
......@@ -18,7 +18,7 @@
import numpy as np
class MetricRecord(object):
class MetricRecord:
"""
One record for a single metric at a given execution step.
"""
......@@ -62,7 +62,7 @@ class MetricRecord(object):
return "MetricRecord(value={}, step={})".format(self.value, self.step)
class MetricRecords(object):
class MetricRecords:
"""
Records of a single metric across different executions.
"""
......@@ -143,7 +143,7 @@ class MetricRecords(object):
return records
class MetricsRecorder(object):
class MetricsRecorder:
"""
Record the values for all metrics.
"""
......
......@@ -18,7 +18,7 @@
import json
class Storable(object):
class Storable:
def get_state(self):
raise NotImplementedError
......
......@@ -22,7 +22,7 @@ from .tunable_variable import IntRange
from .tunable_variable import FloatRange
class TunableSpace(object):
class TunableSpace:
"""
A TunableSpace is constructed by the tunable variables.
"""
......
......@@ -18,7 +18,7 @@
import numpy as np
class TunableVariable(object):
class TunableVariable:
"""
Tunablevariable base class.
"""
......
......@@ -22,7 +22,7 @@ from paddle.distributed.communication.group import (
)
class P2POp(object):
class P2POp:
"""
A class that makes point-to-point operations for "batch_isend_irecv".
......
......@@ -16,7 +16,7 @@ import argparse
import os
class Command(object):
class Command:
def __init__(self, server, name):
import etcd3
......
......@@ -15,7 +15,7 @@
__all__ = []
class EntryAttr(object):
class EntryAttr:
"""
Entry Config for paddle.static.nn.sparse_embedding with Parameter Server.
......
......@@ -65,7 +65,7 @@ def check_configs_key(msg, config, field_name):
assert key in key_list, "key:{} not in {}".format(key, field_name)
class DistributedJobInfo(object):
class DistributedJobInfo:
"""
DistributedJobInfo will serialize all distributed training information
Just for inner use: 1) debug 2) replicate experiments
......@@ -106,7 +106,7 @@ ReduceStrategyFluid = paddle.fluid.BuildStrategy.ReduceStrategy
ReduceStrategyFleet = int
class DistributedStrategy(object):
class DistributedStrategy:
__lock_attr = False
def __init__(self):
......
......@@ -26,7 +26,7 @@ meta_optimizer_names.remove("HybridParallelOptimizer")
meta_optimizer_names.remove("HeterParallelOptimizer")
class MetaOptimizerFactory(object):
class MetaOptimizerFactory:
def __init__(self):
pass
......
......@@ -35,7 +35,7 @@ class Role:
COORDINATOR = 5
class Gloo(object):
class Gloo:
"""
Gloo is a universal class for barrier and collective communication
"""
......@@ -383,7 +383,7 @@ class Gloo(object):
return output
class RoleMakerBase(object):
class RoleMakerBase:
"""
RoleMakerBase is a base class for assigning a role to current process
in distributed training.
......
......@@ -17,7 +17,7 @@ from ...ps.the_one_ps import TheOnePSRuntime
__all__ = []
class RuntimeFactory(object):
class RuntimeFactory:
def __init__(self):
pass
......
......@@ -106,7 +106,7 @@ def maximum_path_len_algo(optimizer_list):
return candidate
class StrategyCompilerBase(object):
class StrategyCompilerBase:
def __init__(self):
pass
......
......@@ -23,7 +23,7 @@ __all__ = ['CommunicateTopology', 'HybridCommunicateGroup']
_HYBRID_PARALLEL_GROUP = None
class ParallelMode(object):
class ParallelMode:
"""
There are all the parallel modes currently supported:
- DATA_PARALLEL: Distribute input data to different devices.
......@@ -47,7 +47,7 @@ class ParallelMode(object):
SHARDING_PARALLEL = 3
class CommunicateTopology(object):
class CommunicateTopology:
def __init__(
self,
hybrid_group_names=["data", "pipe", "sharding", "model"],
......@@ -133,7 +133,7 @@ class CommunicateTopology(object):
return self.get_rank(**tf)
class HybridCommunicateGroup(object):
class HybridCommunicateGroup:
def __init__(self, topology):
self.nranks = paddle.distributed.get_world_size()
self.global_rank = paddle.distributed.get_rank()
......@@ -410,7 +410,7 @@ class HybridCommunicateGroup(object):
)
class _CommunicateGroup(object):
class _CommunicateGroup:
"""tmp for static"""
def __init__(self):
......
......@@ -31,7 +31,7 @@ import numpy as np
__all__ = []
class UtilFactory(object):
class UtilFactory:
def _create_util(self, context=None):
util = UtilBase()
if context is not None and "valid_strategy" in context:
......@@ -41,7 +41,7 @@ class UtilFactory(object):
return util
class UtilBase(object):
class UtilBase:
def __init__(self):
self.role_maker = None
self.dist_strategy = None
......
......@@ -17,7 +17,7 @@ import sys
__all__ = []
class DataGenerator(object):
class DataGenerator:
"""
DataGenerator is a general Base class for user to inherit
A user who wants to define his/her own python processing logic
......
......@@ -20,7 +20,7 @@ import paddle.fluid.core as core
__all__ = []
class DatasetBase(object):
class DatasetBase:
"""Base dataset class."""
def __init__(self):
......
......@@ -16,7 +16,7 @@ from paddle.fluid import core
__all__ = []
class Index(object):
class Index:
def __init__(self, name):
self._name = name
......
......@@ -52,7 +52,7 @@ class ElasticStatus:
EXIT = "exit"
class LauncherInterface(object):
class LauncherInterface:
def __init__(self, args):
self.args = args
self.procs = []
......@@ -124,7 +124,7 @@ class LauncherInterface(object):
raise NotImplementedError
class ElasticManager(object):
class ElasticManager:
def __init__(self, args, etcd_client):
self.args = args
......
......@@ -95,7 +95,7 @@ inited_runtime_handler = wrap_decorator(_inited_runtime_handler_)
is_non_distributed_check = wrap_decorator(_is_non_distributed_check_)
class Fleet(object):
class Fleet:
"""
Unified API for distributed training of PaddlePaddle
Please reference the https://github.com/PaddlePaddle/PaddleFleetX for details
......
......@@ -60,7 +60,7 @@ class DeviceMode:
MLU = 4
class Cluster(object):
class Cluster:
def __init__(self, hdfs):
self.job_server = None
self.pods = []
......@@ -133,7 +133,7 @@ class Cluster(object):
return None
class JobServer(object):
class JobServer:
def __init__(self):
self.endpoint = None
......@@ -147,7 +147,7 @@ class JobServer(object):
return not self == j
class Trainer(object):
class Trainer:
def __init__(self):
self.accelerators = []
self.endpoint = None
......@@ -179,7 +179,7 @@ class Trainer(object):
return self.rank
class Pod(object):
class Pod:
def __init__(self):
self.rank = None
self.id = None
......@@ -483,7 +483,7 @@ def pretty_print_envs(envs, header=None):
return _str
class TrainerProc(object):
class TrainerProc:
def __init__(self):
self.proc = None
self.log_fn = None
......@@ -1278,7 +1278,7 @@ def get_mapped_cluster_from_args_with_rank_mapping(args, device_mode):
)
class ParameterServerLauncher(object):
class ParameterServerLauncher:
def __init__(self, args, distribute_mode):
self.args = args
self.distribute_mode = distribute_mode
......
......@@ -24,7 +24,7 @@ HcomGroupConfig = namedtuple('HcomGroupConfig', ['name', 'nranks', 'rank_ids'])
__all__ = []
class AscendIRParser(object):
class AscendIRParser:
def __init__(self, auto_dp=False, world_rank_size=1):
self.graph_idx = 0
self.hcom_endpoints = {}
......
......@@ -101,7 +101,7 @@ global_cnt = -1
global_input_cnt = -1
class AscendHelper(object):
class AscendHelper:
def __init__(self):
self.dtype2ge_map = {
0: core.GEDataType.DT_BOOL,
......@@ -136,7 +136,7 @@ class AscendHelper(object):
return self.dtype2np_map[index]
class AscendParserFactory(object):
class AscendParserFactory:
def __init__(self, graph, var2geop):
self.graph = graph
self.var2geop = var2geop
......@@ -149,7 +149,7 @@ class AscendParserFactory(object):
raise ValueError("parser class %s does not exist" % parser_class)
class AscendParserBase(object):
class AscendParserBase:
def __init__(self, graph, var2geop):
self.graph = graph
self.var2geop = var2geop
......
......@@ -53,7 +53,7 @@ def is_optimizer_op(op):
) & int(OpRole.Optimize)
class CollectiveHelper(object):
class CollectiveHelper:
def __init__(self, role_maker, nrings=1, wait_port=True):
self.nrings = nrings
self.wait_port = wait_port
......
......@@ -25,7 +25,7 @@ def _is_trainable(param):
return not param.stop_gradient
class DygraphShardingOptimizer(object):
class DygraphShardingOptimizer:
"""
A wrapper for Sharding Optimizer in Dygraph.
......
......@@ -23,7 +23,7 @@ from paddle.fluid import core
__all__ = []
class FP16Utils(object):
class FP16Utils:
def __init__(self):
pass
......
......@@ -17,7 +17,7 @@ from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
__all__ = []
class GradientClipHelper(object):
class GradientClipHelper:
def __init__(self, mp_ring_id):
self.mp_ring_id = mp_ring_id
......
......@@ -44,7 +44,7 @@ class PlaceType:
return PlaceType.CPU
class OffloadHelper(object):
class OffloadHelper:
cpu_place_type = 0
cuda_place_type = PlaceType.default_device()
cuda_pinned_place_type = PlaceType.default_pinned()
......
......@@ -15,7 +15,7 @@
__all__ = []
class ProgramDeps(object):
class ProgramDeps:
def __init__(self, block, start_vars, end_vars):
self._block = block
# vars where to start to build the deps
......
......@@ -22,7 +22,7 @@ from paddle.distributed.fleet.meta_optimizers.sharding.fp16_helper import (
__all__ = []
class Shard(object):
class Shard:
def __init__(
self,
):
......@@ -155,7 +155,7 @@ class Shard(object):
return grads_in_shard
class ProgramSegment(object):
class ProgramSegment:
def __init__(self, block):
self._block = block
self._allreduce_vars = []
......
......@@ -408,7 +408,7 @@ def insert_allreduce_ops(
return
class FuseHelper(object):
class FuseHelper:
@staticmethod
def sort_vars_by_dtype(block, vars_name):
fp32_vars = []
......
......@@ -17,7 +17,7 @@ from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_VAR_KEY
__all__ = []
class WeightDecayHelper(object):
class WeightDecayHelper:
def __init__(self):
pass
......
......@@ -53,7 +53,7 @@ from paddle.incubate.distributed.fleet import recompute_hybrid
__all__ = []
class LayerDesc(object):
class LayerDesc:
def __init__(self, layer_func, *inputs, **kwargs):
self.layer_func = layer_func
self.inputs = inputs
......@@ -89,7 +89,7 @@ class SharedLayerDesc(LayerDesc):
self.shared_weight_attr = shared_weight_attr
class SegmentLayers(object):
class SegmentLayers:
def __init__(
self,
layers_desc,
......
......@@ -15,7 +15,7 @@
__all__ = []
class RuntimeBase(object):
class RuntimeBase:
def __init__(self):
pass
......
......@@ -46,7 +46,7 @@ class FSShellCmdAborted(ExecuteError):
pass
class FS(object):
class FS:
@abc.abstractmethod
def ls_dir(self, fs_path):
raise NotImplementedError
......
......@@ -128,7 +128,7 @@ class KVHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
self.end_headers()
class KVHTTPServer(HTTPServer, object):
class KVHTTPServer(HTTPServer):
"""
it is a http server storing kv pairs.
"""
......
......@@ -20,7 +20,7 @@ import paddle.distributed.fleet as fleet
import numpy as np
class HybridParallelInferenceHelper(object):
class HybridParallelInferenceHelper:
"""
A helper class to split program for inference with hybrid parallelism.
......
......@@ -21,7 +21,7 @@ from .args_envs import parse_args, fetch_envs, env_args_mapping
import logging
class Context(object):
class Context:
def __init__(self, enable_plugin=True):
self.args, self.unknown_args = parse_args()
self.envs = fetch_envs()
......
......@@ -27,7 +27,7 @@ class DeviceType:
CUSTOM_DEVICE = 'custom_device'
class Device(object):
class Device:
def __init__(self, dtype=None, memory="", labels=""):
self._dtype = dtype
self._memory = memory
......
......@@ -13,7 +13,7 @@
# limitations under the License.
class Event(object):
class Event:
def __init__(self, kind="status", message="", fatal=False):
self.kind = kind
self.message = message
......
......@@ -21,7 +21,7 @@ import struct
from contextlib import closing
class Node(object):
class Node:
def __init__(self):
# self.device = Device.detect_device()
self.device = Device.parse_device()
......
......@@ -13,6 +13,6 @@
# limitations under the License.
class Resource(object):
class Resource:
def __init__(self):
self.devices = []
......@@ -13,7 +13,7 @@
# limitations under the License.
class Status(object):
class Status:
UNINIT = "uninit"
READY = "ready"
RUNNING = "running"
......
......@@ -31,7 +31,7 @@ class ControleMode:
RPC = "rpc"
class ControllerBase(object):
class ControllerBase:
def __init__(self, ctx):
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGABRT, self.signal_handler)
......
......@@ -24,7 +24,7 @@ import random
ETCD_PROTOCAL = 'etcd://'
class Master(object):
class Master:
'''
Master is a distributed store design to exchange info among nodes
'''
......
......@@ -19,7 +19,7 @@ import os
from threading import Thread
class Watcher(object):
class Watcher:
def __init__(self, ctx):
self.ctx = ctx
......
......@@ -20,7 +20,7 @@ import os
import sys
class Container(object):
class Container:
'''
TODO(kuizhiqing) A container can be run by process/thread or just a callable function
'''
......
......@@ -19,7 +19,7 @@ class JobMode:
HETER = 'heter'
class Job(object):
class Job:
def __init__(self, jid='default', mode=JobMode.COLLECTIVE, nnodes="1"):
self._mode = mode
self._id = jid
......
......@@ -20,7 +20,7 @@ import random
import time
class PodSepc(object):
class PodSepc:
def __init__(self):
self._name = ''.join(
random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(6)
......
......@@ -13,7 +13,7 @@
# limitations under the License.
class Status(object):
class Status:
UNINIT = "uninit"
READY = "ready"
RUNNING = "running"
......
......@@ -16,7 +16,7 @@ import requests
import time
class KVClient(object):
class KVClient:
def __init__(self, endpoint='localhost:2379'):
self.endpoint = (
endpoint
......
......@@ -67,7 +67,7 @@ class KVHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
return
class KVServer(HTTPServer, object):
class KVServer(HTTPServer):
def __init__(self, port):
super().__init__(('', port), KVHandler)
self.kv_lock = threading.Lock()
......
......@@ -18,7 +18,7 @@ import json
import shutil
class Info(object):
class Info:
def __repr__(self):
return str(self.__dict__)
......
......@@ -16,7 +16,7 @@ import subprocess
import os, sys, signal, time
class ProcessContext(object):
class ProcessContext:
def __init__(
self,
cmd,
......
......@@ -54,7 +54,7 @@ from ..auto_parallel.utils import is_forward_op, is_backward_op, is_loss_op
world_process_group = get_world_process_group()
class AMPState(object):
class AMPState:
def __init__(self, block):
self._block = block
self._op_fp16_dict = (
......
......@@ -572,7 +572,7 @@ class DataParallelOptimizationPass(PassBase):
self._logger.info("individual gradient {}".format(individual_grads))
class GradientsGroup(object):
class GradientsGroup:
def __init__(self, ops, max_group_size):
self.max_group_size = max_group_size
self.ops = ops
......
......@@ -126,7 +126,7 @@ def _keep_fp32_output(op, out_name):
return False
class FP16State(object):
class FP16State:
def __init__(
self,
program,
......
......@@ -143,7 +143,7 @@ def _is_about_global_norm(
return rank_id in complete_param_ranks
class ClipHelper(object):
class ClipHelper:
def __init__(self, params_grads, rank_id, block, dist_context):
params, _ = zip(*params_grads)
self.params = list(params)
......
......@@ -850,7 +850,7 @@ def shard_parameters(params, group_size):
return mapping
class ShardingInfo(object):
class ShardingInfo:
def __init__(self, group, rank, params_grads):
self.group = group
self.params_grads = dict([(p.name, (p, g)) for p, g in params_grads])
......
......@@ -353,7 +353,7 @@ class FLClient(FLClientBase):
f.write(str(self.train_statical_info))
class Coordinator(object):
class Coordinator:
def __init__(self, ps_hosts):
self._communicator = FLCommunicator(ps_hosts)
self._client_selector = None
......
......@@ -914,7 +914,7 @@ class fsClient:
proto.hadoop_bin = self.fs_client_param.hadoop_bin
class PsDescBuilder(object):
class PsDescBuilder:
def __init__(self, context):
self.context = context
self.is_sync = context['is_sync']
......
......@@ -27,7 +27,7 @@ __all__ = [
]
class PsProgramBuilderFactory(object):
class PsProgramBuilderFactory:
def __init__(self):
pass
......
......@@ -19,7 +19,7 @@ from paddle.distributed.fleet.base.private_helper_function import (
from paddle.distributed.passes import new_pass
class PsProgramBuilder(object):
class PsProgramBuilder:
def __init__(self, pass_ctx):
self.pass_ctx = pass_ctx
self.attrs = self.pass_ctx._attrs
......
......@@ -88,7 +88,7 @@ class DistributedMode:
NU = 5
class TrainerRuntimeConfig(object):
class TrainerRuntimeConfig:
def __init__(self, valid_strategy):
self.mode = None
num_threads = os.getenv("CPU_NUM", "1")
......
......@@ -43,7 +43,7 @@ from paddle.fluid.framework import set_flags
__all__ = []
class ParallelEnvArgs(object):
class ParallelEnvArgs:
def __init__(self):
# Paddle cluster nodes ips, such as 192.168.0.16,192.168.0.17..
self.cluster_node_ips = None
......@@ -412,7 +412,7 @@ def _func_wrapper(func, args, error_queue, return_queue, env_dict, backend):
sys.exit(1)
class MultiprocessContext(object):
class MultiprocessContext:
def __init__(self, processes, error_queues, return_queues):
_py_supported_check()
self.error_queues = error_queues
......
......@@ -99,7 +99,7 @@ def get_gpus(selected_gpus):
return gpus
class Hdfs(object):
class Hdfs:
def __init__(self):
self.hdfs_ugi = None
self.hdfs_name = None
......@@ -128,7 +128,7 @@ class Hdfs(object):
return not self == n
class Cluster(object):
class Cluster:
def __init__(self, hdfs):
self.job_server = None
self.pods = []
......@@ -194,7 +194,7 @@ class Cluster(object):
return None
class JobServer(object):
class JobServer:
def __init__(self):
self.endpoint = None
......@@ -208,7 +208,7 @@ class JobServer(object):
return not self == j
class Trainer(object):
class Trainer:
def __init__(self):
self.gpus = []
self.endpoint = None
......@@ -239,7 +239,7 @@ class Trainer(object):
return self.rank
class Pod(object):
class Pod:
def __init__(self):
self.rank = None
self.id = None
......@@ -454,7 +454,7 @@ def _prepare_trainer_env(cluster, trainer, backend=None):
return proc_env
class TrainerProc(object):
class TrainerProc:
def __init__(self):
self.proc = None
self.log_fn = None
......
......@@ -14,7 +14,7 @@
import paddle
class Constraint(object):
class Constraint:
"""Constraint condition for random variable."""
def __call__(self, value):
......
......@@ -33,7 +33,7 @@ from paddle.fluid.framework import (
from paddle.fluid.layers import tensor
class Distribution(object):
class Distribution:
"""
The abstract base class for probability distributions. Functions are
implemented in specific distributions.
......
......@@ -127,7 +127,7 @@ def _dispatch(cls_p, cls_q):
@functools.total_ordering
class _Compare(object):
class _Compare:
def __init__(self, *classes):
self.classes = classes
......
......@@ -58,7 +58,7 @@ class Type(enum.Enum):
return _type in (cls.BIJECTION, cls.INJECTION)
class Transform(object):
class Transform:
r"""Base class for the transformations of random variables.
``Transform`` can be used to represent any differentiable and injective
......
......@@ -15,7 +15,7 @@
from paddle.distribution import constraint
class Variable(object):
class Variable:
"""Random variable of probability distribution.
Args:
......
......@@ -39,7 +39,7 @@ def _is_number_or_matrix_(var):
return _is_number_(var) or isinstance(var, np.ndarray)
class WeightedAverage(object):
class WeightedAverage:
"""
Calculate weighted average.
......
......@@ -41,7 +41,7 @@ _logger = log_helper.get_logger(
)
class ProgramStats(object):
class ProgramStats:
def __init__(self, block, ops):
self.block = block
self.ops = ops
......@@ -789,7 +789,7 @@ def _find_not_need_ops(grad_op_descs, forward_ops, input_grad_names_set):
(set[core.OpDesc]): A set of OpDescs which should be pruned.
"""
class Var(object):
class Var:
def __init__(self, var_name):
self.var_name = var_name
self.gen_op = None
......@@ -804,7 +804,7 @@ def _find_not_need_ops(grad_op_descs, forward_ops, input_grad_names_set):
assert isinstance(op, Op)
self.pendding_ops.append(op)
class Op(object):
class Op:
def __init__(self, op_desc):
self.op_desc = op_desc
self.inputs = []
......
......@@ -92,7 +92,7 @@ def _squared_l2_norm(x):
return out
class BaseErrorClipAttr(object):
class BaseErrorClipAttr:
def __str__(self):
raise NotImplementedError()
......@@ -177,7 +177,7 @@ def error_clip_callback(block, context):
error_clip._append_clip_op(block, grad_n)
class ClipGradBase(object):
class ClipGradBase:
def __init__(self):
super().__init__()
......
......@@ -38,7 +38,7 @@ from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode
__all__ = ['Communicator', 'FLCommunicator', 'LargeScaleKV']
class Communicator(object):
class Communicator:
def __init__(self, mode, kwargs=None, envs=None):
"""
Communicator is used for async distribute training in distribute_transpiler mode.
......@@ -246,7 +246,7 @@ class FLCommunicator(Communicator): ## only for coordinator
return info_mp
class LargeScaleKV(object):
class LargeScaleKV:
def __init__(self):
self.scale_kv = core.LargeScaleKV()
......@@ -260,7 +260,7 @@ class LargeScaleKV(object):
return self.scale_kv.size(varname)
class HeterClient(object):
class HeterClient:
def __init__(self, endpoint, previous_endpoint, trainer_id):
self.heter_client_ = core.HeterClient(
endpoint, previous_endpoint, trainer_id
......
......@@ -105,7 +105,7 @@ def _should_broadcast_or_not_exists(program, var_name):
return not is_distributed
class CompiledProgram(object):
class CompiledProgram:
"""
:api_attr: Static Graph
......@@ -567,7 +567,7 @@ class CompiledProgram(object):
return place_list
class IpuDynamicPatcher(object):
class IpuDynamicPatcher:
"""
Patcher for IPU dynamic2static support.
"""
......@@ -777,7 +777,7 @@ class IpuDynamicPatcher(object):
setattr(module, key, attr)
class IpuStrategy(object):
class IpuStrategy:
"""
Help users precisely control the graph building in :code:`paddle.static.IpuCompiledProgram` .
......@@ -1237,7 +1237,7 @@ class IpuStrategy(object):
return self.get_option('enable_fp16')
class IpuCompiledProgram(object):
class IpuCompiledProgram:
"""
The IpuCompiledProgram is used to transform a program to a ipu-target program,
such as forward graph extraction, computing graph transformation, useless scale Ops clean, etc.
......
......@@ -37,7 +37,7 @@ class _DecoderType:
BEAM_SEARCH = 2
class InitState(object):
class InitState:
"""
The initial hidden state object. The state objects holds a variable, and may
use it to initialize the hidden state cell of RNN. Usually used as input to
......@@ -98,7 +98,7 @@ class InitState(object):
return self._need_reorder
class _MemoryState(object):
class _MemoryState:
def __init__(self, state_name, rnn_obj, init_state):
self._state_name = state_name # each is a rnn.memory
self._rnn_obj = rnn_obj
......@@ -113,7 +113,7 @@ class _MemoryState(object):
self._rnn_obj.update_memory(self._state_mem, state)
class _ArrayState(object):
class _ArrayState:
def __init__(self, state_name, block, init_state):
self._state_name = state_name
self._block = block
......@@ -161,7 +161,7 @@ class _ArrayState(object):
layers.array_write(state, array=self._state_array, i=self._counter)
class StateCell(object):
class StateCell:
"""
The state cell class stores the hidden state of the RNN cell. A typical RNN
cell has one or more hidden states, and one or more step inputs. This class
......@@ -401,7 +401,7 @@ class StateCell(object):
return self._cur_states[self._out_state]
class TrainingDecoder(object):
class TrainingDecoder:
"""
A decoder that can only be used for training. The decoder could be
initialized with a `StateCell` object. The computation within the RNN cell
......@@ -547,7 +547,7 @@ class TrainingDecoder(object):
)
class BeamSearchDecoder(object):
class BeamSearchDecoder:
"""
A beam search decoder that can be used for inference. The decoder should be
initialized with a `StateCell` object. The decode process can be defined
......
......@@ -17,7 +17,7 @@ from paddle.fluid import framework as framework
__all__ = ["extend_with_decoupled_weight_decay"]
class DecoupledWeightDecay(object):
class DecoupledWeightDecay:
def __init__(self, coeff=0.0, apply_decay_param_fun=None, **kwargs):
if not isinstance(coeff, float) and not isinstance(
coeff, framework.Variable
......
......@@ -24,7 +24,7 @@ from ..fp16_lists import (
__all__ = ["AutoMixedPrecisionListsBF16"]
class AutoMixedPrecisionListsBF16(object):
class AutoMixedPrecisionListsBF16:
"""
AutoMixedPrecisionListsBF16 is a class for fp32/bf16 op types list. The lists are used for an
algorithm which determines op's execution mode (fp32 or bf16).It can update pre-defined
......
......@@ -31,7 +31,7 @@ import warnings
__all__ = ["decorate_bf16"]
class OptimizerWithMixedPrecision(object):
class OptimizerWithMixedPrecision:
"""
Optimizer with mixed-precision (MP) training. This is a wrapper of a common
optimizer, plus the support of mixed-precision pre-training. The object
......
......@@ -34,7 +34,7 @@ import paddle
__all__ = ["decorate"]
class OptimizerWithMixedPrecision(object):
class OptimizerWithMixedPrecision:
"""
Optimizer with mixed-precision (MP) training. This is a wrapper of a common
optimizer, plus the support of mixed-precision pre-training. The object
......
......@@ -26,7 +26,7 @@ _extra_unsupported_fp16_list = {
}
class AutoMixedPrecisionLists(object):
class AutoMixedPrecisionLists:
"""
AutoMixedPrecisionLists is a class for black/white list. It can update
pre-defined black list and white list according to users' custom black
......
......@@ -81,7 +81,7 @@ def quant(x, scale, num_bits):
return y
class QuantizeTranspiler(object):
class QuantizeTranspiler:
def __init__(
self,
weight_bits=8,
......
......@@ -51,7 +51,7 @@ def compute_soft_rounding_np(alpha_v):
)
class AdaRoundLoss(object):
class AdaRoundLoss:
def __init__(self, reg_param=0.01, default_beta_range=(20, 2)):
self.default_reg_param = reg_param
self.default_beta_range = default_beta_range
......@@ -111,7 +111,7 @@ class AdaRoundLoss(object):
return beta
class AdaRound(object):
class AdaRound:
def __init__(
self,
scale,
......
......@@ -36,7 +36,7 @@ _logger = get_logger(
)
class ImperativePTQ(object):
class ImperativePTQ:
"""
Static post training quantization.
"""
......
(40 additional file diffs are collapsed and not shown here.)