Unverified commit 9983892e authored by Ainavo, committed by GitHub

[CodeStyle][UP004] remove useless object inheritance (#51771)

* add_up004_for_ruff

* update the config file and remove `object`

* fix md
Parent 5786f3e4
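Note: UP004 flags classes that explicitly inherit from `object`, which is redundant in Python 3 because every class is new-style. A minimal sketch of the equivalence (class names here are illustrative, not from this commit):

```python
# In Python 3, every class implicitly inherits from object,
# so the explicit base changes nothing.
class Implicit:
    pass


class Explicit(object):
    pass


# Both resolve to the same bases and method resolution order.
assert Implicit.__bases__ == Explicit.__bases__ == (object,)
assert [c.__name__ for c in Implicit.__mro__] == ["Implicit", "object"]
```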
@@ -14,7 +14,7 @@ https://www.tensorflow.org/guide/eager
## API
```python
-class Layer(object):
+class Layer:
def __call__(inputs):
# build some parameter once.
@@ -49,7 +49,7 @@ Longer term.
```python
# Parent class.
-class PyVarBase(object):
+class PyVarBase:
pass
# Current python variable.
...
@@ -35,6 +35,7 @@ select = [
# Pyupgrade
"UP001",
"UP003",
"UP004",
"UP007",
"UP010",
"UP011",
...
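To try the newly enabled rule locally, ruff can be invoked with only UP004 selected; a sketch via `subprocess` (assumes a recent `ruff` on `PATH`; `--select` and `--fix` are standard ruff CLI flags):

```python
import subprocess

# Run only the UP004 check over the current tree and auto-fix hits.
# Shell equivalent: ruff check --select UP004 --fix .
subprocess.run(["ruff", "check", "--select", "UP004", "--fix", "."], check=False)
```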
@@ -523,7 +523,7 @@ def get_available_custom_device():
return core.get_available_custom_device()
-class Event(object):
+class Event:
'''
A device event wrapper around StreamBase.
Parameters:
@@ -668,7 +668,7 @@ class Event(object):
return self.event_base
-class Stream(object):
+class Stream:
'''
A device stream wrapper around StreamBase.
Parameters:
@@ -936,7 +936,7 @@ def set_stream(stream):
return prev_stream
-class stream_guard(object):
+class stream_guard:
'''
Notes:
This API only supports dynamic graph mode currently.
...
@@ -17,7 +17,7 @@ from ..runtime.collective_runtime import CollectiveRuntime
__all__ = []
-class RuntimeFactory(object):
+class RuntimeFactory:
def __init__(self):
pass
...
@@ -45,7 +45,7 @@ from ..auto_parallel.utils import is_backward_op, is_forward_op, is_loss_op
world_process_group = get_world_process_group()
-class BF16State(object):
+class BF16State:
def __init__(self, block):
self._block: Block = block
self._op_bf16_dict = {}
...
@@ -1787,7 +1787,7 @@ def group_param(sharding_info, fuse_size):
return group_to_param_map, param_to_group_map
-class ShardingInfo(object):
+class ShardingInfo:
def __init__(self, group, rank, params_grads, partition_algor):
self.group = group
self.params_grads = dict([(p.name, (p, g)) for p, g in params_grads])
@@ -1869,7 +1869,7 @@ class ShardingInfo(object):
return self.params_grads.get(param_name, None)
-class VarGroup(object):
+class VarGroup:
def __init__(self, max_size):
self.max_siez = max_size
self.dtype = None
...
@@ -35,7 +35,7 @@ except ImportError:
VOCAB_SIZE = 30522
-class Stack(object):
+class Stack:
def __init__(self, axis=0, dtype=None):
self._axis = axis
self._dtype = dtype
...
@@ -51,7 +51,7 @@ def get_dataset(inputs, config):
return dataset
-class Main(object):
+class Main:
def __init__(self):
self.metrics = {}
self.input_data = None
...
@@ -36,7 +36,7 @@ paddle.enable_static()
CUDA_BLOCK_SIZE = 32
-class CTCForward(object):
+class CTCForward:
def __init__(
self,
softmax,
...
@@ -1081,7 +1081,7 @@ class ParametersRecorder:
return id(program)
-class FallbackProgramLayer(object):
+class FallbackProgramLayer:
__slots__ = [
'_instance',
'_dy_func',
...
@@ -32,7 +32,7 @@ DEFAULT_QAT_LAYER_MAPPINGS: Dict[Layer, Layer] = {
DEFAULT_LEAVES = [nn.ReLU, nn.AvgPool2D]
-class SingleLayerConfig(object):
+class SingleLayerConfig:
r"""
Configure how to quantize the activations and weights of a single layer.
@@ -57,7 +57,7 @@ class SingleLayerConfig(object):
return f"activation: {self._activation}\nweight: {self._weight}"
-class QuantConfig(object):
+class QuantConfig:
r"""
Configure how to quantize a model or a part of the model. It will map each layer to
an instance of SingleLayerConfig by the settings. It provides diverse methods to set
...
@@ -25,7 +25,7 @@ from .base_quanter import BaseQuanter
from .config import QuantConfig
-class Quantization(object, metaclass=abc.ABCMeta):
+class Quantization(metaclass=abc.ABCMeta):
r"""
Abstract class used to prepares a copy of the model for quantization calibration or quantization-aware training.
Args:
...
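The `Quantization` hunk above is the only one shown here where `object` is listed alongside a `metaclass` argument; dropping it is still behavior-preserving, because the metaclass keyword is independent of the base list. A small sketch (the `Demo`/`run` names are hypothetical, not Paddle APIs):

```python
import abc


class Demo(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def run(self):
        ...


# object remains in the MRO without the explicit base, and
# abstract-method enforcement still works.
assert object in Demo.__mro__
try:
    Demo()
except TypeError as err:
    print(err)  # can't instantiate abstract class Demo ...
```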
@@ -15,7 +15,7 @@
import paddle
-class KernelInfo(object):
+class KernelInfo:
def __init__(self, op_type):
self.op_type = op_type
self.supported_dtypes = set()
@@ -53,7 +53,7 @@ class KernelInfo(object):
self.supported_dtypes.add(dtype_str)
-class KernelRegistryStatistics(object):
+class KernelRegistryStatistics:
def __init__(self):
self.num_ops_for_dtypes = {
"all": 0,
...