Unverified commit 81f3f6b5 authored by Ainavo, committed by GitHub

[CodeStyle][UP008] remove super call with parameters (#51812)

* remove super call with parameters

* fix bug
Parent 79bc9c0d
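For context, UP008 is the pyupgrade-derived rule in ruff that replaces the Python 2 style super(ClassName, self) call with the zero-argument form; inside a method body the two have been equivalent since Python 3. A minimal sketch of the rewrite (the class names are illustrative, not taken from this diff):

    class Base:
        def __init__(self):
            self.value = 0

    class Child(Base):
        def __init__(self):
            # Before the fix: super(Child, self).__init__()
            # After the fix: the zero-argument form resolves the same
            # method-resolution-order entry without repeating the class name.
            super().__init__()

Both spellings behave identically for single and multiple inheritance; the rewrite is purely mechanical.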
@@ -139,7 +139,7 @@ TODO
 import paddle
 class MyLayer(fluid.imperative.Layer):
     def __init__(self):
-        super(MyLayer, self).__init__()
+        super().__init__()
     def forward(self, inputs):
         x = fluid.layers.relu(inputs)
@@ -150,7 +150,7 @@ class MyLayer(fluid.imperative.Layer):
 class MyPyLayer(fluid.imperative.PyLayer):
     def __init__(self):
-        super(MyPyLayer, self).__init__()
+        super().__init__()
     @staticmethod
     def forward(inputs):
@@ -172,7 +172,7 @@ with fluid.imperative.guard():
 class MLP(fluid.Layer):
     def __init__(self, input_size):
-        super(MLP, self).__init__()
+        super().__init__()
         self._linear1 = Linear(input_size,
                                3,
                                fluid.ParamAttr(
...
@@ -37,6 +37,7 @@ select = [
     "UP003",
     "UP004",
     "UP007",
+    "UP008",
     "UP010",
     "UP011",
     "UP013",
...
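Adding "UP008" to the select list in pyproject.toml turns the rule on for the whole repository. The mechanical fixes in the hunks below can, in principle, be reproduced with ruff's autofix (for example: ruff check --select UP008 --fix), though the exact invocation used for this commit is not recorded here.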
@@ -840,7 +840,7 @@ class Stream:
     def __eq__(self, o):
         if isinstance(o, Stream):
-            return super(Stream, self).__eq__(o)
+            return super().__eq__(o)
         return False
     def __hash__(self):
...
@@ -379,7 +379,7 @@ def recompute(function, *args, **kwargs):
     def __init__(self, input_size=10,
                  recompute_blocks=[1, 3],
                  recompute_kwargs={}):
-        super(Naive_fc_net, self).__init__()
+        super().__init__()
         self.recompute_blocks = recompute_blocks
         self.recompute_kwargs = recompute_kwargs
         self.runfunc0 = get_fc_block(0, input_size, is_last=False)
...
@@ -79,7 +79,7 @@ def recompute(function, *args, **kwargs):
     def __init__(self, input_size=10,
                  recompute_blocks=[1, 3],
                  recompute_kwargs={}):
-        super(Naive_fc_net, self).__init__()
+        super().__init__()
         self.recompute_blocks = recompute_blocks
         self.recompute_kwargs = recompute_kwargs
         self.runfunc0 = get_fc_block(0, input_size, is_last=False)
...
@@ -61,7 +61,7 @@ class MLP(paddle.nn.Layer):
 class Encoder(paddle.nn.Layer):
     def __init__(self, encoder):
-        super(Encoder, self).__init__()
+        super().__init__()
         self.first_stage = paddle.nn.Linear(1024, 1024)
         self.encoder = encoder
@@ -73,7 +73,7 @@ class Encoder(paddle.nn.Layer):
 class Decoder(paddle.nn.Layer):
     def __init__(self, decoder):
-        super(Decoder, self).__init__()
+        super().__init__()
         self.decoder = decoder
         self.final_stage = paddle.nn.Linear(1024, 1024)
         self.group_norm = paddle.nn.GroupNorm(64, 1024)
@@ -87,7 +87,7 @@ class Decoder(paddle.nn.Layer):
 class SpecialModel(paddle.nn.Layer):
     def __init__(self):
-        super(SpecialModel, self).__init__()
+        super().__init__()
         self.shared = paddle.nn.Linear(1024, 1024, bias_attr=False)
         self.encoder = Encoder(self.shared)
         self.decoder = Decoder(self.shared)
...
@@ -25,7 +25,7 @@ np.random.seed(2022)
 class Model(nn.Layer):
     def __init__(self):
-        super(Model, self).__init__()
+        super().__init__()
         self.first_stage = nn.Linear(4096, 4096, bias_attr=False)
         self.center_stage = nn.Linear(4096, 4096)
         self.center_stage.weight.stop_gradient = True
...
@@ -77,7 +77,7 @@ class MLP_pipe(PipelineLayer):
             ),
             LayerDesc(Linear, in_features=linear_size, out_features=10),
         ]
-        super(MLP_pipe, self).__init__(
+        super().__init__(
             desc,
             num_stages=2,
             loss_fn=paddle.nn.CrossEntropyLoss(),
@@ -93,7 +93,7 @@ class MLP_Hybrid(paddle.nn.Layer):
         param_attr=None,
         bias_attr=None,
     ):
-        super(MLP_Hybrid, self).__init__()
+        super().__init__()
         self.embedding = VocabParallelEmbedding(embedding_size, linear_size)
         self._linear1 = RowParallelLinear(
             linear_size, linear_size, has_bias=True, input_is_parallel=True
@@ -128,7 +128,7 @@ class MLP(paddle.nn.Layer):
         param_attr=None,
         bias_attr=None,
     ):
-        super(MLP, self).__init__()
+        super().__init__()
         self.embedding = paddle.nn.Embedding(embedding_size, linear_size)
         self._linear1 = Linear(linear_size, linear_size)
         self._linear2 = Linear(linear_size, linear_size)
...
@@ -35,7 +35,7 @@ class MultiHeadAttentionWithMP(paddle.nn.Layer):
         pre_ln=True,
         attn_dropout=True,
     ):
-        super(MultiHeadAttentionWithMP, self).__init__()
+        super().__init__()
         self.embed_dim = embed_dim
         self.kdim = embed_dim
         self.vdim = embed_dim
...
@@ -23,7 +23,7 @@ from paddle.distributed.fleet.utils import recompute
 class Model(paddle.nn.Layer):
     def __init__(self, block_idx, input_size, is_last=False):
-        super(Model, self).__init__()
+        super().__init__()
         block_name = "block_" + str(block_idx)
         self.block = paddle.nn.Sequential(
             (
...
@@ -37,7 +37,7 @@ def verify_op_count(op_types, op_name, target_count):
 class MultiFCLayer(nn.Layer):
     def __init__(self, hidden, Activation):
-        super(MultiFCLayer, self).__init__()
+        super().__init__()
         self.linear1 = paddle.nn.Linear(hidden, 4 * hidden)
         self.linear2 = paddle.nn.Linear(4 * hidden, hidden)
         self.linear3 = paddle.nn.Linear(hidden, hidden)
...
@@ -21,7 +21,7 @@ import paddle
 class Net(paddle.nn.Layer):
     def __init__(self):
-        super(Net, self).__init__()
+        super().__init__()
     @paddle.jit.to_static
     def forward(self, x):
...
@@ -21,7 +21,7 @@ import paddle
 class Net(paddle.nn.Layer):
     def __init__(self):
-        super(Net, self).__init__()
+        super().__init__()
         self.relu = paddle.nn.functional.relu
         self.fc = paddle.nn.Linear(4, 4)
...
@@ -29,7 +29,7 @@ def apply_to_static(net, use_cinn):
 class PrimeNet(paddle.nn.Layer):
     def __init__(self):
-        super(PrimeNet, self).__init__()
+        super().__init__()
         self.fc = paddle.nn.Linear(4, 4)
     def forward(self, x):
...
@@ -42,7 +42,7 @@ def generate_data(shape, dtype="float32"):
 class PrimeNet(paddle.nn.Layer):
     def __init__(self, approximate):
-        super(PrimeNet, self).__init__()
+        super().__init__()
         self.fc = paddle.nn.Linear(4, 4)
         self.approximate = approximate
...
@@ -42,7 +42,7 @@ def apply_to_static(net, use_cinn):
 class PrimeNet(paddle.nn.Layer):
     def __init__(self):
-        super(PrimeNet, self).__init__()
+        super().__init__()
         self.fc = paddle.nn.Linear(64, 64)
     def forward(self, x, w, b):
...
@@ -45,7 +45,7 @@ class PrimeNet(
     paddle.nn.Layer,
 ):
     def __init__(self):
-        super(PrimeNet, self).__init__()
+        super().__init__()
         self.fc = paddle.nn.Linear(4, 4)
     def forward(self, x):
...
@@ -218,7 +218,7 @@ class TestStaticMethod(TestRecursiveCall2):
 class NotToStaticHelper(paddle.nn.Layer):
     def __init__(self):
-        super(NotToStaticHelper, self).__init__()
+        super().__init__()
     def sum(self, x):
         if x.shape[0] > 1:
...
@@ -335,7 +335,7 @@ obj = Obj()
 class Net2:
     def __init__(self):
-        super(Net2, self).__init__()
+        super().__init__()
         self.layer1 = paddle.nn.Linear(10, 10)
     def forward(self, data):
...
@@ -33,7 +33,7 @@ def apply_to_static(support_to_static, model, image_shape=None):
 class Layer0(nn.Layer):
     def __init__(self, level):
-        super(Layer0, self).__init__()
+        super().__init__()
         self._linear1 = nn.Linear(10, 5)
         self._linear2 = nn.Linear(10, 5)
         self.layer1 = Layer1(level)
@@ -51,7 +51,7 @@ class Layer0(nn.Layer):
 class Layer1(nn.Layer):
     def __init__(self, level):
-        super(Layer1, self).__init__()
+        super().__init__()
         self.level = level
         self._linear = nn.Linear(5, 2)
...
@@ -23,7 +23,7 @@ import paddle
 class SimpleLayer(paddle.nn.Layer):
     def __init__(self):
-        super(SimpleLayer, self).__init__()
+        super().__init__()
         self.conv = paddle.nn.Conv2D(
             in_channels=3, out_channels=1, kernel_size=2, stride=1
         )
...
@@ -55,7 +55,7 @@ def getdtype(dtype="float32"):
 class BackendPaddle:
     def __init__(self):
-        super(BackendPaddle, self).__init__()
+        super().__init__()
         self.h2d_time = []
         self.compute_time = []
         self.d2h_time = []
@@ -341,7 +341,7 @@ class ConvBNLayer(paddle.nn.Layer):
 class Test(nn.Layer):
     def __init__(self):
-        super(Test, self).__init__()
+        super().__init__()
         self.conv = ConvBNLayer(
             num_channels=3, num_filters=64, filter_size=3, stride=2, act='relu'
         )
...
@@ -27,7 +27,7 @@ paddle.enable_static()
 class SimpleNet(nn.Layer):
     def __init__(self):
-        super(SimpleNet, self).__init__()
+        super().__init__()
         self.conv1 = nn.Conv2D(
             in_channels=4,
             out_channels=4,
...
@@ -350,7 +350,7 @@ def apply_to_static(net, use_cinn):
 class PrimeNet(paddle.nn.Layer):
     def __init__(self):
-        super(PrimeNet, self).__init__()
+        super().__init__()
         self.conv = nn.Conv2D(4, 2, (3, 3), bias_attr=False)
         self.bn = BatchNorm(2, act="relu")
         self.run_mean = zeros([2])
...
@@ -212,7 +212,7 @@ def apply_to_static(net, use_cinn):
 class PrimeNet(paddle.nn.Layer):
     def __init__(self, n_shape):
-        super(PrimeNet, self).__init__()
+        super().__init__()
         self.ln = LayerNorm(n_shape)
     def forward(self, x):
...
@@ -137,7 +137,7 @@ def apply_to_static(net, use_cinn):
 class PrimeNet(paddle.nn.Layer):
     def __init__(self):
-        super(PrimeNet, self).__init__()
+        super().__init__()
         self.sf = F.softmax
     def forward(self, x, current_axis):
...
@@ -87,7 +87,7 @@ class BertConfig:
 class BertLMPredictionHead(nn.Layer):
     def __init__(self, config: BertConfig, embedding_weights=None):
-        super(BertLMPredictionHead, self).__init__()
+        super().__init__()
         self.transform = nn.Linear(config.hidden_size, config.hidden_size)
         self.activation = getattr(nn.functional, config.hidden_act)
@@ -131,7 +131,7 @@ class BertLMPredictionHead(nn.Layer):
 class BertPretrainingHeads(nn.Layer):
     def __init__(self, config: BertConfig, embedding_weights=None):
-        super(BertPretrainingHeads, self).__init__()
+        super().__init__()
         self.predictions = BertLMPredictionHead(config, embedding_weights)
         self.seq_relationship = nn.Linear(config.hidden_size, 2)
@@ -143,7 +143,7 @@ class BertPretrainingHeads(nn.Layer):
 class BertEmbeddings(nn.Layer):
     def __init__(self, config: BertConfig):
-        super(BertEmbeddings, self).__init__()
+        super().__init__()
         self.word_embeddings = nn.Embedding(
             config.vocab_size, config.hidden_size
@@ -190,7 +190,7 @@ class BertEmbeddings(nn.Layer):
 class BertPooler(nn.Layer):
     def __init__(self, config: BertConfig):
-        super(BertPooler, self).__init__()
+        super().__init__()
         self.dense = nn.Linear(config.hidden_size, config.hidden_size)
         self.activation = nn.Tanh()
@@ -208,7 +208,7 @@ class BertPooler(nn.Layer):
 class BertModel(nn.Layer):
     def __init__(self, config: BertConfig, to_static, enable_cinn):
-        super(BertModel, self).__init__()
+        super().__init__()
         self.config = config
         self.pad_token_id = config.pad_token_id
         self.initializer_range = config.initializer_range
@@ -372,7 +372,7 @@ class BertModel(nn.Layer):
 class Bert(nn.Layer):
     def __init__(self, to_static, enable_cinn):
-        super(Bert, self).__init__()
+        super().__init__()
         config = BertConfig()
         self.bert = BertModel(config, to_static, enable_cinn)
         self.cls = BertPretrainingHeads(
@@ -434,7 +434,7 @@ class Bert(nn.Layer):
 class BertPretrainingCriterion(paddle.nn.Layer):
     def __init__(self, vocab_size=VOCAB_SIZE):
-        super(BertPretrainingCriterion, self).__init__()
+        super().__init__()
         # CrossEntropyLoss is expensive since the inner reshape (copy)
         self.loss_fn = paddle.nn.loss.CrossEntropyLoss(ignore_index=-1)
         self.vocab_size = vocab_size
...
@@ -28,7 +28,7 @@ def apply_to_static(net, use_cinn):
 class PrimeNet(paddle.nn.Layer):
     def __init__(self):
-        super(PrimeNet, self).__init__()
+        super().__init__()
     def forward(self, x):
         out = F.softmax(x)
...
@@ -29,7 +29,7 @@ def apply_to_static(net, use_cinn):
 class PrimeNet(paddle.nn.Layer):
     def __init__(self):
-        super(PrimeNet, self).__init__()
+        super().__init__()
         self.fc = paddle.nn.Linear(4, 4)
     def forward(self, x, y):
...
@@ -29,7 +29,7 @@ def apply_to_static(net, use_cinn):
 class PrimeNet(paddle.nn.Layer):
     def __init__(self):
-        super(PrimeNet, self).__init__()
+        super().__init__()
         self.fc = paddle.nn.Linear(4, 4)
     def forward(self, x, y):
...
@@ -30,7 +30,7 @@ def apply_to_static(net, use_cinn):
 class PrimeNet(paddle.nn.Layer):
     def __init__(self):
-        super(PrimeNet, self).__init__()
+        super().__init__()
         self.fc = paddle.nn.Linear(4, 4)
     def forward(self, x):
...
@@ -29,7 +29,7 @@ def apply_to_static(net, use_cinn):
 class PrimeNet(paddle.nn.Layer):
     def __init__(self):
-        super(PrimeNet, self).__init__()
+        super().__init__()
         self.fc = paddle.nn.Linear(4, 4)
     def forward(self, x, y):
...
@@ -31,7 +31,7 @@ def apply_to_static(net, use_cinn):
 class PrimeNet(paddle.nn.Layer):
     def __init__(self):
-        super(PrimeNet, self).__init__()
+        super().__init__()
         self.fc = paddle.nn.Linear(4, 4)
     def forward(self, x, index, axis):
...
@@ -29,7 +29,7 @@ def apply_to_static(net, use_cinn):
 class PrimeNet(paddle.nn.Layer):
     def __init__(self):
-        super(PrimeNet, self).__init__()
+        super().__init__()
         self.fc = paddle.nn.Linear(4, 4)
     def forward(self, x):
...
@@ -34,7 +34,7 @@ def apply_to_static(net, use_cinn):
 class PrimeNet(paddle.nn.Layer):
     def __init__(self):
-        super(PrimeNet, self).__init__()
+        super().__init__()
         self.fc = paddle.nn.Linear(4, 4)
     def forward(self, x):
...
@@ -29,7 +29,7 @@ def apply_to_static(net, use_cinn):
 class PrimeNet(paddle.nn.Layer):
     def __init__(self):
-        super(PrimeNet, self).__init__()
+        super().__init__()
         self.fc = paddle.nn.Linear(4, 4)
     def forward(self, x, y):
...
@@ -34,7 +34,7 @@ def apply_to_static(net, use_cinn):
 class PrimeNet(paddle.nn.Layer):
     def __init__(self):
-        super(PrimeNet, self).__init__()
+        super().__init__()
         self.fc = paddle.nn.Linear(4, 4)
     def forward(self, x):
...
@@ -29,7 +29,7 @@ def apply_to_static(net, use_cinn):
 class PrimeNet(paddle.nn.Layer):
     def __init__(self):
-        super(PrimeNet, self).__init__()
+        super().__init__()
     def forward(self, x):
         out = paddle.transpose(x, [0, 2, 1])
...
@@ -222,7 +222,7 @@ def apply_to_static(net, use_cinn):
 class PrimNet(paddle.nn.Layer):
     def __init__(self, python_api):
-        super(PrimNet, self).__init__()
+        super().__init__()
         self.python_api = python_api
     def forward(self, args):
...
@@ -3005,7 +3005,7 @@ class TestPow_ZeroDim(TestPow):
         self.shape = []
     def setUp(self):
-        super(TestPow_ZeroDim, self).setUp()
+        super().setUp()
         self.enable_cinn = False
...
@@ -33,7 +33,7 @@ class MultiHeadAttention(paddle.nn.Layer):
         pre_ln=True,
         attn_dropout=True,
     ):
-        super(MultiHeadAttention, self).__init__()
+        super().__init__()
         self.embed_dim = embed_dim
         self.kdim = embed_dim
         self.vdim = embed_dim
...
@@ -31,7 +31,7 @@ np.random.seed(123)
 class Net(paddle.nn.Layer):
     def __init__(self, np_w, func):
-        super(Net, self).__init__()
+        super().__init__()
         self.func = func
         w = paddle.to_tensor(np_w)
         self.w = paddle.create_parameter(
...
@@ -407,7 +407,7 @@ class EncoderCell(SimpleRNNCell):
         dropout_prob=0.0,
         init_scale=0.1,
     ):
-        super(EncoderCell, self).__init__(input_size, hidden_size)
+        super().__init__(input_size, hidden_size)
         self.dropout_prob = dropout_prob
         # use add_sublayer to add multi-layers
         self.lstm_cells = []
@@ -453,7 +453,7 @@ class Encoder(Layer):
         dropout_prob=0.0,
         init_scale=0.1,
     ):
-        super(Encoder, self).__init__()
+        super().__init__()
         self.embedder = Embedding(vocab_size, embed_dim)
         self.stack_lstm = RNN(
             EncoderCell(
@@ -484,7 +484,7 @@ class Decoder(Layer):
         dropout_prob=0.0,
         init_scale=0.1,
     ):
-        super(Decoder, self).__init__()
+        super().__init__()
         self.embedder = Embedding(vocab_size, embed_dim)
         self.stack_lstm = RNN(
             DecoderCell(
@@ -603,7 +603,7 @@ class BaseModel(Layer):
         dropout_prob=0.0,
         init_scale=0.1,
     ):
-        super(BaseModel, self).__init__()
+        super().__init__()
         self.hidden_size = hidden_size
         self.word_embedding = Embedding(vocab_size, embed_dim)
         self.encoder = Encoder(
...
@@ -44,7 +44,7 @@ class cus_tanh(PyLayer):
 class SimpleNet(paddle.nn.Layer):
     def __init__(self, train_id, model_id):
-        super(SimpleNet, self).__init__()
+        super().__init__()
         self.w = self.create_parameter(shape=[in_dim, batch], dtype="float32")
         self.linear = paddle.nn.Linear(in_dim, out_dim)
         self.tanh = paddle.tanh
...
@@ -30,7 +30,7 @@ out_dim = 20
 class SimpleNet(paddle.nn.Layer):
     def __init__(self, train_id):
-        super(SimpleNet, self).__init__()
+        super().__init__()
         self.w1 = self.create_parameter(
             shape=[in_dim, out_dim], dtype="float32"
         )
...
@@ -30,7 +30,7 @@ out_dim = 20
 class SimpleNet(paddle.nn.Layer):
     def __init__(self, train_id):
-        super(SimpleNet, self).__init__()
+        super().__init__()
         self.w1 = self.create_parameter(
             shape=[in_dim, out_dim], dtype="float32"
         )
...
@@ -52,7 +52,7 @@ class Naive_fc_net(paddle.nn.Layer):
     def __init__(
         self, input_size=10, recompute_blocks=[1, 3], recompute_kwargs={}
     ):
-        super(Naive_fc_net, self).__init__()
+        super().__init__()
         self.recompute_blocks = recompute_blocks
         self.recompute_kwargs = recompute_kwargs
         self.runfunc0 = get_fc_block(0, input_size, is_last=False)
...
@@ -89,7 +89,7 @@ class LBFGS(Optimizer):
 class Net(paddle.nn.Layer):
     def __init__(self):
-        super(Net, self).__init__()
+        super().__init__()
         w = paddle.to_tensor(np_w)
         self.w = paddle.create_parameter(shape=w.shape, dtype=w.dtype, default_initializer=paddle.nn.initializer.Assign(w))
...
@@ -23,7 +23,7 @@ from paddle.nn import Layer
 class LinearQuanterDequanter(Layer):
     def __init__(self, quanter, dequanter):
-        super(LinearQuanterDequanter, self).__init__()
+        super().__init__()
         self._quanter = quanter
         self._dequanter = dequanter
@@ -46,7 +46,7 @@ class LinearQuanterDequanter(Layer):
 class LinearQuanter(Layer):
     def __init__(self, scales, zero_point=None, quant_axis=None, bit_length=8):
-        super(LinearQuanter, self).__init__()
+        super().__init__()
         self._scales = paddle.to_tensor(scales, dtype="float32")
         self._zero_point = (
             paddle.zeros([1], dtype="float32")
@@ -97,7 +97,7 @@ class LinearQuanter(Layer):
 class LinearDequanter(Layer):
     def __init__(self, scales, zero_point=None, quant_axis=None, bit_length=8):
-        super(LinearDequanter, self).__init__()
+        super().__init__()
         self._scales = paddle.to_tensor(scales, dtype="float32")
         self._zero_point = (
             paddle.zeros([1], dtype="float32")
@@ -156,7 +156,7 @@ class ConvertibleQuantedLayer(Layer, metaclass=abc.ABCMeta):
         # Given codes in ./customized_quanter.py
         class CustomizedQuantedLayer(ConvertibleQuantedLayer):
             def __init__(self):
-                super(CustomizedQuantedLayer, self).__init__()
+                super().__init__()
                 self.weight_a = paddle.create_parameter(shape=[1], dtype='float32')
                 self.weight_b = paddle.create_parameter(shape=[1], dtype='float32')
                 self.quanter_for_weight_a = None
@@ -176,7 +176,7 @@ class ConvertibleQuantedLayer(Layer, metaclass=abc.ABCMeta):
     """
     def __init__(self):
-        super(ConvertibleQuantedLayer, self).__init__()
+        super().__init__()
         self.converted = False
     @abc.abstractmethod
...
@@ -27,7 +27,7 @@ class QuantedConv2D(ConvertibleQuantedLayer):
     """
     def __init__(self, layer: Layer, q_config):
-        super(QuantedConv2D, self).__init__()
+        super().__init__()
         # For Conv2D
         self._groups = layer._groups
...
@@ -26,7 +26,7 @@ class QuantedLinear(ConvertibleQuantedLayer):
     """
    def __init__(self, layer: Layer, q_config):
-        super(QuantedLinear, self).__init__()
+        super().__init__()
         # For Linear
         self.weight = layer.weight
         self.bias = layer.bias
...
@@ -36,7 +36,7 @@ class Stub(Layer):
         quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
         class Model(paddle.nn.Layer):
             def __init__(self, num_classes=10):
-                super(Model, self).__init__()
+                super().__init__()
                 self.conv = Conv2D(3, 6, 3, stride=1, padding=1)
                 self.quant = Stub(quanter)
             def forward(self, inputs):
@@ -51,7 +51,7 @@ class Stub(Layer):
     """
     def __init__(self, observer=None):
-        super(Stub, self).__init__()
+        super().__init__()
         self._observer = observer
     def forward(self, input):
@@ -71,7 +71,7 @@ class QuanterStub(Layer):
     """
     def __init__(self, layer: Stub, q_config):
-        super(QuanterStub, self).__init__()
+        super().__init__()
         self._observer = None
         if layer._observer is not None:
             self._observer = layer._observer._instance(layer)
...
@@ -25,7 +25,7 @@ class BaseObserver(BaseQuanter, metaclass=abc.ABCMeta):
     """
    def __init__(self):
-        super(BaseObserver, self).__init__()
+        super().__init__()
     @abc.abstractmethod
     def cal_thresholds(self):
...
@@ -29,7 +29,7 @@ class BaseQuanter(Layer, metaclass=abc.ABCMeta):
     """
     def __init__(self):
-        super(BaseQuanter, self).__init__()
+        super().__init__()
     @abc.abstractmethod
     def forward(self, input):
...
@@ -118,7 +118,7 @@ class QuantConfig:
         class Model(paddle.nn.Layer):
             def __init__(self):
-                super(Model, self).__init__()
+                super().__init__()
                 self.fc = Linear(576, 120)
         model = Model()
         quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
@@ -162,7 +162,7 @@ class QuantConfig:
         class Model(paddle.nn.Layer):
             def __init__(self):
-                super(Model, self).__init__()
+                super().__init__()
                 self.fc = Linear(576, 120)
         model = Model()
         quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
@@ -207,7 +207,7 @@ class QuantConfig:
         class Model(paddle.nn.Layer):
             def __init__(self):
-                super(Model, self).__init__()
+                super().__init__()
                 self.fc = Linear(576, 120)
         model = Model()
         quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
@@ -388,7 +388,7 @@ class QuantConfig:
         class Model(paddle.nn.Layer):
             def __init__(self):
-                super(Model, self).__init__()
+                super().__init__()
                 self.fc = Sequential(Linear(576, 120),Linear(576, 120))
         model = Model()
         quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
...
@@ -56,7 +56,7 @@ class QuanterFactory(ClassWithArguments):
     """
     def __init__(self, *args, **kwargs):
-        super(QuanterFactory, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.partial_class = None
     def _instance(self, layer: Layer) -> BaseQuanter:
...
@@ -39,7 +39,7 @@ class AbsmaxObserver(ObserverFactory):
     """
     def __init__(self, quant_bits=8):
-        super(AbsmaxObserver, self).__init__(quant_bits=quant_bits)
+        super().__init__(quant_bits=quant_bits)
     def _get_class(self):
         return AbsmaxObserverLayer
@@ -53,7 +53,7 @@ class AbsmaxObserverLayer(BaseObserver):
     INIT_ABS_MAX = 1e-7
     def __init__(self, layer, quant_bits=8):
-        super(AbsmaxObserverLayer, self).__init__()
+        super().__init__()
         self._quant_bits = quant_bits
         self.abs_max_val = paddle.to_tensor(AbsmaxObserverLayer.INIT_ABS_MAX)
...
@@ -27,7 +27,7 @@ class PTQ(Quantization):
     """
     def __init__(self, config: QuantConfig):
-        super(PTQ, self).__init__(config)
+        super().__init__(config)
     def _is_parallel_training(self):
         try:
...
@@ -36,7 +36,7 @@ class QAT(Quantization):
     """
     def __init__(self, config: QuantConfig):
-        super(QAT, self).__init__(config)
+        super().__init__(config)
     def quantize(self, model: Layer, inplace=False):
         r"""
...
@@ -82,7 +82,7 @@ class FakeQuanterWithAbsMaxObserver(QuanterFactory):
         dtype='float32',
         name=None,
     ):
-        super(FakeQuanterWithAbsMaxObserver, self).__init__(
+        super().__init__(
             name=name,
             moving_rate=moving_rate,
             bit_length=bit_length,
@@ -102,7 +102,7 @@ class FakeQuanterWithAbsMaxObserverLayer(BaseQuanter):
         bit_length=8,
         dtype='float32',
     ):
-        super(FakeQuanterWithAbsMaxObserverLayer, self).__init__()
+        super().__init__()
         self._moving_rate = moving_rate
         self._bit_length = bit_length
         scale_prefix = (
...
@@ -34,7 +34,7 @@ class ObserveWrapper(Layer):
         observed: Layer,
         observe_input=True,
     ):
-        super(ObserveWrapper, self).__init__()
+        super().__init__()
         self._observer = observer
         self._observed = observed
         self._observe_input = observe_input
...
@@ -28,7 +28,7 @@ linear_quant_axis = 1
 @quanter("CustomizedQuanter")
 class CustomizedQuanterLayer(BaseQuanter):
     def __init__(self, layer, bit_length=8, kwargs1=None):
-        super(CustomizedQuanterLayer, self).__init__()
+        super().__init__()
         self._layer = layer
         self._bit_length = bit_length
         self._kwargs1 = kwargs1
...
@@ -29,7 +29,7 @@ from paddle.quantization.observers.abs_max import AbsmaxObserverLayer
 class LeNetDygraph(paddle.nn.Layer):
     def __init__(self, num_classes=10):
-        super(LeNetDygraph, self).__init__()
+        super().__init__()
         self.num_classes = num_classes
         self.features = Sequential(
             Conv2D(1, 6, 3, stride=1, padding=1),
...
@@ -41,7 +41,7 @@ class RandomDataset(Dataset):
 class Model(paddle.nn.Layer):
     def __init__(self, num_classes=10):
-        super(Model, self).__init__()
+        super().__init__()
         self.num_classes = num_classes
         self.features = Sequential(
             Conv2D(3, 6, 3, stride=1, padding=1),
...
@@ -24,7 +24,7 @@ from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
 class LeNetDygraph(paddle.nn.Layer):
     def __init__(self, num_classes=10):
-        super(LeNetDygraph, self).__init__()
+        super().__init__()
         self.num_classes = num_classes
         self.features = Sequential(
             Conv2D(3, 6, 3, stride=1, padding=1),
...
@@ -28,7 +28,7 @@ quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
 class Model(paddle.nn.Layer):
     def __init__(self, num_classes=10):
-        super(Model, self).__init__()
+        super().__init__()
         self.quant_in = Stub()
         self.conv = Conv2D(3, 6, 3, stride=1, padding=1)
         self.quant = Stub(quanter)
...
@@ -186,7 +186,7 @@ def train_for_run_parallel():
     """
    def __init__(self):
-        super(LinearNet, self).__init__()
+        super().__init__()
         self._linear1 = paddle.nn.Linear(10, 10)
         self._linear2 = paddle.nn.Linear(10, 1)
...
@@ -87,7 +87,7 @@ class IntermediateLayerGetter(nn.LayerDict):
             if not return_layers:
                 break
-        super(IntermediateLayerGetter, self).__init__(layers)
+        super().__init__(layers)
         self.return_layers = orig_return_layers
     def forward(self, x):
...