Unverified commit 23ac89d1, authored by minghaoBD and committed by GitHub

Fix unused warning (#847) (#849)

* Fix warning

* Add unittest
Co-authored-by: whs <wanghaoshuang@baidu.com>
Parent 96ef7d7b
@@ -17,7 +17,7 @@ import copy
import pickle
import numpy as np
from collections import OrderedDict
-from collections import Iterable
+from collections.abc import Iterable
from paddle.fluid.framework import Program, program_guard, Parameter, Variable
__all__ = ['GraphWrapper', 'VarWrapper', 'OpWrapper']
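The change above matters for forward compatibility: the abstract container classes such as Iterable moved to collections.abc in Python 3.3, importing them from collections has emitted a DeprecationWarning ever since, and Python 3.10 removed the old aliases entirely. A minimal, self-contained sketch of the portable import (illustrative only, not PaddleSlim code):

from collections.abc import Iterable  # works on 3.3+, required on 3.10+

def flatten(nested):
    # Recursively flatten nested iterables, treating strings as atoms.
    for item in nested:
        if isinstance(item, Iterable) and not isinstance(item, str):
            for sub in flatten(item):
                yield sub
        else:
            yield item

print(list(flatten([1, [2, [3, 4]], "ab"])))  # [1, 2, 3, 4, 'ab']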
@@ -29,7 +29,7 @@ class ResNet():
def net(self, input, class_dim=1000, conv1_name='conv1', fc_name=None):
layers = self.layers
-prefix_name = self.prefix_name if self.prefix_name is '' else self.prefix_name + '_'
+prefix_name = self.prefix_name if self.prefix_name == '' else self.prefix_name + '_'
supported_layers = [34, 50, 101, 152]
assert layers in supported_layers, \
"supported layers are {} but input layer is {}".format(supported_layers, layers)
@@ -137,12 +137,7 @@ class ResNet():
else:
bn_name = "bn" + name[3:]
else:
-if name.split("_")[1] == "conv1":
-    bn_name = name.split("_", 1)[0] + "_bn_" + name.split("_", 1)[1]
-else:
-    bn_name = name.split("_", 1)[0] + "_bn" + name.split("_", 1)[1][3:]
+bn_name = name.split("_", 1)[0] + "_bn" + name.split("_", 1)[1][3:]
return fluid.layers.batch_norm(
input=conv,
act=act,
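To see what the consolidated bn_name line computes, take a hypothetical prefixed ResNet parameter name such as "model1_res2a": one split on "_" peels off the prefix, and slicing off the first three characters of the remainder replaces the leading "res" with "bn".

name = "model1_res2a"                # hypothetical example name
prefix, rest = name.split("_", 1)    # ("model1", "res2a")
bn_name = prefix + "_bn" + rest[3:]  # "model1_bn2a"
print(bn_name)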
@@ -18,20 +18,29 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-from collections import Iterable
+from collections.abc import Iterable
import paddle
import paddle.fluid as fluid
-from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, Layer, Conv2D, BatchNorm, Pool2D, to_variable
+from paddle.fluid.dygraph import Embedding, LayerNorm, Linear
+from paddle.fluid.dygraph import Conv2D, BatchNorm, Pool2D
+from paddle.fluid.dygraph import Layer
+from paddle.fluid.dygraph import to_variable
from paddle.fluid.initializer import NormalInitializer
from paddle.fluid import ParamAttr
from paddle.fluid.initializer import MSRA, ConstantInitializer
ConvBN_PRIMITIVES = [
-'std_conv_bn_3', 'std_conv_bn_5', 'std_conv_bn_7', 'dil_conv_bn_3',
-'dil_conv_bn_5', 'dil_conv_bn_7', 'avg_pool_3', 'max_pool_3',
-'skip_connect', 'none'
+'std_conv_bn_3',
+'std_conv_bn_5',
+'std_conv_bn_7',
+'dil_conv_bn_3',
+'dil_conv_bn_5',
+'dil_conv_bn_7',
+'avg_pool_3',
+'max_pool_3',
+'skip_connect',
+'none',
]
@@ -299,13 +308,11 @@ class EncoderLayer(Layer):
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1], # zero 2
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0] # dil_conv3 3
]
-self.gumbel_alphas = to_variable(
-    np.array(mrpc_arch).astype(np.float32))
+self.gumbel_alphas = to_variable(np.array(mrpc_arch).astype(np.float32))
self.gumbel_alphas.stop_gradient = True
print("gumbel_alphas: \n", self.gumbel_alphas.numpy())
-def forward(self, enc_input_0, enc_input_1, epoch, flops=[],
-            model_size=[]):
+def forward(self, enc_input_0, enc_input_1, epoch, flops=[], model_size=[]):
alphas = self.gumbel_alphas if self.use_fixed_gumbel else gumbel_softmax(
self.alphas, epoch)
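For background on the forward pass above: gumbel_softmax draws a differentiable, near-one-hot sample over the candidate operations from the architecture logits. A NumPy sketch of the technique (not the repository's implementation; the epoch-based temperature schedule is omitted):

import numpy as np

def gumbel_softmax_np(logits, temperature=1.0, rng=np.random.default_rng(0)):
    # Perturb the logits with Gumbel(0, 1) noise, then apply a tempered
    # softmax; low temperatures push the output toward one-hot.
    u = rng.uniform(size=logits.shape)
    gumbel = -np.log(-np.log(u + 1e-20) + 1e-20)
    y = (logits + gumbel) / temperature
    y = np.exp(y - y.max(axis=-1, keepdims=True))
    return y / y.sum(axis=-1, keepdims=True)

print(gumbel_softmax_np(np.array([[1.0, 2.0, 3.0]]), temperature=0.5))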
@@ -39,8 +39,7 @@ _logger = get_logger(__name__, level=logging.INFO)
class SuperConv2D(nn.Conv2D):
"""
This interface is used to construct a callable object of the ``SuperConv2D`` class.
"""This interface is used to construct a callable object of the ``SuperConv2D`` class.
Note: the channel in config need to less than first defined.
@@ -116,7 +115,7 @@ class SuperConv2D(nn.Conv2D):
of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with :math:`Normal(0.0, std)`,
-and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
+and the :math:`std` is :math:`(\\frac{2.0 }{filter\\_elem\\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr or bool, optional): The attribute for the bias of conv2d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d
@@ -370,7 +369,7 @@ class SuperConv2DTranspose(nn.Conv2DTranspose):
`conv2dtranspose <http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf>`_ .
For each input :math:`X`, the equation is:
.. math::
-Out = \sigma (W \\ast X + b)
+Out = \\sigma (W \\ast X + b)
Where:
* :math:`X`: Input value, a ``Tensor`` with NCHW format.
* :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .
@@ -386,10 +385,10 @@ class SuperConv2DTranspose(nn.Conv2DTranspose):
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
-H^\prime_{out} &= (H_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (H_f - 1) + 1 \\\\
-W^\prime_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (W_f - 1) + 1 \\\\
-H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] ) \\\\
-W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] )
+H^\\prime_{out} &= (H_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (H_f - 1) + 1 \\\\
+W^\\prime_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (W_f - 1) + 1 \\\\
+H_{out} &\\in [ H^\\prime_{out}, H^\\prime_{out} + strides[0] ) \\\\
+W_{out} &\\in [ W^\\prime_{out}, W^\\prime_{out} + strides[1] )
Parameters:
num_channels(int): The number of channels in the input image.
num_filters(int): The number of the filter. It is as same as the output
@@ -806,12 +805,12 @@ class SuperLinear(nn.Linear):
Out = XW + b
where :math:`W` is the weight and :math:`b` is the bias.
Linear layer takes only one multi-dimensional tensor as input with the
-shape :math:`[batch\_size, *, in\_features]` , where :math:`*` means any
+shape :math:`[batch\\_size, *, in\\_features]` , where :math:`*` means any
number of additional dimensions. It multiplies input tensor with the weight
-(a 2-D tensor of shape :math:`[in\_features, out\_features]` ) and produces
-an output tensor of shape :math:`[batch\_size, *, out\_features]` .
-If :math:`bias\_attr` is not False, the bias (a 1-D tensor of
-shape :math:`[out\_features]` ) will be created and added to the output.
+(a 2-D tensor of shape :math:`[in\\_features, out\\_features]` ) and produces
+an output tensor of shape :math:`[batch\\_size, *, out\\_features]` .
+If :math:`bias\\_attr` is not False, the bias (a 1-D tensor of
+shape :math:`[out\\_features]` ) will be created and added to the output.
Parameters:
in_features (int): The number of input units.
out_features (int): The number of output units.
@@ -835,8 +834,8 @@ class SuperLinear(nn.Linear):
**weight** (Parameter): the learnable weight of this layer.
**bias** (Parameter): the learnable bias of this layer.
Shape:
-- input: Multi-dimentional tensor with shape :math:`[batch\_size, *, in\_features]` .
-- output: Multi-dimentional tensor with shape :math:`[batch\_size, *, out\_features]` .
+- input: Multi-dimentional tensor with shape :math:`[batch\\_size, *, in\\_features]` .
+- output: Multi-dimentional tensor with shape :math:`[batch\\_size, *, out\\_features]` .
Examples:
.. code-block:: python
import numpy as np
@@ -1142,9 +1141,9 @@ class SuperEmbedding(nn.Embedding):
of the dictionary of embeddings.
embedding_dim: Just one element which indicate the size of each embedding vector respectively.
padding_idx(int|long|None): padding_idx needs to be in the interval [-num_embeddings, num_embeddings).
-If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
-to :math:`vocab\_size + padding\_idx` . It will output all-zero padding data whenever lookup
-encounters :math:`padding\_idx` in id. And the padding data will not be updated while training.
+If :math:`padding\\_idx < 0`, the :math:`padding\\_idx` will automatically be converted
+to :math:`vocab\\_size + padding\\_idx` . It will output all-zero padding data whenever lookup
+encounters :math:`padding\\_idx` in id. And the padding data will not be updated while training.
If set None, it makes no effect to output. Default: None.
sparse(bool): The flag indicating whether to use sparse update. This parameter only
affects the performance of the backwards gradient update. It is recommended to set
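The docstring edits above all double the backslashes inside :math: expressions. In a normal (non-raw) string literal, a pair like \_ is an invalid escape sequence; CPython reports it when the literal is compiled (historically a DeprecationWarning, a SyntaxWarning on newer releases), and running with -W error turns it into a hard failure. A sketch of both sides, using compile() so the warning fires at runtime:

import warnings
warnings.simplefilter("error")

try:
    # Source text contains a single backslash before "_": invalid escape.
    compile(r'"[batch\_size]"', "<docstring>", "eval")
except (DeprecationWarning, SyntaxWarning, SyntaxError) as exc:
    print("rejected:", exc)

# Doubling the backslash compiles cleanly to one literal backslash.
code = compile(r'"[batch\\_size]"', "<docstring>", "eval")
print(eval(code))  # [batch\_size]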
@@ -39,9 +39,11 @@ class PruningDetails(object):
"""
def __init__(self, var, axis, transform, op, is_parameter=True):
-assert (isinstance(var, VarWrapper),
-        "name should be VarWrapper, but get type = ".format(type(var)))
-assert (isinstance(axis, int))
+assert isinstance(
+    var,
+    VarWrapper), "name should be VarWrapper, but get type = {}".format(
+        type(var))
+assert isinstance(axis, int)
self.name = var.name()
self.var = var
self.axis = axis
@@ -71,8 +71,8 @@ def geometry_median(group, values, graph):
_logger.warning("The value of tensor '{}' is not found.")
return None
value = values[name]
-assert (len(value.shape) == 4,
-        "geometry_median only support for weight of conv2d.")
+assert len(
+    value.shape) == 4, "geometry_median only support for weight of conv2d."
def get_distance_sum(value, out_idx):
w = value.view()
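Both assert fixes above address the same pitfall: `assert (condition, "message")` asserts a two-element tuple, and a non-empty tuple is always truthy, so the check can never fail (CPython 3.8+ warns "assertion is always true"). A minimal illustration:

value_rank = 3

# Buggy form: the parenthesized (condition, message) tuple is always truthy,
# so this line never raises (and CPython emits a SyntaxWarning for it).
assert (value_rank == 4, "this never raises")

# Correct form: condition first, message after the comma.
try:
    assert value_rank == 4, "only 4-D weights are supported"
except AssertionError as exc:
    print("caught:", exc)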
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append("../")
import unittest
from static_case import StaticCase
import paddle.fluid as fluid
from paddleslim import flops
import paddleslim.models as models
class TestModel(StaticCase):
    def __init__(self, model_name, flops, prefix=None,
                 method_name="test_model"):
        super(TestModel, self).__init__(method_name)
        self.model_name = model_name
        self.flops = flops
        self.prefix = prefix

    def test_model(self):
        main_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(main_program, startup_program):
            input = fluid.data(name="image", shape=[None, 3, 16, 16])
            if self.prefix is not None:
                model = models.__dict__[self.model_name](
                    prefix_name=self.prefix)
            else:
                model = models.__dict__[self.model_name]()
            model.net(input)
            print(flops(main_program))
            self.assertTrue(self.flops == flops(main_program))
suite = unittest.TestSuite()
suite.addTest(TestModel("ResNet34", 29097984, prefix=""))
suite.addTest(TestModel("ResNet34", 29097984, prefix="model1"))
suite.addTest(TestModel("MobileNet", 5110528))
if __name__ == '__main__':
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
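The new test file parametrizes one TestCase through its constructor: passing method_name to unittest.TestCase.__init__ selects which test method runs, so the same class can be added to a suite several times with different fixtures. A self-contained sketch of the idiom:

import unittest

class TestSquare(unittest.TestCase):
    def __init__(self, value, expected, method_name="test_square"):
        super(TestSquare, self).__init__(method_name)
        self.value = value
        self.expected = expected

    def test_square(self):
        self.assertEqual(self.value ** 2, self.expected)

suite = unittest.TestSuite()
suite.addTest(TestSquare(3, 9))
suite.addTest(TestSquare(4, 16))
unittest.TextTestRunner(verbosity=2).run(suite)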