det_mobilenet_v3.py
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
from paddle import nn
import paddle.nn.functional as F
from paddle import ParamAttr

__all__ = ['MobileNetV3']


def make_divisible(v, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v
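
# Illustrative values with the default divisor of 8: make_divisible(16 * 0.5)
# returns 8, make_divisible(12) returns 16, and make_divisible(56) returns 56.
# Widths are rounded to the nearest multiple of `divisor`, then bumped up one
# step whenever rounding would fall below 90% of the requested width.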


class MobileNetV3(nn.Layer):
    def __init__(self,
                 in_channels=3,
                 model_name='large',
                 scale=0.5,
                 disable_se=False,
                 **kwargs):
        """
        the MobilenetV3 backbone network for detection module.
        Args:
            params(dict): the super parameters for build network
        """
        super(MobileNetV3, self).__init__()

        self.disable_se = disable_se

        if model_name == "large":
            cfg = [
                # k: kernel size, exp: expanded channels, c: output channels,
                # se: use an SE block, nl: nonlinearity, s: stride
                [3, 16, 16, False, 'relu', 1],
                [3, 64, 24, False, 'relu', 2],
                [3, 72, 24, False, 'relu', 1],
                [5, 72, 40, True, 'relu', 2],
                [5, 120, 40, True, 'relu', 1],
                [5, 120, 40, True, 'relu', 1],
                [3, 240, 80, False, 'hard_swish', 2],
                [3, 200, 80, False, 'hard_swish', 1],
                [3, 184, 80, False, 'hard_swish', 1],
                [3, 184, 80, False, 'hard_swish', 1],
                [3, 480, 112, True, 'hard_swish', 1],
                [3, 672, 112, True, 'hard_swish', 1],
                [5, 672, 160, True, 'hard_swish', 2],
                [5, 960, 160, True, 'hard_swish', 1],
                [5, 960, 160, True, 'hard_swish', 1],
            ]
            cls_ch_squeeze = 960
        elif model_name == "small":
            cfg = [
                # k: kernel size, exp: expanded channels, c: output channels,
                # se: use an SE block, nl: nonlinearity, s: stride
                [3, 16, 16, True, 'relu', 2],
                [3, 72, 24, False, 'relu', 2],
                [3, 88, 24, False, 'relu', 1],
                [5, 96, 40, True, 'hard_swish', 2],
                [5, 240, 40, True, 'hard_swish', 1],
                [5, 240, 40, True, 'hard_swish', 1],
                [5, 120, 48, True, 'hard_swish', 1],
                [5, 144, 48, True, 'hard_swish', 1],
                [5, 288, 96, True, 'hard_swish', 2],
                [5, 576, 96, True, 'hard_swish', 1],
                [5, 576, 96, True, 'hard_swish', 1],
            ]
            cls_ch_squeeze = 576
        else:
            raise NotImplementedError(
                "model_name [" + model_name + "] is not supported; "
                "choose 'large' or 'small'.")

        supported_scale = [0.35, 0.5, 0.75, 1.0, 1.25]
        assert scale in supported_scale, \
            "supported scales are {} but input scale is {}".format(supported_scale, scale)
        inplanes = 16
        # conv1
        self.conv = ConvBNLayer(
            in_channels=in_channels,
            out_channels=make_divisible(inplanes * scale),
            kernel_size=3,
            stride=2,
            padding=1,
            groups=1,
            if_act=True,
            act='hard_swish',
            name='conv1')

        self.stages = []
        self.out_channels = []
        block_list = []
        i = 0
        inplanes = make_divisible(inplanes * scale)
        for (k, exp, c, se, nl, s) in cfg:
            # Fold the backbone-level disable_se switch into the per-block
            # flag, so ResidualUnit itself does not need to know about it.
            se = se and not self.disable_se
            if s == 2 and i > 2:
                self.out_channels.append(inplanes)
                self.stages.append(nn.Sequential(*block_list))
                block_list = []
            block_list.append(
                ResidualUnit(
                    in_channels=inplanes,
                    mid_channels=make_divisible(scale * exp),
                    out_channels=make_divisible(scale * c),
                    kernel_size=k,
                    stride=s,
                    use_se=se,
                    act=nl,
                    name="conv" + str(i + 2)))
            inplanes = make_divisible(scale * c)
            i += 1
        block_list.append(
            ConvBNLayer(
                in_channels=inplanes,
                out_channels=make_divisible(scale * cls_ch_squeeze),
                kernel_size=1,
                stride=1,
                padding=0,
                groups=1,
                if_act=True,
                act='hard_swish',
                name='conv_last'))
        self.stages.append(nn.Sequential(*block_list))
        self.out_channels.append(make_divisible(scale * cls_ch_squeeze))
        for i, stage in enumerate(self.stages):
            self.add_sublayer(sublayer=stage, name="stage{}".format(i))

    def forward(self, x):
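        # Collect one feature map per stage; a detection neck (e.g. the DB
        # FPN in PaddleOCR) consumes this multi-scale list.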
        x = self.conv(x)
        out_list = []
        for stage in self.stages:
            x = stage(x)
            out_list.append(x)
        return out_list


class ConvBNLayer(nn.Layer):
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 groups=1,
                 if_act=True,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()
        self.if_act = if_act
        self.act = act
        self.conv = nn.Conv2D(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            weight_attr=ParamAttr(name=name + '_weights'),
            bias_attr=False)

        self.bn = nn.BatchNorm(
            num_channels=out_channels,
            act=None,
            param_attr=ParamAttr(name=name + "_bn_scale"),
            bias_attr=ParamAttr(name=name + "_bn_offset"),
            moving_mean_name=name + "_bn_mean",
            moving_variance_name=name + "_bn_variance")

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        if self.if_act:
            if self.act == "relu":
                x = F.relu(x)
            elif self.act == "hard_swish":
                x = F.hardswish(x)
            else:
                raise NotImplementedError(
                    "The activation function ({}) is not supported.".format(
                        self.act))
        return x


class ResidualUnit(nn.Layer):
    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 use_se,
                 act=None,
                 name=''):
        super(ResidualUnit, self).__init__()
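        # A residual shortcut is only valid when the block preserves both the
        # spatial size (stride 1) and the channel count.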
        self.if_shortcut = stride == 1 and in_channels == out_channels
        self.if_se = use_se

        self.expand_conv = ConvBNLayer(
            in_channels=in_channels,
            out_channels=mid_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            if_act=True,
            act=act,
            name=name + "_expand")
        self.bottleneck_conv = ConvBNLayer(
            in_channels=mid_channels,
            out_channels=mid_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=int((kernel_size - 1) // 2),
            groups=mid_channels,
            if_act=True,
            act=act,
            name=name + "_depthwise")
        if self.if_se:
            self.mid_se = SEModule(mid_channels, name=name + "_se")
        self.linear_conv = ConvBNLayer(
            in_channels=mid_channels,
            out_channels=out_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            if_act=False,
            act=None,
            name=name + "_linear")

    def forward(self, inputs):
        x = self.expand_conv(inputs)
        x = self.bottleneck_conv(x)
        if self.if_se:
            x = self.mid_se(x)
        x = self.linear_conv(x)
        if self.if_shortcut:
            x = paddle.add(inputs, x)
        return x


class SEModule(nn.Layer):
    def __init__(self, in_channels, reduction=4, name=""):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2D(1)
        self.conv1 = nn.Conv2D(
            in_channels=in_channels,
            out_channels=in_channels // reduction,
            kernel_size=1,
            stride=1,
            padding=0,
            weight_attr=ParamAttr(name=name + "_1_weights"),
            bias_attr=ParamAttr(name=name + "_1_offset"))
        self.conv2 = nn.Conv2D(
            in_channels=in_channels // reduction,
            out_channels=in_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            weight_attr=ParamAttr(name=name + "_2_weights"),
            bias_attr=ParamAttr(name=name + "_2_offset"))

    def forward(self, inputs):
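        # Squeeze: global average pool to 1x1. Excite: two 1x1 convs produce
        # per-channel gates in (0, 1) that rescale the input feature map.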
        outputs = self.avg_pool(inputs)
        outputs = self.conv1(outputs)
        outputs = F.relu(outputs)
        outputs = self.conv2(outputs)
        outputs = F.hardsigmoid(outputs, slope=0.2, offset=0.5)
        return inputs * outputs
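

# A minimal smoke test (illustrative addition, not part of the original
# module): build the backbone and inspect the feature pyramid returned by
# forward(). Channel expectations below assume model_name='large', scale=0.5.
if __name__ == "__main__":
    model = MobileNetV3(in_channels=3, model_name='large', scale=0.5)
    model.eval()
    x = paddle.rand([1, 3, 640, 640])
    outs = model(x)
    # Four stages at strides 4, 8, 16, 32, so a 640x640 input yields feature
    # maps of 160, 80, 40 and 20 pixels per side, with channel counts
    # [16, 24, 56, 480] recorded in model.out_channels.
    for feat, channels in zip(outs, model.out_channels):
        assert feat.shape[1] == channels
        print(feat.shape)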