# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

19 20
import numpy as np
import paddle
littletomatodonkey's avatar
littletomatodonkey 已提交
21 22 23
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
littletomatodonkey's avatar
littletomatodonkey 已提交
24 25
from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
littletomatodonkey's avatar
fix opt  
littletomatodonkey 已提交
26
from paddle.fluid.regularizer import L2Decay
27 28

import math
W
WuHaobo 已提交
29 30

# Names exported when this module is star-imported: one factory per
# (variant, width-multiplier) combination.
__all__ = [
    "MobileNetV3_small_x0_35",
    "MobileNetV3_small_x0_5",
    "MobileNetV3_small_x0_75",
    "MobileNetV3_small_x1_0",
    "MobileNetV3_small_x1_25",
    "MobileNetV3_large_x0_35",
    "MobileNetV3_large_x0_5",
    "MobileNetV3_large_x0_75",
    "MobileNetV3_large_x1_0",
    "MobileNetV3_large_x1_25",
]


def make_divisible(v, divisor=8, min_value=None):
    """Round ``v`` to the nearest multiple of ``divisor``.

    The result never goes below ``min_value`` (defaults to ``divisor``),
    and if plain rounding would shrink the value by more than 10% it is
    bumped up one extra ``divisor`` step — the standard channel-rounding
    helper used by MobileNet-style architectures.
    """
    floor = divisor if min_value is None else min_value
    rounded = int(v + divisor / 2) // divisor * divisor
    result = max(floor, rounded)
    # Guard against rounding down by more than 10% of the requested width.
    if result < 0.9 * v:
        result += divisor
    return result

littletomatodonkey's avatar
littletomatodonkey 已提交
47

littletomatodonkey's avatar
littletomatodonkey 已提交
48
class MobileNetV3(nn.Layer):
    """MobileNetV3 backbone ("large" or "small" variant).

    Builds: stem conv -> stack of ResidualUnit blocks described by
    ``self.cfg`` -> 1x1 squeeze conv -> global average pool -> 1x1 expand
    conv -> dropout -> fully-connected classifier head.

    Args:
        scale: width multiplier applied to every channel count.
        model_name: "large" or "small"; selects the block configuration.
        dropout_prob: dropout probability before the classifier.
        class_dim: number of output classes.
    """

    def __init__(self,
                 scale=1.0,
                 model_name="small",
                 dropout_prob=0.2,
                 class_dim=1000):
        super(MobileNetV3, self).__init__()

        inplanes = 16
        if model_name == "large":
            # Each row: kernel k, expansion exp, out channels c,
            # use SE block, non-linearity, stride.
            self.cfg = [
                [3, 16, 16, False, "relu", 1],
                [3, 64, 24, False, "relu", 2],
                [3, 72, 24, False, "relu", 1],
                [5, 72, 40, True, "relu", 2],
                [5, 120, 40, True, "relu", 1],
                [5, 120, 40, True, "relu", 1],
                [3, 240, 80, False, "hard_swish", 2],
                [3, 200, 80, False, "hard_swish", 1],
                [3, 184, 80, False, "hard_swish", 1],
                [3, 184, 80, False, "hard_swish", 1],
                [3, 480, 112, True, "hard_swish", 1],
                [3, 672, 112, True, "hard_swish", 1],
                [5, 672, 160, True, "hard_swish", 2],
                [5, 960, 160, True, "hard_swish", 1],
                [5, 960, 160, True, "hard_swish", 1],
            ]
            self.cls_ch_squeeze = 960
            self.cls_ch_expand = 1280
        elif model_name == "small":
            # Same row layout as the "large" table above.
            self.cfg = [
                [3, 16, 16, True, "relu", 2],
                [3, 72, 24, False, "relu", 2],
                [3, 88, 24, False, "relu", 1],
                [5, 96, 40, True, "hard_swish", 2],
                [5, 240, 40, True, "hard_swish", 1],
                [5, 240, 40, True, "hard_swish", 1],
                [5, 120, 48, True, "hard_swish", 1],
                [5, 144, 48, True, "hard_swish", 1],
                [5, 288, 96, True, "hard_swish", 2],
                [5, 576, 96, True, "hard_swish", 1],
                [5, 576, 96, True, "hard_swish", 1],
            ]
            self.cls_ch_squeeze = 576
            self.cls_ch_expand = 1280
        else:
            raise NotImplementedError(
                "mode[{}_model] is not implemented!".format(model_name))

        # Stem: 3x3 stride-2 convolution.
        self.conv1 = ConvBNLayer(
            in_c=3,
            out_c=make_divisible(inplanes * scale),
            filter_size=3,
            stride=2,
            padding=1,
            num_groups=1,
            if_act=True,
            act="hard_swish",
            name="conv1")

        # Inverted-residual stack; sublayers registered explicitly so the
        # plain Python list is tracked by the framework.
        self.block_list = []
        inplanes = make_divisible(inplanes * scale)
        for idx, (k, exp, c, se, nl, s) in enumerate(self.cfg):
            block = ResidualUnit(
                in_c=inplanes,
                mid_c=make_divisible(scale * exp),
                out_c=make_divisible(scale * c),
                filter_size=k,
                stride=s,
                use_se=se,
                act=nl,
                name="conv" + str(idx + 2))
            self.block_list.append(block)
            self.add_sublayer(sublayer=block, name="conv" + str(idx + 2))
            inplanes = make_divisible(scale * c)

        # 1x1 conv squeezing the stack output before global pooling.
        self.last_second_conv = ConvBNLayer(
            in_c=inplanes,
            out_c=make_divisible(scale * self.cls_ch_squeeze),
            filter_size=1,
            stride=1,
            padding=0,
            num_groups=1,
            if_act=True,
            act="hard_swish",
            name="conv_last")

        self.pool = AdaptiveAvgPool2d(1)

        # 1x1 expansion conv applied after pooling (no BN, no bias).
        self.last_conv = Conv2d(
            in_channels=make_divisible(scale * self.cls_ch_squeeze),
            out_channels=self.cls_ch_expand,
            kernel_size=1,
            stride=1,
            padding=0,
            weight_attr=ParamAttr(name="last_1x1_conv_weights"),
            bias_attr=False)

        self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer")

        self.out = Linear(
            self.cls_ch_expand,
            class_dim,
            weight_attr=ParamAttr("fc_weights"),
            bias_attr=ParamAttr(name="fc_offset"))

    def forward(self, inputs, label=None):
        """Run the backbone; returns class logits of shape [N, class_dim]."""
        x = self.conv1(inputs)

        for block in self.block_list:
            x = block(x)

        x = self.last_second_conv(x)
        x = self.pool(x)

        x = self.last_conv(x)
        x = F.hard_swish(x)
        x = self.dropout(x)
        # Flatten [N, C, 1, 1] -> [N, C] for the fully-connected head.
        x = paddle.reshape(x, shape=[x.shape[0], x.shape[1]])
        x = self.out(x)
        return x


littletomatodonkey's avatar
littletomatodonkey 已提交
175
class ConvBNLayer(nn.Layer):
    """Convolution + BatchNorm, optionally followed by an activation.

    Args:
        in_c: input channel count.
        out_c: output channel count.
        filter_size: convolution kernel size.
        stride: convolution stride.
        padding: convolution padding.
        num_groups: conv groups (``num_groups == in_c`` gives depthwise).
        if_act: whether to apply an activation after BN.
        act: activation name, "relu" or "hard_swish".
        use_cudnn: kept for interface compatibility; not used here.
        name: parameter-name prefix.
    """

    def __init__(self,
                 in_c,
                 out_c,
                 filter_size,
                 stride,
                 padding,
                 num_groups=1,
                 if_act=True,
                 act=None,
                 use_cudnn=True,
                 name=""):
        super(ConvBNLayer, self).__init__()
        self.if_act = if_act
        self.act = act
        self.conv = Conv2d(
            in_channels=in_c,
            out_channels=out_c,
            kernel_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            weight_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        # BN scale/offset excluded from weight decay (L2Decay(0.0)), as is
        # conventional for normalization parameters.
        self.bn = BatchNorm(
            num_channels=out_c,
            act=None,
            param_attr=ParamAttr(
                name=name + "_bn_scale", regularizer=L2Decay(0.0)),
            bias_attr=ParamAttr(
                name=name + "_bn_offset", regularizer=L2Decay(0.0)),
            moving_mean_name=name + "_bn_mean",
            moving_variance_name=name + "_bn_variance")

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        if self.if_act:
            if self.act == "relu":
                x = F.relu(x)
            elif self.act == "hard_swish":
                x = F.hard_swish(x)
            else:
                # Fail loudly instead of print()+exit(): raising keeps the
                # error catchable and matches the NotImplementedError used
                # for bad model names in MobileNetV3.__init__.
                raise NotImplementedError(
                    "The activation function is selected incorrectly.")
        return x


littletomatodonkey's avatar
littletomatodonkey 已提交
223
class ResidualUnit(nn.Layer):
    """MobileNetV3 inverted-residual block.

    1x1 expand conv -> depthwise conv -> optional SE module -> 1x1 linear
    projection, with an identity shortcut when stride is 1 and the input
    and output channel counts match.

    Args:
        in_c: input channels.
        mid_c: expanded (hidden) channels.
        out_c: output channels.
        filter_size: depthwise kernel size.
        stride: depthwise stride.
        use_se: whether to insert a squeeze-and-excitation module.
        act: activation name for the expand and depthwise convs.
        name: parameter-name prefix.
    """

    def __init__(self,
                 in_c,
                 mid_c,
                 out_c,
                 filter_size,
                 stride,
                 use_se,
                 act=None,
                 name=''):
        super(ResidualUnit, self).__init__()
        # Shortcut only when the residual add is shape-compatible.
        self.if_shortcut = stride == 1 and in_c == out_c
        self.if_se = use_se

        self.expand_conv = ConvBNLayer(
            in_c=in_c,
            out_c=mid_c,
            filter_size=1,
            stride=1,
            padding=0,
            if_act=True,
            act=act,
            name=name + "_expand")
        # groups == channels makes this a depthwise convolution.
        self.bottleneck_conv = ConvBNLayer(
            in_c=mid_c,
            out_c=mid_c,
            filter_size=filter_size,
            stride=stride,
            padding=int((filter_size - 1) // 2),
            num_groups=mid_c,
            if_act=True,
            act=act,
            name=name + "_depthwise")
        if self.if_se:
            self.mid_se = SEModule(mid_c, name=name + "_se")
        # Linear (no activation) 1x1 projection back to out_c.
        self.linear_conv = ConvBNLayer(
            in_c=mid_c,
            out_c=out_c,
            filter_size=1,
            stride=1,
            padding=0,
            if_act=False,
            act=None,
            name=name + "_linear")

    def forward(self, inputs):
        y = self.expand_conv(inputs)
        y = self.bottleneck_conv(y)
        if self.if_se:
            y = self.mid_se(y)
        y = self.linear_conv(y)
        if self.if_shortcut:
            y = paddle.elementwise_add(inputs, y)
        return y


littletomatodonkey's avatar
littletomatodonkey 已提交
279
class SEModule(nn.Layer):
    """Squeeze-and-excitation channel attention.

    Global average pool -> 1x1 reduce conv -> ReLU -> 1x1 expand conv ->
    hard-sigmoid; the result re-scales the input per channel.

    Args:
        channel: number of input (and output) channels.
        reduction: channel reduction ratio for the bottleneck.
        name: parameter-name prefix.
    """

    def __init__(self, channel, reduction=4, name=""):
        super(SEModule, self).__init__()
        self.avg_pool = AdaptiveAvgPool2d(1)
        self.conv1 = Conv2d(
            in_channels=channel,
            out_channels=channel // reduction,
            kernel_size=1,
            stride=1,
            padding=0,
            weight_attr=ParamAttr(name=name + "_1_weights"),
            bias_attr=ParamAttr(name=name + "_1_offset"))
        self.conv2 = Conv2d(
            in_channels=channel // reduction,
            out_channels=channel,
            kernel_size=1,
            stride=1,
            padding=0,
            weight_attr=ParamAttr(name=name + "_2_weights"),
            bias_attr=ParamAttr(name=name + "_2_offset"))

    def forward(self, inputs):
        # Per-channel attention weights in [0, 1].
        attn = self.avg_pool(inputs)
        attn = self.conv1(attn)
        attn = F.relu(attn)
        attn = self.conv2(attn)
        attn = F.hard_sigmoid(attn)
        # Broadcast-multiply the attention over the spatial dimensions.
        return paddle.multiply(x=inputs, y=attn, axis=0)
W
WuHaobo 已提交
307 308


309 310
def MobileNetV3_small_x0_35(**args):
    """MobileNetV3-small with width multiplier 0.35."""
    return MobileNetV3(model_name="small", scale=0.35, **args)


314 315
def MobileNetV3_small_x0_5(**args):
    """MobileNetV3-small with width multiplier 0.5."""
    return MobileNetV3(model_name="small", scale=0.5, **args)


319 320
def MobileNetV3_small_x0_75(**args):
    """MobileNetV3-small with width multiplier 0.75."""
    return MobileNetV3(model_name="small", scale=0.75, **args)


littletomatodonkey's avatar
littletomatodonkey 已提交
324
def MobileNetV3_small_x1_0(**args):
    """MobileNetV3-small with width multiplier 1.0 (the reference size)."""
    return MobileNetV3(model_name="small", scale=1.0, **args)


329 330
def MobileNetV3_small_x1_25(**args):
    """MobileNetV3-small with width multiplier 1.25."""
    return MobileNetV3(model_name="small", scale=1.25, **args)


334 335
def MobileNetV3_large_x0_35(**args):
    """MobileNetV3-large with width multiplier 0.35."""
    return MobileNetV3(model_name="large", scale=0.35, **args)


339 340
def MobileNetV3_large_x0_5(**args):
    """MobileNetV3-large with width multiplier 0.5."""
    return MobileNetV3(model_name="large", scale=0.5, **args)


344 345
def MobileNetV3_large_x0_75(**args):
    """MobileNetV3-large with width multiplier 0.75."""
    return MobileNetV3(model_name="large", scale=0.75, **args)


littletomatodonkey's avatar
littletomatodonkey 已提交
349
def MobileNetV3_large_x1_0(**args):
    """MobileNetV3-large with width multiplier 1.0 (the reference size)."""
    return MobileNetV3(model_name="large", scale=1.0, **args)


354 355
def MobileNetV3_large_x1_25(**args):
    """MobileNetV3-large with width multiplier 1.25."""
    return MobileNetV3(model_name="large", scale=1.25, **args)