import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D

import math
import collections
import re
import copy

from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url

MODEL_URLS = {
    "EfficientNetB0_small":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB0_small_pretrained.pdparams",
    "EfficientNetB0":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB0_pretrained.pdparams",
    "EfficientNetB1":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB1_pretrained.pdparams",
    "EfficientNetB2":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB2_pretrained.pdparams",
    "EfficientNetB3":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB3_pretrained.pdparams",
    "EfficientNetB4":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB4_pretrained.pdparams",
    "EfficientNetB5":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB5_pretrained.pdparams",
    "EfficientNetB6":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB6_pretrained.pdparams",
    "EfficientNetB7":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB7_pretrained.pdparams",
}

__all__ = list(MODEL_URLS.keys())


GlobalParams = collections.namedtuple('GlobalParams', [
    'batch_norm_momentum',
    'batch_norm_epsilon',
    'dropout_rate',
    'num_classes',
    'width_coefficient',
    'depth_coefficient',
    'depth_divisor',
    'min_depth',
    'drop_connect_rate',
])

BlockArgs = collections.namedtuple('BlockArgs', [
    'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
    'expand_ratio', 'id_skip', 'stride', 'se_ratio'
])

GlobalParams.__new__.__defaults__ = (None, ) * len(GlobalParams._fields)
BlockArgs.__new__.__defaults__ = (None, ) * len(BlockArgs._fields)


def efficientnet_params(model_name):
    """ Map EfficientNet model name to parameter coefficients. """
    params_dict = {
        # Coefficients:   width,depth,resolution,dropout
        'efficientnet-b0': (1.0, 1.0, 224, 0.2),
        'efficientnet-b1': (1.0, 1.1, 240, 0.2),
        'efficientnet-b2': (1.1, 1.2, 260, 0.3),
        'efficientnet-b3': (1.2, 1.4, 300, 0.3),
        'efficientnet-b4': (1.4, 1.8, 380, 0.4),
        'efficientnet-b5': (1.6, 2.2, 456, 0.4),
        'efficientnet-b6': (1.8, 2.6, 528, 0.5),
        'efficientnet-b7': (2.0, 3.1, 600, 0.5),
    }
    return params_dict[model_name]


def efficientnet(width_coefficient=None,
                 depth_coefficient=None,
                 dropout_rate=0.2,
                 drop_connect_rate=0.2):
    """ Get block arguments according to parameter and coefficients. """
    blocks_args = [
        'r1_k3_s11_e1_i32_o16_se0.25',
        'r2_k3_s22_e6_i16_o24_se0.25',
        'r2_k5_s22_e6_i24_o40_se0.25',
        'r3_k3_s22_e6_i40_o80_se0.25',
        'r3_k5_s11_e6_i80_o112_se0.25',
        'r4_k5_s22_e6_i112_o192_se0.25',
        'r1_k3_s11_e6_i192_o320_se0.25',
    ]
    blocks_args = BlockDecoder.decode(blocks_args)

    global_params = GlobalParams(
        batch_norm_momentum=0.99,
        batch_norm_epsilon=1e-3,
        dropout_rate=dropout_rate,
        drop_connect_rate=drop_connect_rate,
        num_classes=1000,
        width_coefficient=width_coefficient,
        depth_coefficient=depth_coefficient,
        depth_divisor=8,
        min_depth=None)

    return blocks_args, global_params


def get_model_params(model_name, override_params):
    """ Get the block args and global params for a given model """
    if model_name.startswith('efficientnet'):
        w, d, _, p = efficientnet_params(model_name)
        blocks_args, global_params = efficientnet(
            width_coefficient=w, depth_coefficient=d, dropout_rate=p)
    else:
        raise NotImplementedError('model name is not pre-defined: %s' %
                                  model_name)
    if override_params:
        global_params = global_params._replace(**override_params)
    return blocks_args, global_params


def round_filters(filters, global_params):
    """ Calculate and round number of filters based on depth multiplier. """
    multiplier = global_params.width_coefficient
    if not multiplier:
        return filters
    divisor = global_params.depth_divisor
    min_depth = global_params.min_depth
    filters *= multiplier
    min_depth = min_depth or divisor
    new_filters = max(min_depth,
                      int(filters + divisor / 2) // divisor * divisor)
    if new_filters < 0.9 * filters:  # prevent rounding by more than 10%
        new_filters += divisor
    return int(new_filters)


def round_repeats(repeats, global_params):
    """ Round number of filters based on depth multiplier. """
    multiplier = global_params.depth_coefficient
    if not multiplier:
        return repeats
    return int(math.ceil(multiplier * repeats))


class BlockDecoder(object):
    """
    Block Decoder, straight from the official TensorFlow repository.
    """

    @staticmethod
    def _decode_block_string(block_string):
        """ Gets a block through a string notation of arguments. """
        assert isinstance(block_string, str)

        ops = block_string.split('_')
        options = {}
        for op in ops:
            splits = re.split(r'(\d.*)', op)
            if len(splits) >= 2:
                key, value = splits[:2]
                options[key] = value

        # Check stride
        cond_1 = ('s' in options and len(options['s']) == 1)
        cond_2 = ((len(options['s']) == 2) and
                  (options['s'][0] == options['s'][1]))
        assert (cond_1 or cond_2)

        return BlockArgs(
            kernel_size=int(options['k']),
            num_repeat=int(options['r']),
            input_filters=int(options['i']),
            output_filters=int(options['o']),
            expand_ratio=int(options['e']),
            id_skip=('noskip' not in block_string),
            se_ratio=float(options['se']) if 'se' in options else None,
            stride=[int(options['s'][0])])

    @staticmethod
    def _encode_block_string(block):
        """Encodes a block to a string."""
        args = [
            'r%d' % block.num_repeat, 'k%d' % block.kernel_size, 's%d%d' %
            (block.stride[0], block.stride[0]), 'e%s' % block.expand_ratio,
            'i%d' % block.input_filters, 'o%d' % block.output_filters
        ]
        if block.se_ratio is not None and 0 < block.se_ratio <= 1:
            args.append('se%s' % block.se_ratio)
        if block.id_skip is False:
            args.append('noskip')
        return '_'.join(args)

    @staticmethod
    def decode(string_list):
        """
        Decode a list of string notations to specify blocks in the network.

        :param string_list: a list of strings, each a notation of a block
        :return: a list of BlockArgs namedtuples of block args
        """
        assert isinstance(string_list, list)
        blocks_args = []
        for block_string in string_list:
            blocks_args.append(BlockDecoder._decode_block_string(block_string))
        return blocks_args

    @staticmethod
    def encode(blocks_args):
        """
        Encodes a list of BlockArgs to a list of strings.

        :param blocks_args: a list of BlockArgs namedtuples of block args
        :return: a list of strings, each string is a notation of block
        """
        block_strings = []
        for block in blocks_args:
            block_strings.append(BlockDecoder._encode_block_string(block))
        return block_strings


def initial_type(name, use_bias=False):
    param_attr = ParamAttr(name=name + "_weights")
    if use_bias:
        bias_attr = ParamAttr(name=name + "_offset")
    else:
        bias_attr = False
    return param_attr, bias_attr


def init_batch_norm_layer(name="batch_norm"):
    param_attr = ParamAttr(name=name + "_scale")
    bias_attr = ParamAttr(name=name + "_offset")
    return param_attr, bias_attr


def init_fc_layer(name="fc"):
    param_attr = ParamAttr(name=name + "_weights")
    bias_attr = ParamAttr(name=name + "_offset")
    return param_attr, bias_attr


def cal_padding(img_size, stride, filter_size, dilation=1):
    """Calculate padding size."""
    if img_size % stride == 0:
        out_size = max(filter_size - stride, 0)
    else:
        out_size = max(filter_size - (img_size % stride), 0)
    return out_size // 2, out_size - out_size // 2


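# Per-stage input spatial sizes for each model variant; Conv2ds uses these to
# pre-compute static "SAME" padding instead of computing it at runtime.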
inp_shape = {
    "b0_small": [224, 112, 112, 56, 28, 14, 14, 7],
    "b0": [224, 112, 112, 56, 28, 14, 14, 7],
    "b1": [240, 120, 120, 60, 30, 15, 15, 8],
    "b2": [260, 130, 130, 65, 33, 17, 17, 9],
    "b3": [300, 150, 150, 75, 38, 19, 19, 10],
    "b4": [380, 190, 190, 95, 48, 24, 24, 12],
    "b5": [456, 228, 228, 114, 57, 29, 29, 15],
    "b6": [528, 264, 264, 132, 66, 33, 33, 17],
    "b7": [600, 300, 300, 150, 75, 38, 38, 19]
}


def _drop_connect(inputs, prob, is_test):
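    # Drop connect (stochastic depth): during training, zero entire samples
    # with probability `prob` and rescale the survivors by 1 / keep_prob so
    # the expected activation is unchanged; at inference it is the identity.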
    if is_test:
        output = inputs
    else:
        keep_prob = 1.0 - prob
        inputs_shape = paddle.shape(inputs)
        random_tensor = keep_prob + paddle.rand(
            shape=[inputs_shape[0], 1, 1, 1])
        binary_tensor = paddle.floor(random_tensor)
        output = paddle.multiply(inputs, binary_tensor) / keep_prob
    return output


class Conv2ds(nn.Layer):
    def __init__(self,
                 input_channels,
                 output_channels,
                 filter_size,
                 stride=1,
                 padding=0,
                 groups=None,
                 name="conv2d",
                 act=None,
                 use_bias=False,
                 padding_type=None,
                 model_name=None,
                 cur_stage=None):
        super(Conv2ds, self).__init__()
        assert act in [None, "swish", "sigmoid"]
        self.act = act

        param_attr, bias_attr = initial_type(name=name, use_bias=use_bias)

        def get_padding(filter_size, stride=1, dilation=1):
            padding = ((stride - 1) + dilation * (filter_size - 1)) // 2
            return padding

        inps = 1 if model_name is None and cur_stage is None else inp_shape[
            model_name][cur_stage]
        self.need_crop = False
        if padding_type == "SAME":
            top_padding, bottom_padding = cal_padding(inps, stride,
                                                      filter_size)
            left_padding, right_padding = cal_padding(inps, stride,
                                                      filter_size)
            height_padding = bottom_padding
            width_padding = right_padding
            if top_padding != bottom_padding or left_padding != right_padding:
                height_padding = top_padding + stride
                width_padding = left_padding + stride
                self.need_crop = True
            padding = [height_padding, width_padding]
        elif padding_type == "VALID":
            height_padding = 0
            width_padding = 0
            padding = [height_padding, width_padding]
        elif padding_type == "DYNAMIC":
            padding = get_padding(filter_size, stride)
        else:
            padding = padding_type

        groups = 1 if groups is None else groups
        self._conv = Conv2D(
            input_channels,
            output_channels,
            filter_size,
            groups=groups,
            stride=stride,
            padding=padding,
            weight_attr=param_attr,
            bias_attr=bias_attr)

    def forward(self, inputs):
        x = self._conv(inputs)
        if self.act == "swish":
            x = F.swish(x)
        elif self.act == "sigmoid":
            x = F.sigmoid(x)

        if self.need_crop:
            x = x[:, :, 1:, 1:]
        return x


class ConvBNLayer(nn.Layer):
    def __init__(self,
                 input_channels,
                 filter_size,
                 output_channels,
                 stride=1,
                 num_groups=1,
                 padding_type="SAME",
                 conv_act=None,
                 bn_act="swish",
                 use_bn=True,
                 use_bias=False,
                 name=None,
                 conv_name=None,
                 bn_name=None,
                 model_name=None,
                 cur_stage=None):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2ds(
            input_channels=input_channels,
            output_channels=output_channels,
            filter_size=filter_size,
            stride=stride,
            groups=num_groups,
            act=conv_act,
            padding_type=padding_type,
            name=conv_name,
            use_bias=use_bias,
            model_name=model_name,
            cur_stage=cur_stage)
        self.use_bn = use_bn
        if use_bn is True:
            bn_name = name + bn_name
            param_attr, bias_attr = init_batch_norm_layer(bn_name)

            self._bn = BatchNorm(
                num_channels=output_channels,
                act=bn_act,
                momentum=0.99,
                epsilon=0.001,
                moving_mean_name=bn_name + "_mean",
                moving_variance_name=bn_name + "_variance",
                param_attr=param_attr,
                bias_attr=bias_attr)

    def forward(self, inputs):
        if self.use_bn:
            x = self._conv(inputs)
            x = self._bn(x)
            return x
        else:
            return self._conv(inputs)


class ExpandConvNorm(nn.Layer):
    def __init__(self,
                 input_channels,
                 block_args,
                 padding_type,
                 name=None,
                 model_name=None,
                 cur_stage=None):
        super(ExpandConvNorm, self).__init__()

        self.oup = block_args.input_filters * block_args.expand_ratio
        self.expand_ratio = block_args.expand_ratio

        if self.expand_ratio != 1:
            self._conv = ConvBNLayer(
                input_channels,
                1,
                self.oup,
                bn_act=None,
                padding_type=padding_type,
                name=name,
                conv_name=name + "_expand_conv",
                bn_name="_bn0",
                model_name=model_name,
                cur_stage=cur_stage)

    def forward(self, inputs):
        if self.expand_ratio != 1:
            return self._conv(inputs)
        else:
            return inputs


class DepthwiseConvNorm(nn.Layer):
    def __init__(self,
                 input_channels,
                 block_args,
                 padding_type,
                 name=None,
                 model_name=None,
                 cur_stage=None):
        super(DepthwiseConvNorm, self).__init__()

        self.k = block_args.kernel_size
        self.s = block_args.stride
        if isinstance(self.s, (list, tuple)):
            self.s = self.s[0]
        oup = block_args.input_filters * block_args.expand_ratio

        self._conv = ConvBNLayer(
            input_channels,
            self.k,
            oup,
            self.s,
            num_groups=input_channels,
            bn_act=None,
            padding_type=padding_type,
            name=name,
            conv_name=name + "_depthwise_conv",
            bn_name="_bn1",
            model_name=model_name,
            cur_stage=cur_stage)

    def forward(self, inputs):
        return self._conv(inputs)


class ProjectConvNorm(nn.Layer):
    def __init__(self,
                 input_channels,
                 block_args,
                 padding_type,
                 name=None,
                 model_name=None,
                 cur_stage=None):
        super(ProjectConvNorm, self).__init__()

        final_oup = block_args.output_filters

        self._conv = ConvBNLayer(
            input_channels,
            1,
            final_oup,
            bn_act=None,
            padding_type=padding_type,
            name=name,
            conv_name=name + "_project_conv",
            bn_name="_bn2",
            model_name=model_name,
            cur_stage=cur_stage)

    def forward(self, inputs):
        return self._conv(inputs)


class SEBlock(nn.Layer):
    def __init__(self,
                 input_channels,
                 num_squeezed_channels,
                 oup,
                 padding_type,
                 name=None,
                 model_name=None,
                 cur_stage=None):
        super(SEBlock, self).__init__()

        self._pool = AdaptiveAvgPool2D(1)
        self._conv1 = Conv2ds(
            input_channels,
            num_squeezed_channels,
            1,
            use_bias=True,
            padding_type=padding_type,
            act="swish",
            name=name + "_se_reduce")

        self._conv2 = Conv2ds(
            num_squeezed_channels,
            oup,
            1,
            act="sigmoid",
            use_bias=True,
            padding_type=padding_type,
            name=name + "_se_expand")

    def forward(self, inputs):
        x = self._pool(inputs)
        x = self._conv1(x)
        x = self._conv2(x)
        out = paddle.multiply(inputs, x)
        return out


class MbConvBlock(nn.Layer):
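    # MBConv block: expand (1x1 conv) -> depthwise (kxk conv) -> optional
    # squeeze-and-excite -> project (1x1 conv), with a skip connection and
    # drop connect when input and output shapes match.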
    def __init__(self,
                 input_channels,
                 block_args,
                 padding_type,
                 use_se,
                 name=None,
                 drop_connect_rate=None,
                 model_name=None,
                 cur_stage=None):
        super(MbConvBlock, self).__init__()

        oup = block_args.input_filters * block_args.expand_ratio
        self.block_args = block_args
        self.has_se = use_se and (block_args.se_ratio is not None) and (
            0 < block_args.se_ratio <= 1)
        self.id_skip = block_args.id_skip
        self.expand_ratio = block_args.expand_ratio
        self.drop_connect_rate = drop_connect_rate

        if self.expand_ratio != 1:
            self._ecn = ExpandConvNorm(
                input_channels,
                block_args,
                padding_type=padding_type,
                name=name,
                model_name=model_name,
                cur_stage=cur_stage)

        self._dcn = DepthwiseConvNorm(
            input_channels * block_args.expand_ratio,
            block_args,
            padding_type=padding_type,
            name=name,
            model_name=model_name,
            cur_stage=cur_stage)

        if self.has_se:
            num_squeezed_channels = max(
                1, int(block_args.input_filters * block_args.se_ratio))
            self._se = SEBlock(
                input_channels * block_args.expand_ratio,
                num_squeezed_channels,
                oup,
                padding_type=padding_type,
                name=name,
                model_name=model_name,
                cur_stage=cur_stage)

        self._pcn = ProjectConvNorm(
            input_channels * block_args.expand_ratio,
            block_args,
            padding_type=padding_type,
            name=name,
            model_name=model_name,
            cur_stage=cur_stage)

    def forward(self, inputs):
        x = inputs
        if self.expand_ratio != 1:
            x = self._ecn(x)
            x = F.swish(x)

        x = self._dcn(x)
        x = F.swish(x)
        if self.has_se:
            x = self._se(x)
        x = self._pcn(x)

        if self.id_skip and \
                self.block_args.stride == 1 and \
                self.block_args.input_filters == self.block_args.output_filters:
            if self.drop_connect_rate:
                x = _drop_connect(x, self.drop_connect_rate, not self.training)
            x = paddle.add(x, inputs)
        return x


class ConvStemNorm(nn.Layer):
    def __init__(self,
                 input_channels,
                 padding_type,
                 _global_params,
                 name=None,
                 model_name=None,
                 cur_stage=None):
        super(ConvStemNorm, self).__init__()

        output_channels = round_filters(32, _global_params)
        self._conv = ConvBNLayer(
            input_channels,
            filter_size=3,
            output_channels=output_channels,
            stride=2,
            bn_act=None,
            padding_type=padding_type,
            name="",
            conv_name="_conv_stem",
            bn_name="_bn0",
            model_name=model_name,
            cur_stage=cur_stage)

    def forward(self, inputs):
        return self._conv(inputs)


class ExtractFeatures(nn.Layer):
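    # Stem conv followed by all MBConv stages; returns the feature map that
    # feeds the 1x1 head conv in EfficientNet.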
    def __init__(self,
                 input_channels,
                 _block_args,
                 _global_params,
                 padding_type,
                 use_se,
                 model_name=None):
        super(ExtractFeatures, self).__init__()

        self._global_params = _global_params

        self._conv_stem = ConvStemNorm(
            input_channels,
            padding_type=padding_type,
            _global_params=_global_params,
            model_name=model_name,
            cur_stage=0)

        self.block_args_copy = copy.deepcopy(_block_args)
        idx = 0
        block_size = 0
        for block_arg in self.block_args_copy:
            block_arg = block_arg._replace(
                input_filters=round_filters(block_arg.input_filters,
                                            _global_params),
                output_filters=round_filters(block_arg.output_filters,
                                             _global_params),
                num_repeat=round_repeats(block_arg.num_repeat, _global_params))
            block_size += 1
            for _ in range(block_arg.num_repeat - 1):
                block_size += 1

        self.conv_seq = []
        cur_stage = 1
        for block_args in _block_args:
            block_args = block_args._replace(
                input_filters=round_filters(block_args.input_filters,
                                            _global_params),
                output_filters=round_filters(block_args.output_filters,
                                             _global_params),
                num_repeat=round_repeats(block_args.num_repeat,
                                         _global_params))

            drop_connect_rate = self._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / block_size

            _mc_block = self.add_sublayer(
                "_blocks." + str(idx) + ".",
                MbConvBlock(
                    block_args.input_filters,
                    block_args=block_args,
                    padding_type=padding_type,
                    use_se=use_se,
                    name="_blocks." + str(idx) + ".",
                    drop_connect_rate=drop_connect_rate,
                    model_name=model_name,
                    cur_stage=cur_stage))
            self.conv_seq.append(_mc_block)
            idx += 1
            if block_args.num_repeat > 1:
                block_args = block_args._replace(
                    input_filters=block_args.output_filters, stride=1)
            for _ in range(block_args.num_repeat - 1):
                drop_connect_rate = self._global_params.drop_connect_rate
                if drop_connect_rate:
                    drop_connect_rate *= float(idx) / block_size
                _mc_block = self.add_sublayer(
                    "block." + str(idx) + ".",
                    MbConvBlock(
                        block_args.input_filters,
                        block_args,
                        padding_type=padding_type,
                        use_se=use_se,
                        name="_blocks." + str(idx) + ".",
                        drop_connect_rate=drop_connect_rate,
                        model_name=model_name,
                        cur_stage=cur_stage))
                self.conv_seq.append(_mc_block)
                idx += 1
            cur_stage += 1

    def forward(self, inputs):
        x = self._conv_stem(inputs)
        x = F.swish(x)
        for _mc_block in self.conv_seq:
            x = _mc_block(x)
        return x


class EfficientNet(nn.Layer):
    def __init__(self,
                 name="b0",
                 padding_type="SAME",
                 override_params=None,
                 use_se=True,
                 class_num=1000):
        super(EfficientNet, self).__init__()

        model_name = 'efficientnet-' + name
        self.name = name
        self._block_args, self._global_params = get_model_params(
            model_name, override_params)
        self.padding_type = padding_type
        self.use_se = use_se

        self._ef = ExtractFeatures(
            3,
            self._block_args,
            self._global_params,
            self.padding_type,
            self.use_se,
            model_name=self.name)

        output_channels = round_filters(1280, self._global_params)
        if name == "b0_small" or name == "b0" or name == "b1":
            oup = 320
        elif name == "b2":
            oup = 352
        elif name == "b3":
            oup = 384
        elif name == "b4":
            oup = 448
        elif name == "b5":
            oup = 512
        elif name == "b6":
            oup = 576
        elif name == "b7":
            oup = 640
        self._conv = ConvBNLayer(
            oup,
            1,
            output_channels,
            bn_act="swish",
            padding_type=self.padding_type,
            name="",
            conv_name="_conv_head",
            bn_name="_bn1",
            model_name=self.name,
            cur_stage=7)
        self._pool = AdaptiveAvgPool2D(1)

        if self._global_params.dropout_rate:
            self._drop = Dropout(
                p=self._global_params.dropout_rate, mode="upscale_in_train")

        param_attr, bias_attr = init_fc_layer("_fc")
        self._fc = Linear(
            output_channels,
            class_num,
            weight_attr=param_attr,
            bias_attr=bias_attr)

    def forward(self, inputs):
        x = self._ef(inputs)
        x = self._conv(x)
        x = self._pool(x)
        if self._global_params.dropout_rate:
            x = self._drop(x)
        x = paddle.squeeze(x, axis=[2, 3])
        x = self._fc(x)
        return x


def _load_pretrained(pretrained, model, model_url, use_ssld=False):
    if pretrained is False:
        pass
    elif pretrained is True:
        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
    elif isinstance(pretrained, str):
        load_dygraph_pretrain(model, pretrained)
    else:
        raise RuntimeError(
            "pretrained type is not supported. Please use a `str` or `bool` value."
        )


def EfficientNetB0_small(padding_type='DYNAMIC',
                         override_params=None,
                         use_se=False,
                         pretrained=False,
                         use_ssld=False,
                         **kwargs):
    model = EfficientNet(
        name='b0',
        padding_type=padding_type,
        override_params=override_params,
        use_se=use_se,
        **kwargs)
    _load_pretrained(
        pretrained,
        model,
        MODEL_URLS["EfficientNetB0_small"],
        use_ssld=use_ssld)
    return model


def EfficientNetB0(padding_type='SAME',
                   override_params=None,
                   use_se=True,
                   pretrained=False,
                   use_ssld=False,
                   **kwargs):
    model = EfficientNet(
        name='b0',
        padding_type=padding_type,
        override_params=override_params,
        use_se=use_se,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["EfficientNetB0"], use_ssld=use_ssld)
    return model


def EfficientNetB1(padding_type='SAME',
                   override_params=None,
                   use_se=True,
                   pretrained=False,
                   use_ssld=False,
                   **kwargs):
    model = EfficientNet(
        name='b1',
        padding_type=padding_type,
        override_params=override_params,
        use_se=use_se,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["EfficientNetB1"], use_ssld=use_ssld)
    return model


def EfficientNetB2(padding_type='SAME',
                   override_params=None,
                   use_se=True,
                   pretrained=False,
                   use_ssld=False,
                   **kwargs):
    model = EfficientNet(
        name='b2',
        padding_type=padding_type,
        override_params=override_params,
        use_se=use_se,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["EfficientNetB2"], use_ssld=use_ssld)
    return model


def EfficientNetB3(padding_type='SAME',
                   override_params=None,
                   use_se=True,
                   pretrained=False,
                   use_ssld=False,
                   **kwargs):
    model = EfficientNet(
        name='b3',
        padding_type=padding_type,
        override_params=override_params,
        use_se=use_se,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["EfficientNetB3"], use_ssld=use_ssld)
    return model


def EfficientNetB4(padding_type='SAME',
                   override_params=None,
                   use_se=True,
                   pretrained=False,
                   use_ssld=False,
                   **kwargs):
    model = EfficientNet(
        name='b4',
        padding_type=padding_type,
        override_params=override_params,
        use_se=use_se,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["EfficientNetB4"], use_ssld=use_ssld)
    return model


def EfficientNetB5(padding_type='SAME',
                   override_params=None,
                   use_se=True,
                   pretrained=False,
                   use_ssld=False,
                   **kwargs):
    model = EfficientNet(
        name='b5',
        padding_type=padding_type,
        override_params=override_params,
        use_se=use_se,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["EfficientNetB5"], use_ssld=use_ssld)
    return model


def EfficientNetB6(padding_type='SAME',
                   override_params=None,
                   use_se=True,
                   pretrained=False,
                   use_ssld=False,
                   **kwargs):
    model = EfficientNet(
        name='b6',
        padding_type=padding_type,
        override_params=override_params,
        use_se=use_se,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["EfficientNetB6"], use_ssld=use_ssld)
    return model


def EfficientNetB7(padding_type='SAME',
                   override_params=None,
                   use_se=True,
                   pretrained=False,
                   use_ssld=False,
                   **kwargs):
    model = EfficientNet(
        name='b7',
        padding_type=padding_type,
        override_params=override_params,
        use_se=use_se,
        **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["EfficientNetB7"], use_ssld=use_ssld)
    return model
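

# A minimal usage sketch (not part of the original module): build an
# EfficientNetB0 without pretrained weights and run one random NCHW image
# through it; assumes `paddle` is installed and this module is importable.
if __name__ == "__main__":
    net = EfficientNetB0(pretrained=False)
    net.eval()
    image = paddle.rand([1, 3, 224, 224])  # B0 expects 224x224 inputs
    logits = net(image)
    print(logits.shape)  # expected: [1, 1000]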