# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from collections import OrderedDict

from paddle import fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.framework import Variable
from paddle.fluid.regularizer import L2Decay

from ppdet.core.workspace import register, serializable
from numbers import Integral
from paddle.fluid.initializer import MSRA
import math

from .name_adapter import NameAdapter

__all__ = ['HRNet']


@register
@serializable
class HRNet(object):
    """
    HRNet, see https://arxiv.org/abs/1908.07919
    Args:
G
Guanghua Yu 已提交
42 43
        width (int): network width, should be 18, 30, 32, 40, 44, 48, 60 or 64
        has_se (bool): whether contain squeeze_excitation(SE) block or not
44
        freeze_at (int): freeze the backbone at which stage
G
Guanghua Yu 已提交
45
        norm_type (str): normalization type, 'bn'/'sync_bn'
46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66
        freeze_norm (bool): freeze normalization layers
        norm_decay (float): weight decay for normalization layer weights
        feature_maps (list): index of stages whose feature maps are returned
    """

    def __init__(self,
                 width=40,
                 has_se=False,
                 freeze_at=2,
                 norm_type='bn',
                 freeze_norm=True,
                 norm_decay=0.,
                 feature_maps=[2, 3, 4, 5]):
        super(HRNet, self).__init__()

        if isinstance(feature_maps, Integral):
            feature_maps = [feature_maps]

        assert 0 <= freeze_at <= 4, "freeze_at should be 0, 1, 2, 3 or 4"
        assert len(feature_maps) > 0, "need one or more feature maps"
        assert norm_type in ['bn', 'sync_bn']

        self.width = width
        self.has_se = has_se
        self.channels = {
            18: [[18, 36], [18, 36, 72], [18, 36, 72, 144]],
            30: [[30, 60], [30, 60, 120], [30, 60, 120, 240]],
            32: [[32, 64], [32, 64, 128], [32, 64, 128, 256]],
            40: [[40, 80], [40, 80, 160], [40, 80, 160, 320]],
            44: [[44, 88], [44, 88, 176], [44, 88, 176, 352]],
            48: [[48, 96], [48, 96, 192], [48, 96, 192, 384]],
            60: [[60, 120], [60, 120, 240], [60, 120, 240, 480]],
            64: [[64, 128], [64, 128, 256], [64, 128, 256, 512]],
        }

        self.freeze_at = freeze_at
        self.norm_type = norm_type
        self.norm_decay = norm_decay
        self.freeze_norm = freeze_norm
        self._model_type = 'HRNet'
        self.feature_maps = feature_maps
        self.end_points = []
        return

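    # Builds the full backbone: a stem of two stride-2 3x3 convs brings the
    # input to 1/4 resolution, layer1 applies four bottleneck blocks, and each
    # subsequent (transition, stage) pair adds one lower-resolution branch
    # while keeping the existing branches in parallel. `class_dim` is unused
    # on this detection path.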
    def net(self, input, class_dim=1000):
        width = self.width
        channels_2, channels_3, channels_4 = self.channels[width]
        num_modules_2, num_modules_3, num_modules_4 = 1, 4, 3

        x = self.conv_bn_layer(
            input=input,
            filter_size=3,
            num_filters=64,
            stride=2,
            if_act=True,
            name='layer1_1')
        x = self.conv_bn_layer(
            input=x,
            filter_size=3,
            num_filters=64,
            stride=2,
            if_act=True,
            name='layer1_2')

        la1 = self.layer1(x, name='layer2')
        tr1 = self.transition_layer([la1], [256], channels_2, name='tr1')
        st2 = self.stage(tr1, num_modules_2, channels_2, name='st2')
        tr2 = self.transition_layer(st2, channels_2, channels_3, name='tr2')
        st3 = self.stage(tr2, num_modules_3, channels_3, name='st3')
        tr3 = self.transition_layer(st3, channels_3, channels_4, name='tr3')
        st4 = self.stage(tr3, num_modules_4, channels_4, name='st4')

        self.end_points = st4
        return st4[-1]

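    # Stage 1: four residual bottleneck blocks at 1/4 resolution; only the
    # first block projects the shortcut (64 -> 256 channels).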
    def layer1(self, input, name=None):
        conv = input
        for i in range(4):
            conv = self.bottleneck_block(
                conv,
                num_filters=64,
                downsample=(i == 0),
                name=name + '_' + str(i + 1))
        return conv

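    # Matches branch widths between stages: an existing branch gets a 3x3 conv
    # when its channel count changes (or passes through unchanged), and each
    # new branch is created from the lowest-resolution input with a stride-2
    # 3x3 conv.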
    def transition_layer(self, x, in_channels, out_channels, name=None):
        num_in = len(in_channels)
        num_out = len(out_channels)
        out = []
        for i in range(num_out):
            if i < num_in:
                if in_channels[i] != out_channels[i]:
                    residual = self.conv_bn_layer(
                        x[i],
                        filter_size=3,
                        num_filters=out_channels[i],
                        name=name + '_layer_' + str(i + 1))
                    out.append(residual)
                else:
                    out.append(x[i])
            else:
                residual = self.conv_bn_layer(
                    x[-1],
                    filter_size=3,
                    num_filters=out_channels[i],
                    stride=2,
                    name=name + '_layer_' + str(i + 1))
                out.append(residual)
        return out

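    # Runs `block_num` basic residual blocks independently on each branch.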
    def branches(self, x, block_num, channels, name=None):
        out = []
        for i in range(len(channels)):
            residual = x[i]
            for j in range(block_num):
                residual = self.basic_block(
                    residual,
                    channels[i],
                    name=name + '_branch_layer_' + str(i + 1) + '_' +
                    str(j + 1))
            out.append(residual)
        return out

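    # Cross-branch fusion: lower-resolution branches (j > i) are projected
    # with a 1x1 conv and upsampled by nearest-neighbor resize, while
    # higher-resolution branches (j < i) are downsampled with repeated
    # stride-2 3x3 convs; the aligned maps are summed and passed through ReLU.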
    def fuse_layers(self, x, channels, multi_scale_output=True, name=None):
        out = []
        for i in range(len(channels) if multi_scale_output else 1):
            residual = x[i]
            for j in range(len(channels)):
                if j > i:
                    y = self.conv_bn_layer(
                        x[j],
                        filter_size=1,
                        num_filters=channels[i],
                        if_act=False,
                        name=name + '_layer_' + str(i + 1) + '_' + str(j + 1))
                    y = fluid.layers.resize_nearest(input=y, scale=2**(j - i))
                    residual = fluid.layers.elementwise_add(
                        x=residual, y=y, act=None)
                elif j < i:
                    y = x[j]
                    for k in range(i - j):
                        if k == i - j - 1:
                            y = self.conv_bn_layer(
                                y,
                                filter_size=3,
                                num_filters=channels[i],
                                stride=2,
                                if_act=False,
                                name=name + '_layer_' + str(i + 1) + '_' +
                                str(j + 1) + '_' + str(k + 1))
                        else:
                            y = self.conv_bn_layer(
                                y,
                                filter_size=3,
                                num_filters=channels[j],
                                stride=2,
                                name=name + '_layer_' + str(i + 1) + '_' +
                                str(j + 1) + '_' + str(k + 1))
                    residual = fluid.layers.elementwise_add(
                        x=residual, y=y, act=None)

            residual = fluid.layers.relu(residual)
            out.append(residual)
        return out

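    # One HRNet module: per-branch residual blocks followed by cross-branch
    # fusion.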
    def high_resolution_module(self,
                               x,
                               channels,
                               multi_scale_output=True,
                               name=None):
        residual = self.branches(x, 4, channels, name=name)
        out = self.fuse_layers(
            residual,
            channels,
            multi_scale_output=multi_scale_output,
            name=name)
        return out

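    # A stage chains `num_modules` high-resolution modules; only the last one
    # may drop multi-scale output.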
    def stage(self,
              x,
              num_modules,
              channels,
              multi_scale_output=True,
              name=None):
        out = x
        for i in range(num_modules):
            if i == num_modules - 1 and not multi_scale_output:
                out = self.high_resolution_module(
                    out,
                    channels,
                    multi_scale_output=False,
                    name=name + '_' + str(i + 1))
            else:
                out = self.high_resolution_module(
                    out, channels, name=name + '_' + str(i + 1))

        return out

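    # Widens each branch with a 1x1 conv; not called anywhere in this file
    # (it appears to be kept for a classification head).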
    def last_cls_out(self, x, name=None):
        out = []
        num_filters_list = [128, 256, 512, 1024]
        for i in range(len(x)):
            out.append(
                self.conv_bn_layer(
                    input=x[i],
                    filter_size=1,
                    num_filters=num_filters_list[i],
                    name=name + 'conv_' + str(i + 1)))
        return out

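    # Two 3x3 convs with a residual connection; an optional 1x1 projection
    # aligns the shortcut channels, and an SE block is inserted when has_se
    # is set.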
    def basic_block(self,
                    input,
                    num_filters,
                    stride=1,
                    downsample=False,
                    name=None):
        residual = input
        conv = self.conv_bn_layer(
            input=input,
            filter_size=3,
            num_filters=num_filters,
            stride=stride,
            name=name + '_conv1')
        conv = self.conv_bn_layer(
            input=conv,
            filter_size=3,
            num_filters=num_filters,
            if_act=False,
            name=name + '_conv2')
        if downsample:
            residual = self.conv_bn_layer(
                input=input,
                filter_size=1,
                num_filters=num_filters,
                if_act=False,
                name=name + '_downsample')
        if self.has_se:
            conv = self.squeeze_excitation(
                input=conv,
                num_channels=num_filters,
                reduction_ratio=16,
                name='fc' + name)
        return fluid.layers.elementwise_add(x=residual, y=conv, act='relu')

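    # 1x1 -> 3x3 -> 1x1 residual block with 4x channel expansion; the optional
    # 1x1 projection matches the shortcut to num_filters * 4 channels.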
    def bottleneck_block(self,
                         input,
                         num_filters,
                         stride=1,
                         downsample=False,
                         name=None):
        residual = input
        conv = self.conv_bn_layer(
            input=input,
            filter_size=1,
            num_filters=num_filters,
            name=name + '_conv1')
        conv = self.conv_bn_layer(
            input=conv,
            filter_size=3,
            num_filters=num_filters,
            stride=stride,
            name=name + '_conv2')
        conv = self.conv_bn_layer(
            input=conv,
            filter_size=1,
            num_filters=num_filters * 4,
            if_act=False,
            name=name + '_conv3')
        if downsample:
            residual = self.conv_bn_layer(
                input=input,
                filter_size=1,
                num_filters=num_filters * 4,
                if_act=False,
                name=name + '_downsample')
        if self.has_se:
            conv = self.squeeze_excitation(
                input=conv,
                num_channels=num_filters * 4,
                reduction_ratio=16,
                name='fc' + name)
        return fluid.layers.elementwise_add(x=residual, y=conv, act='relu')

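    # Squeeze-and-excitation: global average pooling, an FC bottleneck of
    # num_channels // reduction_ratio units, and a sigmoid gate that rescales
    # the input channels.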
    def squeeze_excitation(self,
                           input,
                           num_channels,
                           reduction_ratio,
                           name=None):
        pool = fluid.layers.pool2d(
            input=input, pool_size=0, pool_type='avg', global_pooling=True)
        stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
        squeeze = fluid.layers.fc(
            input=pool,
            size=num_channels // reduction_ratio,
            act='relu',
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name=name + '_sqz_weights'),
            bias_attr=ParamAttr(name=name + '_sqz_offset'))
        stdv = 1.0 / math.sqrt(squeeze.shape[1] * 1.0)
        excitation = fluid.layers.fc(
            input=squeeze,
            size=num_channels,
            act='sigmoid',
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name=name + '_exc_weights'),
            bias_attr=ParamAttr(name=name + '_exc_offset'))
        scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
        return scale

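    # Conv + BN (+ optional ReLU). Padding is always derived as
    # (filter_size - 1) // 2, so the `padding` argument is effectively unused.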
    def conv_bn_layer(self,
                      input,
                      filter_size,
                      num_filters,
                      stride=1,
                      padding=1,
                      num_groups=1,
                      if_act=True,
                      name=None):
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=num_groups,
            act=None,
            param_attr=ParamAttr(
                initializer=MSRA(), name=name + '_weights'),
            bias_attr=False)
        bn_name = name + '_bn'
        bn = self._bn(input=conv, bn_name=bn_name)
        if if_act:
            bn = fluid.layers.relu(bn)
        return bn

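    # Batch norm with optional freezing: frozen layers use zero learning rate
    # on scale/offset, rely on global (moving) statistics, and stop gradients.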
    def _bn(self, input, act=None, bn_name=None):
384 385 386 387 388 389 390 391 392 393
        norm_lr = 0. if self.freeze_norm else 1.
        norm_decay = self.norm_decay
        pattr = ParamAttr(
            name=bn_name + '_scale',
            learning_rate=norm_lr,
            regularizer=L2Decay(norm_decay))
        battr = ParamAttr(
            name=bn_name + '_offset',
            learning_rate=norm_lr,
            regularizer=L2Decay(norm_decay))

        global_stats = True if self.freeze_norm else False
        out = fluid.layers.batch_norm(
            input=input,
            act=act,
            name=bn_name + '.output.1',
            param_attr=pattr,
            bias_attr=battr,
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance',
            use_global_stats=global_stats)
        scale = fluid.framework._get_var(pattr.name)
        bias = fluid.framework._get_var(battr.name)
        if self.freeze_norm:
            scale.stop_gradient = True
            bias.stop_gradient = True
        return out

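    # Runs the backbone and returns an OrderedDict of the requested stage
    # outputs ('res2_sum' ... 'res5_sum'); outputs of stages up to freeze_at
    # get stop_gradient so the frozen layers receive no updates.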
    def __call__(self, input):
        assert isinstance(input, Variable)
        assert not (set(self.feature_maps) - set([2, 3, 4, 5])), \
            "feature maps {} not in [2, 3, 4, 5]".format(self.feature_maps)

        res_endpoints = []

        res = input
        feature_maps = self.feature_maps
        self.net(input)

        for i in feature_maps:
            res = self.end_points[i - 2]
            if i in self.feature_maps:
                res_endpoints.append(res)
            if self.freeze_at >= i:
                res.stop_gradient = True

        return OrderedDict([('res{}_sum'.format(self.feature_maps[idx]), feat)
                            for idx, feat in enumerate(res_endpoints)])
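
# A minimal usage sketch (assumptions: the static-graph fluid workflow of this
# ppdet version; the `image` name and input shape are illustrative only):
#
#     import paddle.fluid as fluid
#     image = fluid.data(name='image', shape=[None, 3, 512, 512], dtype='float32')
#     backbone = HRNet(width=18, freeze_at=0)
#     body_feats = backbone(image)   # OrderedDict: 'res2_sum' ... 'res5_sum'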