# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import Constant, Uniform, Normal
from paddle.regularizer import L2Decay
from ppdet.core.workspace import register
from ppdet.modeling.layers import DeformableConvV2, LiteConv
import numpy as np


@register
class HMHead(nn.Layer):
    """
    Args:
        ch_in (int): The channel number of input Tensor.
        ch_out (int): The channel number of output Tensor.
        num_classes (int): Number of classes.
        conv_num (int): The convolution number of hm_feat.
        dcn_head (bool): whether to use DeformableConvV2 in the head.
            False by default.
        lite_head (bool): whether to use the lite version (LiteConv blocks).
            False by default.
        norm_type (str): norm type, one of 'sync_bn', 'bn' and 'gn'.
            'bn' by default.

    Return:
        Heatmap head output
    """
    __shared__ = ['num_classes', 'norm_type']

    def __init__(
            self,
            ch_in,
            ch_out=128,
            num_classes=80,
            conv_num=2,
            dcn_head=False,
            lite_head=False,
            norm_type='bn', ):
        super(HMHead, self).__init__()
        head_conv = nn.Sequential()
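        # stack conv_num intermediate conv blocks; each one is a LiteConv, a
        # DeformableConvV2 or a plain 3x3 Conv2D followed by an activation,
        # depending on lite_head / dcn_head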
        for i in range(conv_num):
            name = 'conv.{}'.format(i)
            if lite_head:
                lite_name = 'hm.' + name
                head_conv.add_sublayer(
                    lite_name,
                    LiteConv(
                        in_channels=ch_in if i == 0 else ch_out,
                        out_channels=ch_out,
                        norm_type=norm_type))
                head_conv.add_sublayer(lite_name + '.act', nn.ReLU6())
            else:
                if dcn_head:
                    head_conv.add_sublayer(
                        name,
                        DeformableConvV2(
                            in_channels=ch_in if i == 0 else ch_out,
                            out_channels=ch_out,
                            kernel_size=3,
                            weight_attr=ParamAttr(initializer=Normal(0, 0.01))))
                else:
                    head_conv.add_sublayer(
                        name,
                        nn.Conv2D(
                            in_channels=ch_in if i == 0 else ch_out,
                            out_channels=ch_out,
                            kernel_size=3,
                            padding=1,
                            weight_attr=ParamAttr(initializer=Normal(0, 0.01)),
                            bias_attr=ParamAttr(
                                learning_rate=2., regularizer=L2Decay(0.))))
                head_conv.add_sublayer(name + '.act', nn.ReLU())
        self.feat = head_conv
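        # focal-loss style prior: initialize the classifier bias so that
        # sigmoid(bias) is about 0.01 at the start of training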
        bias_init = float(-np.log((1 - 0.01) / 0.01))
        weight_attr = None if lite_head else ParamAttr(initializer=Normal(0,
                                                                          0.01))
        self.head = nn.Conv2D(
            in_channels=ch_out,
            out_channels=num_classes,
            kernel_size=1,
            weight_attr=weight_attr,
            bias_attr=ParamAttr(
                learning_rate=2.,
                regularizer=L2Decay(0.),
                initializer=Constant(bias_init)))

    def forward(self, feat):
        out = self.feat(feat)
        out = self.head(out)
        return out


@register
class WHHead(nn.Layer):
    """
    Args:
        ch_in (int): The channel number of input Tensor.
        ch_out (int): The channel number of output Tensor.
        conv_num (int): The convolution number of wh_feat.
        dcn_head (bool): whether to use DeformableConvV2 in the head.
            False by default.
        lite_head (bool): whether to use the lite version (LiteConv blocks).
            False by default.
        norm_type (str): norm type, one of 'sync_bn', 'bn' and 'gn'.
            'bn' by default.
    Return:
        Width & Height head output
    """
    __shared__ = ['norm_type']

    def __init__(self,
                 ch_in,
                 ch_out=64,
                 conv_num=2,
                 dcn_head=False,
                 lite_head=False,
                 norm_type='bn'):
        super(WHHead, self).__init__()
        head_conv = nn.Sequential()
        for i in range(conv_num):
            name = 'conv.{}'.format(i)
            if lite_head:
                lite_name = 'wh.' + name
                head_conv.add_sublayer(
                    lite_name,
                    LiteConv(
                        in_channels=ch_in if i == 0 else ch_out,
                        out_channels=ch_out,
                        norm_type=norm_type))
                head_conv.add_sublayer(lite_name + '.act', nn.ReLU6())
            else:
                if dcn_head:
                    head_conv.add_sublayer(
                        name,
                        DeformableConvV2(
                            in_channels=ch_in if i == 0 else ch_out,
                            out_channels=ch_out,
                            kernel_size=3,
                            weight_attr=ParamAttr(initializer=Normal(0, 0.01))))
                else:
                    head_conv.add_sublayer(
                        name,
                        nn.Conv2D(
                            in_channels=ch_in if i == 0 else ch_out,
                            out_channels=ch_out,
                            kernel_size=3,
                            padding=1,
                            weight_attr=ParamAttr(initializer=Normal(0, 0.01)),
                            bias_attr=ParamAttr(
                                learning_rate=2., regularizer=L2Decay(0.))))
                head_conv.add_sublayer(name + '.act', nn.ReLU())

        weight_attr = None if lite_head else ParamAttr(initializer=Normal(0,
                                                                          0.01))
        self.feat = head_conv
        self.head = nn.Conv2D(
            in_channels=ch_out,
            out_channels=4,
            kernel_size=1,
            weight_attr=weight_attr,
            bias_attr=ParamAttr(
                learning_rate=2., regularizer=L2Decay(0.)))

    def forward(self, feat):
        out = self.feat(feat)
        out = self.head(out)
        out = F.relu(out)
        return out


@register
class TTFHead(nn.Layer):
    """
    TTFHead
    Args:
        in_channels (int): the channel number of input to TTFHead.
        num_classes (int): the number of classes, 80 by default.
        hm_head_planes (int): the channel number in heatmap head,
            128 by default.
        wh_head_planes (int): the channel number in width & height head,
            64 by default.
        hm_head_conv_num (int): the number of convolution in heatmap head,
            2 by default.
        wh_head_conv_num (int): the number of convolution in width & height
            head, 2 by default.
        hm_loss (object): Instance of 'CTFocalLoss'.
        wh_loss (object): Instance of 'GIoULoss'.
        wh_offset_base (float): the base offset of width and height,
            16.0 by default.
        down_ratio (int): the actual down_ratio is calculated by base_down_ratio
            (default 16) and the number of upsample layers.
        dcn_head (bool): whether to use DeformableConvV2 in the heads.
            False by default.
        lite_head (bool): whether to use the lite version (LiteConv blocks).
            False by default.
        norm_type (str): norm type, one of 'sync_bn', 'bn' and 'gn'.
            'bn' by default.
        ags_module (bool): whether to use the AGS module to reweight location
            features. False by default.

    """

    __shared__ = ['num_classes', 'down_ratio', 'norm_type']
    __inject__ = ['hm_loss', 'wh_loss']

    def __init__(self,
                 in_channels,
                 num_classes=80,
                 hm_head_planes=128,
                 wh_head_planes=64,
                 hm_head_conv_num=2,
                 wh_head_conv_num=2,
                 hm_loss='CTFocalLoss',
                 wh_loss='GIoULoss',
                 wh_offset_base=16.,
                 down_ratio=4,
                 dcn_head=False,
                 lite_head=False,
                 norm_type='bn',
                 ags_module=False):
        super(TTFHead, self).__init__()
        self.in_channels = in_channels
        self.hm_head = HMHead(in_channels, hm_head_planes, num_classes,
                              hm_head_conv_num, dcn_head, lite_head, norm_type)
        self.wh_head = WHHead(in_channels, wh_head_planes, wh_head_conv_num,
                              dcn_head, lite_head, norm_type)
        self.hm_loss = hm_loss
        self.wh_loss = wh_loss

        self.wh_offset_base = wh_offset_base
        self.down_ratio = down_ratio
        self.ags_module = ags_module

    @classmethod
    def from_config(cls, cfg, input_shape):
        if isinstance(input_shape, (list, tuple)):
            input_shape = input_shape[0]
        return {'in_channels': input_shape.channels, }

    def forward(self, feats):
        hm = self.hm_head(feats)
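        # the wh head outputs (left, top, right, bottom) distances, scaled up
        # by wh_offset_base (16 by default)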
        wh = self.wh_head(feats) * self.wh_offset_base
        return hm, wh

    def filter_box_by_weight(self, pred, target, weight):
        """
        Filter out boxes where ttf_reg_weight is 0, only keep positive samples.
        """
        index = paddle.nonzero(weight > 0)
        index.stop_gradient = True
        weight = paddle.gather_nd(weight, index)
        pred = paddle.gather_nd(pred, index)
        target = paddle.gather_nd(target, index)
        return pred, target, weight

    def filter_loc_by_weight(self, score, weight):
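        """
        Keep only the scores at locations whose regression weight is positive.
        """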
        index = paddle.nonzero(weight > 0)
        index.stop_gradient = True
        score = paddle.gather_nd(score, index)
        return score

    def get_loss(self, pred_hm, pred_wh, target_hm, box_target, target_weight):
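        # squash heatmap logits with sigmoid and clip away exact 0/1 to keep
        # the focal loss numerically stable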
        pred_hm = paddle.clip(F.sigmoid(pred_hm), 1e-4, 1 - 1e-4)
        hm_loss = self.hm_loss(pred_hm, target_hm)
        H, W = target_hm.shape[2:]
        mask = paddle.reshape(target_weight, [-1, H, W])
        avg_factor = paddle.sum(mask) + 1e-4

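        # pixel coordinates of each feature-map location in the input image
        # (stride = down_ratio), used to decode distances into absolute boxes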
        base_step = self.down_ratio
        shifts_x = paddle.arange(0, W * base_step, base_step, dtype='int32')
        shifts_y = paddle.arange(0, H * base_step, base_step, dtype='int32')
        shift_y, shift_x = paddle.tensor.meshgrid([shifts_y, shifts_x])
        base_loc = paddle.stack([shift_x, shift_y], axis=0)
        base_loc.stop_gradient = True

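        # pred_wh holds per-location (left, top, right, bottom) distances;
        # combine them with base_loc to get (x1, y1, x2, y2) boxes, then move
        # channels last to match the box_target layout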
        pred_boxes = paddle.concat(
            [0 - pred_wh[:, 0:2, :, :] + base_loc, pred_wh[:, 2:4] + base_loc],
            axis=1)
        pred_boxes = paddle.transpose(pred_boxes, [0, 2, 3, 1])
        boxes = paddle.transpose(box_target, [0, 2, 3, 1])
        boxes.stop_gradient = True

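        # AGS module: derive a location score map from the class-wise max of
        # the heatmap and pass it to the wh loss (GIoULoss by default) as
        # loc_reweight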
        if self.ags_module:
            pred_hm_max = paddle.max(pred_hm, axis=1, keepdim=True)
            pred_hm_max_softmax = F.softmax(pred_hm_max, axis=1)
            pred_hm_max_softmax = paddle.transpose(pred_hm_max_softmax,
                                                   [0, 2, 3, 1])
            pred_hm_max_softmax = self.filter_loc_by_weight(pred_hm_max_softmax,
                                                            mask)
        else:
            pred_hm_max_softmax = None

        pred_boxes, boxes, mask = self.filter_box_by_weight(pred_boxes, boxes,
                                                            mask)
        mask.stop_gradient = True
        wh_loss = self.wh_loss(
            pred_boxes,
            boxes,
            iou_weight=mask.unsqueeze(1),
            loc_reweight=pred_hm_max_softmax)
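        # normalize the box loss by the summed regression weights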
        wh_loss = wh_loss / avg_factor

        ttf_loss = {'hm_loss': hm_loss, 'wh_loss': wh_loss}
        return ttf_loss