# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import Constant, Uniform, Normal
from paddle.regularizer import L2Decay
from ppdet.core.workspace import register
from ppdet.modeling.layers import DeformableConvV2, LiteConv
import numpy as np


@register
class HMHead(nn.Layer):
    """
    Args:
        ch_in (int): The channel number of input Tensor.
        ch_out (int): The channel number of output Tensor.
        num_classes (int): Number of classes.
        conv_num (int): The convolution number of hm_feat.
W
wangguanzhong 已提交
34 35 36 37 38
        dcn_head(bool): whether use dcn in head. False by default. 
        lite_head(bool): whether use lite version. False by default.
        norm_type (string): norm type, 'sync_bn', 'bn', 'gn' are optional.
            bn by default

F
Feng Ni 已提交
39 40 41
    Return:
        Heatmap head output
    """
    __shared__ = ['num_classes', 'norm_type']

    def __init__(self,
                 ch_in,
                 ch_out=128,
                 num_classes=80,
                 conv_num=2,
                 dcn_head=False,
                 lite_head=False,
                 norm_type='bn'):
        super(HMHead, self).__init__()
        head_conv = nn.Sequential()
        for i in range(conv_num):
            name = 'conv.{}'.format(i)
            if lite_head:
                lite_name = 'hm.' + name
                head_conv.add_sublayer(
                    lite_name,
                    LiteConv(
                        in_channels=ch_in if i == 0 else ch_out,
                        out_channels=ch_out,
                        norm_type=norm_type))
            else:
                if dcn_head:
                    head_conv.add_sublayer(
                        name,
                        DeformableConvV2(
                            in_channels=ch_in if i == 0 else ch_out,
                            out_channels=ch_out,
                            kernel_size=3,
                            weight_attr=ParamAttr(initializer=Normal(0, 0.01))))
                else:
                    head_conv.add_sublayer(
                        name,
                        nn.Conv2D(
                            in_channels=ch_in if i == 0 else ch_out,
                            out_channels=ch_out,
                            kernel_size=3,
                            padding=1,
                            weight_attr=ParamAttr(initializer=Normal(0, 0.01)),
                            bias_attr=ParamAttr(
                                learning_rate=2., regularizer=L2Decay(0.))))
                head_conv.add_sublayer(name + '.act', nn.ReLU())
        self.feat = head_conv
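        # The bias of the 1x1 classification conv is initialized so that the
        # initial heatmap probability is ~0.01 (the focal-loss prior trick):
        # sigmoid(bias_init) == 0.01.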
        bias_init = float(-np.log((1 - 0.01) / 0.01))
        weight_attr = None if lite_head else ParamAttr(
            initializer=Normal(0, 0.01))
        self.head = nn.Conv2D(
            in_channels=ch_out,
            out_channels=num_classes,
            kernel_size=1,
            weight_attr=weight_attr,
            bias_attr=ParamAttr(
                learning_rate=2.,
                regularizer=L2Decay(0.),
                initializer=Constant(bias_init)))

    def forward(self, feat):
        out = self.feat(feat)
        out = self.head(out)
        return out


@register
class WHHead(nn.Layer):
    """
    Args:
        ch_in (int): The channel number of input Tensor.
        ch_out (int): The channel number of output Tensor.
        conv_num (int): The convolution number of wh_feat.
W
wangguanzhong 已提交
113 114 115 116
        dcn_head(bool): whether use dcn in head. False by default.
        lite_head(bool): whether use lite version. False by default.
        norm_type (string): norm type, 'sync_bn', 'bn', 'gn' are optional.
            bn by default
F
Feng Ni 已提交
117 118 119
    Return:
        Width & Height head output
    """
    __shared__ = ['norm_type']

    def __init__(self,
                 ch_in,
                 ch_out=64,
                 conv_num=2,
                 dcn_head=False,
                 lite_head=False,
                 norm_type='bn'):
        super(WHHead, self).__init__()
        head_conv = nn.Sequential()
        for i in range(conv_num):
            name = 'conv.{}'.format(i)
            if lite_head:
                lite_name = 'wh.' + name
                head_conv.add_sublayer(
                    lite_name,
                    LiteConv(
                        in_channels=ch_in if i == 0 else ch_out,
                        out_channels=ch_out,
                        norm_type=norm_type))
            else:
                if dcn_head:
                    head_conv.add_sublayer(
                        name,
                        DeformableConvV2(
                            in_channels=ch_in if i == 0 else ch_out,
                            out_channels=ch_out,
                            kernel_size=3,
                            weight_attr=ParamAttr(initializer=Normal(0, 0.01))))
                else:
                    head_conv.add_sublayer(
                        name,
                        nn.Conv2D(
                            in_channels=ch_in if i == 0 else ch_out,
                            out_channels=ch_out,
                            kernel_size=3,
                            padding=1,
                            weight_attr=ParamAttr(initializer=Normal(0, 0.01)),
                            bias_attr=ParamAttr(
                                learning_rate=2., regularizer=L2Decay(0.))))
                head_conv.add_sublayer(name + '.act', nn.ReLU())

        weight_attr = None if lite_head else ParamAttr(
            initializer=Normal(0, 0.01))
        self.feat = head_conv
        self.head = nn.Conv2D(
            in_channels=ch_out,
            out_channels=4,
            kernel_size=1,
            weight_attr=weight_attr,
            bias_attr=ParamAttr(
                learning_rate=2., regularizer=L2Decay(0.)))

    def forward(self, feat):
        out = self.feat(feat)
        out = self.head(out)
        out = F.relu(out)
        return out


@register
class TTFHead(nn.Layer):
    """
    TTFHead
    Args:
        in_channels (int): the channel number of the input to TTFHead.
        num_classes (int): the number of classes, 80 by default.
        hm_head_planes (int): the channel number in the heatmap head,
            128 by default.
        wh_head_planes (int): the channel number in the width & height head,
            64 by default.
        hm_head_conv_num (int): the number of convolution layers in the
            heatmap head, 2 by default.
        wh_head_conv_num (int): the number of convolution layers in the
            width & height head, 2 by default.
        hm_loss (object): instance of 'CTFocalLoss'.
        wh_loss (object): instance of 'GIoULoss'.
        wh_offset_base (float): the base scale applied to the width & height
            predictions, 16.0 by default.
        down_ratio (int): the downsample ratio of the output feature map; the
            actual value is determined by base_down_ratio (16 by default) and
            the number of upsample layers. 4 by default.
        dcn_head (bool): whether to use DeformableConvV2 in the heads.
            False by default.
        lite_head (bool): whether to use the lite version of the heads.
            False by default.
        norm_type (str): normalization type; 'sync_bn', 'bn' and 'gn' are
            optional. 'bn' by default.
        ags_module (bool): whether to use the AGS module to reweight the
            localization features. False by default.
    """

    __shared__ = ['num_classes', 'down_ratio', 'norm_type']
    __inject__ = ['hm_loss', 'wh_loss']

    def __init__(self,
                 in_channels,
                 num_classes=80,
                 hm_head_planes=128,
                 wh_head_planes=64,
                 hm_head_conv_num=2,
                 wh_head_conv_num=2,
                 hm_loss='CTFocalLoss',
                 wh_loss='GIoULoss',
                 wh_offset_base=16.,
                 down_ratio=4,
                 dcn_head=False,
                 lite_head=False,
                 norm_type='bn',
                 ags_module=False):
        super(TTFHead, self).__init__()
        self.in_channels = in_channels
        self.hm_head = HMHead(in_channels, hm_head_planes, num_classes,
                              hm_head_conv_num, dcn_head, lite_head, norm_type)
        self.wh_head = WHHead(in_channels, wh_head_planes, wh_head_conv_num,
                              dcn_head, lite_head, norm_type)
        self.hm_loss = hm_loss
        self.wh_loss = wh_loss

        self.wh_offset_base = wh_offset_base
        self.down_ratio = down_ratio
        self.ags_module = ags_module

    @classmethod
    def from_config(cls, cfg, input_shape):
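        # Infer in_channels from the output shape of the upstream module
        # (e.g. the neck feeding this head), so configs need not repeat it.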
        if isinstance(input_shape, (list, tuple)):
            input_shape = input_shape[0]
        return {'in_channels': input_shape.channels, }

    def forward(self, feats):
        hm = self.hm_head(feats)
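        # The raw WHHead output is scaled by wh_offset_base (16 by default) so
        # the four channels become (left, top, right, bottom) distances in
        # input-image pixels, matching the decoding in get_loss().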
        wh = self.wh_head(feats) * self.wh_offset_base
        return hm, wh

    def filter_box_by_weight(self, pred, target, weight):
        """
        Filter out boxes where ttf_reg_weight is 0; only positive samples are kept.
        """
        index = paddle.nonzero(weight > 0)
        index.stop_gradient = True
        weight = paddle.gather_nd(weight, index)
        pred = paddle.gather_nd(pred, index)
        target = paddle.gather_nd(target, index)
        return pred, target, weight

    def filter_loc_by_weight(self, score, weight):
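        """
        Keep only the scores at locations whose regression weight is positive,
        mirroring filter_box_by_weight for the AGS reweighting scores.
        """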
        index = paddle.nonzero(weight > 0)
        index.stop_gradient = True
        score = paddle.gather_nd(score, index)
        return score

    def get_loss(self, pred_hm, pred_wh, target_hm, box_target, target_weight):
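        # Heatmap branch: apply sigmoid and clip away from exactly 0 and 1 so
        # the CT focal loss stays numerically stable.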
        pred_hm = paddle.clip(F.sigmoid(pred_hm), 1e-4, 1 - 1e-4)
        hm_loss = self.hm_loss(pred_hm, target_hm)
        H, W = target_hm.shape[2:]
        mask = paddle.reshape(target_weight, [-1, H, W])
        avg_factor = paddle.sum(mask) + 1e-4

        base_step = self.down_ratio
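        # Build the grid of feature-map locations in input-image coordinates:
        # cell (x, y) of the output corresponds to pixel
        # (x * down_ratio, y * down_ratio) of the input.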
        shifts_x = paddle.arange(0, W * base_step, base_step, dtype='int32')
        shifts_y = paddle.arange(0, H * base_step, base_step, dtype='int32')
        shift_y, shift_x = paddle.tensor.meshgrid([shifts_y, shifts_x])
        base_loc = paddle.stack([shift_x, shift_y], axis=0)
        base_loc.stop_gradient = True

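        # Decode the per-location (left, top, right, bottom) distances into
        # absolute boxes: (x1, y1) = loc - (l, t), (x2, y2) = loc + (r, b).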
        pred_boxes = paddle.concat(
            [0 - pred_wh[:, 0:2, :, :] + base_loc, pred_wh[:, 2:4] + base_loc],
            axis=1)
        pred_boxes = paddle.transpose(pred_boxes, [0, 2, 3, 1])
        boxes = paddle.transpose(box_target, [0, 2, 3, 1])
        boxes.stop_gradient = True

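        # AGS module: take the class-wise peak of the predicted heatmap at each
        # positive location and pass it to the GIoU loss as a reweighting term.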
        if self.ags_module:
            pred_hm_max = paddle.max(pred_hm, axis=1, keepdim=True)
            pred_hm_max_softmax = F.softmax(pred_hm_max, axis=1)
            pred_hm_max_softmax = paddle.transpose(pred_hm_max_softmax,
                                                   [0, 2, 3, 1])
            pred_hm_max_softmax = self.filter_loc_by_weight(pred_hm_max_softmax,
                                                            mask)
        else:
            pred_hm_max_softmax = None

        pred_boxes, boxes, mask = self.filter_box_by_weight(pred_boxes, boxes,
                                                            mask)
        mask.stop_gradient = True
        wh_loss = self.wh_loss(
            pred_boxes,
            boxes,
            iou_weight=mask.unsqueeze(1),
            loc_reweight=pred_hm_max_softmax)
        wh_loss = wh_loss / avg_factor

        ttf_loss = {'hm_loss': hm_loss, 'wh_loss': wh_loss}
        return ttf_loss