# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from collections import OrderedDict

from paddle import fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.regularizer import L2Decay

from ppdet.core.workspace import register
from ppdet.modeling.ops import SSDOutputDecoder

__all__ = ['FaceBoxes']


@register
class FaceBoxes(object):
    """
    FaceBoxes: A CPU Real-time Face Detector with High Accuracy.
               See https://arxiv.org/abs/1708.05234

    Args:
        backbone (object): backbone instance
        output_decoder (object): `SSDOutputDecoder` instance
        densities (list|None): the densities of generated density prior boxes,
            this attribute should be a list or tuple of integer lists, one
            list per feature map.
        fixed_sizes (list|None): the fixed sizes of generated density prior boxes,
            this attribute should be a list or tuple of the same length as `densities`.
        num_classes (int): number of output classes.
        steps (list|None): step size of adjacent prior boxes on each feature map.
    """

    __category__ = 'architecture'
    __inject__ = ['backbone', 'output_decoder']
    __shared__ = ['num_classes']

    def __init__(self,
                 backbone="FaceBoxNet",
                 output_decoder=SSDOutputDecoder().__dict__,
                 densities=[[4, 2, 1], [1], [1]],
                 fixed_sizes=[[32., 64., 128.], [256.], [512.]],
                 num_classes=2,
                 steps=[8., 16., 32.]):
        super(FaceBoxes, self).__init__()
        self.backbone = backbone
        self.num_classes = num_classes
        self.output_decoder = output_decoder
        if isinstance(output_decoder, dict):
            self.output_decoder = SSDOutputDecoder(**output_decoder)
        self.densities = densities
        self.fixed_sizes = fixed_sizes
        self.steps = steps

    def build(self, feed_vars, mode='train'):
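        # Builds the FaceBoxes graph: backbone feature maps feed the multi-box
        # head; 'train' mode returns the SSD loss, while 'eval'/'test' return
        # bboxes decoded by the output decoder.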
        im = feed_vars['image']
        if mode == 'train':
            gt_bbox = feed_vars['gt_bbox']
            gt_class = feed_vars['gt_class']

        body_feats = self.backbone(im)
        locs, confs, box, box_var = self._multi_box_head(
            inputs=body_feats, image=im, num_classes=self.num_classes)

        if mode == 'train':
            loss = fluid.layers.ssd_loss(
                locs,
                confs,
                gt_bbox,
                gt_class,
                box,
                box_var,
                overlap_threshold=0.35,
                neg_overlap=0.35)
            loss = fluid.layers.reduce_sum(loss)
            return {'loss': loss}
        else:
            pred = self.output_decoder(locs, confs, box, box_var)
            return {'bbox': pred}

    def _multi_box_head(self, inputs, image, num_classes=2):
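        # For each backbone feature map: generate density prior boxes, then
        # predict box offsets (loc) and class scores (conf) with 3x3 convs,
        # flattening the outputs so all feature maps can be concatenated.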
        def permute_and_reshape(input, last_dim):
            trans = fluid.layers.transpose(input, perm=[0, 2, 3, 1])
            compile_shape = [0, -1, last_dim]
            return fluid.layers.reshape(trans, shape=compile_shape)

        def _is_list_or_tuple_(data):
            return (isinstance(data, list) or isinstance(data, tuple))

        locs, confs = [], []
        boxes, vars = [], []
        b_attr = ParamAttr(learning_rate=2., regularizer=L2Decay(0.))

        for i, input in enumerate(inputs):
            densities = self.densities[i]
            fixed_sizes = self.fixed_sizes[i]
            box, var = fluid.layers.density_prior_box(
                input,
                image,
                densities=densities,
                fixed_sizes=fixed_sizes,
                fixed_ratios=[1.],
                clip=False,
                offset=0.5,
                steps=[self.steps[i]] * 2)
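            # Note: assuming density_prior_box generates density^2 boxes per
            # fixed_size, the first feature map gets 4^2 + 2^2 + 1^2 = 21
            # anchors per location with the default densities above.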

            num_boxes = box.shape[2]

            box = fluid.layers.reshape(box, shape=[-1, 4])
            var = fluid.layers.reshape(var, shape=[-1, 4])
            num_loc_output = num_boxes * 4
            num_conf_output = num_boxes * num_classes
            # get loc
            mbox_loc = fluid.layers.conv2d(
                input, num_loc_output, 3, 1, 1, bias_attr=b_attr)
            loc = permute_and_reshape(mbox_loc, 4)
            # get conf
            mbox_conf = fluid.layers.conv2d(
                input, num_conf_output, 3, 1, 1, bias_attr=b_attr)
            conf = permute_and_reshape(mbox_conf, 2)

            locs.append(loc)
            confs.append(conf)
            boxes.append(box)
            vars.append(var)

        face_mbox_loc = fluid.layers.concat(locs, axis=1)
        face_mbox_conf = fluid.layers.concat(confs, axis=1)
        prior_boxes = fluid.layers.concat(boxes)
        box_vars = fluid.layers.concat(vars)
        return face_mbox_loc, face_mbox_conf, prior_boxes, box_vars

    def _inputs_def(self, image_shape):
        im_shape = [None] + image_shape
        # yapf: disable
        inputs_def = {
            'image':    {'shape': im_shape,  'dtype': 'float32', 'lod_level': 0},
            'im_id':    {'shape': [None, 1], 'dtype': 'int64',   'lod_level': 0},
            'gt_bbox':  {'shape': [None, 4], 'dtype': 'float32', 'lod_level': 1},
            'gt_class': {'shape': [None, 1], 'dtype': 'int32',   'lod_level': 1},
            'im_shape': {'shape': [None, 3], 'dtype': 'int32',   'lod_level': 0},
        }
        # yapf: enable
        return inputs_def

    def build_inputs(
            self,
            image_shape=[3, None, None],
            fields=['image', 'im_id', 'gt_bbox', 'gt_class'],  # for train
            use_dataloader=True,
            iterable=False):
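        # Creates fluid.data placeholders for the requested fields and,
        # when use_dataloader is True, a DataLoader that feeds them from a
        # generator.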
        inputs_def = self._inputs_def(image_shape)
        feed_vars = OrderedDict([(key, fluid.data(
            name=key,
            shape=inputs_def[key]['shape'],
            dtype=inputs_def[key]['dtype'],
            lod_level=inputs_def[key]['lod_level'])) for key in fields])
        loader = fluid.io.DataLoader.from_generator(
            feed_list=list(feed_vars.values()),
            capacity=16,
            use_double_buffer=True,
            iterable=iterable) if use_dataloader else None
        return feed_vars, loader

    def train(self, feed_vars):
        return self.build(feed_vars, 'train')

    def eval(self, feed_vars):
        return self.build(feed_vars, 'eval')

    def test(self, feed_vars, exclude_nms=False):
        assert not exclude_nms, "exclude_nms for {} is not supported currently".format(
            self.__class__.__name__)
        return self.build(feed_vars, 'test')

    def is_bbox_normalized(self):
        return True