# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
#   
# Licensed under the Apache License, Version 2.0 (the "License");   
# you may not use this file except in compliance with the License.  
# You may obtain a copy of the License at   
#   
#     http://www.apache.org/licenses/LICENSE-2.0    
#   
# Unless required by applicable law or agreed to in writing, software   
# distributed under the License is distributed on an "AS IS" BASIS, 
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
# See the License for the specific language governing permissions and   
# limitations under the License.

import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import XavierUniform
from paddle.regularizer import L2Decay
from ppdet.core.workspace import register, serializable
from ppdet.modeling.layers import ConvNormLayer
from ..shape_spec import ShapeSpec

__all__ = ['FPN']


@register
@serializable
class FPN(nn.Layer):
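    """
    Feature Pyramid Network, see https://arxiv.org/abs/1612.03144

    Args:
        in_channels (list[int]): channel number of each input feature map,
            ordered from the highest-resolution level (e.g. res2) to the
            lowest (e.g. res5).
        out_channel (int): channel number of every output feature map.
        spatial_scales (list[float]): spatial scale (1 / stride) of each
            input feature map; one extra scale is appended per extra stage.
        has_extra_convs (bool): build the extra top levels with strided 3x3
            convs instead of max pooling.
        extra_stage (int): number of extra levels appended on top of the
            inputs, e.g. P6 for Faster R-CNN, P6/P7 for RetinaNet.
        use_c5 (bool): feed the last backbone feature (C5) to the first
            extra conv; otherwise use the last FPN output (P5).
        norm_type (str|None): 'gn' builds the convs as ConvNormLayer with
            normalization; None uses plain nn.Conv2D.
        norm_decay (float): weight decay factor applied to the norm parameters.
        freeze_norm (bool): whether to freeze the norm parameters.
        relu_before_extra_convs (bool): apply ReLU before the second and
            later extra convs.
    """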
    def __init__(self,
                 in_channels,
                 out_channel,
                 spatial_scales=[0.25, 0.125, 0.0625, 0.03125],
                 has_extra_convs=False,
                 extra_stage=1,
                 use_c5=True,
                 norm_type=None,
                 norm_decay=0.,
                 freeze_norm=False,
                 relu_before_extra_convs=True):
        super(FPN, self).__init__()
        self.out_channel = out_channel
        for s in range(extra_stage):
            spatial_scales = spatial_scales + [spatial_scales[-1] / 2.]
        self.spatial_scales = spatial_scales
        self.has_extra_convs = has_extra_convs
        self.extra_stage = extra_stage
        self.use_c5 = use_c5
        self.relu_before_extra_convs = relu_before_extra_convs
        self.norm_type = norm_type
        self.norm_decay = norm_decay
        self.freeze_norm = freeze_norm

        self.lateral_convs = []
        self.fpn_convs = []
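        # fan-out of a 3x3 kernel with out_channel filters, used for Xavier init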
        fan = out_channel * 3 * 3

        # stage indices 0,1,2,3 stand for res2,res3,res4,res5 of the ResNet backbone
        # 0 <= st_stage < ed_stage <= 3
        st_stage = 4 - len(in_channels)
        ed_stage = st_stage + len(in_channels) - 1
        for i in range(st_stage, ed_stage + 1):
            if i == 3:
                lateral_name = 'fpn_inner_res5_sum'
            else:
                lateral_name = 'fpn_inner_res{}_sum_lateral'.format(i + 2)
            in_c = in_channels[i - st_stage]
            if self.norm_type == 'gn':
                lateral = self.add_sublayer(
                    lateral_name,
                    ConvNormLayer(
                        ch_in=in_c,
                        ch_out=out_channel,
                        filter_size=1,
                        stride=1,
                        norm_type=self.norm_type,
                        norm_decay=self.norm_decay,
                        norm_name=lateral_name + '_norm',
                        freeze_norm=self.freeze_norm,
                        initializer=XavierUniform(fan_out=in_c),
                        name=lateral_name))
            else:
                lateral = self.add_sublayer(
                    lateral_name,
                    nn.Conv2D(
                        in_channels=in_c,
                        out_channels=out_channel,
                        kernel_size=1,
                        weight_attr=ParamAttr(
                            initializer=XavierUniform(fan_out=in_c))))
            self.lateral_convs.append(lateral)

            fpn_name = 'fpn_res{}_sum'.format(i + 2)
            if self.norm_type == 'gn':
                fpn_conv = self.add_sublayer(
                    fpn_name,
                    ConvNormLayer(
                        ch_in=out_channel,
                        ch_out=out_channel,
                        filter_size=3,
                        stride=1,
                        norm_type=self.norm_type,
                        norm_decay=self.norm_decay,
                        norm_name=fpn_name + '_norm',
                        freeze_norm=self.freeze_norm,
                        initializer=XavierUniform(fan_out=fan),
                        name=fpn_name))
            else:
                fpn_conv = self.add_sublayer(
                    fpn_name,
                    nn.Conv2D(
                        in_channels=out_channel,
                        out_channels=out_channel,
                        kernel_size=3,
                        padding=1,
                        weight_attr=ParamAttr(
                            initializer=XavierUniform(fan_out=fan))))
            self.fpn_convs.append(fpn_conv)

        # add extra conv levels for RetinaNet(use_c5)/FCOS(use_p5)
        if self.has_extra_convs:
            for i in range(self.extra_stage):
                lvl = ed_stage + 1 + i
                if i == 0 and self.use_c5:
                    in_c = in_channels[-1]
                else:
                    in_c = out_channel
                extra_fpn_name = 'fpn_{}'.format(lvl + 2)
                if self.norm_type == 'gn':
                    extra_fpn_conv = self.add_sublayer(
                        extra_fpn_name,
                        ConvNormLayer(
                            ch_in=in_c,
                            ch_out=out_channel,
                            filter_size=3,
                            stride=2,
                            norm_type=self.norm_type,
                            norm_decay=self.norm_decay,
                            norm_name=extra_fpn_name + '_norm',
                            freeze_norm=self.freeze_norm,
                            initializer=XavierUniform(fan_out=fan),
                            name=extra_fpn_name))
                else:
                    extra_fpn_conv = self.add_sublayer(
                        extra_fpn_name,
                        nn.Conv2D(
                            in_channels=in_c,
                            out_channels=out_channel,
                            kernel_size=3,
                            stride=2,
                            padding=1,
                            weight_attr=ParamAttr(
                                initializer=XavierUniform(fan_out=fan))))
                self.fpn_convs.append(extra_fpn_conv)

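    # Fill in_channels and spatial_scales from the backbone's output ShapeSpecs.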
    @classmethod
    def from_config(cls, cfg, input_shape):
        return {
            'in_channels': [i.channels for i in input_shape],
            'spatial_scales': [1.0 / i.stride for i in input_shape],
        }

    def forward(self, body_feats):
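        # body_feats: backbone feature maps ordered from the highest resolution
        # (e.g. res2) to the lowest (e.g. res5).
        # Step 1: project every level to out_channel with the 1x1 lateral convs.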
        laterals = []
        num_levels = len(body_feats)
        for i in range(num_levels):
            laterals.append(self.lateral_convs[i](body_feats[i]))

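        # Step 2: top-down pathway, upsample each coarser level by 2x (nearest)
        # and add it into the next finer lateral.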
        for i in range(1, num_levels):
            lvl = num_levels - i
            upsample = F.interpolate(
                laterals[lvl],
                scale_factor=2.,
                mode='nearest', )
            laterals[lvl - 1] += upsample

        fpn_output = []
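        # Step 3: smooth each merged map with a 3x3 conv to produce the outputs.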
        for lvl in range(num_levels):
            fpn_output.append(self.fpn_convs[lvl](laterals[lvl]))

        if self.extra_stage > 0:
            # use max pool to get more levels on top of outputs (Faster R-CNN, Mask R-CNN)
            if not self.has_extra_convs:
                assert self.extra_stage == 1, 'extra_stage should be 1 if FPN has no extra convs'
                fpn_output.append(F.max_pool2d(fpn_output[-1], 1, stride=2))
            # add extra conv levels for RetinaNet(use_c5)/FCOS(use_p5)
            else:
                if self.use_c5:
                    extra_source = body_feats[-1]
                else:
                    extra_source = fpn_output[-1]
                fpn_output.append(self.fpn_convs[num_levels](extra_source))

                for i in range(1, self.extra_stage):
                    if self.relu_before_extra_convs:
                        fpn_output.append(self.fpn_convs[num_levels + i](F.relu(
                            fpn_output[-1])))
                    else:
                        fpn_output.append(self.fpn_convs[num_levels + i](
                            fpn_output[-1]))
        return fpn_output

    @property
    def out_shape(self):
        return [
            ShapeSpec(
                channels=self.out_channel, stride=1. / s)
            for s in self.spatial_scales
        ]
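

# Minimal usage sketch (illustrative only): assumes a ResNet-50-style backbone
# whose res2-res5 outputs have 256/512/1024/2048 channels at strides 4/8/16/32.
#
#     import paddle
#     fpn = FPN(in_channels=[256, 512, 1024, 2048], out_channel=256)
#     feats = [paddle.randn([1, c, s, s])
#              for c, s in zip([256, 512, 1024, 2048], [160, 80, 40, 20])]
#     outs = fpn(feats)               # P2-P5 plus one max-pooled P6 by default
#     print([o.shape for o in outs])  # every level has 256 channels
#     print(fpn.out_shape)            # one ShapeSpec(channels, stride) per level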