db_fpn.py
# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
from paddle import nn
import paddle.nn.functional as F
from paddle import ParamAttr


class DBFPN(nn.Layer):
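    """FPN-style neck used by the DB (Differentiable Binarization) text detector.

    Takes the four backbone feature maps (strides 4, 8, 16 and 32), builds a
    top-down pyramid, and fuses all levels into a single feature map with
    `out_channels` channels at 1/4 of the input resolution.
    """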
    def __init__(self, in_channels, out_channels, **kwargs):
        super(DBFPN, self).__init__()
        self.out_channels = out_channels
        # shared Kaiming-normal weight initializer for all conv layers below
        weight_attr = paddle.nn.initializer.KaimingNormal()

        self.in2_conv = nn.Conv2D(
            in_channels=in_channels[0],
            out_channels=self.out_channels,
            kernel_size=1,
            weight_attr=ParamAttr(
                name='conv2d_51.w_0', initializer=weight_attr),
            bias_attr=False)
        self.in3_conv = nn.Conv2D(
            in_channels=in_channels[1],
            out_channels=self.out_channels,
            kernel_size=1,
            weight_attr=ParamAttr(
                name='conv2d_50.w_0', initializer=weight_attr),
            bias_attr=False)
        self.in4_conv = nn.Conv2D(
            in_channels=in_channels[2],
            out_channels=self.out_channels,
            kernel_size=1,
            weight_attr=ParamAttr(
                name='conv2d_49.w_0', initializer=weight_attr),
            bias_attr=False)
        self.in5_conv = nn.Conv2D(
            in_channels=in_channels[3],
            out_channels=self.out_channels,
            kernel_size=1,
            weight_attr=ParamAttr(
                name='conv2d_48.w_0', initializer=weight_attr),
            bias_attr=False)
        self.p5_conv = nn.Conv2D(
            in_channels=self.out_channels,
            out_channels=self.out_channels // 4,
            kernel_size=3,
            padding=1,
            weight_attr=ParamAttr(
                name='conv2d_52.w_0', initializer=weight_attr),
            bias_attr=False)
        self.p4_conv = nn.Conv2D(
            in_channels=self.out_channels,
            out_channels=self.out_channels // 4,
            kernel_size=3,
            padding=1,
            weight_attr=ParamAttr(
                name='conv2d_53.w_0', initializer=weight_attr),
            bias_attr=False)
        self.p3_conv = nn.Conv2D(
            in_channels=self.out_channels,
            out_channels=self.out_channels // 4,
            kernel_size=3,
            padding=1,
            weight_attr=ParamAttr(
                name='conv2d_54.w_0', initializer=weight_attr),
            bias_attr=False)
        self.p2_conv = nn.Conv2D(
            in_channels=self.out_channels,
            out_channels=self.out_channels // 4,
            kernel_size=3,
            padding=1,
            weight_attr=ParamAttr(
                name='conv2d_55.w_0', initializer=weight_attr),
            bias_attr=False)

    def forward(self, x):
        c2, c3, c4, c5 = x

        # 1x1 lateral convs project each backbone stage to out_channels
        in5 = self.in5_conv(c5)
        in4 = self.in4_conv(c4)
        in3 = self.in3_conv(c3)
        in2 = self.in2_conv(c2)

        # top-down pathway: upsample the coarser level and add the lateral feature
        out4 = in4 + F.upsample(in5, scale_factor=2, mode="nearest")  # 1/16
        out3 = in3 + F.upsample(out4, scale_factor=2, mode="nearest")  # 1/8
        out2 = in2 + F.upsample(out3, scale_factor=2, mode="nearest")  # 1/4

        # 3x3 convs smooth each level and reduce channels to out_channels // 4
        p5 = self.p5_conv(in5)
        p4 = self.p4_conv(out4)
        p3 = self.p3_conv(out3)
        p2 = self.p2_conv(out2)
        # upsample every level to 1/4 of the input resolution before fusion
        p5 = F.upsample(p5, scale_factor=8, mode="nearest")
        p4 = F.upsample(p4, scale_factor=4, mode="nearest")
        p3 = F.upsample(p3, scale_factor=2, mode="nearest")

        # channel-wise concatenation of the four levels: (N, out_channels, H/4, W/4)
        fuse = paddle.concat([p5, p4, p3, p2], axis=1)
        return fuse
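

# A minimal usage sketch, not part of the original file: the backbone channel
# counts and `out_channels` below are illustrative assumptions (roughly a
# MobileNetV3-style detector config), not values prescribed by this module.
if __name__ == "__main__":
    fpn = DBFPN(in_channels=[16, 24, 56, 480], out_channels=96)
    feats = [
        paddle.rand([1, 16, 160, 160]),  # c2, stride 4 for a 640x640 input
        paddle.rand([1, 24, 80, 80]),  # c3, stride 8
        paddle.rand([1, 56, 40, 40]),  # c4, stride 16
        paddle.rand([1, 480, 20, 20]),  # c5, stride 32
    ]
    fuse = fpn(feats)
    print(fuse.shape)  # expected: [1, 96, 160, 160]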