# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

__all__ = ["ResNet"]


class ConvBNLayer(nn.Layer):
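    """A Conv2D + BatchNorm unit; the activation is fused into the norm.

    With ``is_vd_mode=True`` a 2x2 stride-2 average pool runs before the
    convolution (the ResNet-vd downsampling trick, which keeps information
    that a strided 1x1 shortcut convolution would otherwise discard).
    """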
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 groups=1,
                 is_vd_mode=False,
                 act=None):
        super(ConvBNLayer, self).__init__()

        self.is_vd_mode = is_vd_mode
        self._pool2d_avg = nn.AvgPool2D(
            kernel_size=2, stride=2, padding=0, ceil_mode=True)
        self._conv = nn.Conv2D(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=(kernel_size - 1) // 2,
            groups=groups,
            bias_attr=False)
        self._batch_norm = nn.BatchNorm(out_channels, act=act)

    def forward(self, inputs):
        if self.is_vd_mode:
            inputs = self._pool2d_avg(inputs)
        y = self._conv(inputs)
        y = self._batch_norm(y)
        return y


class BottleneckBlock(nn.Layer):
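    """Bottleneck residual block: 1x1 -> 3x3 -> 1x1 convolutions with a 4x
    channel expansion, so the block outputs ``out_channels * 4`` channels.
    When the shapes differ (``shortcut=False``), a 1x1 projection bridges
    the skip path, using vd-style average pooling everywhere except the
    very first block of the network (``if_first=True``).
    """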
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 shortcut=True,
                 if_first=False):
        super(BottleneckBlock, self).__init__()

        self.conv0 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            act='relu')
        self.conv1 = ConvBNLayer(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=stride,
            act='relu')
        self.conv2 = ConvBNLayer(
            in_channels=out_channels,
            out_channels=out_channels * 4,
            kernel_size=1,
            act=None)

        if not shortcut:
            self.short = ConvBNLayer(
                in_channels=in_channels,
                out_channels=out_channels * 4,
                kernel_size=1,
                stride=1,
                is_vd_mode=not if_first)

        self.shortcut = shortcut

    def forward(self, inputs):
        y = self.conv0(inputs)
        conv1 = self.conv1(y)
        conv2 = self.conv2(conv1)

        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)
        y = paddle.add(x=short, y=conv2)
        y = F.relu(y)
        return y


class BasicBlock(nn.Layer):
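    """Basic residual block (two 3x3 convolutions), used by the 18- and
    34-layer variants; the shortcut follows the same vd scheme as
    BottleneckBlock.
    """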
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 shortcut=True,
                 if_first=False):
        super(BasicBlock, self).__init__()
        self.stride = stride
        self.conv0 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=stride,
            act='relu')
        self.conv1 = ConvBNLayer(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=3,
            act=None)

        if not shortcut:
            self.short = ConvBNLayer(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=1,
                stride=1,
                is_vd_mode=not if_first)

        self.shortcut = shortcut

    def forward(self, inputs):
        y = self.conv0(inputs)
        conv1 = self.conv1(y)

        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)
        y = paddle.add(x=short, y=conv1)
        y = F.relu(y)
        return y


class ResNet(nn.Layer):
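    """ResNet-vd backbone for text detection.

    ``forward`` returns the outputs of the four residual stages (strides
    4, 8, 16 and 32 relative to the input); ``self.out_channels`` lists
    the matching channel counts for a downstream neck such as an FPN.
    """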
    def __init__(self, in_channels=3, layers=50, **kwargs):
        super(ResNet, self).__init__()

        self.layers = layers
        supported_layers = [18, 34, 50, 101, 152, 200]
        assert layers in supported_layers, \
            "supported layers are {}, but the requested depth is {}".format(
                supported_layers, layers)

        if layers == 18:
            depth = [2, 2, 2, 2]
        elif layers == 34 or layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        elif layers == 200:
            depth = [3, 12, 48, 3]
        num_channels = [64, 256, 512,
                        1024] if layers >= 50 else [64, 64, 128, 256]
        num_filters = [64, 128, 256, 512]

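        # ResNet-vd stem: the usual 7x7 stride-2 convolution is replaced by
        # three 3x3 convolutions (the first with stride 2), a similar-cost
        # stem that preserves more spatial detail.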
        self.conv1_1 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=32,
            kernel_size=3,
            stride=2,
            act='relu')
        self.conv1_2 = ConvBNLayer(
            in_channels=32,
            out_channels=32,
            kernel_size=3,
            stride=1,
            act='relu')
        self.conv1_3 = ConvBNLayer(
            in_channels=32,
            out_channels=64,
            kernel_size=3,
            stride=1,
            act='relu')
        self.pool2d_max = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)

        self.stages = []
        self.out_channels = []
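        # Stack depth[block] residual blocks per stage. The first block of a
        # stage uses a projection shortcut (shortcut=False) because the
        # channel count changes, and downsamples with stride 2 except in
        # stage 0; all later blocks use identity shortcuts.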
        if layers >= 50:
            for block in range(len(depth)):
                block_list = []
                shortcut = False
                for i in range(depth[block]):
                    bottleneck_block = self.add_sublayer(
                        'bb_%d_%d' % (block, i),
                        BottleneckBlock(
                            in_channels=num_channels[block]
                            if i == 0 else num_filters[block] * 4,
                            out_channels=num_filters[block],
                            stride=2 if i == 0 and block != 0 else 1,
                            shortcut=shortcut,
                            if_first=block == i == 0))
                    shortcut = True
                    block_list.append(bottleneck_block)
                self.out_channels.append(num_filters[block] * 4)
                self.stages.append(nn.Sequential(*block_list))
        else:
            for block in range(len(depth)):
                block_list = []
                shortcut = False
                for i in range(depth[block]):
                    basic_block = self.add_sublayer(
                        'bb_%d_%d' % (block, i),
                        BasicBlock(
                            in_channels=num_channels[block]
                            if i == 0 else num_filters[block],
                            out_channels=num_filters[block],
                            stride=2 if i == 0 and block != 0 else 1,
                            shortcut=shortcut,
                            if_first=block == i == 0))
                    shortcut = True
                    block_list.append(basic_block)
                self.out_channels.append(num_filters[block])
                self.stages.append(nn.Sequential(*block_list))

    def forward(self, inputs):
        y = self.conv1_1(inputs)
        y = self.conv1_2(y)
        y = self.conv1_3(y)
        y = self.pool2d_max(y)
        out = []
        for block in self.stages:
            y = block(y)
            out.append(y)
        return out
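

if __name__ == "__main__":
    # Minimal smoke test -- illustrative only, not part of the original
    # module. Builds the 50-layer backbone and prints the channel count and
    # shape of each returned feature map; the 640x640 input is an arbitrary
    # example size.
    model = ResNet(in_channels=3, layers=50)
    x = paddle.rand([1, 3, 640, 640])
    feats = model(x)
    for channels, feat in zip(model.out_channels, feats):
        # Expect shapes [1, C, 160, 160] down to [1, C, 20, 20] (strides 4-32).
        print(channels, feat.shape)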