# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
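
# ResNeXt ("Aggregated Residual Transformations for Deep Neural Networks",
# https://arxiv.org/abs/1611.05431) implemented on the Paddle dygraph API.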

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import numpy as np

import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
from paddle.nn.initializer import Uniform

__all__ = [
    "ResNeXt50_32x4d", "ResNeXt50_64x4d", "ResNeXt101_32x4d",
    "ResNeXt101_64x4d", "ResNeXt152_32x4d", "ResNeXt152_64x4d"
]


class ConvBNLayer(nn.Layer):
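    """Conv2D followed by BatchNorm; the optional activation is applied
    inside BatchNorm via its `act` argument."""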
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act=None,
                 name=None,
                 data_format="NCHW"):
        super(ConvBNLayer, self).__init__()
        self._conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            weight_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False,
            data_format=data_format)
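        # Derive the BatchNorm parameter prefix from the conv name
        # ("conv1" -> "bn_conv1", "res..." -> "bn...") so that parameter
        # names line up with the released pretrained weights.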
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance',
            data_layout=data_format)

    def forward(self, inputs):
        y = self._conv(inputs)
        y = self._batch_norm(y)
        return y


class BottleneckBlock(nn.Layer):
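    """ResNeXt bottleneck block: 1x1 reduce, 3x3 grouped conv with
    `cardinality` groups, 1x1 expand, plus a residual shortcut that is
    projected when the shape changes."""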
    def __init__(self,
                 num_channels,
                 num_filters,
                 stride,
                 cardinality,
                 shortcut=True,
                 name=None,
                 data_format="NCHW"):
        super(BottleneckBlock, self).__init__()
        self.conv0 = ConvBNLayer(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=1,
            act='relu',
            name=name + "_branch2a",
            data_format=data_format)
        self.conv1 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters,
            filter_size=3,
            groups=cardinality,
            stride=stride,
            act='relu',
            name=name + "_branch2b",
            data_format=data_format)
        self.conv2 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters * 2 if cardinality == 32 else num_filters,
            filter_size=1,
            act=None,
            name=name + "_branch2c",
            data_format=data_format)

        if not shortcut:
            self.short = ConvBNLayer(
                num_channels=num_channels,
                num_filters=num_filters * 2
                if cardinality == 32 else num_filters,
                filter_size=1,
                stride=stride,
                name=name + "_branch1",
                data_format=data_format)

        self.shortcut = shortcut

    def forward(self, inputs):
        y = self.conv0(inputs)
        conv1 = self.conv1(y)
        conv2 = self.conv2(conv1)

        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)

        y = paddle.add(x=short, y=conv2)
        y = F.relu(y)
        return y


class ResNeXt(nn.Layer):
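    """ResNeXt backbone; `layers` selects the depth (50, 101, or 152) and
    `cardinality` the number of groups in the 3x3 convolutions (32 or 64)."""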
    def __init__(self,
                 layers=50,
                 class_dim=1000,
                 cardinality=32,
                 input_image_channel=3,
                 data_format="NCHW"):
        super(ResNeXt, self).__init__()

        self.layers = layers
        self.data_format = data_format
        self.input_image_channel = input_image_channel
        self.cardinality = cardinality
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)
        supported_cardinality = [32, 64]
        assert cardinality in supported_cardinality, \
            "supported cardinality is {} but input cardinality is {}" \
            .format(supported_cardinality, cardinality)
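        # Number of bottleneck blocks in each of the four residual stages.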
        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
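        # Per-stage channel configuration; the 64-cardinality variants use
        # double the bottleneck width.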
        num_channels = [64, 256, 512, 1024]
        num_filters = [128, 256, 512,
                       1024] if cardinality == 32 else [256, 512, 1024, 2048]

        self.conv = ConvBNLayer(
            num_channels=self.input_image_channel,
            num_filters=64,
            filter_size=7,
            stride=2,
            act='relu',
            name="res_conv1",
            data_format=self.data_format)
        self.pool2d_max = MaxPool2D(
            kernel_size=3, stride=2, padding=1, data_format=self.data_format)

        self.block_list = []
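        # Stack the residual stages. Block names follow the released weights:
        # letter suffixes (res2a, res2b, ...) everywhere except stage res4 of
        # the 101/152-layer nets, which uses numeric suffixes (res4b1, ...).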
        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                if layers in [101, 152] and block == 2:
                    if i == 0:
                        conv_name = "res" + str(block + 2) + "a"
                    else:
                        conv_name = "res" + str(block + 2) + "b" + str(i)
                else:
                    conv_name = "res" + str(block + 2) + chr(97 + i)
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(
                        num_channels=num_channels[block] if i == 0 else
                        num_filters[block] * (64 // self.cardinality),
                        num_filters=num_filters[block],
                        stride=2 if i == 0 and block != 0 else 1,
                        cardinality=self.cardinality,
                        shortcut=shortcut,
                        name=conv_name,
                        data_format=self.data_format))
                self.block_list.append(bottleneck_block)
                shortcut = True

        self.pool2d_avg = AdaptiveAvgPool2D(1, data_format=self.data_format)

        self.pool2d_avg_channels = num_channels[-1] * 2

        stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0)

        self.out = Linear(
            self.pool2d_avg_channels,
            class_dim,
            weight_attr=ParamAttr(
                initializer=Uniform(-stdv, stdv), name="fc_weights"),
            bias_attr=ParamAttr(name="fc_offset"))

    def forward(self, inputs):
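        # fp16_guard marks everything below as float16-safe for static-graph
        # AMP training; outside that setting it is effectively a no-op.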
        with paddle.static.amp.fp16_guard():
            if self.data_format == "NHWC":
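                # Channels-last execution: transpose the NCHW input once at
                # the top; no gradient is needed through the raw input.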
                inputs = paddle.transpose(inputs, [0, 2, 3, 1])
                inputs.stop_gradient = True
            y = self.conv(inputs)
            y = self.pool2d_max(y)
            for block in self.block_list:
                y = block(y)
            y = self.pool2d_avg(y)
            y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels])
            y = self.out(y)
            return y


def ResNeXt50_32x4d(**args):
    model = ResNeXt(layers=50, cardinality=32, **args)
    return model


def ResNeXt50_64x4d(**args):
    model = ResNeXt(layers=50, cardinality=64, **args)
    return model


def ResNeXt101_32x4d(**args):
    model = ResNeXt(layers=101, cardinality=32, **args)
    return model


def ResNeXt101_64x4d(**args):
    model = ResNeXt(layers=101, cardinality=64, **args)
    return model


def ResNeXt152_32x4d(**args):
    model = ResNeXt(layers=152, cardinality=32, **args)
    return model


def ResNeXt152_64x4d(**args):
    model = ResNeXt(layers=152, cardinality=64, **args)
    return model
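

# A minimal smoke test sketch, not part of the original file. It assumes
# PaddlePaddle 2.x in dynamic-graph mode; the 224x224 input size is just an
# example, since the adaptive average pool accepts any sufficiently large
# spatial size.
if __name__ == "__main__":
    model = ResNeXt50_32x4d(class_dim=1000)
    x = paddle.rand([1, 3, 224, 224])
    logits = model(x)
    print(logits.shape)  # expect [1, 1000]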