# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
from paddle.nn.initializer import Uniform

import math

from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url

MODEL_URLS = {
    "ResNet50_vc":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vc_pretrained.pdparams",
}

__all__ = list(MODEL_URLS.keys())


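# Conv2D followed by BatchNorm. The convolution is bias-free because the
# BatchNorm layer already provides a learnable offset; `act` is the optional
# activation applied inside BatchNorm.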
class ConvBNLayer(nn.Layer):
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            weight_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
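        # BatchNorm parameter names mirror the conv name ("conv1" ->
        # "bn_conv1", "res2a_branch2a" -> "bn2a_branch2a") so that pretrained
        # weights keyed by these names can be loaded.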
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def forward(self, inputs):
        y = self._conv(inputs)
        y = self._batch_norm(y)
        return y


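# Standard ResNet bottleneck: a 1x1 reduction, a 3x3 conv that carries the
# stride, and a 1x1 expansion to 4x the filter count, with a projection
# shortcut when the input and output shapes differ.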
class BottleneckBlock(nn.Layer):
    def __init__(self,
                 num_channels,
                 num_filters,
                 stride,
                 shortcut=True,
                 name=None):
        super(BottleneckBlock, self).__init__()

        self.conv0 = ConvBNLayer(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=1,
            act='relu',
            name=name + "_branch2a")
        self.conv1 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act='relu',
            name=name + "_branch2b")
        self.conv2 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters * 4,
            filter_size=1,
            act=None,
            name=name + "_branch2c")

        if not shortcut:
            self.short = ConvBNLayer(
                num_channels=num_channels,
                num_filters=num_filters * 4,
                filter_size=1,
                stride=stride,
                name=name + "_branch1")

        self.shortcut = shortcut

        self._num_channels_out = num_filters * 4

    def forward(self, inputs):
        y = self.conv0(inputs)
        conv1 = self.conv1(y)
        conv2 = self.conv2(conv1)

        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)

        y = paddle.add(x=short, y=conv2)
        y = F.relu(y)
        return y


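# ResNet basic block used by the 18- and 34-layer variants: two 3x3
# convolutions plus an (optionally projected) identity shortcut.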
class BasicBlock(nn.Layer):
    def __init__(self,
                 num_channels,
                 num_filters,
                 stride,
                 shortcut=True,
                 name=None):
        super(BasicBlock, self).__init__()
        self.stride = stride
        self.conv0 = ConvBNLayer(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act='relu',
            name=name + "_branch2a")
        self.conv1 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters,
            filter_size=3,
            act=None,
            name=name + "_branch2b")

        if not shortcut:
            self.short = ConvBNLayer(
                num_channels=num_channels,
                num_filters=num_filters,
                filter_size=1,
                stride=stride,
                name=name + "_branch1")

        self.shortcut = shortcut

    def forward(self, inputs):
        y = self.conv0(inputs)
        conv1 = self.conv1(y)

        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)
        y = paddle.add(x=short, y=conv1)
        y = F.relu(y)
        return y


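# ResNet-vc: a plain ResNet whose single 7x7 stride-2 stem convolution is
# replaced by three 3x3 convolutions (32 -> 32 -> 64 channels, the first with
# stride 2), often called the "deep stem".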
class ResNet_vc(nn.Layer):
    def __init__(self, layers=50, class_dim=1000):
        super(ResNet_vc, self).__init__()

        self.layers = layers
        supported_layers = [18, 34, 50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)

        if layers == 18:
            depth = [2, 2, 2, 2]
        elif layers == 34 or layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        num_channels = [64, 256, 512,
                        1024] if layers >= 50 else [64, 64, 128, 256]
        num_filters = [64, 128, 256, 512]

        self.conv1_1 = ConvBNLayer(
            num_channels=3,
            num_filters=32,
            filter_size=3,
            stride=2,
            act='relu',
            name="conv1_1")
        self.conv1_2 = ConvBNLayer(
            num_channels=32,
            num_filters=32,
            filter_size=3,
            stride=1,
            act='relu',
            name="conv1_2")
        self.conv1_3 = ConvBNLayer(
            num_channels=32,
            num_filters=64,
            filter_size=3,
            stride=1,
            act='relu',
            name="conv1_3")

        self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1)

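        # Stages res2-res5: within each stage only the first block may
        # downsample (stride 2 everywhere except res2), and only the first
        # block uses a projection shortcut.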
        self.block_list = []
        if layers >= 50:
            for block in range(len(depth)):
                shortcut = False
                for i in range(depth[block]):
                    if layers in [101, 152] and block == 2:
                        if i == 0:
                            conv_name = "res" + str(block + 2) + "a"
                        else:
                            conv_name = "res" + str(block + 2) + "b" + str(i)
                    else:
                        conv_name = "res" + str(block + 2) + chr(97 + i)
                    bottleneck_block = self.add_sublayer(
                        'bb_%d_%d' % (block, i),
                        BottleneckBlock(
                            num_channels=num_channels[block]
                            if i == 0 else num_filters[block] * 4,
                            num_filters=num_filters[block],
                            stride=2 if i == 0 and block != 0 else 1,
                            shortcut=shortcut,
                            name=conv_name))
                    self.block_list.append(bottleneck_block)
                    shortcut = True
        else:
            for block in range(len(depth)):
                shortcut = False
                for i in range(depth[block]):
                    conv_name = "res" + str(block + 2) + chr(97 + i)
                    basic_block = self.add_sublayer(
                        'bb_%d_%d' % (block, i),
                        BasicBlock(
                            num_channels=num_channels[block]
                            if i == 0 else num_filters[block],
                            num_filters=num_filters[block],
                            stride=2 if i == 0 and block != 0 else 1,
                            shortcut=shortcut,
                            name=conv_name))
                    self.block_list.append(basic_block)
                    shortcut = True

        self.pool2d_avg = AdaptiveAvgPool2D(1)

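        # Feature width after the last stage: num_channels[-1] * 2 equals
        # num_filters[-1] * 4 (2048) for bottleneck nets and num_filters[-1]
        # (512) for basic-block nets; the FC weight is then drawn uniformly
        # from [-1/sqrt(fan_in), 1/sqrt(fan_in)].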
        self.pool2d_avg_channels = num_channels[-1] * 2

        stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0)

        self.out = Linear(
            self.pool2d_avg_channels,
            class_dim,
            weight_attr=ParamAttr(
                initializer=Uniform(-stdv, stdv), name="fc_0.w_0"),
            bias_attr=ParamAttr(name="fc_0.b_0"))

    def forward(self, inputs):
        y = self.conv1_1(inputs)
        y = self.conv1_2(y)
        y = self.conv1_3(y)
        y = self.pool2d_max(y)
        for block in self.block_list:
            y = block(y)
        y = self.pool2d_avg(y)
        y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels])
        y = self.out(y)
        return y

    
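# `pretrained` may be False (random initialization), True (download the
# weights listed in MODEL_URLS), or a local path to a .pdparams checkpoint.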
def _load_pretrained(pretrained, model, model_url, use_ssld=False):
    if pretrained is False:
        pass
    elif pretrained is True:
        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
    elif isinstance(pretrained, str):
        load_dygraph_pretrain(model, pretrained)
    else:
        raise RuntimeError(
            "pretrained type is not available. Please use `string` or `boolean` type."
        )

        
def ResNet50_vc(pretrained=False, use_ssld=False, **kwargs):
    model = ResNet_vc(layers=50, **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["ResNet50_vc"], use_ssld=use_ssld)
    return model
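

# Minimal smoke test, not part of the original module: a sketch assuming a
# working paddle install. It builds ResNet50_vc without pretrained weights and
# runs a dummy batch through it.
if __name__ == "__main__":
    net = ResNet50_vc(pretrained=False)
    x = paddle.rand([1, 3, 224, 224])
    logits = net(x)
    print(logits.shape)  # expect [1, 1000]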