# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
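
# SE-ResNeXt built with fluid.dygraph layers; this module provides the model
# for the parallel dygraph distributed tests and is driven by runtime_main at
# the bottom of the file.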

from __future__ import print_function

import os
import contextlib
import unittest
import numpy as np
import six
import pickle
import sys

import paddle
import paddle.fluid as fluid
import paddle.fluid.dygraph as dygraph
from paddle.fluid import core
from paddle.fluid.optimizer import SGDOptimizer
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear, BatchNorm
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.layer_helper import LayerHelper
import math
from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase

batch_size = 64
momentum_rate = 0.9
l2_decay = 1.2e-4

train_parameters = {
    "input_size": [3, 224, 224],
    "input_mean": [0.485, 0.456, 0.406],
    "input_std": [0.229, 0.224, 0.225],
    "learning_strategy": {
        "name": "cosine_decay",
        "batch_size": batch_size,
        "epochs": [40, 80, 100],
        "steps": [0.1, 0.01, 0.001, 0.0001]
    },
    "batch_size": batch_size,
    "lr": 0.0125,
    "total_images": 6149,
    "num_epochs": 200
}


def optimizer_setting(params, parameter_list=None):
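    """Build a Momentum optimizer with a cosine-decayed learning rate.

    In dygraph (non-static) mode the parameters to optimize must be passed
    explicitly via parameter_list; in static graph mode they default to the
    parameters of the program.
    """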
    ls = params["learning_strategy"]
    if "total_images" not in params:
        total_images = 6149
    else:
        total_images = params["total_images"]

    batch_size = ls["batch_size"]
    step = int(math.ceil(float(total_images) / batch_size))
    # piecewise-decay boundaries (unused; cosine decay is used below)
    bd = [step * e for e in ls["epochs"]]
    lr = params["lr"]
    num_epochs = params["num_epochs"]
    if fluid._non_static_mode():
        optimizer = fluid.optimizer.Momentum(
            learning_rate=fluid.layers.cosine_decay(learning_rate=lr,
                                                    step_each_epoch=step,
                                                    epochs=num_epochs),
            momentum=momentum_rate,
            regularization=fluid.regularizer.L2Decay(l2_decay),
            parameter_list=parameter_list)
    else:
        optimizer = fluid.optimizer.Momentum(
            learning_rate=fluid.layers.cosine_decay(learning_rate=lr,
                                                    step_each_epoch=step,
                                                    epochs=num_epochs),
            momentum=momentum_rate,
            regularization=fluid.regularizer.L2Decay(l2_decay))

    return optimizer


class ConvBNLayer(fluid.dygraph.Layer):
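    """Conv2D block; the BatchNorm that would normally follow the convolution
    is commented out for the multi-card test (see the note in __init__)."""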

    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act=None):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(num_channels=num_channels,
                            num_filters=num_filters,
                            filter_size=filter_size,
                            stride=stride,
                            padding=(filter_size - 1) // 2,
                            groups=groups,
                            act=None,
                            bias_attr=False)

        # BatchNorm is disabled for the multi-card test; LayerNorm is not used
        # either because of its complex input_shape requirement.
        # self._batch_norm = BatchNorm(num_filters, act=act)

    def forward(self, inputs):
        y = self._conv(inputs)
        # y = self._batch_norm(y)

        return y


class SqueezeExcitation(fluid.dygraph.Layer):
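    """Squeeze-and-Excitation block: global average pooling, a squeeze FC
    (relu), an excitation FC (sigmoid), and a channel-wise rescale of the
    input feature map."""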

    def __init__(self, num_channels, reduction_ratio):

        super(SqueezeExcitation, self).__init__()
        self._num_channels = num_channels
        self._pool = Pool2D(pool_size=0, pool_type='avg', global_pooling=True)
        stdv = 1.0 / math.sqrt(num_channels * 1.0)
        self._squeeze = Linear(
            num_channels,
            num_channels // reduction_ratio,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)),
            act='relu')
        stdv = 1.0 / math.sqrt(num_channels / 16.0 * 1.0)
        self._excitation = Linear(
            num_channels // reduction_ratio,
            num_channels,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)),
            act='sigmoid')

    def forward(self, input):
        y = self._pool(input)
        y = fluid.layers.reshape(y, shape=[-1, self._num_channels])
        y = self._squeeze(y)
        y = self._excitation(y)
        y = fluid.layers.elementwise_mul(x=input, y=y, axis=0)
        return y


class BottleneckBlock(fluid.dygraph.Layer):
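    """SE-ResNeXt bottleneck: 1x1 reduce, 3x3 grouped conv (cardinality),
    1x1 expand, SE rescale, then a (projected) shortcut added with relu."""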

    def __init__(self,
                 num_channels,
                 num_filters,
                 stride,
                 cardinality,
                 reduction_ratio,
                 shortcut=True):
        super(BottleneckBlock, self).__init__()

        self.conv0 = ConvBNLayer(num_channels=num_channels,
                                 num_filters=num_filters,
                                 filter_size=1,
                                 act="relu")
        self.conv1 = ConvBNLayer(num_channels=num_filters,
                                 num_filters=num_filters,
                                 filter_size=3,
                                 stride=stride,
                                 groups=cardinality,
                                 act="relu")
        self.conv2 = ConvBNLayer(num_channels=num_filters,
                                 num_filters=num_filters * 2,
                                 filter_size=1,
                                 act=None)

        self.scale = SqueezeExcitation(num_channels=num_filters * 2,
                                       reduction_ratio=reduction_ratio)

        if not shortcut:
            self.short = ConvBNLayer(num_channels=num_channels,
                                     num_filters=num_filters * 2,
                                     filter_size=1,
                                     stride=stride)

        self.shortcut = shortcut

        self._num_channels_out = num_filters * 2

    def forward(self, inputs):
        y = self.conv0(inputs)
        conv1 = self.conv1(y)
        conv2 = self.conv2(conv1)
        scale = self.scale(conv2)

        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)

        y = fluid.layers.elementwise_add(x=short, y=scale, act='relu')
        return y


class SeResNeXt(fluid.dygraph.Layer):
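    """SE-ResNeXt backbone (50/101/152 layers) followed by a linear
    classifier head."""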

    def __init__(self, layers=50, class_dim=102):
        super(SeResNeXt, self).__init__()

        self.layers = layers
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(supported_layers, layers)

        if layers == 50:
            cardinality = 32
            reduction_ratio = 16
            depth = [3, 4, 6, 3]
            num_filters = [128, 256, 512, 1024]
            self.conv0 = ConvBNLayer(num_channels=3,
                                     num_filters=64,
                                     filter_size=7,
                                     stride=2,
                                     act='relu')
            self.pool = Pool2D(pool_size=3,
                               pool_stride=2,
                               pool_padding=1,
                               pool_type='max')
        elif layers == 101:
            cardinality = 32
            reduction_ratio = 16
            depth = [3, 4, 23, 3]
            num_filters = [128, 256, 512, 1024]
            self.conv0 = ConvBNLayer(num_channels=3,
                                     num_filters=64,
                                     filter_size=7,
                                     stride=2,
                                     act='relu')
            self.pool = Pool2D(pool_size=3,
                               pool_stride=2,
                               pool_padding=1,
                               pool_type='max')
        elif layers == 152:
            cardinality = 64
            reduction_ratio = 16
            depth = [3, 8, 36, 3]
            num_filters = [128, 256, 512, 1024]
            self.conv0 = ConvBNLayer(num_channels=3,
                                     num_filters=64,
                                     filter_size=3,
                                     stride=2,
                                     act='relu')
            self.conv1 = ConvBNLayer(num_channels=64,
                                     num_filters=64,
                                     filter_size=3,
                                     stride=1,
                                     act='relu')
            self.conv2 = ConvBNLayer(num_channels=64,
                                     num_filters=128,
                                     filter_size=3,
                                     stride=1,
                                     act='relu')
            self.pool = Pool2D(pool_size=3,
                               pool_stride=2,
                               pool_padding=1,
                               pool_type='max')

        self.bottleneck_block_list = []
        num_channels = 64
        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(num_channels=num_channels,
                                    num_filters=num_filters[block],
                                    stride=2 if i == 0 and block != 0 else 1,
                                    cardinality=cardinality,
                                    reduction_ratio=reduction_ratio,
                                    shortcut=shortcut))
                num_channels = bottleneck_block._num_channels_out
                self.bottleneck_block_list.append(bottleneck_block)
                shortcut = True

        self.pool2d_avg = Pool2D(pool_size=7,
                                 pool_type='avg',
                                 global_pooling=True)
        stdv = 1.0 / math.sqrt(2048 * 1.0)

        self.pool2d_avg_output = num_filters[-1] * 2 * 1 * 1

        self.out = Linear(
            self.pool2d_avg_output,
            class_dim,
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)))

    def forward(self, inputs):
        if self.layers == 50 or self.layers == 101:
            y = self.conv0(inputs)
            y = self.pool(y)
        elif self.layers == 152:
            y = self.conv0(inputs)
            # chain through y so conv1/conv2 receive the expected channel counts
            y = self.conv1(y)
            y = self.conv2(y)
            y = self.pool(y)

        for bottleneck_block in self.bottleneck_block_list:
            y = bottleneck_block(y)
        y = self.pool2d_avg(y)
        y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_output])
        y = self.out(y)
        return y


class TestSeResNeXt(TestParallelDyGraphRunnerBase):
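    """Runner for the parallel dygraph tests: supplies the SE-ResNeXt model,
    a flowers reader, and the optimizer, and computes one training step's
    loss in run_one_loop."""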

    def get_model(self):
        model = SeResNeXt()
        train_reader = paddle.batch(paddle.dataset.flowers.test(use_xmap=False),
                                    batch_size=train_parameters["batch_size"],
                                    drop_last=True)
        optimizer = optimizer_setting(train_parameters,
                                      parameter_list=model.parameters())
        return model, train_reader, optimizer

    def run_one_loop(self, model, opt, data):
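        # Build the batch as NCHW float32 images scaled to [0, 1] and int64
        # labels, run a forward pass, and return the mean cross-entropy loss.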
        bs = len(data)
        dy_x_data = np.array([x[0].reshape(3, 224, 224)
                              for x in data]).astype('float32')
        dy_x_data = dy_x_data / 255.0
        y_data = np.array([x[1] for x in data]).astype('int64').reshape(bs, 1)
        img = to_variable(dy_x_data)
        label = to_variable(y_data)
        label.stop_gradient = True

        out = model(img)
        softmax_out = fluid.layers.softmax(out, use_cudnn=False)
        loss = fluid.layers.cross_entropy(input=softmax_out, label=label)
        avg_loss = paddle.mean(x=loss)
        return avg_loss


if __name__ == "__main__":
    runtime_main(TestSeResNeXt)