#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math

from test_dist_base import TestDistRunnerBase, runtime_main

import paddle
from paddle import fluid

paddle.enable_static()

# Fix seed for test
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1

train_parameters = {
    "input_size": [3, 224, 224],
    "input_mean": [0.485, 0.456, 0.406],
    "input_std": [0.229, 0.224, 0.225],
    "learning_strategy": {
        "name": "piecewise_decay",
        "epochs": [30, 60, 90],
        "steps": [0.1, 0.01, 0.001, 0.0001],
    },
}


class SE_ResNeXt:
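    # SE-ResNeXt (Squeeze-and-Excitation ResNeXt) backbone built from Paddle
    # static-graph ops. Conv and FC weights use a constant initializer (0.05)
    # so initialization is identical on every worker in the distributed test.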
    def __init__(self, layers=50):
        self.params = train_parameters
        self.layers = layers

    def net(self, input, class_dim=1000):
        layers = self.layers
        supported_layers = [50, 101, 152]
        assert (
            layers in supported_layers
        ), "supported layers are {} but input layer is {}".format(
            supported_layers, layers
        )
        if layers == 50:
            cardinality = 32
            reduction_ratio = 16
            depth = [3, 4, 6, 3]
            num_filters = [128, 256, 512, 1024]

            conv = self.conv_bn_layer(
                input=input, num_filters=64, filter_size=7, stride=2, act='relu'
            )
            conv = paddle.nn.functional.max_pool2d(
                x=conv,
                kernel_size=3,
                stride=2,
                padding=1,
            )
        elif layers == 101:
            cardinality = 32
            reduction_ratio = 16
            depth = [3, 4, 23, 3]
            num_filters = [128, 256, 512, 1024]

            conv = self.conv_bn_layer(
                input=input, num_filters=64, filter_size=7, stride=2, act='relu'
            )
            conv = paddle.nn.functional.max_pool2d(
                x=conv,
                kernel_size=3,
                stride=2,
                padding=1,
            )
        elif layers == 152:
            cardinality = 64
            reduction_ratio = 16
            depth = [3, 8, 36, 3]
            num_filters = [128, 256, 512, 1024]

            conv = self.conv_bn_layer(
                input=input, num_filters=64, filter_size=3, stride=2, act='relu'
            )
            conv = self.conv_bn_layer(
                input=conv, num_filters=64, filter_size=3, stride=1, act='relu'
            )
            conv = self.conv_bn_layer(
                input=conv, num_filters=128, filter_size=3, stride=1, act='relu'
            )
            conv = paddle.nn.functional.max_pool2d(
                x=conv,
                kernel_size=3,
                stride=2,
                padding=1,
            )

        for block in range(len(depth)):
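            # Each stage stacks depth[block] bottleneck blocks; the first block
            # of every stage after the first downsamples with stride 2.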
            for i in range(depth[block]):
                conv = self.bottleneck_block(
                    input=conv,
                    num_filters=num_filters[block],
                    stride=2 if i == 0 and block != 0 else 1,
                    cardinality=cardinality,
                    reduction_ratio=reduction_ratio,
                )

        pool = paddle.nn.functional.adaptive_avg_pool2d(x=conv, output_size=1)
        drop = paddle.nn.functional.dropout(x=pool, p=0.2)

        stdv = 1.0 / math.sqrt(drop.shape[1] * 1.0)
        out = paddle.static.nn.fc(
            x=drop,
            size=class_dim,
            activation='softmax',
            weight_attr=fluid.ParamAttr(
                initializer=paddle.nn.initializer.Constant(value=0.05)
            ),
        )
        return out

    def shortcut(self, input, ch_out, stride):
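        # Projection shortcut: use a 1x1 conv_bn when the channel count or
        # stride changes, otherwise pass the input through unchanged.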
        ch_in = input.shape[1]
        if ch_in != ch_out or stride != 1:
            filter_size = 1
            return self.conv_bn_layer(input, ch_out, filter_size, stride)
        else:
            return input

    def bottleneck_block(
        self, input, num_filters, stride, cardinality, reduction_ratio
    ):
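        # ResNeXt bottleneck: 1x1 reduce -> 3x3 grouped conv (cardinality groups)
        # -> 1x1 expand, rescaled by squeeze-and-excitation, then a residual add.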
        conv0 = self.conv_bn_layer(
            input=input, num_filters=num_filters, filter_size=1, act='relu'
        )
        conv1 = self.conv_bn_layer(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            groups=cardinality,
            act='relu',
        )
        conv2 = self.conv_bn_layer(
            input=conv1, num_filters=num_filters * 2, filter_size=1, act=None
        )
        scale = self.squeeze_excitation(
            input=conv2,
            num_channels=num_filters * 2,
            reduction_ratio=reduction_ratio,
        )

        short = self.shortcut(input, num_filters * 2, stride)

        return paddle.nn.functional.relu(paddle.add(x=short, y=scale))

    def conv_bn_layer(
        self, input, num_filters, filter_size, stride=1, groups=1, act=None
    ):
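        # Bias-free conv2d followed by batch norm; the activation is applied
        # inside batch_norm, not in the conv.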
        conv = paddle.static.nn.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            # constant init so pserver (CPU) weights match the trainers' GPU init
            param_attr=fluid.ParamAttr(
                initializer=paddle.nn.initializer.Constant(value=0.05)
            ),
            bias_attr=False,
        )
        return paddle.static.nn.batch_norm(input=conv, act=act)

    def squeeze_excitation(self, input, num_channels, reduction_ratio):
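        # Squeeze: global average pool to one value per channel.
        # Excite: FC (reduce by reduction_ratio, relu) -> FC (restore, sigmoid),
        # then scale the input feature map channel-wise.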
        pool = paddle.nn.functional.adaptive_avg_pool2d(x=input, output_size=1)
        stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
        squeeze = paddle.static.nn.fc(
            x=pool,
            size=num_channels // reduction_ratio,
            weight_attr=fluid.ParamAttr(
                initializer=paddle.nn.initializer.Constant(value=0.05)
            ),
            activation='relu',
        )
        stdv = 1.0 / math.sqrt(squeeze.shape[1] * 1.0)
        excitation = paddle.static.nn.fc(
            x=squeeze,
            size=num_channels,
            weight_attr=fluid.ParamAttr(
                initializer=paddle.nn.initializer.Constant(value=0.05)
            ),
            activation='sigmoid',
        )
        scale = paddle.tensor.math._multiply_with_axis(
            x=input, y=excitation, axis=0
        )
        return scale


class DistSeResneXt2x2(TestDistRunnerBase):
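    # Distributed-training test model: builds an SE-ResNeXt-50 program on the
    # flowers dataset (102 classes) for the test_dist_base runner.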
    def get_model(self, batch_size=2, use_dgc=False):
        # Input data
        image = paddle.static.data(
            name="data", shape=[-1, 3, 224, 224], dtype='float32'
        )
        label = paddle.static.data(name="int64", shape=[-1, 1], dtype='int64')

        # Train program
        model = SE_ResNeXt(layers=50)
        out = model.net(input=image, class_dim=102)
        cost = paddle.nn.functional.cross_entropy(
            input=out, label=label, reduction='none', use_softmax=False
        )

        avg_cost = paddle.mean(x=cost)
        acc_top1 = paddle.static.accuracy(input=out, label=label, k=1)
        acc_top5 = paddle.static.accuracy(input=out, label=label, k=5)

        # Evaluator
        test_program = fluid.default_main_program().clone(for_test=True)

        # Optimization
        total_images = 6149  # flowers
        epochs = [30, 60, 90]
        step = int(total_images / batch_size + 1)

        bd = [step * e for e in epochs]
        base_lr = 0.1
        lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
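        # piecewise decay: lr values [0.1, 0.01, 0.001, 0.0001] applied between
        # the step boundaries at epochs 30 / 60 / 90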

        if not use_dgc:
            optimizer = fluid.optimizer.Momentum(
                learning_rate=fluid.layers.piecewise_decay(
                    boundaries=bd, values=lr
                ),
                momentum=0.9,
                regularization=fluid.regularizer.L2Decay(1e-4),
            )
        else:
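            # DGC (Deep Gradient Compression) variant of momentum SGD from
            # paddle.distributed.fleet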
            optimizer = (
                paddle.distributed.fleet.meta_optimizers.DGCMomentumOptimizer(
                    learning_rate=fluid.layers.piecewise_decay(
                        boundaries=bd, values=lr
                    ),
                    momentum=0.9,
                    rampup_begin_step=0,
                    regularization=fluid.regularizer.L2Decay(1e-4),
                )
            )
        optimizer.minimize(avg_cost)

        # Reader
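        # Note: both readers draw from the flowers test split (not the train split).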
        train_reader = paddle.batch(
            paddle.dataset.flowers.test(use_xmap=False), batch_size=batch_size
        )
        test_reader = paddle.batch(
            paddle.dataset.flowers.test(use_xmap=False), batch_size=batch_size
        )

        return test_program, avg_cost, train_reader, test_reader, acc_top1, out


if __name__ == "__main__":
    runtime_main(DistSeResneXt2x2)