seresnext_net.py
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import paddle.fluid as fluid

# A negative threshold disables eager deletion of intermediate variables.
fluid.core._set_eager_deletion_mode(-1, -1, False)

import paddle.fluid.layers.ops as ops
from paddle.fluid.layers.learning_rate_scheduler import cosine_decay
from simple_nets import init_data
from seresnext_test_base import DeviceType
import math
import os

os.environ['CPU_NUM'] = str(4)  # number of CPU places for ParallelExecutor
os.environ['FLAGS_cudnn_deterministic'] = str(1)  # force deterministic cuDNN algorithms

# FIXME(zcd): If the neural net has a dropout_op, the outputs of
# ParallelExecutor and Executor differ: ParallelExecutor copies the
# dropout_op once per device (N copies for N devices), so the two executors
# generate different random numbers. Therefore, when comparing the losses of
# ParallelExecutor and Executor, the dropout_op should be removed.
remove_dropout = False

# FIXME(zcd): If the neural net has batch_norm, the outputs of
# ParallelExecutor and Executor likewise differ.
remove_bn = False

# Use the non-cuDNN convolution kernels so conv results are exactly
# reproducible when comparing executors.
remove_cudnn_conv = True

# These tests compare losses across executors, so both ops are removed.
remove_dropout = True
remove_bn = True


def squeeze_excitation(input, num_channels, reduction_ratio):
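    """Squeeze-and-Excitation block: global average pooling (reshape +
    reduce_mean), an FC bottleneck (reduce by reduction_ratio, then restore
    to num_channels), and channel-wise rescaling of the input."""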
    # pool = fluid.layers.pool2d(
    #    input=input, pool_size=0, pool_type='avg', global_pooling=True)
    conv = input
    shape = conv.shape
    reshape = fluid.layers.reshape(x=conv,
                                   shape=[-1, shape[1], shape[2] * shape[3]])
    pool = fluid.layers.reduce_mean(input=reshape, dim=2)

    squeeze = fluid.layers.fc(input=pool,
                              size=num_channels // reduction_ratio,
                              act='relu')
    excitation = fluid.layers.fc(input=squeeze,
                                 size=num_channels,
                                 act='sigmoid')
    scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
    return scale


def conv_bn_layer(input,
                  num_filters,
                  filter_size,
                  stride=1,
                  groups=1,
                  act=None):
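    """Bias-free conv2d with 'same' padding, followed by batch_norm unless
    remove_bn is set; the convolution avoids cuDNN when remove_cudnn_conv
    is set."""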
    conv = fluid.layers.conv2d(input=input,
                               num_filters=num_filters,
                               filter_size=filter_size,
                               stride=stride,
                               padding=(filter_size - 1) // 2,
                               groups=groups,
                               act=None,
                               use_cudnn=(not remove_cudnn_conv),
                               bias_attr=False)
    return conv if remove_bn else fluid.layers.batch_norm(
        input=conv, act=act, momentum=0.1)


def shortcut(input, ch_out, stride):
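    """Return the input unchanged when channel counts match; otherwise
    project it with conv_bn_layer (1x1 for stride 1, 3x3 otherwise)."""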
    ch_in = input.shape[1]
    if ch_in != ch_out:
        if stride == 1:
            filter_size = 1
        else:
            filter_size = 3
        return conv_bn_layer(input, ch_out, filter_size, stride)
    else:
        return input


def bottleneck_block(input, num_filters, stride, cardinality, reduction_ratio):
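    """SE-ResNeXt bottleneck block: 1x1 conv, 3x3 grouped conv (cardinality
    groups), 1x1 conv, squeeze-excitation, and a residual add with ReLU."""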
    # The number of first 1x1 convolutional channels for each bottleneck
    # building block was halved to reduce the computation cost.
    conv0 = conv_bn_layer(input=input,
                          num_filters=num_filters,
                          filter_size=1,
                          act='relu')
    conv1 = conv_bn_layer(input=conv0,
                          num_filters=num_filters * 2,
                          filter_size=3,
                          stride=stride,
                          groups=cardinality,
                          act='relu')
    conv2 = conv_bn_layer(input=conv1,
                          num_filters=num_filters * 2,
                          filter_size=1,
                          act=None)
    scale = squeeze_excitation(input=conv2,
                               num_channels=num_filters * 2,
                               reduction_ratio=reduction_ratio)

    short = shortcut(input, num_filters * 2, stride)

    return fluid.layers.elementwise_add(x=short, y=scale, act='relu')


img_shape = [3, 224, 224]


def SE_ResNeXt50Small(use_feed):
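    """Build a scaled-down SE-ResNeXt-50 on 3x224x224 inputs and return the
    mean softmax cross-entropy loss."""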

    img = fluid.layers.data(name='image', shape=img_shape, dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')

    conv = conv_bn_layer(input=img,
                         num_filters=16,
                         filter_size=3,
                         stride=2,
                         act='relu')
    conv = conv_bn_layer(input=conv,
                         num_filters=16,
                         filter_size=3,
                         stride=1,
                         act='relu')
    conv = conv_bn_layer(input=conv,
                         num_filters=16,
                         filter_size=3,
                         stride=1,
                         act='relu')
    conv = fluid.layers.pool2d(input=conv,
                               pool_size=3,
                               pool_stride=2,
                               pool_padding=1,
                               pool_type='max')

    # SE-ResNeXt-50 configuration: 32 convolution groups, SE reduction ratio
    # of 16, and four stages with [3, 4, 6, 3] bottleneck blocks each.
    cardinality = 32
    reduction_ratio = 16
    depth = [3, 4, 6, 3]
    num_filters = [128, 256, 512, 1024]

    for block in range(len(depth)):
        for i in range(depth[block]):
            conv = bottleneck_block(input=conv,
                                    num_filters=num_filters[block],
                                    stride=2 if i == 0 and block != 0 else 1,
                                    cardinality=cardinality,
                                    reduction_ratio=reduction_ratio)

    shape = conv.shape
    reshape = fluid.layers.reshape(x=conv,
                                   shape=[-1, shape[1], shape[2] * shape[3]])
    pool = fluid.layers.reduce_mean(input=reshape, dim=2)
    dropout = pool if remove_dropout else fluid.layers.dropout(
        x=pool, dropout_prob=0.2, seed=1)
    # Classifier layer:
    prediction = fluid.layers.fc(input=dropout, size=1000, act='softmax')
    loss = fluid.layers.cross_entropy(input=prediction, label=label)
    loss = fluid.layers.mean(loss)
    return loss


def optimizer(learning_rate=0.01):
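    """Momentum optimizer with a cosine-decayed learning rate and L2 weight
    decay."""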
    optimizer = fluid.optimizer.Momentum(
        learning_rate=cosine_decay(learning_rate=learning_rate,
                                   step_each_epoch=2,
                                   epochs=1),
        momentum=0.9,
        regularization=fluid.regularizer.L2Decay(1e-4))
    return optimizer


model = SE_ResNeXt50Small


def batch_size(use_device):
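    """Per-device batch size for the tests."""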
    if use_device == DeviceType.CUDA:
        # Paddle uses an 8GB P4 GPU for unit tests, so the batch size is reduced.
        return 4
    return 12


def iter(use_device):
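    """Number of training iterations to run for the given device type."""
    # NOTE: shadows the builtin iter(); kept as-is since callers import it by name.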
    if use_device == DeviceType.CUDA:
        return 10
    return 1


# Pre-generate one batch of random data for each device type.
gpu_img, gpu_label = init_data(batch_size=batch_size(use_device=DeviceType.CUDA),
                               img_shape=img_shape,
                               label_range=999)
cpu_img, cpu_label = init_data(batch_size=batch_size(use_device=DeviceType.CPU),
                               img_shape=img_shape,
                               label_range=999)
feed_dict_gpu = {"image": gpu_img, "label": gpu_label}
feed_dict_cpu = {"image": cpu_img, "label": cpu_label}


def feed_dict(use_device):
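    """Return the pre-generated feed dict for the given device type."""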
    if use_device == DeviceType.CUDA:
        return feed_dict_gpu
    return feed_dict_cpu