# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import os
import tempfile
import time
import unittest

import numpy as np
from predictor_utils import PredictorTools

import paddle
from paddle import fluid
from paddle.fluid import core
from paddle.jit.translated_layer import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
from paddle.nn import BatchNorm

SEED = 2020
IMAGENET1000 = 1281167
base_lr = 0.001
momentum_rate = 0.9
l2_decay = 1e-4
# NOTE: Reduce batch_size from 8 to 2 to avoid unittest timeout.
batch_size = 2
epoch_num = 1
place = (
    fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
)


if fluid.is_compiled_with_cuda():
    fluid.set_flags({'FLAGS_cudnn_deterministic': True})


def optimizer_setting(parameter_list=None):
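    """Build a Momentum optimizer with L2 weight decay for the given parameter list."""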
    optimizer = fluid.optimizer.Momentum(
        learning_rate=base_lr,
        momentum=momentum_rate,
        regularization=paddle.regularizer.L2Decay(l2_decay),
        parameter_list=parameter_list,
    )

    return optimizer


class ConvBNLayer(paddle.nn.Layer):
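    """A Conv2D layer followed by BatchNorm, with the optional activation applied inside BatchNorm."""
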
    def __init__(
        self,
        num_channels,
        num_filters,
        filter_size,
        stride=1,
        groups=1,
        act=None,
    ):
        super().__init__()

        self._conv = paddle.nn.Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            bias_attr=False,
        )

        self._batch_norm = BatchNorm(num_filters, act=act)

    def forward(self, inputs):
        y = self._conv(inputs)
        y = self._batch_norm(y)

        return y


class BottleneckBlock(paddle.nn.Layer):
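    """ResNet bottleneck block: 1x1 -> 3x3 -> 1x1 ConvBNLayers plus a (possibly projected) shortcut."""
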
    def __init__(self, num_channels, num_filters, stride, shortcut=True):
        super().__init__()

        self.conv0 = ConvBNLayer(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=1,
            act='relu',
        )
        self.conv1 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act='relu',
        )
        self.conv2 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters * 4,
            filter_size=1,
            act=None,
        )

        if not shortcut:
            self.short = ConvBNLayer(
                num_channels=num_channels,
                num_filters=num_filters * 4,
                filter_size=1,
                stride=stride,
            )

        self.shortcut = shortcut

        self._num_channels_out = num_filters * 4

    def forward(self, inputs):
        y = self.conv0(inputs)
        conv1 = self.conv1(y)
        conv2 = self.conv2(conv1)

        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)

        y = paddle.add(x=short, y=conv2)

        # Apply ReLU to the residual sum by appending the activation through LayerHelper.
        layer_helper = fluid.layer_helper.LayerHelper(
            self.full_name(), act='relu'
        )
        return layer_helper.append_activation(y)


class ResNet(paddle.nn.Layer):
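    """ResNet-50/101/152 built from BottleneckBlock stages, ending in average pooling and a softmax classifier."""
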
    def __init__(self, layers=50, class_dim=102):
        super().__init__()

        self.layers = layers
        supported_layers = [50, 101, 152]
        assert (
            layers in supported_layers
        ), "supported layers are {} but input layer is {}".format(
            supported_layers, layers
        )

        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        num_channels = [64, 256, 512, 1024]
        num_filters = [64, 128, 256, 512]

        self.conv = ConvBNLayer(
            num_channels=3, num_filters=64, filter_size=7, stride=2, act='relu'
        )
        self.pool2d_max = paddle.nn.MaxPool2D(kernel_size=3, stride=2)

        self.bottleneck_block_list = []
        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(
                        num_channels=num_channels[block]
                        if i == 0
                        else num_filters[block] * 4,
                        num_filters=num_filters[block],
                        stride=2 if i == 0 and block != 0 else 1,
                        shortcut=shortcut,
                    ),
                )
                self.bottleneck_block_list.append(bottleneck_block)
                shortcut = True
        self.pool2d_avg = paddle.nn.AdaptiveAvgPool2D(1)

        self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 4 * 1 * 1

        stdv = 1.0 / math.sqrt(2048 * 1.0)

        self.out = paddle.nn.Linear(
            self.pool2d_avg_output,
            class_dim,
            weight_attr=fluid.param_attr.ParamAttr(
                initializer=paddle.nn.initializer.Uniform(-stdv, stdv)
            ),
        )

    def forward(self, inputs):
        y = self.conv(inputs)
        y = self.pool2d_max(y)
        for bottleneck_block in self.bottleneck_block_list:
            y = bottleneck_block(y)
        y = self.pool2d_avg(y)
        y = paddle.reshape(y, shape=[-1, self.pool2d_avg_output])
        pred = self.out(y)
        pred = paddle.nn.functional.softmax(pred)

        return pred


def reader_decorator(reader):
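    """Wrap a sample reader so every item becomes a (3, 224, 224) float32 image and an int64 label."""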
    def __reader__():
        for item in reader():
            img = np.array(item[0]).astype('float32').reshape(3, 224, 224)
            label = np.array(item[1]).astype('int64').reshape(1)
            yield img, label

    return __reader__


class ResNetHelper:
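    """Drives ResNet training (dygraph or to_static) and the different inference paths used by the tests."""
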
    def __init__(self):
        self.temp_dir = tempfile.TemporaryDirectory()
        self.model_save_dir = os.path.join(self.temp_dir.name, 'inference')
        self.model_save_prefix = os.path.join(self.model_save_dir, 'resnet')
        self.model_filename = 'resnet' + INFER_MODEL_SUFFIX
        self.params_filename = 'resnet' + INFER_PARAMS_SUFFIX
        self.dy_state_dict_save_path = os.path.join(
            self.temp_dir.name, 'resnet.dygraph'
        )

    def __del__(self):
        self.temp_dir.cleanup()

    def train(self, to_static, build_strategy=None):
        """
        Test the model decorated by `dygraph_to_static_output` in static graph mode.

        From the user's perspective, the model is defined in dygraph mode but trained in static graph mode.
        """
        with fluid.dygraph.guard(place):
            np.random.seed(SEED)
            paddle.seed(SEED)
            paddle.framework.random._manual_program_seed(SEED)

            train_reader = paddle.batch(
                reader_decorator(paddle.dataset.flowers.train(use_xmap=False)),
                batch_size=batch_size,
                drop_last=True,
            )
            data_loader = fluid.io.DataLoader.from_generator(
                capacity=5, iterable=True
            )
            data_loader.set_sample_list_generator(train_reader)

            resnet = ResNet()
            if to_static:
                resnet = paddle.jit.to_static(
                    resnet, build_strategy=build_strategy
                )
            optimizer = optimizer_setting(parameter_list=resnet.parameters())

            for epoch in range(epoch_num):
                total_loss = 0.0
                total_acc1 = 0.0
                total_acc5 = 0.0
                total_sample = 0

                for batch_id, data in enumerate(data_loader()):
                    start_time = time.time()
                    img, label = data

                    pred = resnet(img)
                    loss = paddle.nn.functional.cross_entropy(
                        input=pred,
                        label=label,
                        reduction='none',
                        use_softmax=False,
                    )
                    avg_loss = paddle.mean(x=loss)
                    acc_top1 = paddle.static.accuracy(
                        input=pred, label=label, k=1
                    )
                    acc_top5 = paddle.static.accuracy(
                        input=pred, label=label, k=5
                    )

                    avg_loss.backward()
                    optimizer.minimize(avg_loss)
                    resnet.clear_gradients()

                    total_loss += avg_loss
                    total_acc1 += acc_top1
                    total_acc5 += acc_top5
                    total_sample += 1

                    end_time = time.time()
                    if batch_id % 2 == 0:
                        print(
                            "epoch %d | batch step %d, loss %0.3f, acc1 %0.3f, acc5 %0.3f, time %f"
                            % (
                                epoch,
                                batch_id,
                                total_loss.numpy() / total_sample,
                                total_acc1.numpy() / total_sample,
                                total_acc5.numpy() / total_sample,
                                end_time - start_time,
                            )
                        )
                    if batch_id == 10:
                        if to_static:
                            paddle.jit.save(resnet, self.model_save_prefix)
                        else:
                            paddle.save(
                                resnet.state_dict(),
                                self.dy_state_dict_save_path + '.pdparams',
                            )
                        # avoid the dataloader throwing an abort signal
                        data_loader._reset()
                        break

        return total_loss.numpy()

    def predict_dygraph(self, data):
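        # Run inference in pure dygraph mode (dy2static disabled) from the saved state dict.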
        paddle.jit.enable_to_static(False)
        with fluid.dygraph.guard(place):
            resnet = ResNet()

            model_dict = paddle.load(self.dy_state_dict_save_path + '.pdparams')
            resnet.set_dict(model_dict)
            resnet.eval()

            pred_res = resnet(fluid.dygraph.to_variable(data))

            return pred_res.numpy()

    def predict_static(self, data):
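        # Load the jit-saved inference model and run it with a static-graph Executor.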
        paddle.enable_static()
        exe = fluid.Executor(place)
        [
            inference_program,
            feed_target_names,
            fetch_targets,
        ] = fluid.io.load_inference_model(
            self.model_save_dir,
            executor=exe,
            model_filename=self.model_filename,
            params_filename=self.params_filename,
        )

        pred_res = exe.run(
            inference_program,
            feed={feed_target_names[0]: data},
            fetch_list=fetch_targets,
        )

        return pred_res[0]

    def predict_dygraph_jit(self, data):
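        # Reload the jit-saved model with paddle.jit.load and run it eagerly.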
        with fluid.dygraph.guard(place):
            resnet = paddle.jit.load(self.model_save_prefix)
            resnet.eval()

            pred_res = resnet(data)

            return pred_res.numpy()

    def predict_analysis_inference(self, data):
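        # Run the saved inference model through the native predictor wrapped by PredictorTools.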
        output = PredictorTools(
            self.model_save_dir,
            self.model_filename,
            self.params_filename,
            [data],
        )
        (out,) = output()
        return out


class TestResnet(unittest.TestCase):
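    """Checks that dygraph and dygraph-to-static training and inference produce matching results."""
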
    def setUp(self):
        self.resnet_helper = ResNetHelper()

    def train(self, to_static):
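        # Toggle dygraph-to-static globally, then reuse the helper's training loop.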
        paddle.jit.enable_to_static(to_static)
        return self.resnet_helper.train(to_static)

    def verify_predict(self):
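        # Feed the same random image through every inference path and compare against the static result.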
        image = np.random.random([1, 3, 224, 224]).astype('float32')
        dy_pre = self.resnet_helper.predict_dygraph(image)
        st_pre = self.resnet_helper.predict_static(image)
        dy_jit_pre = self.resnet_helper.predict_dygraph_jit(image)
        predictor_pre = self.resnet_helper.predict_analysis_inference(image)
        np.testing.assert_allclose(
            dy_pre,
            st_pre,
            rtol=1e-05,
            err_msg=f'dy_pre:\n {dy_pre}\n, st_pre: \n{st_pre}.',
        )
        np.testing.assert_allclose(
            dy_jit_pre,
            st_pre,
            rtol=1e-05,
            err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format(
                dy_jit_pre, st_pre
            ),
        )
        np.testing.assert_allclose(
            predictor_pre,
            st_pre,
            rtol=1e-05,
            err_msg='predictor_pre:\n {}\n, st_pre: \n{}.'.format(
                predictor_pre, st_pre
            ),
        )

    def test_resnet(self):
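        # Losses with and without to_static should match; then all inference paths are cross-checked.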
        static_loss = self.train(to_static=True)
        dygraph_loss = self.train(to_static=False)
        np.testing.assert_allclose(
            static_loss,
            dygraph_loss,
            rtol=1e-05,
            err_msg='static_loss: {} \n dygraph_loss: {}'.format(
                static_loss, dygraph_loss
            ),
        )
        self.verify_predict()

    def test_resnet_composite_backward(self):
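        # Compare to_static training with composite (prim) backward ops enabled vs. disabled.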
        core._set_prim_backward_enabled(True)
        static_loss = self.train(to_static=True)
        core._set_prim_backward_enabled(False)
        dygraph_loss = self.train(to_static=True)
        np.testing.assert_allclose(
            static_loss,
            dygraph_loss,
            rtol=1e-05,
            err_msg='static_loss: {} \n dygraph_loss: {}'.format(
                static_loss, dygraph_loss
            ),
        )

    def test_resnet_composite_forward_backward(self):
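        # Compare to_static training with composite forward + backward ops enabled vs. disabled (looser tolerance).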
        core._set_prim_all_enabled(True)
        static_loss = self.train(to_static=True)
        core._set_prim_all_enabled(False)
        dygraph_loss = self.train(to_static=True)
        np.testing.assert_allclose(
            static_loss,
            dygraph_loss,
            rtol=1e-02,
            err_msg='static_loss: {} \n dygraph_loss: {}'.format(
                static_loss, dygraph_loss
            ),
        )

    def test_in_static_mode_mkldnn(self):
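        # Ensure to_static training also runs when the MKL-DNN (oneDNN) flag is switched on.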
        fluid.set_flags({'FLAGS_use_mkldnn': True})
        try:
            if paddle.fluid.core.is_compiled_with_mkldnn():
                self.resnet_helper.train(to_static=True)
        finally:
            fluid.set_flags({'FLAGS_use_mkldnn': False})


if __name__ == '__main__':
    unittest.main()