#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import os
import paddle.fluid.core as core
import paddle.fluid as fluid
import paddle


class TestInplaceANBOpTraining(unittest.TestCase):
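    """Smoke test for the in-place ABN (activated batch norm) op: builds a
    small batch_norm + activation network and checks that one forward (and
    backward) iteration executes on each place/layout/activation combination."""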
    def setUp(self):
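        # ROCm builds run this test in float32; other builds use float64.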
        self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
        self.N = 4
        self.C = 5
        self.H = 7
        self.W = 9
        self.dshape = [self.N, self.C, self.H, self.W]

    def build_program(
        self,
        place,
        layout,
        seed,
        only_forward=False,
        activation="identity",
        alpha=1.0,
        use_cuda=False,
        inplace=False,
    ):
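        """Build a main/startup program pair: batch_norm (optionally in-place)
        followed by an activation, a sigmoid, and a sum reduction as the loss.

        Returns (main_program, startup_program, [loss, post-activation tensor]).
        """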
        main = fluid.Program()
        startup = fluid.Program()
        main.random_seed = seed
        startup.random_seed = seed
        with fluid.unique_name.guard():
            with fluid.program_guard(main, startup):
                data = fluid.layers.data(
                    name='input',
                    shape=self.dshape,
                    dtype=self.dtype,
                    append_batch_size=False,
                    stop_gradient=False,
                )

                bn = fluid.layers.batch_norm(
                    data,
                    param_attr=fluid.ParamAttr(name='bn_scale'),
                    bias_attr=fluid.ParamAttr(name='bn_bias'),
                    moving_mean_name='bn_moving_mean',
                    moving_variance_name='bn_moving_variance',
                    data_layout=layout,
                    is_test=only_forward,
                    in_place=inplace,
                )
                if activation == 'leaky_relu':
                    bn = paddle.nn.functional.leaky_relu(bn, alpha)
                if activation == 'elu':
                    bn = paddle.nn.functional.elu(bn, alpha)

                # NOTE: in inplace mode the input and output of bn
                # may share the same name, so multiply by 1.0 to
                # generate a new Variable for fetch
                bn = bn * 1.0
                sigmoid = paddle.nn.functional.sigmoid(bn)
                out = paddle.sum(sigmoid)
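                # Exercise the backward pass: backward() only appends gradient
                # ops without applying updates, and the zero learning rate
                # would leave parameters unchanged in any case.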
                if not only_forward:
                    sgd_opt = fluid.optimizer.SGD(learning_rate=0.0)
                    sgd_opt.backward(out)
        return main, startup, [out, bn]

    def test_all_branches(self):
        seed = 10
        os.environ['FLAGS_cudnn_deterministic'] = "1"
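        # Inputs drawn uniformly from [-2, 2).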
        data = np.random.random(size=self.dshape).astype(self.dtype) * 4.0 - 2
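        # Sweep device places, data layouts, and activation types.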
        use_cudas = [False, True] if core.is_compiled_with_cuda() else [False]
        alpha = 0.1
        layouts = ["NCHW", "NHWC"]
        for use_cuda in use_cudas:
            place = core.CUDAPlace(0) if use_cuda else core.CPUPlace()
            for layout in layouts:
                for activation in ['identity', 'leaky_relu']:
                    main, startup, outs = self.build_program(
                        place,
                        layout,
                        seed,
                        False,
                        activation,
                        alpha,
                        use_cuda,
                        False,
                    )
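                    # The test passes if one iteration runs end to end;
                    # no numeric outputs are checked here.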
                    exe = fluid.Executor(place)
                    exe.run(startup)
                    exe.run(program=main, feed={'input': data})


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()