# test_inplace_abn_op.py
#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core


class TestInplaceANBOpTraining(unittest.TestCase):
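    """Build and run a small static-graph batch_norm program across
    devices, data layouts and activations."""
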
    def setUp(self):
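        # use float32 on ROCm builds, float64 everywhere else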
        self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
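        # small 4-D input: batch 4, 5 channels, 7 x 9 spatial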
        self.N = 4
        self.C = 5
        self.H = 7
        self.W = 9
        self.dshape = [self.N, self.C, self.H, self.W]

    def build_program(
        self,
        place,
        layout,
        seed,
        only_forward=False,
        activation="identity",
        alpha=1.0,
        use_cuda=False,
        inplace=False,
    ):
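        """Build a static-graph program: batch_norm (optionally in-place)
        followed by an optional activation and a sigmoid-sum output, with
        an SGD backward pass unless only_forward is set."""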
        main = fluid.Program()
        startup = fluid.Program()
        main.random_seed = seed
        startup.random_seed = seed
        with fluid.unique_name.guard():
            with fluid.program_guard(main, startup):
                data = paddle.static.data(
                    name='input',
                    shape=self.dshape,
                    dtype=self.dtype,
                )
                data.stop_gradient = False
                data.desc.set_need_check_feed(False)

                bn = paddle.static.nn.batch_norm(
                    data,
                    param_attr=fluid.ParamAttr(name='bn_scale'),
                    bias_attr=fluid.ParamAttr(name='bn_bias'),
                    moving_mean_name='bn_moving_mean',
                    moving_variance_name='bn_moving_variance',
                    data_layout=layout,
                    is_test=only_forward,
                    in_place=inplace,
                )
                if activation == 'leaky_relu':
                    bn = paddle.nn.functional.leaky_relu(bn, alpha)
                if activation == 'elu':
                    bn = paddle.nn.functional.elu(bn, alpha)

                # NOTE: in in-place mode the input and output of
                # batch_norm may share the same name, so multiply by
                # 1.0 to create a new Variable that can be fetched
                bn = bn * 1.0
                sigmoid = paddle.nn.functional.sigmoid(bn)
                out = paddle.sum(sigmoid)
                if not only_forward:
                    sgd_opt = fluid.optimizer.SGD(learning_rate=0.0)
                    sgd_opt.backward(out)
        return main, startup, [out, bn]

    def test_all_branches(self):
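        # run forward (and backward) once for every device/layout/activation
        # combination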
        seed = 10
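        # make cuDNN kernel selection deterministic so repeated runs match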
        os.environ['FLAGS_cudnn_deterministic'] = "1"
        data = np.random.random(size=self.dshape).astype(self.dtype) * 4.0 - 2
        use_cudas = [False, True] if core.is_compiled_with_cuda() else [False]
        alpha = 0.1
        layouts = ["NCHW", "NHWC"]
        for use_cuda in use_cudas:
            place = core.CUDAPlace(0) if use_cuda else core.CPUPlace()
            for layout in layouts:
                for activation in ['identity', 'leaky_relu']:
                    main, startup, outs = self.build_program(
                        place,
                        layout,
                        seed,
                        only_forward=False,
                        activation=activation,
                        alpha=alpha,
                        use_cuda=use_cuda,
                        inplace=False,
                    )
                    exe = fluid.Executor(place)
                    exe.run(startup)
                    exe.run(program=main, feed={'input': data})


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()