#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.fluid.layers as layers
from paddle.fluid.framework import Program, program_guard
from paddle.fluid.executor import Executor
from paddle.fluid.optimizer import MomentumOptimizer
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.layers.control_flow import split_lod_tensor
from paddle.fluid.layers.control_flow import merge_lod_tensor
from paddle.fluid.layers.control_flow import ConditionalBlock

import unittest
import numpy as np

paddle.enable_static()


class TestMNISTIfElseOp(unittest.TestCase):
    # FIXME: https://github.com/PaddlePaddle/Paddle/issues/12245#issuecomment-406462379
    def not_test_raw_api(self):
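        # Builds the if/else control flow by hand: split_lod_tensor routes each
        # sample to the true or false ConditionalBlock depending on whether its
        # label is less than 5, and merge_lod_tensor recombines the branch
        # outputs before the cross-entropy loss. The test passes once the MNIST
        # loss drops below 1.0 within PASS_NUM epochs.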
        prog = Program()
        startup_prog = Program()
        with program_guard(prog, startup_prog):
            image = layers.data(name='x', shape=[784], dtype='float32')

            label = layers.data(name='y', shape=[1], dtype='int64')

            limit = layers.fill_constant(shape=[1], dtype='int64', value=5)
            cond = layers.less_than(x=label, y=limit)
            true_image, false_image = split_lod_tensor(input=image, mask=cond)

            true_out = layers.create_tensor(dtype='float32')
            true_cond = ConditionalBlock([cond])

            with true_cond.block():
                hidden = layers.fc(input=true_image, size=100, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                layers.assign(input=prob, output=true_out)

            false_out = layers.create_tensor(dtype='float32')
            false_cond = ConditionalBlock([cond])

            with false_cond.block():
                hidden = layers.fc(input=false_image, size=200, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                layers.assign(input=prob, output=false_out)

            prob = merge_lod_tensor(in_true=true_out,
                                    in_false=false_out,
                                    mask=cond,
                                    x=image)
            loss = layers.cross_entropy(input=prob, label=label)
            avg_loss = paddle.mean(loss)

            optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
            optimizer.minimize(avg_loss, startup_prog)

        train_reader = paddle.batch(paddle.reader.shuffle(
            paddle.dataset.mnist.train(), buf_size=8192),
                                    batch_size=10)

        place = core.CPUPlace()
        exe = Executor(place)

        exe.run(startup_prog)
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                x_data = np.array([x[0] for x in data]).astype("float32")
                y_data = np.array([x[1] for x in data]).astype("int64")
                y_data = np.expand_dims(y_data, axis=1)

                outs = exe.run(prog,
                               feed={
                                   'x': x_data,
                                   'y': y_data
                               },
                               fetch_list=[avg_loss])
                print(outs[0])
                if outs[0] < 1.0:
                    return
        self.assertFalse(True)

    # FIXME: https://github.com/PaddlePaddle/Paddle/issues/12245#issuecomment-406462379
    def not_test_ifelse(self):
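        # Same MNIST training as not_test_raw_api, but expressed with the
        # higher-level layers.IfElse API instead of manual split/merge and
        # ConditionalBlock calls.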
        prog = Program()
        startup_prog = Program()
        with program_guard(prog, startup_prog):
            image = layers.data(name='x', shape=[784], dtype='float32')

            label = layers.data(name='y', shape=[1], dtype='int64')

            limit = layers.fill_constant(shape=[1], dtype='int64', value=5)
            cond = layers.less_than(x=label, y=limit)
            ie = layers.IfElse(cond)

            with ie.true_block():
                true_image = ie.input(image)
                hidden = layers.fc(input=true_image, size=100, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                ie.output(prob)

            with ie.false_block():
                false_image = ie.input(image)
                hidden = layers.fc(input=false_image, size=200, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                ie.output(prob)

            prob = ie()
            loss = layers.cross_entropy(input=prob[0], label=label)
            avg_loss = paddle.mean(loss)

            optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
            optimizer.minimize(avg_loss, startup_prog)
        train_reader = paddle.batch(paddle.reader.shuffle(
            paddle.dataset.mnist.train(), buf_size=8192),
                                    batch_size=200)

        place = core.CPUPlace()
        exe = Executor(place)

        exe.run(startup_prog)
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                x_data = np.array([x[0] for x in data]).astype("float32")
                y_data = np.array([x[1] for x in data]).astype("int64")
                y_data = y_data.reshape((y_data.shape[0], 1))

                outs = exe.run(prog,
                               feed={
                                   'x': x_data,
                                   'y': y_data
                               },
                               fetch_list=[avg_loss])
                print(outs[0])
                if outs[0] < 1.0:
                    return
        self.assertFalse(True)


class TestIfElse(unittest.TestCase):
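    # Compares the output of the IfElse op against a NumPy reference:
    # elements below cond_value go through exp() in the true block, the rest
    # go through tanh() in the false block, and the results are summed.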

    def set_test_case(self):
        # condition is: self.data < self.cond_value
        self.cond_value = 0.5
        self.data = np.random.rand(25, 1).astype(np.float32)

    def numpy_cal(self):
        s1 = self.data[np.where(self.data < self.cond_value)]
        res = np.sum(np.exp(s1))
        s2 = self.data[np.where(self.data >= self.cond_value)]
        res += np.sum(np.tanh(s2))
        return res

    def compare_ifelse_op_and_numpy(self, place):
        self.set_test_case()

        prog = Program()
        startup_prog = Program()
        with program_guard(prog, startup_prog):
            src = layers.data(name='data', shape=[1], dtype='float32')
            cond = layers.fill_constant([1],
                                        dtype='float32',
                                        value=self.cond_value)
            ifcond = layers.less_than(x=src, y=cond)
            ie = layers.IfElse(ifcond)
            with ie.true_block():
                true_target = ie.input(src)
                true_target = fluid.layers.exp(true_target)
                ie.output(true_target)

            with ie.false_block():
                false_target = ie.input(src)
                false_target = fluid.layers.tanh(false_target)
                ie.output(false_target)
            if_out = ie()
            out = layers.reduce_sum(if_out[0])

            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            fetch_list = [out]
            o1, = exe.run(fluid.default_main_program(),
                          feed={'data': self.data},
                          fetch_list=[out])
            o2 = self.numpy_cal()

            np.testing.assert_allclose(
                o1,
                o2,
                rtol=1e-05,
                atol=1e-08,
            )

    def test_cpu(self):
        self.compare_ifelse_op_and_numpy(fluid.CPUPlace())

    def test_cuda(self):
        if not core.is_compiled_with_cuda():
            return
        self.compare_ifelse_op_and_numpy(fluid.CUDAPlace(0))


class TestIfElseTrueBranch(TestIfElse):
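    # cond_value is larger than any element of the random [0, 1) data, so
    # every sample takes the true branch.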

    def set_test_case(self):
        # condition is: self.data < self.cond_value
        self.cond_value = 10.
        self.data = np.random.rand(25, 1).astype(np.float32)


class TestIfElseFalseBranch(TestIfElse):
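    # cond_value is smaller than any element of the random [0, 1) data, so
    # every sample takes the false branch.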

    def set_test_case(self):
        # condition is: self.data < self.cond_value
        self.cond_value = -10.
        self.data = np.random.rand(25, 1).astype(np.float32)


class TestIfElseError(unittest.TestCase):
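    # Checks that IfElse raises TypeError for invalid constructor arguments
    # and for a non-Variable passed to ie.output().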

    def test_input_type_error(self):
        main_program = Program()
        startup_program = Program()
        with program_guard(main_program, startup_program):
            src = layers.data(name='data', shape=[1], dtype='float32')
            const_value = layers.fill_constant([1],
                                               dtype='float32',
                                               value=123.0)
            ifcond = layers.less_than(x=src, y=const_value)
            with self.assertRaises(TypeError):
                ie = layers.IfElse(set())
            with self.assertRaises(TypeError):
                ie = layers.IfElse(ifcond, set())

            with self.assertRaises(TypeError):
                ie = layers.IfElse(ifcond)
                with ie.true_block():
                    true_target = ie.input(src)
                    true_target = fluid.layers.exp(true_target)
                    ie.output([])


if __name__ == '__main__':
    unittest.main()