#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import paddle
import paddle.fluid.layers as layers
from paddle.fluid.framework import Program, program_guard
from paddle.fluid.executor import Executor
from paddle.fluid.optimizer import MomentumOptimizer
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.layers.control_flow import split_lod_tensor
from paddle.fluid.layers.control_flow import merge_lod_tensor
from paddle.fluid.layers.control_flow import ConditionalBlock

import unittest
import numpy as np

paddle.enable_static()


class TestMNISTIfElseOp(unittest.TestCase):
    # FIXME: https://github.com/PaddlePaddle/Paddle/issues/12245#issuecomment-406462379
    def not_test_raw_api(self):
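        # Build the branching by hand: split the batch with split_lod_tensor on
        # the `label < 5` mask, run each part through its own ConditionalBlock,
        # and merge the branch outputs back with merge_lod_tensor.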
        prog = Program()
        startup_prog = Program()
        with program_guard(prog, startup_prog):
            image = layers.data(name='x', shape=[784], dtype='float32')

            label = layers.data(name='y', shape=[1], dtype='int64')

            limit = layers.fill_constant(shape=[1], dtype='int64', value=5)
            cond = layers.less_than(x=label, y=limit)
            true_image, false_image = split_lod_tensor(input=image, mask=cond)

            true_out = layers.create_tensor(dtype='float32')
            true_cond = ConditionalBlock([cond])

            with true_cond.block():
                hidden = layers.fc(input=true_image, size=100, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                layers.assign(input=prob, output=true_out)

            false_out = layers.create_tensor(dtype='float32')
            false_cond = ConditionalBlock([cond])

            with false_cond.block():
                hidden = layers.fc(input=false_image, size=200, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                layers.assign(input=prob, output=false_out)

            prob = merge_lod_tensor(in_true=true_out,
                                    in_false=false_out,
                                    mask=cond,
                                    x=image)
            loss = layers.cross_entropy(input=prob, label=label)
            avg_loss = paddle.mean(loss)

            optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
            optimizer.minimize(avg_loss, startup_prog)

        train_reader = paddle.batch(paddle.reader.shuffle(
            paddle.dataset.mnist.train(), buf_size=8192),
                                    batch_size=10)

        place = core.CPUPlace()
        exe = Executor(place)

        exe.run(startup_prog)
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                x_data = np.array([x[0] for x in data]).astype("float32")
                y_data = np.array([x[1] for x in data]).astype("int64")
                y_data = np.expand_dims(y_data, axis=1)

                outs = exe.run(prog,
                               feed={
                                   'x': x_data,
                                   'y': y_data
                               },
                               fetch_list=[avg_loss])
                print(outs[0])
                if outs[0] < 1.0:
                    return
        self.assertFalse(True)

    # FIXME: https://github.com/PaddlePaddle/Paddle/issues/12245#issuecomment-406462379
    def not_test_ifelse(self):
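        # Same MNIST branch network as above, but built with the higher-level
        # layers.IfElse API instead of raw split/merge ops.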
        prog = Program()
        startup_prog = Program()
        with program_guard(prog, startup_prog):
            image = layers.data(name='x', shape=[784], dtype='float32')

            label = layers.data(name='y', shape=[1], dtype='int64')

            limit = layers.fill_constant(shape=[1], dtype='int64', value=5)
            cond = layers.less_than(x=label, y=limit)
            ie = layers.IfElse(cond)

            with ie.true_block():
                true_image = ie.input(image)
                hidden = layers.fc(input=true_image, size=100, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                ie.output(prob)

            with ie.false_block():
                false_image = ie.input(image)
                hidden = layers.fc(input=false_image, size=200, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                ie.output(prob)

            prob = ie()
            loss = layers.cross_entropy(input=prob[0], label=label)
            avg_loss = paddle.mean(loss)

            optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
            optimizer.minimize(avg_loss, startup_prog)
        train_reader = paddle.batch(paddle.reader.shuffle(
            paddle.dataset.mnist.train(), buf_size=8192),
                                    batch_size=200)

        place = core.CPUPlace()
        exe = Executor(place)

        exe.run(startup_prog)
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                x_data = np.array([x[0] for x in data]).astype("float32")
                y_data = np.array([x[1] for x in data]).astype("int64")
                y_data = y_data.reshape((y_data.shape[0], 1))

                outs = exe.run(prog,
                               feed={
                                   'x': x_data,
                                   'y': y_data
                               },
                               fetch_list=[avg_loss])
                print(outs[0])
                if outs[0] < 1.0:
                    return
        self.assertFalse(True)


class TestIfElse(unittest.TestCase):

    def set_test_case(self):
        # condition is: self.data < self.cond_value
        self.cond_value = 0.5
        self.data = np.random.rand(25, 1).astype(np.float32)

    def numpy_cal(self):
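        # NumPy reference: exp() for elements below cond_value, tanh() for the
        # rest, summed into a single scalar.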
        s1 = self.data[np.where(self.data < self.cond_value)]
        res = np.sum(np.exp(s1))
        s2 = self.data[np.where(self.data >= self.cond_value)]
        res += np.sum(np.tanh(s2))
        return res

    def compare_ifelse_op_and_numpy(self, place):
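        # Build an IfElse program (exp on the true branch, tanh on the false
        # branch) and check its summed output against the NumPy reference.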
        self.set_test_case()

        prog = Program()
        startup_prog = Program()
        with program_guard(prog, startup_prog):
            src = layers.data(name='data', shape=[1], dtype='float32')
            cond = layers.fill_constant([1],
                                        dtype='float32',
                                        value=self.cond_value)
            ifcond = layers.less_than(x=src, y=cond)
            ie = layers.IfElse(ifcond)
            with ie.true_block():
                true_target = ie.input(src)
                true_target = fluid.layers.exp(true_target)
                ie.output(true_target)

            with ie.false_block():
                false_target = ie.input(src)
                false_target = fluid.layers.tanh(false_target)
                ie.output(false_target)
            if_out = ie()
            out = layers.reduce_sum(if_out[0])

            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            fetch_list = [out]
            o1, = exe.run(fluid.default_main_program(),
                          feed={'data': self.data},
                          fetch_list=[out])
            o2 = self.numpy_cal()

            self.assertTrue(
                np.allclose(o1, o2, atol=1e-8),
                "IfElse result : " + str(o1) + "\n Numpy result :" + str(o2))

    def test_cpu(self):
        self.compare_ifelse_op_and_numpy(fluid.CPUPlace())

    def test_cuda(self):
        if not core.is_compiled_with_cuda():
            return
        self.compare_ifelse_op_and_numpy(fluid.CUDAPlace(0))


class TestIfElseTrueBranch(TestIfElse):

    def set_test_case(self):
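        # np.random.rand draws from [0, 1), so every element is below 10 and
        # the whole batch takes the true branch.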
        # condition is: self.data < self.cond_value
        self.cond_value = 10.
        self.data = np.random.rand(25, 1).astype(np.float32)


class TestIfElseFalseBranch(TestIfElse):

    def set_test_case(self):
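        # np.random.rand draws from [0, 1), so no element is below -10 and
        # the whole batch takes the false branch.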
        # condition is: self.data < self.cond_value
        self.cond_value = -10.
        self.data = np.random.rand(25, 1).astype(np.float32)


class TestIfElseError(unittest.TestCase):

    def test_input_type_error(self):
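        # IfElse should raise TypeError when its condition, constructor
        # arguments, or outputs have the wrong type.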
        main_program = Program()
        startup_program = Program()
        with program_guard(main_program, startup_program):
            src = layers.data(name='data', shape=[1], dtype='float32')
            const_value = layers.fill_constant([1],
                                               dtype='float32',
                                               value=123.0)
            ifcond = layers.less_than(x=src, y=const_value)
            with self.assertRaises(TypeError):
                ie = layers.IfElse(set())
            with self.assertRaises(TypeError):
                ie = layers.IfElse(ifcond, set())

            with self.assertRaises(TypeError):
                ie = layers.IfElse(ifcond)
                with ie.true_block():
                    true_target = ie.input(src)
                    true_target = fluid.layers.exp(true_target)
                    ie.output([])


if __name__ == '__main__':
    unittest.main()