# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import paddle
import paddle.fluid.layers as layers
from paddle.fluid.executor import Executor
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.backward import append_backward
import numpy

paddle.enable_static()


class TestWhileOp(unittest.TestCase):

    def simple_net(self):
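        # Build a program with two nested While loops: the three inputs are
        # staged into data_array, and the loops accumulate their running
        # element-wise sum into mem_array.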
        d0 = layers.data("d0",
                         shape=[10],
                         append_batch_size=False,
                         dtype='float32')
        d1 = layers.data("d1",
                         shape=[10],
                         append_batch_size=False,
                         dtype='float32')
        d2 = layers.data("d2",
                         shape=[10],
                         append_batch_size=False,
                         dtype='float32')
        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = True
        init = layers.zeros(shape=[10], dtype='float32')
        mem_array = layers.array_write(x=init, i=i)
        data_array = layers.array_write(x=d0, i=i)
        i = layers.increment(i)
        layers.array_write(d1, i, array=data_array)
        i = layers.increment(i)
        layers.array_write(d2, i, array=data_array)
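        # reset the loop counter; the outer While runs while i < array_len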
        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = True
        array_len = layers.fill_constant(shape=[1], dtype='int64', value=1)
        array_len.stop_gradient = True
        cond = layers.less_than(x=i, y=array_len)
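        # a second counter and condition drive the nested inner While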
        j = layers.fill_constant(shape=[1], dtype='int64', value=1)
        j.stop_gradient = True
        array_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3)
        array_len2.stop_gradient = True
        cond2 = layers.less_than(x=j, y=array_len2)
        while_op = layers.While(cond=cond)
        while_op2 = layers.While(cond=cond2)
        with while_op.block():
            d = layers.array_read(array=data_array, i=i)
            prev = layers.array_read(array=mem_array, i=i)
            result = layers.sums(input=[d, prev])

            i = layers.increment(x=i, in_place=True)
            layers.array_write(result, i=i, array=mem_array)
            layers.less_than(x=i, y=array_len, cond=cond)

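            # the inner While executes inside the outer loop's body, continuing
            # to accumulate sums into mem_array until j reaches array_len2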
            with while_op2.block():
                d2 = layers.array_read(array=data_array, i=j)
                prev2 = layers.array_read(array=mem_array, i=j)
                result2 = layers.sums(input=[d2, prev2])

                j = layers.increment(x=j, in_place=True)
                layers.array_write(result2, i=j, array=mem_array)
                layers.less_than(x=j, y=array_len2, cond=cond2)
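        # after both loops finish, j == 3 and mem_array[j] holds d0 + d1 + d2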
        sum_result = layers.array_read(array=mem_array, i=j)
        loss = paddle.mean(sum_result)
        return loss, sum_result

    def test_simple_net(self):
        main_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(main_program, startup_program):
            loss, sum_result = self.simple_net()

            append_backward(loss)

            cpu = core.CPUPlace()
            exe = Executor(cpu)
            d = []

            for i in range(3):
                d.append(numpy.random.random(size=[10]).astype('float32'))

            outs = exe.run(feed={
                'd0': d[0],
                'd1': d[1],
                'd2': d[2]
            },
                           fetch_list=[sum_result])
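            # the program computes the element-wise sum of d0, d1 and d2, so
            # the total of the fetched result should match the total of the inputs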
            self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01)

    def test_simple_net_forward(self):
        main_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(main_program, startup_program):
            self.simple_net()
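            # forward-only graph: no append_backward here, just compile it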
            binary = fluid.compiler.CompiledProgram(main_program)

            cpu = core.CPUPlace()
            exe = Executor(cpu)
            d = []

            for i in range(3):
                d.append(numpy.random.random(size=[10]).astype('float32'))

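            # run the compiled forward program twice to check that it can be
            # executed repeatedly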
            for _ in range(2):
                exe.run(binary, feed={'d0': d[0], 'd1': d[1], 'd2': d[2]})

    def test_exceptions(self):
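        # While expects a shape-[1] bool condition tensor: a shape-[2]
        # condition or a float64 condition should raise TypeError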
        i = layers.zeros(shape=[2], dtype='int64')
        array_len = layers.fill_constant(shape=[2], dtype='int64', value=1)
        cond = layers.less_than(x=i, y=array_len)
        with self.assertRaises(TypeError):
            layers.While(cond=cond)
        cond = layers.cast(cond, dtype='float64')
        with self.assertRaises(TypeError):
            layers.While(cond=cond)


class BadInputTest(unittest.TestCase):

    def test_error(self):
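        # fluid.layers.increment expects a Variable; passing a plain Python
        # list should raise a TypeError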
        with fluid.program_guard(fluid.Program()):

            def test_bad_x():
                x = [1, 2, 3]
                fluid.layers.increment(x)

            self.assertRaises(TypeError, test_bad_x)


class TestIgnoreVarNameInWhile(unittest.TestCase):

    def test_ignore_var(self):
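        # shuffle_batch creates new intermediate variables inside the loop
        # body; while_loop should still match the loop variables correctly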

        def cond(i, ten, temp, y):
            return i < ten

        def body_func(i, ten, batch_info, origin_seq):
            print(batch_info)
            batch_info = fluid.contrib.layers.shuffle_batch(batch_info)
            print(batch_info)
            i = i + 1
            return [i, ten, batch_info, origin_seq]

        x = fluid.layers.data(name='x', shape=[-1, 1, 4])
        y = fluid.layers.data(name='y', shape=[-1, 1, 1])
        temp = layers.concat(input=[x, y], axis=-1)
        i = layers.fill_constant(shape=[1], value=0, dtype='int32')
        num = layers.fill_constant(shape=[1], value=5, dtype='int32')

        i, ten, shuffle_temp, y = layers.while_loop(cond, body_func,
                                                    [i, num, temp, y])

        output = shuffle_temp

        exe = fluid.Executor(fluid.CPUPlace())
        exe.run(fluid.default_startup_program())

        input_x = numpy.array([[1, 2, 3, 4], [4, 5, 6, 7], [7, 8, 9, 10]])
        input_x = input_x.reshape(3, 1, 4)
        input_y = numpy.array([[10], [12], [33]])
        input_y = input_y.reshape(3, 1, 1)

        res, = exe.run(fluid.default_main_program(),
                       feed={
                           'x': input_x,
                           'y': input_y
                       },
                       fetch_list=[output])

        self.assertListEqual(list(res.shape), [3, 1, 5])


class TestOutputsMustExistsInputs(unittest.TestCase):

    def test_outputs_exists_inputs(self):
        """
        We guarantee that every output tensor of the while op also appears
        among its input tensors, so outputs and inputs can be matched with
        each other; the number of inputs may exceed the number of outputs.
        This is required by paddle2onnx.
        """
        main_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(main_program, startup_program):
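            # sum the elements of x with a while_loop; x is loop-carried even
            # though it is never modified, so it must appear in both the
            # inputs and outputs of the generated while op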

            def func(x):
                s = paddle.zeros([1])
                i = paddle.ones([1])
                max_len = paddle.shape(x)[0]

                def cond(i, s, x):
                    return i < max_len

                def body(i, s, x):
                    iter = x[i]
                    s += iter
                    i += 1
                    return i, s, x

                [i, s, x] = paddle.static.nn.while_loop(cond, body, [i, s, x])
                return s

            paddle.enable_static()
            x = paddle.static.data(shape=[-1], name='x')
            func(x)
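        # every output of the generated while op, except the condition
        # variable, must also appear among its inputs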
        for op in main_program.block(0).ops:
            if op.type == "while":
                for out_name in op.output("Out"):
                    if out_name in op.input("Condition"):
                        continue
                    self.assertTrue(
                        out_name in op.input("X"),
                        "In while op, the variable in output(`Out`) must exists in inputs(`X`), but the variable with name `{}` not meet the precondition."
                        .format(out_name))


if __name__ == '__main__':
    unittest.main()