#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.executor import Executor
import paddle.v2.fluid.core as core
from paddle.v2.fluid.backward import append_backward
import numpy


class TestWhileOp(unittest.TestCase):
    def test_simple_forward(self):
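        """Accumulate three 10-element vectors with a While op.

        The program writes d0, d1 and d2 into a tensor array, loops three
        times adding the i-th input to a running sum kept in a second
        array, appends a backward pass, and checks the fetched sum
        against numpy.
        """
        # Fixed-shape inputs; append_batch_size=False keeps shape=[10].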
        d0 = layers.data(
            "d0", shape=[10], append_batch_size=False, dtype='float32')
        d1 = layers.data(
            "d1", shape=[10], append_batch_size=False, dtype='float32')
        d2 = layers.data(
            "d2", shape=[10], append_batch_size=False, dtype='float32')
        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = True
        init = layers.zeros(shape=[10], dtype='float32')
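        # mem_array holds the running sums; slot 0 starts at zero.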
        mem_array = layers.array_write(x=init, i=i)
        data_array = layers.array_write(x=d0, i=i)
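        # Slot 0 of data_array holds d0; d1 and d2 are written next.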

        i = layers.increment(i)
        layers.array_write(d1, i, array=data_array)

        i = layers.increment(i)
        layers.array_write(d2, i, array=data_array)

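        # Reset the counter so the While loop starts reading at slot 0.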
        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = True

        array_len = layers.fill_constant(shape=[1], dtype='int64', value=3)
        array_len.stop_gradient = True
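        # Loop while i < array_len, i.e. exactly three iterations.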
        cond = layers.less_than(x=i, y=array_len)

        while_op = layers.While(cond=cond)
        with while_op.block():
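            # Read the i-th input and the previous sum, then add them.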
            d = layers.array_read(array=data_array, i=i)
            prev = layers.array_read(array=mem_array, i=i)
            result = layers.sums(input=[d, prev])

            i = layers.increment(x=i, in_place=True)
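            # Store the new sum at the advanced index, then refresh cond
            # in place so the While op sees the updated condition.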
            layers.array_write(result, i=i, array=mem_array)
            layers.less_than(x=i, y=array_len, cond=cond)

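        # The loop exits with i == array_len, where the final sum was written.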
        sum_result = layers.array_read(array=mem_array, i=i)
        loss = layers.mean(x=sum_result)

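        # Append gradient ops for the whole program, While block included.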
        append_backward(loss)

        cpu = core.CPUPlace()
        exe = Executor(cpu)
        d = []

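        # Three random input vectors, one per loop iteration.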
        for _ in range(3):
            d.append(numpy.random.random(size=[10]).astype('float32'))

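        # Feed the three vectors and fetch the accumulated sum.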
        outs = exe.run(feed={'d0': d[0],
                             'd1': d[1],
                             'd2': d[2]},
                       fetch_list=[sum_result])
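        # The fetched result must match numpy's sum over all three inputs.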
        self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01)


if __name__ == '__main__':
    unittest.main()