import unittest
import paddle.v2.fluid.core as core
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.backward import append_backward_ops
from paddle.v2.fluid.framework import g_main_program
import numpy


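# Checks that layers.array_write/array_read round-trip tensors through a
# tensor array, and that gradients flow back through the array ops.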
class TestArrayReadWrite(unittest.TestCase):
    def test_read_write(self):
        x = [
            layers.data(name='x0', shape=[100]),
            layers.data(name='x1', shape=[100]),
            layers.data(name='x2', shape=[100]),
        ]

        for each_x in x:
            each_x.stop_gradient = False

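        # Write x[0], x[1] and x[2] into a tensor array at indices 0, 1 and 2,
        # incrementing the index i between writes.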
        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = False
        arr = layers.array_write(x=x[0], i=i)
        i = layers.increment(x=i)
        arr = layers.array_write(x=x[1], i=i, array=arr)
        i = layers.increment(x=i)
        arr = layers.array_write(x=x[2], i=i, array=arr)

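        # Reset the index to 0 and read the three tensors back from the array.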
        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = False
        a0 = layers.array_read(array=arr, i=i)
        i = layers.increment(x=i)
        a1 = layers.array_read(array=arr, i=i)
        i = layers.increment(x=i)
        a2 = layers.array_read(array=arr, i=i)

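        # Compare the array contents against the original inputs through the
        # sums of their means; the two sums must match if the reads
        # round-trip correctly.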
        mean_a0 = layers.mean(x=a0)
        mean_a1 = layers.mean(x=a1)
        mean_a2 = layers.mean(x=a2)

        a_sum = layers.sums(input=[mean_a0, mean_a1, mean_a2])

        mean_x0 = layers.mean(x=x[0])
        mean_x1 = layers.mean(x=x[1])
        mean_x2 = layers.mean(x=x[2])

        x_sum = layers.sums(input=[mean_x0, mean_x1, mean_x2])

        scope = core.Scope()
        cpu = core.CPUPlace()

        exe = Executor(cpu)

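        # Feed the same random tensor to all three inputs, so a_sum and x_sum
        # are computed from identical data.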
        tensor = numpy.random.random(size=(100, 100)).astype('float32')

        outs = exe.run(feed={'x0': tensor,
                             'x1': tensor,
                             'x2': tensor},
                       fetch_list=[a_sum, x_sum],
                       scope=scope)
        self.assertEqual(outs[0], outs[1])

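        # Build a scalar loss (the average of the six means) and append the
        # backward ops so that input gradients are generated.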
        total_sum = layers.sums(input=[a_sum, x_sum])
        total_sum_scaled = layers.scale(x=total_sum, scale=1 / 6.0)

        append_backward_ops(total_sum_scaled)

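        # Look up each input's gradient variable by its "@GRAD" name suffix,
        # then run forward/backward and sum every gradient entry.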
        g_vars = map(g_main_program.global_block().var,
                     [each_x.name + "@GRAD" for each_x in x])
        g_out = [
            item.sum()
            for item in exe.run(
                feed={'x0': tensor,
                      'x1': tensor,
                      'x2': tensor},
                fetch_list=g_vars)
        ]
        g_out_sum = numpy.array(g_out).sum()

        # Since the gradient of the final loss is 1 and the network is linear
        # (mean, sums and scale ops only), the input gradients should also
        # sum to 1.
        self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)


if __name__ == '__main__':
    unittest.main()