#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.layers as layers
from paddle.fluid.executor import Executor
from paddle.fluid.backward import append_backward
from paddle.fluid.framework import default_main_program
import numpy


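# Shared by both test modes below: write three tensors into a tensor array,
# read them back, and return the summed means of the reads (a_sum) and of
# the original inputs (x_sum). The two results should be identical.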
def _test_read_write(x):
    i = layers.zeros(shape=[1], dtype='int64')
    i.stop_gradient = False
    # Write x[0], x[1], x[2] to positions 0, 1, 2 of the array,
    # advancing the index with increment after each write.
    arr = layers.array_write(x=x[0], i=i)
    i = layers.increment(x=i)
    arr = layers.array_write(x=x[1], i=i, array=arr)
    i = layers.increment(x=i)
    arr = layers.array_write(x=x[2], i=i, array=arr)

    # Read the three tensors back from the same positions.
    i = layers.zeros(shape=[1], dtype='int64')
    i.stop_gradient = False
    a0 = layers.array_read(array=arr, i=i)
    i = layers.increment(x=i)
    a1 = layers.array_read(array=arr, i=i)
    i = layers.increment(x=i)
    a2 = layers.array_read(array=arr, i=i)

    # Sum of the means of the values read back from the array ...
    mean_a0 = layers.mean(a0)
    mean_a1 = layers.mean(a1)
    mean_a2 = layers.mean(a2)

    a_sum = layers.sums(input=[mean_a0, mean_a1, mean_a2])

    # ... and the same quantity computed directly from the inputs.
    mean_x0 = layers.mean(x[0])
    mean_x1 = layers.mean(x[1])
    mean_x2 = layers.mean(x[2])

    x_sum = layers.sums(input=[mean_x0, mean_x1, mean_x2])

    return a_sum, x_sum


class TestArrayReadWrite(unittest.TestCase):
    def test_read_write(self):
        x = [
            layers.data(name='x0', shape=[100]),
            layers.data(name='x1', shape=[100]),
            layers.data(name='x2', shape=[100]),
        ]
        for each_x in x:
            each_x.stop_gradient = False

        tensor = numpy.random.random(size=(100, 100)).astype('float32')
        a_sum, x_sum = _test_read_write(x)

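        # Run the static-graph program on CPU; reading back from the array
        # must reproduce the inputs exactly, so the two sums are equal.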
        place = core.CPUPlace()
        exe = Executor(place)
        outs = exe.run(feed={'x0': tensor,
                             'x1': tensor,
                             'x2': tensor},
                       fetch_list=[a_sum, x_sum],
                       scope=core.Scope())
        self.assertEqual(outs[0], outs[1])

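        # a_sum and x_sum each accumulate three mean values, so scaling
        # their total by 1/6 averages the six terms.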
        total_sum = layers.sums(input=[a_sum, x_sum])
        total_sum_scaled = layers.scale(x=total_sum, scale=1 / 6.0)

        append_backward(total_sum_scaled)

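        # Fetch the gradient variables that append_backward created for
        # each of the three inputs.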
        g_vars = list(
            map(default_main_program().global_block().var,
                [each_x.name + "@GRAD" for each_x in x]))
        g_out = [
            item.sum()
            for item in exe.run(
                feed={'x0': tensor,
                      'x1': tensor,
                      'x2': tensor},
                fetch_list=g_vars)
        ]
        g_out_sum = numpy.array(g_out).sum()

        # The whole graph is linear (array read/write, mean, sums, scale),
        # and each input reaches the output twice: once through a_sum and
        # once through x_sum. With an upstream gradient of 1 on the scaled
        # sum, the gradients flowing back to the inputs must also sum to 1.
        self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)

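        # Repeat the same computation in imperative (dygraph) mode and
        # check that the sums and gradients agree.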
        with fluid.dygraph.guard(place):
            tensor1 = fluid.dygraph.to_variable(tensor)
            tensor2 = fluid.dygraph.to_variable(tensor)
            tensor3 = fluid.dygraph.to_variable(tensor)
            x_dygraph = [tensor1, tensor2, tensor3]
            for each_x in x_dygraph:
                each_x.stop_gradient = False
            a_sum_dygraph, x_sum_dygraph = _test_read_write(x_dygraph)
            self.assertEqual(a_sum_dygraph, x_sum_dygraph)

            total_sum_dygraph = layers.sums(
                input=[a_sum_dygraph, x_sum_dygraph])
            total_sum_scaled_dygraph = layers.scale(
                x=total_sum_dygraph, scale=1 / 6.0)
            total_sum_scaled_dygraph.backward()
            g_out_dygraph = [
                item._grad_ivar().numpy().sum() for item in x_dygraph
            ]
            g_out_sum_dygraph = numpy.array(g_out_dygraph).sum()

            self.assertAlmostEqual(1.0, g_out_sum_dygraph, delta=0.1)


if __name__ == '__main__':
    unittest.main()