#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard


class TestTensorArrayToTensorError(unittest.TestCase):
    """Tensor_array_to_tensor error message enhance"""

    def test_errors(self):
        with program_guard(Program()):
            input_data = np.random.random((2, 4)).astype("float32")

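            # tensor_array_to_tensor accepts only a LoDTensorArray Variable,
            # so a raw numpy array (or a list of them) must raise TypeError.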
            def test_Variable():
                fluid.layers.tensor_array_to_tensor(input=input_data)

            self.assertRaises(TypeError, test_Variable)

            def test_list_Variable():
                fluid.layers.tensor_array_to_tensor(input=[input_data])

            self.assertRaises(TypeError, test_list_Variable)


class TestLoDTensorArrayConcat(unittest.TestCase):
    """Test case for concat mode of tensor_array_to_tensor."""

    def setUp(self):
        self.op_type = "tensor_array_to_tensor"
        self.attrs = {"axis": 0}
        self.outputs = ["Out"]

    def test_get_set(self):
        scope = core.Scope()
        program = fluid.Program()
        block = program.global_block()

        input_arr = block.create_var(name="tmp_lod_tensor_array",
                                     type=core.VarDesc.VarType.LOD_TENSOR_ARRAY)
        input_arr.persistable = True
        input_arr_var = scope.var('tmp_lod_tensor_array')
        input_tensor_array = input_arr_var.get_lod_tensor_array()
        self.assertEqual(0, len(input_tensor_array))

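        # Fill the array with 10 tensors; the first holds two rows, so the
        # concatenated output has 11 rows in total.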
        cpu = core.CPUPlace()
        for i in range(10):
            t = core.LoDTensor()
            if i == 0:
                t.set(np.array([[i], [i]], dtype='float32'), cpu)
            else:
                t.set(np.array([[i]], dtype='float32'), cpu)
            input_tensor_array.append(t)

        self.assertEqual(10, len(input_tensor_array))

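        # Gradient fed to Out: 11 rows, matching 2 rows from the first
        # entry plus one row from each of the remaining 9 entries.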
        random_grad = np.random.random_sample([11]).astype(np.float32)

        y_out = block.create_var(name="Out")
        y_out.persistable = True
        y_out_index = block.create_var(name="OutIndex")
        y_out_index.persistable = True

        y_grad_arr = block.create_var(name='Out@GRAD',
                                      dtype='float32',
                                      shape=[11])
        y_grad_arr.persistable = True
        y_grad = scope.var('Out@GRAD')
        y_grad_tensor = y_grad.get_tensor()
        y_grad_tensor.set(random_grad, cpu)

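        # Append the forward op by hand so its grad op desc can be derived
        # from it below.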
        op = block.append_op(type=self.op_type,
                             inputs={"X": input_arr},
                             outputs={
                                 "Out": y_out,
                                 "OutIndex": y_out_index
                             },
                             attrs=self.attrs)

        out_grad = block.create_var(name="tmp_lod_tensor_array@GRAD",
                                    type=core.VarDesc.VarType.LOD_TENSOR_ARRAY)
        out_grad.persistable = True

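        # Build the grad op desc manually instead of calling
        # append_backward, since this test assembles the block directly.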
        grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(
            op.desc, set(), [])
        grad_op_desc = grad_op_desc_list[0]
        new_op_desc = block.desc.append_op()
        new_op_desc.copy_from(grad_op_desc)
        for var_name in grad_op_desc.output_arg_names():
            block.desc.var(var_name.encode("ascii"))

        grad_op_desc.infer_var_type(block.desc)
        grad_op_desc.infer_shape(block.desc)
        for arg in grad_op_desc.output_arg_names():
            grad_var = block.desc.find_var(arg.encode("ascii"))
            grad_var.set_dtype(core.VarDesc.VarType.FP32)

        fetch_list = []
        fetch_list.append(block.var('Out'))
        fetch_list.append(block.var('OutIndex'))

        exe = fluid.Executor(fluid.CPUPlace())
        out = exe.run(program, fetch_list=fetch_list, scope=scope)
        # print("index: ", np.array(out[1]))

        # test forward
        tensor_res = np.array(out[0])
        tensor_res_out_idx = np.array(out[1])
        tensor_gt = np.array([0] + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                             dtype='float32')

        self.assertEqual(len(tensor_res), len(tensor_gt))
        self.assertEqual(len(tensor_res_out_idx), 10)

        for i in range(len(tensor_res)):
            self.assertEqual(tensor_res[i], tensor_gt[i])

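        # OutIndex records how many rows each array entry contributed:
        # 2 for the first entry and 1 for each of the rest.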
        for i in range(len(tensor_res_out_idx)):
            if i == 0:
                self.assertEqual(tensor_res_out_idx[i], 2)
            else:
                self.assertEqual(tensor_res_out_idx[i], 1)

        # test backward
        grad_tensor = scope.var('tmp_lod_tensor_array@GRAD')
        grad_tensor_array = grad_tensor.get_lod_tensor_array()

        self.assertEqual(10, len(grad_tensor_array))

        for i in range(len(grad_tensor_array)):
            if i == 0:
                self.assertEqual(
                    np.array(grad_tensor_array[i])[0], np.array(random_grad[i]))
                self.assertEqual(
                    np.array(grad_tensor_array[i])[1],
                    np.array(random_grad[i + 1]))
            if i == 1:
                self.assertEqual(np.array(grad_tensor_array[i]),
                                 np.array(random_grad[i + 1]))


class TestLoDTensorArrayStack(unittest.TestCase):
    """Test case for stack mode of tensor_array_to_tensor."""

    def setUp(self):
        self.op_type = "tensor_array_to_tensor"
        self.attrs = {"axis": 1, "use_stack": True}
        self.inputs = [
            np.random.rand(2, 3, 4).astype("float32"),
            np.random.rand(2, 3, 4).astype("float32"),
            np.random.rand(2, 3, 4).astype("float32")
        ]
        self.outputs = [
            np.stack(self.inputs, axis=self.attrs["axis"]),
            np.array([x.shape[self.attrs["axis"]] for x in self.inputs],
                     dtype="int32")
        ]
        self.input_grads = [np.ones_like(x) for x in self.inputs]
        self.set_program()
        for var in self.program.list_vars():
            # to avoid scope clearing after execution
            var.persistable = True

    def set_program(self):
        self.program = fluid.Program()
        with fluid.program_guard(self.program):
            self.array = array = fluid.layers.create_array(dtype='float32')
            idx = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
            for i, x in enumerate(self.inputs):
                x = fluid.layers.assign(x)
                fluid.layers.array_write(x, idx + i, array)
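            # Stack the array entries along axis 1; output_index records
            # each entry's size on that axis.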
            output, output_index = fluid.layers.tensor_array_to_tensor(
                input=array, **self.attrs)
            loss = fluid.layers.reduce_sum(output)
            fluid.backward.append_backward(loss)
        self.output_vars = [output, output_index]

    def run_check(self, executor, scope):
        executor.run(self.program, scope=scope)
        for i, output in enumerate(self.outputs):
            np.testing.assert_allclose(
                np.array(scope.var(self.output_vars[i].name).get_tensor()),
                output,
                atol=0)
        tensor_array_grad = scope.var(self.array.name).get_lod_tensor_array()
        for i, input_grad in enumerate(self.input_grads):
            np.testing.assert_allclose(
                np.array(tensor_array_grad[i]), input_grad, atol=0)

    def test_cpu(self):
        scope = core.Scope()
        place = core.CPUPlace()
        executor = fluid.Executor(place)
        self.run_check(executor, scope)

    def test_gpu(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            scope = core.Scope()
            executor = fluid.Executor(place)
            self.run_check(executor, scope)


class TestTensorArrayToTensorAPI(unittest.TestCase):

    def _test_case(self, inp1, inp2):
        x0 = fluid.layers.assign(inp1)
        x0.stop_gradient = False
        x1 = fluid.layers.assign(inp2)
        x1.stop_gradient = False
        i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
        array = fluid.layers.create_array(dtype='float32')
        fluid.layers.array_write(x0, i, array)
        fluid.layers.array_write(x1, i + 1, array)
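        # Exercise both stack and concat modes on the same array.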
        output_stack, output_index_stack = fluid.layers.tensor_array_to_tensor(
            input=array, axis=1, use_stack=True)
        output_concat, output_index_concat = fluid.layers.tensor_array_to_tensor(
            input=array, axis=1, use_stack=False)
        return output_stack, output_index_stack, output_concat, output_index_concat

    def test_case(self):
        inp0 = np.random.rand(2, 3, 4).astype("float32")
        inp1 = np.random.rand(2, 3, 4).astype("float32")

        _outs_static = self._test_case(inp0, inp1)
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        outs_static = exe.run(fetch_list=list(_outs_static))

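        # Run the same network in dygraph mode and compare with the
        # static-graph results.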
        with fluid.dygraph.guard(place):
            outs_dynamic = self._test_case(inp0, inp1)

        for s, d in zip(outs_static, outs_dynamic):
            np.testing.assert_array_equal(s, d.numpy())

    def test_while_loop_case(self):
        with fluid.dygraph.guard():
            zero = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=1)
            ten = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
            array = fluid.layers.create_array(dtype='float32')
            inp0 = np.random.rand(2, 3, 4).astype("float32")
            x0 = fluid.layers.assign(inp0)
            fluid.layers.array_write(x0, zero, array)

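            # Each iteration copies the previous slot forward, filling
            # slots 1..9 with the contents of slot 0.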
            def cond(i, end, array):
                return fluid.layers.less_than(i, end)

            def body(i, end, array):
                prev = fluid.layers.array_read(array, i - 1)
                fluid.layers.array_write(prev, i, array)
                return i + 1, end, array

            _, _, array = fluid.layers.while_loop(cond, body, [i, ten, array])

            self.assertEqual(fluid.layers.array_length(array), 10)
            last = fluid.layers.fill_constant(shape=[1], dtype='int64', value=9)
            np.testing.assert_array_equal(
                fluid.layers.array_read(array, last).numpy(), inp0)


if __name__ == '__main__':
    unittest.main()