test_shrink_rnn_memory.py
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import paddle.v2.fluid.core as core
from paddle.v2.fluid.executor import Executor
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.backward import append_backward
from paddle.v2.fluid.framework import default_main_program, switch_main_program
from paddle.v2.fluid.framework import Program
import numpy as np


class TestShrinkRNNMemoryBase(unittest.TestCase):
    def setUp(self):
        self.main_program = Program()
        switch_main_program(self.main_program)
        x = layers.data('x', shape=[100], dtype='float32')
        x.stop_gradient = False
        rank_table_tensor = layers.data(
            'rank_table_tensor', shape=[1], dtype='float32', lod_level=1)
        table = layers.lod_rank_table(x=rank_table_tensor)
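        # Shrink the memory three times: at each step i, shrink_memory keeps
        # only the rows belonging to sequences that are still active according
        # to the LoDRankTable built from rank_table_tensor's LoD.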
        i = layers.zeros(dtype='int64', shape=[1])
        self.mem1 = layers.shrink_memory(x=x, i=i, table=table)
        i = layers.increment(x=i)
        i.stop_gradient = True
        self.mem2 = layers.shrink_memory(x=self.mem1, i=i, table=table)
        i = layers.increment(x=i)
        i.stop_gradient = True
        self.mem3 = layers.shrink_memory(x=self.mem2, i=i, table=table)
        mem3_mean = layers.mean(x=self.mem3)
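        # Append the backward pass so that x@GRAD is created and can be fetched.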
        append_backward(loss=mem3_mean)
        self.x_grad = self.main_program.global_block().var('x@GRAD')

    def sum_lodtensor(self, tensor):
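        # Helper: sum every element of a LoDTensor via scalar indexing.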
        sum_res = 0.0
        for i in xrange(np.product(tensor.get_dims())):
            sum_res += tensor.get_float_element(i)
        return sum_res


class TestShrinkRNNMemoryReferLoD(TestShrinkRNNMemoryBase):
    def test_refer_lod(self):
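        # x carries LoD [[0, 2, 5, 6]] (sequence lengths 2, 3, 1), so
        # shrink_memory drops whole sequences: 6, 5, then 2 rows survive.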
        cpu = core.CPUPlace()
        x_tensor = core.LoDTensor()
        x_tensor.set_lod([[0, 2, 5, 6]])
        tensor_np = np.random.random(size=(6, 100)).astype('float32')
        x_tensor.set(tensor_np, cpu)

        rank_table_tensor = core.LoDTensor()
        rank_table_tensor.set_lod([[0, 1, 3, 6]])
        rank_table_tensor.set(np.random.random(size=(6, 1)).astype('float32'),
                              cpu)

        exe = Executor(cpu)
        outs = exe.run(
            feed={'x': x_tensor,
                  'rank_table_tensor': rank_table_tensor},
            fetch_list=[self.mem1, self.mem2, self.mem3, self.x_grad],
            return_numpy=False)
        self.assertTrue(np.allclose(tensor_np[0:6], outs[0]))
        self.assertTrue(np.allclose(tensor_np[0:5], outs[1]))
        self.assertTrue(np.allclose(tensor_np[0:2], outs[2]))
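        # mean()'s gradient sums to 1; rows dropped by shrink_memory get zero gradient.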
        self.assertAlmostEqual(1.0, self.sum_lodtensor(outs[3]), delta=0.01)


class TestShrinkRNNMemoryNoLoD(TestShrinkRNNMemoryBase):
    def test_no_lod(self):
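        # x has no LoD, so shrink_memory keeps one row per sequence that is
        # still active in the rank table: 3, 2, then 1 rows survive.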
        cpu = core.CPUPlace()
        x_tensor = core.LoDTensor()
        tensor_np = np.random.random(size=(3, 100)).astype('float32')
        x_tensor.set(tensor_np, cpu)

        rank_table_tensor = core.LoDTensor()
        rank_table_tensor.set_lod([[0, 1, 3, 6]])
        rank_table_tensor.set(np.random.random(size=(6, 1)).astype('float32'),
                              cpu)

        exe = Executor(cpu)
        outs = exe.run(
            feed={'x': x_tensor,
                  'rank_table_tensor': rank_table_tensor},
            fetch_list=[self.mem1, self.mem2, self.mem3, self.x_grad],
            return_numpy=False)
        self.assertTrue(np.allclose(tensor_np[0:3], outs[0]))
        self.assertTrue(np.allclose(tensor_np[0:2], outs[1]))
        self.assertTrue(np.allclose(tensor_np[0:1], outs[2]))
        self.assertAlmostEqual(1.0, self.sum_lodtensor(outs[3]), delta=0.01)


if __name__ == '__main__':
    unittest.main()