#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest
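# Unit tests for the sequence_pool operator: each case builds a LoD tensor,
# computes a NumPy reference for one pool type (AVERAGE, SUM, SQRT, MAX,
# LAST or FIRST), and checks the operator's forward output and gradient.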


class TestSeqAvgPool(OpTest):
    def set_data(self):
        self.op_type = 'sequence_pool'
        # one level, batch size is 4
        x = np.random.uniform(0.1, 1, [11, 23]).astype('float32')
        lod = [[0, 4, 5, 8, 11]]
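        # lod[0] holds the sequence offsets, i.e. four sequences of
        # lengths 4, 1, 3 and 3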
        self.inputs = {'X': (x, lod)}

        out = np.zeros((4, 23)).astype('float32')
        self.outputs = {'Out': out}
        return x, lod, out

    def compute(self, x, lod, out):
        self.attrs = {'pooltype': "AVERAGE"}
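        # reference output: mean over each sequence's rows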
        for i in range(4):
            sub_x = x[lod[0][i]:lod[0][i + 1], :]
            out[i] = sub_x.mean(axis=0)

    def setUp(self):
        x, lod, out = self.set_data()
        self.compute(x, lod, out)

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        # Remove MaxIndex after check_grad is refined.
        self.outputs['MaxIndex'] = \
            np.zeros(self.outputs['Out'].shape).astype('int32')
        self.check_grad(["X"], "Out")


class TestSeqAvgPool2D(TestSeqAvgPool):
    def set_data(self):
        self.op_type = 'sequence_pool'
        # one level, batch size is 4
        x = np.random.uniform(0.1, 1, [13, 3, 17]).astype('float32')
        lod = [[0, 4, 5, 8, 13]]
        self.inputs = {'X': (x, lod)}

        out = np.zeros((4, 3, 17)).astype('float32')
        self.outputs = {'Out': out}
        return x, lod, out

    def compute(self, x, lod, out):
        self.attrs = {'pooltype': "AVERAGE"}
        for i in range(4):
            sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17))
            out[i] = np.reshape(sub_x.mean(axis=0), (3, 17))


class TestSeqSumPool(TestSeqAvgPool):
    def compute(self, x, lod, out):
        self.attrs = {'pooltype': "SUM"}
        for i in range(4):
            sub_x = x[lod[0][i]:lod[0][i + 1], :]
            out[i] = sub_x.sum(axis=0)


class TestSeqSumPool2D(TestSeqAvgPool2D):
    def compute(self, x, lod, out):
        self.attrs = {'pooltype': "SUM"}
        for i in range(4):
            sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17))
            out[i] = np.reshape(sub_x.sum(axis=0), (3, 17))


class TestSeqSqrtPool(TestSeqAvgPool):
    def compute(self, x, lod, out):
        self.attrs = {'pooltype': "SQRT"}
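        # SQRT pooling: sequence sum scaled by 1 / sqrt(sequence length)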
        for i in range(4):
            sub_x = x[lod[0][i]:lod[0][i + 1], :]
            seq_len = lod[0][i + 1] - lod[0][i]
            out[i] = sub_x.sum(axis=0) / np.sqrt(seq_len)


class TestSeqSqrtPool2D(TestSeqAvgPool2D):
    def compute(self, x, lod, out):
        self.attrs = {'pooltype': "SQRT"}
        for i in range(4):
            sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17))
            seq_len = lod[0][i + 1] - lod[0][i]
            out[i] = np.reshape(sub_x.sum(axis=0) / np.sqrt(seq_len), (3, 17))

    def test_check_grad(self):
        # Remove MaxIndex after check_grad is refined.
        self.outputs['MaxIndex'] = \
            np.zeros(self.outputs['Out'].shape).astype('int32')
        self.check_grad(["X"], "Out", max_relative_error=0.06)


class TestSeqMaxPool(TestSeqAvgPool):
    def set_data(self):
        self.op_type = 'sequence_pool'
        x = np.random.uniform(0.1, 1, [13, 23]).astype('float32')
        lod = [[0, 4, 5, 8, 13]]
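        # bump one random row per sequence so each sequence has a unique maximum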
        for i in range(4):
            seq_len = lod[0][i + 1] - lod[0][i]
            x[lod[0][i] + np.random.randint(seq_len), :] += 2.0

        self.inputs = {'X': (x, lod)}

        out = np.zeros((4, 23)).astype('float32')
        self.outputs = {'Out': out}
        return x, lod, out

    def compute(self, x, lod, out):
        self.attrs = {'pooltype': "MAX"}
        for i in range(4):
            sub_x = x[lod[0][i]:lod[0][i + 1], :]
            out[i] = np.amax(sub_x, axis=0)


class TestSeqMaxPool2D(TestSeqAvgPool2D):
    def set_data(self):
        self.op_type = 'sequence_pool'
        x = np.random.uniform(0.1, 1, [13, 3, 11]).astype('float32')
        lod = [[0, 4, 5, 8, 13]]
        self.inputs = {'X': (x, lod)}
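        # bump one random row per sequence so each sequence has a unique
        # maximum (x is modified in place, so self.inputs sees the change)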
        for i in range(4):
            seq_len = lod[0][i + 1] - lod[0][i]
            x[lod[0][i] + np.random.randint(seq_len), :] += 1.0

        out = np.zeros((4, 3, 11)).astype('float32')
        self.outputs = {'Out': out}
        return x, lod, out

    def compute(self, x, lod, out):
        self.attrs = {'pooltype': "MAX"}
        for i in range(4):
            sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 11))
            out[i] = np.reshape(np.amax(sub_x, axis=0), (3, 11))


class TestSeqLastPool(TestSeqAvgPool):
    def compute(self, x, lod, out):
        self.attrs = {'pooltype': "LAST"}
        for i in range(4):
            sub_x = x[lod[0][i]:lod[0][i + 1], :]
            out[i] = sub_x[-1, :]


class TestSeqLastPool2D(TestSeqAvgPool2D):
    def compute(self, x, lod, out):
        self.attrs = {'pooltype': "LAST"}
        for i in range(4):
            sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17))
            out[i] = np.reshape(sub_x[-1, :], (3, 17))


class TestSeqFirstPool(TestSeqAvgPool):
    def compute(self, x, lod, out):
        self.attrs = {'pooltype': "FIRST"}
        for i in range(4):
            sub_x = x[lod[0][i]:lod[0][i + 1], :]
            out[i] = sub_x[0, :]


class TestSeqFirstPool2D(TestSeqAvgPool2D):
    def compute(self, x, lod, out):
        self.attrs = {'pooltype': "FIRST"}
        for i in range(4):
            sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17))
            out[i] = np.reshape(sub_x[0, :], (3, 17))


if __name__ == '__main__':
    unittest.main()