#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import numpy as np
import unittest
from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import core

paddle.enable_static()
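# The OpTest-based checks below run in static graph mode (enabled above);
# TestFoldAPI additionally exercises the dygraph API under fluid.dygraph.guard.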


class TestFoldOp(OpTest):
    """
    This is for test on fold Op
    """

    def init_data(self):
        self.batch_size = 3
        self.input_channels = 3 * 2 * 2
        self.length = 12
        self.kernel_sizes = [2, 2]
        self.strides = [1, 1]
        self.paddings = [0, 0, 0, 0]
        self.dilations = [1, 1]
        self.output_sizes = [4, 5]
        input_shape = [self.batch_size, self.input_channels, self.length]
        self.x = np.random.rand(*input_shape).astype(np.float64)

    def calc_fold(self):
        output_shape = [0] * 4
        output_shape[0] = self.batch_size
        output_shape[1] = int(self.input_channels /
                              (self.kernel_sizes[0] * self.kernel_sizes[1]))
        output_shape[2] = self.output_sizes[0]
        output_shape[3] = self.output_sizes[1]
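        # Effective kernel extent after dilation, and the number of sliding
        # block positions along each spatial dimension; a valid input satisfies
        # self.length == col_height * col_width.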
        dkernel_h = self.dilations[0] * (self.kernel_sizes[0] - 1) + 1
        dkernel_w = self.dilations[1] * (self.kernel_sizes[1] - 1) + 1
        col_height = int((self.output_sizes[0] + self.paddings[0] +
                          self.paddings[2] - dkernel_h) / self.strides[0]) + 1
        col_width = int((self.output_sizes[1] + self.paddings[1] +
                         self.paddings[3] - dkernel_w) / self.strides[1]) + 1
        output = np.zeros(output_shape).astype(np.float64)
        ############ calculate output ##############
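        # Scatter-add each column entry back into the image: channel c of the
        # column tensor maps to output channel c_out at kernel offset
        # (h_offset, w_offset), and column position (h, w) maps to pixel
        # (h_out, w_out) via stride, padding and dilation.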
        for b in range(output_shape[0]):
            for c in range(self.input_channels):
                w_offset = int(c % self.kernel_sizes[1])
                h_offset = int(
                    (c / self.kernel_sizes[1]) % self.kernel_sizes[0])
                c_out = int(c / self.kernel_sizes[0] / self.kernel_sizes[1])
                for h in range(col_height):
                    h_out = int(h * self.strides[0] - self.paddings[0] +
                                h_offset * self.dilations[0])
                    for w in range(col_width):
                        w_out = int(w * self.strides[1] - self.paddings[1] +
                                    w_offset * self.dilations[1])
                        if (h_out >= 0 and h_out < self.output_sizes[0]) and (
                                w_out >= 0 and w_out < self.output_sizes[1]):
                            output[b, c_out, h_out,
                                   w_out] += self.x[b, c, w + col_width * h]

        self.outputs = output

    def set_data(self):
        self.init_data()
        self.calc_fold()
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x)}
        self.attrs = {
            'kernel_sizes': self.kernel_sizes,
            'paddings': self.paddings,
            'dilations': self.dilations,
            'strides': self.strides,
            'output_sizes': self.output_sizes
        }
        self.outputs = {'Y': self.outputs}

    def setUp(self):
        self.op_type = 'fold'
        self.python_api = paddle.nn.functional.fold
        self.set_data()

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Y', check_eager=True)


class TestFoldAPI(TestFoldOp):

    # Tests for the paddle.nn.Fold layer API in dygraph mode.

    def setUp(self):
        self.op_type = 'fold'
        self.python_api = paddle.nn.functional.fold
        self.set_data()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def test_api(self):
        for place in self.places:
            with fluid.dygraph.guard(place):
                input = paddle.to_tensor(self.x)
                m = paddle.nn.Fold(**self.attrs)
                m.eval()
                result = m(input)
                np.testing.assert_allclose(result.numpy(),
                                           self.outputs['Y'],
                                           rtol=1e-05)

    def test_info(self):
        str(paddle.nn.Fold(**self.attrs))


class TestFoldOpError(unittest.TestCase):

    def test_errors(self):
        from paddle.nn.functional import fold
        from paddle.fluid.framework import Program, program_guard
        with program_guard(Program(), Program()):

            def test_input_shape():
                # the input must be a 3-D tensor
                x = paddle.randn(shape=[2, 3, 6, 7], dtype="float32")
                out = fold(x, output_sizes=[2, 3], kernel_sizes=[2, 2])

            def test_kernel_shape():
                # kernel_sizes must contain exactly 2 elements
                x = paddle.randn(shape=[2, 6, 6], dtype="float32")
                out = fold(x, output_sizes=[2, 3], kernel_sizes=[2, 2, 3])

            def test_padding_shape():
                # paddings must contain 2 or 4 elements
                x = paddle.randn(shape=[2, 6, 6], dtype="float32")
                out = fold(x,
                           output_sizes=[2, 3],
                           kernel_sizes=[2, 2],
                           paddings=[2, 2, 3])

            def test_dilations_shape():
                # dilations must contain exactly 2 elements
                x = paddle.randn(shape=[2, 6, 6], dtype="float32")
                out = fold(x,
                           output_sizes=[2, 3],
                           kernel_sizes=[2, 2],
                           dilations=[2, 2, 3])

            def test_strides_shape():
                # strides must contain exactly 2 elements
                x = paddle.randn(shape=[2, 6, 6], dtype="float32")
                out = fold(x,
                           output_sizes=[2, 3],
                           kernel_sizes=[2, 2],
                           strides=[2, 2, 3])

            def test_output_size():
                # the number of sliding blocks implied by output_sizes must equal L
                x = paddle.randn(shape=[2, 6, 6], dtype="float32")
                out = fold(x,
                           output_sizes=[6, 6],
                           kernel_sizes=[2, 2],
                           strides=[1, 1])

            def test_output_size_2():
                # output_sizes entries must be integers greater than 1
                x = paddle.randn(shape=[2, 6, 6], dtype="float32")
                out = fold(x,
                           output_sizes=[0.1, 0.2],
                           kernel_sizes=[2, 2],
                           strides=[1, 1])

            def test_block_h_w():
                # the computed block height and width must be greater than 0
                x = paddle.randn(shape=[2, 1, 1], dtype="float32")
                out = fold(x,
                           output_sizes=[1, 1],
                           kernel_sizes=[2, 2],
                           strides=1)

            def test_GT_0():
                # kernel_sizes, dilations and strides must all be greater than 0
                x = paddle.randn(shape=[2, 1, 1], dtype="float32")
                out = fold(x,
                           output_sizes=[0, 0],
                           kernel_sizes=[0, 0],
                           dilations=0,
                           paddings=[0, 0],
                           strides=0)

            self.assertRaises(AssertionError, test_input_shape)
            self.assertRaises(AssertionError, test_kernel_shape)
            self.assertRaises(ValueError, test_padding_shape)
            self.assertRaises(AssertionError, test_dilations_shape)
            self.assertRaises(AssertionError, test_strides_shape)
            self.assertRaises(ValueError, test_output_size)
            self.assertRaises(TypeError, test_output_size_2)
            self.assertRaises(ValueError, test_block_h_w)
            self.assertRaises(ValueError, test_GT_0)


if __name__ == '__main__':
    unittest.main()