#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import math
import numpy as np
import unittest
from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard


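# The unfold op (a.k.a. im2col) extracts sliding local blocks from a 4-D
# input of shape [N, C, H, W] and lays each block out as a column, giving an
# output of shape [N, C * kernel_h * kernel_w, L], where L is the number of
# sliding positions. The NumPy reference in TestUnfoldOp.calc_unfold below
# follows exactly this layout.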
class TestUnfoldOp(OpTest):
    """
    This is for test on unfold Op
    """

    def init_data(self):
        self.batch_size = 3
        self.input_channels = 3
        self.input_height = 20
        self.input_width = 20
        self.kernel_sizes = [3, 3]
        self.strides = [1, 1]
        self.paddings = [1, 1, 1, 1]
        self.dilations = [1, 1]
        self.python_api = paddle.nn.functional.unfold
        input_shape = [
            self.batch_size, self.input_channels, self.input_height,
            self.input_width
        ]
        self.x = np.random.rand(*input_shape).astype(np.float64)

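    # Spatial output size, matching the computation in calc_unfold below:
    #   out_h = (H + pad_top + pad_bottom - (dilation_h * (k_h - 1) + 1)) // stride_h + 1
    # and analogously for out_w. For the configuration in init_data
    # (20x20 input, 3x3 kernel, stride 1, padding 1, dilation 1) this yields
    # a 20x20 grid of patches, i.e. an output of shape [3, 27, 400].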
    def calc_unfold(self):
        output_shape = [0] * 3
        output_shape[0] = self.batch_size
        output_shape[1] = self.input_channels * self.kernel_sizes[
            0] * self.kernel_sizes[1]
        dkernel_h = self.dilations[0] * (self.kernel_sizes[0] - 1) + 1
        dkernel_w = self.dilations[1] * (self.kernel_sizes[1] - 1) + 1
        out_height = int((self.input_height + self.paddings[0] +
                          self.paddings[2] - dkernel_h) / self.strides[0]) + 1
        out_width = int((self.input_width + self.paddings[1] + self.paddings[3]
                         - dkernel_w) / self.strides[1]) + 1
        output_shape[2] = out_height * out_width
        output = np.zeros(output_shape).astype(np.float64)
        ############ calculate output ##############
        # Gather one input pixel per (batch, channel*kernel, patch) position;
        # positions that fall into the implicit zero padding keep the initial
        # zero value.
        for i in range(output_shape[0]):
            for j in range(output_shape[1]):
                for k in range(output_shape[2]):
                    # Patch position in the output grid.
                    h_out = int(k / out_width)
                    w_out = k % out_width
                    # Offset inside the kernel window and the source channel.
                    w_offset = j % self.kernel_sizes[1]
                    h_offset = int(j /
                                   self.kernel_sizes[1]) % self.kernel_sizes[0]
                    c_in = int(j /
                               (self.kernel_sizes[0] * self.kernel_sizes[1]))
                    # Map back to coordinates in the (unpadded) input.
                    h_in = h_offset * self.dilations[0] + h_out * self.strides[
                        0] - self.paddings[0]
                    w_in = w_offset * self.dilations[1] + w_out * self.strides[
                        1] - self.paddings[1]
                    if (h_in >= 0 and h_in < self.input_height) and \
                            (w_in >= 0 and w_in < self.input_width):
                        output[i, j, k] = self.x[i, c_in, h_in, w_in]

        self.outputs = output

    def set_data(self):
        self.init_data()
        self.calc_unfold()

        self.inputs = {'X': self.x}
        self.attrs = {
            'kernel_sizes': self.kernel_sizes,
            'paddings': self.paddings,
            'dilations': self.dilations,
            'strides': self.strides
        }
        self.outputs = {'Y': self.outputs}

    def setUp(self):
        self.op_type = 'unfold'
        self.set_data()

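    # With check_eager=True, OpTest also verifies the op in eager mode using
    # the python_api (paddle.nn.functional.unfold) registered in init_data,
    # in addition to the regular static-graph check.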
    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Y', check_eager=True)


class TestUnfoldAPI(TestUnfoldOp):
    """
    This is for test on paddle.nn.Unfold
    """

    def setUp(self):
        self.op_type = 'unfold'
        self.set_data()
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

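    # Dygraph check: build a paddle.nn.Unfold layer with the same attributes
    # as the op test and compare its output with the NumPy reference, both in
    # legacy dygraph and under _test_eager_guard (eager mode).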
    def test_dygraph(self):
        for place in self.places:
            with fluid.dygraph.guard(place):
                input = fluid.dygraph.to_variable(self.inputs['X'])
                m = paddle.nn.Unfold(**self.attrs)
                m.eval()
                result = m(input)
                self.assertTrue(np.allclose(result.numpy(), self.outputs['Y']))

                with _test_eager_guard():
                    input = fluid.dygraph.to_variable(self.inputs['X'])
                    m = paddle.nn.Unfold(**self.attrs)
                    m.eval()
                    result = m(input)
                    self.assertTrue(
                        np.allclose(result.numpy(), self.outputs['Y']))

    def test_info(self):
        str(paddle.nn.Unfold(**self.attrs))
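
    def test_dygraph_functional(self):
        # Minimal additional sketch: call the functional form,
        # paddle.nn.functional.unfold (the python_api used by the op test),
        # with the same attribute dict and compare it against the NumPy
        # reference; assumes the attrs map 1:1 onto its keyword arguments.
        for place in self.places:
            with fluid.dygraph.guard(place):
                x = fluid.dygraph.to_variable(self.inputs['X'])
                y = paddle.nn.functional.unfold(x, **self.attrs)
                self.assertTrue(np.allclose(y.numpy(), self.outputs['Y']))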


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()