#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle
import paddle.nn.functional as F

paddle.enable_static()
paddle.seed(2022)


def _unpool_output_size(x, kernel_size, stride, padding, output_size):
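    """Return the expected unpooled size for each spatial dimension.

    When ``output_size`` is None, the default size per dimension is
    ``(input_size - 1) * stride + kernel_size - 2 * padding``; otherwise the
    user-provided ``output_size`` is returned unchanged.
    """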
    input_size = x.shape
    default_size = []
    for d in range(len(kernel_size)):
        default_size.append((input_size[-len(kernel_size) + d] - 1) *
                            stride[d] + kernel_size[d] - 2 * padding[d])
    if output_size is None:
        ret = default_size
    else:
        ret = output_size
    return ret


def unpool1dmax_forward_naive(input, indices, ksize, strides, paddings,
                              output_size):
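    """Naive NumPy reference for 1D max unpooling.

    Scatters each pooled value back to the position recorded in ``indices``
    (flattened over the output length); all other positions stay zero.
    """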
    s0, s1, s2 = input.shape
    output_size = _unpool_output_size(input, ksize, strides, paddings,
                                      output_size)
    out_lsize = output_size[0]
    out = np.zeros((s0, s1, out_lsize))
    for nidx in range(s0):
        for cidx in range(s1):
            for l in range(s2):
                index = indices[nidx, cidx, l]
                lidx = index % out_lsize
                out[nidx, cidx, lidx] = input[nidx, cidx, l]

    return out


class TestUnpool1DOpAPI_dygraph(unittest.TestCase):
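    # Dygraph test of the functional API: F.max_unpool1d with an explicit
    # stride, checked against the naive NumPy reference.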

    def test_case(self):
        places = [paddle.CPUPlace()]
        if paddle.fluid.core.is_compiled_with_cuda():
            places.append(paddle.CUDAPlace(0))
        for place in places:
            paddle.disable_static(place)
            input_data = np.random.rand(1, 3, 16)
            input_x = paddle.to_tensor(input_data)
            output, indices = F.max_pool1d(input_x,
                                           kernel_size=2,
                                           stride=2,
                                           return_mask=True)
            output_unpool = F.max_unpool1d(output,
                                           indices,
                                           kernel_size=2,
                                           stride=2)
            expected_output_unpool = unpool1dmax_forward_naive(
                output.numpy(), indices.numpy(), [2], [2], [0], [16])
            self.assertTrue(
                np.allclose(output_unpool.numpy(), expected_output_unpool))

        paddle.enable_static()


class TestUnpool1DOpAPI_dygraph2(unittest.TestCase):
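    # Same as above, but with stride=None so max_unpool1d falls back to the
    # kernel size as the stride.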

    def test_case(self):
        places = [paddle.CPUPlace()]
        if paddle.fluid.core.is_compiled_with_cuda():
            places.append(paddle.CUDAPlace(0))
        for place in places:
            paddle.disable_static(place)
            input_data = np.random.rand(1, 3, 16)
            input_x = paddle.to_tensor(input_data)
            output, indices = F.max_pool1d(input_x,
                                           kernel_size=2,
                                           stride=2,
                                           return_mask=True)
            output_unpool = F.max_unpool1d(output,
                                           indices,
                                           kernel_size=2,
                                           stride=None)
            expected_output_unpool = unpool1dmax_forward_naive(
                output.numpy(), indices.numpy(), [2], [2], [0], [16])
            self.assertTrue(
                np.allclose(output_unpool.numpy(), expected_output_unpool))

        paddle.enable_static()


class TestUnpool1DOpAPI_dygraph3(unittest.TestCase):
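    # Dygraph test of the layer API: paddle.nn.MaxPool1D and
    # paddle.nn.MaxUnPool1D.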

    def test_case(self):
        places = [paddle.CPUPlace()]
        if paddle.fluid.core.is_compiled_with_cuda():
            places.append(paddle.CUDAPlace(0))
        for place in places:
            paddle.disable_static(place)
            input_data = np.random.rand(1, 3, 16)
            input_x = paddle.to_tensor(input_data)
            Pool1d = paddle.nn.MaxPool1D(kernel_size=2,
                                         stride=2,
                                         return_mask=True)
            UnPool1d = paddle.nn.MaxUnPool1D(kernel_size=2, stride=2)

            output, indices = Pool1d(input_x)
            output_unpool = UnPool1d(output, indices)
            expected_output_unpool = unpool1dmax_forward_naive(
                output.numpy(), indices.numpy(), [2], [2], [0], [16])
            self.assertTrue(
                np.allclose(output_unpool.numpy(), expected_output_unpool))

        paddle.enable_static()


class TestUnpool1DOpAPI_static(unittest.TestCase):
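    # Static-graph test: run max_pool1d/max_unpool1d through an Executor and
    # compare against the reference computed from hand-written pool results.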

    def test_case(self):
        paddle.enable_static()
        places = [paddle.CPUPlace()]
        if paddle.fluid.core.is_compiled_with_cuda():
            places.append(paddle.CUDAPlace(0))
        for place in places:
            with paddle.static.program_guard(paddle.static.Program(),
                                             paddle.static.Program()):

                input_data = np.array([[[1, 2, 3, 4], [5, 6, 7, 8],
                                        [9, 10, 11, 12]]]).astype("float32")
                x = paddle.fluid.data(name='x',
                                      shape=[1, 3, 4],
                                      dtype='float32')
                output, indices = F.max_pool1d(x,
                                               kernel_size=2,
                                               stride=2,
                                               return_mask=True)
                output_unpool = F.max_unpool1d(output,
                                               indices,
                                               kernel_size=2,
                                               stride=None)

                exe = paddle.fluid.Executor(place)
                fetches = exe.run(paddle.fluid.default_main_program(),
                                  feed={"x": input_data},
                                  fetch_list=[output_unpool],
                                  return_numpy=True)
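                # Hand-computed results of max_pool1d on the input above:
                # pooled values [2, 4], [6, 8], [10, 12] at flattened
                # positions [1, 3] within each row of length 4.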
                pool1d_out_np = np.array([[[2., 4.], [6., 8.],
                                           [10., 12.]]]).astype("float32")
                indices_np = np.array([[[1, 3], [1, 3],
                                        [1, 3]]]).astype("int32")
                expected_output_unpool = unpool1dmax_forward_naive(
                    pool1d_out_np, indices_np, [2], [2], [0], [4])
                self.assertTrue(np.allclose(fetches[0], expected_output_unpool))


if __name__ == '__main__':
    unittest.main()