#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest

import numpy as np
from op_test import OpTest
from test_attribute_var import UnittestBase

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard


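# Base OpTest case for the "pad" op: the expected output is computed with
# np.pad, and check_output/check_grad compare the op's forward result and the
# gradient w.r.t. 'X' against that reference.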
class TestPadOp(OpTest):
    def setUp(self):
        self.initTestCase()
        self.dtype = self.get_dtype()
        self.op_type = "pad"
        self.inputs = {
            'X': np.random.random(self.shape).astype(self.dtype),
        }
        self.attrs = {}
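        # The op takes 'paddings' as the per-dim (before, after) pairs
        # flattened into a single 1-D list.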
        self.attrs['paddings'] = np.array(self.paddings).flatten()
        self.attrs['pad_value'] = self.pad_value
        self.outputs = {
            'Out': np.pad(
                self.inputs['X'],
                self.paddings,
                mode='constant',
                constant_values=self.pad_value,
            )
        }

    def get_dtype(self):
        return np.float64

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X'], 'Out')

    def initTestCase(self):
        self.shape = (16, 16)
        self.paddings = [(0, 1), (2, 3)]
        self.pad_value = 0.0


class TestCase1(TestPadOp):
    def initTestCase(self):
        self.shape = (2, 3, 4, 5)
        self.paddings = [(0, 1), (2, 3), (2, 1), (1, 1)]
        self.pad_value = 0.5


class TestCase2(TestPadOp):
    def initTestCase(self):
        self.shape = (5, 5, 5)
        self.paddings = [(0, 0), (0, 0), (1, 2)]
        self.pad_value = 1.0


class TestCase3(TestPadOp):
    def initTestCase(self):
        self.shape = 100
        self.paddings = [(0, 1)]
        self.pad_value = 0.9


# ----------------Pad Fp16----------------
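# Dynamically derive an FP16 variant from each test case: override the dtype,
# relax the gradient tolerance, and register the generated class in globals()
# so unittest can discover it. Skipped unless Paddle is compiled with CUDA.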


def create_test_fp16(parent):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestPadFp16(parent):
        def get_dtype(self):
            return np.float16

        def test_check_grad_normal(self):
            self.check_grad(['X'], 'Out', max_relative_error=0.3)

    cls_name = "{0}_{1}".format(parent.__name__, "Fp16")
    TestPadFp16.__name__ = cls_name
    globals()[cls_name] = TestPadFp16


create_test_fp16(TestPadOp)
create_test_fp16(TestCase1)
create_test_fp16(TestCase2)
create_test_fp16(TestCase3)


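# Error cases: pad requires a Variable/Tensor input, so passing a raw numpy
# array should raise TypeError; a float16 input, by contrast, is accepted.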
class TestPadOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            input_data = np.random.random((2, 2)).astype("float32")

            def test_Variable():
                paddle.nn.functional.pad(x=input_data, pad=[1, 1, 1, 1])

            self.assertRaises(TypeError, test_Variable)

            data = fluid.data(name='data', shape=[4], dtype='float16')
            paddle.nn.functional.pad(x=data, pad=[0, 1])


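# Static-graph tests that pass the padding value as a Tensor (via
# paddle.assign), covering training, save_inference_model, and the inference
# predictor path.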
class TestPaddingValueTensor(UnittestBase):
    def init_info(self):
        self.shapes = [[2, 4]]
        self.save_path = os.path.join(self.temp_dir.name, self.path_prefix())

    def test_static(self):
        main_prog = Program()
        startup_prog = Program()
        with program_guard(main_prog, startup_prog):
            fc = paddle.nn.Linear(4, 10)
            x = paddle.randn([2, 4])
            x.stop_gradient = False
            feat = fc(x)  # [2, 10]

            out = self.call_func(feat)

            sgd = paddle.optimizer.SGD()
            sgd.minimize(paddle.mean(out))
            self.assertTrue(self.var_prefix() in str(main_prog))

            exe = paddle.static.Executor()
            exe.run(startup_prog)
            res = exe.run(fetch_list=[feat, out])
            gt = np.pad(res[0], [1, 1], 'constant', constant_values=[1.0, 1.0])
            np.testing.assert_allclose(res[1], gt)
            paddle.static.save_inference_model(
                self.save_path, [x], [feat, out], exe
            )
            # Test for Inference Predictor
            infer_outs = self.infer_prog()
            gt = np.pad(
                infer_outs[0], [1, 1], 'constant', constant_values=[1.0, 1.0]
            )
            np.testing.assert_allclose(infer_outs[1], gt)

    def path_prefix(self):
        return 'padding_value'

    def var_prefix(self):
        return "Var["

    def call_func(self, x):
        padding_value = paddle.assign([1.0])
        out = paddle.nn.functional.pad(
            x, pad=[1, 1, 1, 1], value=padding_value, mode='constant'
        )
        return out


class TestPaddingValueTensor2(TestPaddingValueTensor):
    def call_func(self, x):
        padding_value = paddle.assign([1.0])
        # test for int value
        tmp = paddle.nn.functional.pad(x, pad=[1, 1, 1, 1], value=1)
        out = paddle.nn.functional.pad(x, pad=[1, 1, 1, 1], value=padding_value)
        return out


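# The pad value is a float64 tensor while x is float32, checking that the
# mismatched dtype is handled and that gradients can still be fetched.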
class TestPaddingValueTensor3(unittest.TestCase):
    def test_static(self):
        np_x = np.random.random((16, 16)).astype('float32')
        main_prog = Program()
        startup_prog = Program()
        with program_guard(main_prog, startup_prog):
            x = paddle.assign(np_x).astype('float32')
            pad_value = paddle.assign([0.0]).astype('float64')
            y = paddle.nn.functional.pad(x, [0, 1, 2, 3], value=pad_value)
            loss = y.sum()
            optimize_ops, params_grads = paddle.optimizer.SGD(0.01).minimize(
                loss
            )

        exe = paddle.static.Executor(paddle.CPUPlace())
        res = exe.run(main_prog, fetch_list=[y] + [g for p, g in params_grads])
        pd_out = res[0]
        np_out = np.pad(np_x, [(0, 1), (2, 3)], constant_values=0.0)
        np.testing.assert_allclose(pd_out, np_out)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()