#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest

import numpy as np
from eager_op_test import OpTest
from test_attribute_var import UnittestBase

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard


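# Thin wrapper so OpTest's eager checks can call the C++ `pad` kernel,
# which takes the flattened paddings list and a float padding value.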
def pad_wrapper(x, paddings, pad_value):
    return paddle._C_ops.pad(x, paddings, float(pad_value))


class TestPadOp(OpTest):
    def setUp(self):
        self.initTestCase()
        self.dtype = self.get_dtype()
        self.op_type = "pad"
        self.python_api = pad_wrapper
        self.inputs = {
            'X': np.random.random(self.shape).astype(self.dtype),
        }
        self.attrs = {}
        self.attrs['paddings'] = np.array(self.paddings).flatten()
        self.attrs['pad_value'] = self.pad_value
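        # Reference output computed with NumPy's constant-mode padding.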
        self.outputs = {
            'Out': np.pad(
                self.inputs['X'],
                self.paddings,
                mode='constant',
                constant_values=self.pad_value,
            )
        }

    def get_dtype(self):
        return np.float64

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X'], 'Out')

    def initTestCase(self):
        self.shape = (16, 16)
        self.paddings = [(0, 1), (2, 3)]
        self.pad_value = 0.0


class TestCase1(TestPadOp):
    def initTestCase(self):
        self.shape = (2, 3, 4, 5)
        self.paddings = [(0, 1), (2, 3), (2, 1), (1, 1)]
        self.pad_value = 0.5


class TestCase2(TestPadOp):
    def initTestCase(self):
        self.shape = (5, 5, 5)
        self.paddings = [(0, 0), (0, 0), (1, 2)]
        self.pad_value = 1.0


class TestCase3(TestPadOp):
    def initTestCase(self):
        self.shape = 100
        self.paddings = [(0, 1)]
        self.pad_value = 0.9


# ----------------Pad Fp16----------------


def create_test_fp16(parent):
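    # FP16 pad kernels exist only on CUDA, so the generated class is
    # skipped on CPU-only builds.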
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestPadFp16(parent):
        def get_dtype(self):
            return np.float16

        def test_check_grad_normal(self):
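            # FP16 arithmetic is noisy, so tolerate a larger relative error.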
            self.check_grad(['X'], 'Out', max_relative_error=0.3)

    cls_name = "{0}_{1}".format(parent.__name__, "Fp16")
    TestPadFp16.__name__ = cls_name
    globals()[cls_name] = TestPadFp16


create_test_fp16(TestPadOp)
create_test_fp16(TestCase1)
create_test_fp16(TestCase2)
create_test_fp16(TestCase3)


class TestPadOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            input_data = np.random.random((2, 2)).astype("float32")

            def test_Variable():
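                # A raw NumPy array is not a Tensor, so this should raise TypeError.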
                paddle.nn.functional.pad(x=input_data, pad=[1, 1, 1, 1])

            self.assertRaises(TypeError, test_Variable)

            data = fluid.data(name='data', shape=[4], dtype='float16')
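            # float16 static input should be accepted without raising.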
            paddle.nn.functional.pad(x=data, pad=[0, 1])


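# Checks that `pad` accepts a Tensor padding value in static graphs and that
# the saved inference program reproduces the NumPy reference.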
class TestPaddingValueTensor(UnittestBase):
    def init_info(self):
        self.shapes = [[2, 4]]
        self.save_path = os.path.join(self.temp_dir.name, self.path_prefix())

    def test_static(self):
        main_prog = Program()
        startup_prog = Program()
        with program_guard(main_prog, startup_prog):
            fc = paddle.nn.Linear(4, 10)
            x = paddle.randn([2, 4])
            x.stop_gradient = False
            feat = fc(x)  # [2, 10]

            out = self.call_func(feat)

            sgd = paddle.optimizer.SGD()
            sgd.minimize(paddle.mean(out))
            self.assertTrue(self.var_prefix() in str(main_prog))

            exe = paddle.static.Executor()
            exe.run(startup_prog)
            res = exe.run(fetch_list=[feat, out])
            gt = np.pad(res[0], [1, 1], 'constant', constant_values=[1.0, 1.0])
            np.testing.assert_allclose(res[1], gt)
            paddle.static.save_inference_model(
                self.save_path, [x], [feat, out], exe
            )
            # Test for Inference Predictor
            infer_outs = self.infer_prog()
            gt = np.pad(
                infer_outs[0], [1, 1], 'constant', constant_values=[1.0, 1.0]
            )
            np.testing.assert_allclose(infer_outs[1], gt)

    def path_prefix(self):
        return 'padding_value'

    def var_prefix(self):
        return "Var["

    def call_func(self, x):
        padding_value = paddle.assign([1.0])
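        # pad=[1, 1, 1, 1] pads the last two axes by one element on each side.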
        out = paddle.nn.functional.pad(
            x, pad=[1, 1, 1, 1], value=padding_value, mode='constant'
        )
        return out


class TestPaddingValueTensor2(TestPaddingValueTensor):
    def call_func(self, x):
        padding_value = paddle.assign([1.0])
        # Also exercise an int padding value; only checks that the call runs.
        tmp = paddle.nn.functional.pad(x, pad=[1, 1, 1, 1], value=1)
        out = paddle.nn.functional.pad(x, pad=[1, 1, 1, 1], value=padding_value)
        return out


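# The padding value tensor is float64 while the input is float32; the op is
# expected to reconcile the dtypes and still match the NumPy reference.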
class TestPaddingValueTensor3(unittest.TestCase):
    def test_static(self):
        np_x = np.random.random((16, 16)).astype('float32')
        main_prog = Program()
        startup_prog = Program()
        with program_guard(main_prog, startup_prog):
            x = paddle.assign(np_x).astype('float32')
            pad_value = paddle.assign([0.0]).astype('float64')
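            # [0, 1, 2, 3] pads rows by (0, 1) and columns by (2, 3),
            # mirroring the NumPy reference below.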
            y = paddle.nn.functional.pad(x, [0, 1, 2, 3], value=pad_value)
            loss = y.sum()
            optimize_ops, params_grads = paddle.optimizer.SGD(0.01).minimize(
                loss
            )

        exe = paddle.static.Executor(paddle.CPUPlace())
        res = exe.run(main_prog, fetch_list=[y] + [g for p, g in params_grads])
        pd_out = res[0]
        np_out = np.pad(np_x, [(0, 1), (2, 3)], constant_values=0.0)
        np.testing.assert_allclose(pd_out, np_out)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()