test_clip_op.py 10.8 KB
Newer Older
1
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
D
dzhwinter 已提交
2
#
D
dzhwinter 已提交
3 4 5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
D
dzhwinter 已提交
6
#
D
dzhwinter 已提交
7
#     http://www.apache.org/licenses/LICENSE-2.0
D
dzhwinter 已提交
8
#
D
dzhwinter 已提交
9 10 11 12 13 14
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

W
wanghaoshuang 已提交
15 16
import unittest
import numpy as np
Y
Yang Zhang 已提交
17
import paddle
18 19
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
20
from op_test import OpTest
C
chentianyu03 已提交
21
from paddle.fluid.framework import _test_eager_guard
W
wanghaoshuang 已提交
22 23


24
class TestClipOp(OpTest):
    """Base OpTest case for the `clip` operator.

    Subclasses override `initTestCase` to vary the dtype, the input
    shape and the clip bounds.  Bounds may be given either as the
    scalar attributes `min`/`max` or as the tensor inputs
    `Min`/`Max`; the tensor inputs take precedence.
    """

    def setUp(self):
        self.max_relative_error = 0.006
        self.python_api = paddle.clip

        self.inputs = {}
        self.initTestCase()

        self.op_type = "clip"
        self.attrs = {}
        self.attrs['min'] = self.min
        self.attrs['max'] = self.max
        # Tensor-supplied bounds override the scalar attributes.
        lower = self.inputs.get('Min', self.attrs['min'])
        upper = self.inputs.get('Max', self.attrs['max'])

        data = np.random.random(self.shape).astype(self.dtype)
        # Push values that sit too close to either bound away from it, so
        # the numeric gradient check is not confused by the kink there.
        data[np.abs(data - lower) < self.max_relative_error] = 0.5
        data[np.abs(data - upper) < self.max_relative_error] = 0.5
        self.inputs['X'] = data
        self.outputs = {'Out': np.clip(data, lower, upper)}

    def test_check_output(self):
        # Output check runs under static graph mode.
        paddle.enable_static()
        self.check_output(check_eager=True)
        paddle.disable_static()

    def test_check_grad_normal(self):
        # Gradient check for X runs under static graph mode.
        paddle.enable_static()
        self.check_grad(['X'], 'Out', check_eager=True)
        paddle.disable_static()

    def initTestCase(self):
        # Default configuration: tensor bounds shadow the attributes.
        self.dtype = np.float32
        self.shape = (4, 10, 10)
        self.max = 0.8
        self.min = 0.3
        self.inputs['Max'] = np.array([0.8]).astype(self.dtype)
        self.inputs['Min'] = np.array([0.1]).astype(self.dtype)
70 71 72


class TestCase1(TestClipOp):
    """3-D input clipped with scalar attribute bounds only."""

    def initTestCase(self):
        self.dtype = np.float32
        self.shape = (8, 16, 8)
        self.min, self.max = 0.0, 0.7
79 80 81


class TestCase2(TestClipOp):
    """2-D input with the full [0, 1] clip range."""

    def initTestCase(self):
        self.dtype = np.float32
        self.shape = (8, 16)
        self.min, self.max = 0.0, 1.0
88

W
wanghaoshuang 已提交
89

90
class TestCase3(TestClipOp):
    """3-D input with a narrower [0.2, 0.7] clip range."""

    def initTestCase(self):
        self.dtype = np.float32
        self.shape = (4, 8, 16)
        self.min, self.max = 0.2, 0.7
W
wanghaoshuang 已提交
97 98


99
class TestCase4(TestClipOp):
    """Tensor-supplied Min/Max inputs override the scalar attributes."""

    def initTestCase(self):
        self.dtype = np.float32
        self.shape = (4, 8, 8)
        self.min, self.max = 0.2, 0.7
        self.inputs['Max'] = np.array([0.8]).astype(self.dtype)
        self.inputs['Min'] = np.array([0.3]).astype(self.dtype)
108 109


Y
Yang Zhang 已提交
110
class TestCase5(TestClipOp):
    """Degenerate range where min == max clips everything to 0.5."""

    def initTestCase(self):
        self.dtype = np.float32
        self.shape = (4, 8, 16)
        self.min, self.max = 0.5, 0.5


Z
zhangbo9674 已提交
119
class TestCase6(TestClipOp):
    """Clip with float16 data and tensor-supplied bounds."""

    def initTestCase(self):
        # Bug fix: the original line was `self.dtype == np.float16`, a
        # no-op comparison whose result was discarded, so the intended
        # float16 dtype was never assigned. Use assignment so the fp16
        # path of the clip op is actually exercised.
        self.dtype = np.float16
        self.shape = (4, 8, 8)
        self.max = 0.7
        self.min = 0.2
        self.inputs['Max'] = np.array([0.8]).astype(self.dtype)
        self.inputs['Min'] = np.array([0.3]).astype(self.dtype)


130
class TestClipOpError(unittest.TestCase):
    """Error-path checks for the legacy fluid.layers.clip API."""

    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            input_data = np.random.random((2, 4)).astype("float32")

            # A raw numpy array is not a Variable and must be rejected.
            self.assertRaises(
                TypeError,
                lambda: fluid.layers.clip(x=input_data, min=-1.0, max=1.0))

            # An int32 Variable is an unsupported dtype for clip.
            def test_dtype():
                x2 = fluid.layers.data(name='x2', shape=[1], dtype='int32')
                fluid.layers.clip(x=x2, min=-1.0, max=1.0)

            self.assertRaises(TypeError, test_dtype)
        paddle.disable_static()
148 149


Y
Yang Zhang 已提交
150
class TestClipAPI(unittest.TestCase):
    """Functional tests for paddle.clip in static and dygraph modes."""

    def _executed_api(self, x, min=None, max=None):
        # Overridden by TestInplaceClipAPI to exercise Tensor.clip_.
        return paddle.clip(x, min, max)

    def test_clip(self):
        paddle.enable_static()
        data_shape = [1, 9, 9, 4]
        data = np.random.random(data_shape).astype('float32')
        images = fluid.data(name='image', shape=data_shape, dtype='float32')
        # Avoid shadowing the builtins: bound tensors get _t suffixes.
        min_t = fluid.data(name='min', shape=[1], dtype='float32')
        max_t = fluid.data(name='max', shape=[1], dtype='float32')

        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
        else:
            place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        fetches = [
            self._executed_api(images, min=min_t, max=max_t),
            self._executed_api(images, min=0.2, max=0.9),
            self._executed_api(images, min=0.3),
            self._executed_api(images, max=0.7),
            self._executed_api(images, min=min_t),
            self._executed_api(images, max=max_t),
            self._executed_api(images, max=-1.),
            self._executed_api(images),
            self._executed_api(paddle.cast(images, 'float64'),
                               min=0.2,
                               max=0.9),
            self._executed_api(paddle.cast(images * 10, 'int32'),
                               min=2,
                               max=8),
            self._executed_api(paddle.cast(images * 10, 'int64'),
                               min=2,
                               max=8),
        ]

        results = exe.run(fluid.default_main_program(),
                          feed={
                              "image": data,
                              "min": np.array([0.2]).astype('float32'),
                              "max": np.array([0.8]).astype('float32')
                          },
                          fetch_list=fetches)

        # Expected values mirror the fetch list, computed with numpy.
        expected = [
            data.clip(0.2, 0.8),
            data.clip(0.2, 0.9),
            data.clip(min=0.3),
            data.clip(max=0.7),
            data.clip(min=0.2),
            data.clip(max=0.8),
            data.clip(max=-1),
            data,
            data.astype(np.float64).clip(0.2, 0.9),
            (data * 10).astype(np.int32).clip(2, 8),
            (data * 10).astype(np.int64).clip(2, 8),
        ]
        for got, want in zip(results, expected):
            np.testing.assert_allclose(got, want, rtol=1e-05)
        paddle.disable_static()

    def func_clip_dygraph(self):
        paddle.disable_static()
        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
        else:
            place = fluid.CPUPlace()
        paddle.disable_static(place)
        data_shape = [1, 9, 9, 4]
        data = np.random.random(data_shape).astype('float32')
        images = paddle.to_tensor(data, dtype='float32')
        v_min = paddle.to_tensor(np.array([0.2], dtype=np.float32))
        v_max = paddle.to_tensor(np.array([0.8], dtype=np.float32))

        # A fresh tensor is built before each call so the inplace
        # subclass does not clip an already-clipped tensor.
        out_1 = self._executed_api(images, min=0.2, max=0.8)
        images = paddle.to_tensor(data, dtype='float32')
        out_2 = self._executed_api(images, min=0.2, max=0.9)
        images = paddle.to_tensor(data, dtype='float32')
        out_3 = self._executed_api(images, min=v_min, max=v_max)

        out_4 = self._executed_api(paddle.cast(images * 10, 'int32'),
                                   min=2,
                                   max=8)
        out_5 = self._executed_api(paddle.cast(images * 10, 'int64'),
                                   min=2,
                                   max=8)
        # test with numpy.generic
        out_6 = self._executed_api(images, min=np.abs(0.2), max=np.abs(0.8))

        checks = [
            (out_1, data.clip(0.2, 0.8)),
            (out_2, data.clip(0.2, 0.9)),
            (out_3, data.clip(0.2, 0.8)),
            (out_4, (data * 10).astype(np.int32).clip(2, 8)),
            (out_5, (data * 10).astype(np.int64).clip(2, 8)),
            (out_6, data.clip(0.2, 0.8)),
        ]
        for got, want in checks:
            np.testing.assert_allclose(got.numpy(), want, rtol=1e-05)

    def test_clip_dygraph(self):
        # Run once under the eager guard and once without it.
        with _test_eager_guard():
            self.func_clip_dygraph()
        self.func_clip_dygraph()

    def test_clip_dygraph_default_max(self):
        paddle.disable_static()

        def run_all():
            # Clip with only `min` given; `max` falls back to its default.
            outs = []
            for dtype in ("int32", "int64", "float32"):
                x = paddle.to_tensor([1, 2, 3], dtype=dtype)
                outs.append(paddle.clip(x, min=1))
            return outs

        with _test_eager_guard():
            egr_outs = run_all()
        outs = run_all()
        # Eager and non-eager modes must agree.
        for out, egr_out in zip(outs, egr_outs):
            np.testing.assert_allclose(out.numpy(),
                                       egr_out.numpy(),
                                       rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        # int16/int8 are unsupported dtypes and must raise TypeError.
        for name, dtype in (('x1', "int16"), ('x2', "int8")):
            bad = fluid.data(name=name, shape=[1], dtype=dtype)
            self.assertRaises(TypeError, paddle.clip, x=bad, min=0.2, max=0.8)
        paddle.disable_static()
Y
Yang Zhang 已提交
292 293


294
class TestInplaceClipAPI(TestClipAPI):
    """Re-run every TestClipAPI case through the inplace Tensor.clip_."""

    def _executed_api(self, x, min=None, max=None):
        # Inplace variant: mutates and returns `x` itself.
        return x.clip_(min, max)


W
wanghaoshuang 已提交
300 301
# Run the full suite when executed directly as a script.
if __name__ == '__main__':
    unittest.main()