#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16

import paddle
from paddle import fluid
from paddle.fluid import Program, core, program_guard


class TestClipOp(OpTest):
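    """Verify the clip op's forward output and gradients against numpy.clip."""
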
    def setUp(self):
        self.max_relative_error = 0.006
        self.python_api = paddle.clip

        self.inputs = {}
        self.initTestCase()

        self.op_type = "clip"
        self.attrs = {}
        self.attrs['min'] = self.min
        self.attrs['max'] = self.max
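        # Tensor inputs 'Min'/'Max', when present, override the scalar attributes.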
        if 'Min' in self.inputs:
            min_v = self.inputs['Min']
        else:
            min_v = self.attrs['min']

        if 'Max' in self.inputs:
            max_v = self.inputs['Max']
        else:
            max_v = self.attrs['max']

        input = np.random.random(self.shape).astype(self.dtype)
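        # Move values lying within max_relative_error of either clip bound away
        # from the boundary so the numeric gradient check stays stable.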
        input[np.abs(input - min_v) < self.max_relative_error] = 0.5
        input[np.abs(input - max_v) < self.max_relative_error] = 0.5
        self.inputs['X'] = input
        self.outputs = {'Out': np.clip(self.inputs['X'], min_v, max_v)}
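        # Only check CINN when min/max come from attributes; tensor Min/Max
        # inputs are excluded.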
        self.check_cinn = ('Min' not in self.inputs) and (
            'Max' not in self.inputs
        )

    def test_check_output(self):
        paddle.enable_static()
        self.check_output(check_cinn=self.check_cinn)
        paddle.disable_static()

    def test_check_grad_normal(self):
        paddle.enable_static()
        self.check_grad(['X'], 'Out')
        paddle.disable_static()

    def initTestCase(self):
        self.dtype = np.float32
        self.shape = (4, 10, 10)
        self.max = 0.8
        self.min = 0.3
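        # The tensor inputs below take precedence over the attributes above,
        # giving an effective clip range of [0.1, 0.8].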
        self.inputs['Max'] = np.array([0.8]).astype(self.dtype)
        self.inputs['Min'] = np.array([0.1]).astype(self.dtype)


class TestCase1(TestClipOp):
    def initTestCase(self):
        self.dtype = np.float32
        self.shape = (8, 16, 8)
        self.max = 0.7
        self.min = 0.0


class TestCase2(TestClipOp):
    def initTestCase(self):
        self.dtype = np.float32
        self.shape = (8, 16)
        self.max = 1.0
        self.min = 0.0


class TestCase3(TestClipOp):
    def initTestCase(self):
        self.dtype = np.float32
        self.shape = (4, 8, 16)
        self.max = 0.7
        self.min = 0.2


class TestCase4(TestClipOp):
    def initTestCase(self):
        self.dtype = np.float32
        self.shape = (4, 8, 8)
        self.max = 0.7
        self.min = 0.2
        self.inputs['Max'] = np.array([0.8]).astype(self.dtype)
        self.inputs['Min'] = np.array([0.3]).astype(self.dtype)


class TestCase5(TestClipOp):
    def initTestCase(self):
        self.dtype = np.float32
        self.shape = (4, 8, 16)
        self.max = 0.5
        self.min = 0.5


class TestFP16Case1(TestClipOp):
    def initTestCase(self):
        self.dtype = np.float16
        self.shape = (8, 16, 8)
        self.max = 0.7
        self.min = 0.0


class TestFP16Case2(TestClipOp):
    def initTestCase(self):
        self.dtype = np.float16
        self.shape = (8, 16)
        self.max = 1.0
        self.min = 0.0


class TestFP16Case3(TestClipOp):
    def initTestCase(self):
        self.dtype = np.float16
        self.shape = (4, 8, 16)
        self.max = 0.7
        self.min = 0.2


class TestFP16Case4(TestClipOp):
    def initTestCase(self):
        self.dtype = np.float16
        self.shape = (4, 8, 8)
        self.max = 0.7
        self.min = 0.2
        self.inputs['Max'] = np.array([0.8]).astype(self.dtype)
        self.inputs['Min'] = np.array([0.3]).astype(self.dtype)


class TestFP16Case5(TestClipOp):
    def initTestCase(self):
        self.dtype = np.float16
        self.shape = (4, 8, 16)
        self.max = 0.5
        self.min = 0.5


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA and not support the bfloat16",
)
class TestClipBF16Op(OpTest):
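    """Verify the clip op with bfloat16 data; requires a CUDA device with bf16 support."""
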
    def setUp(self):
        self.max_relative_error = 0.006
        self.python_api = paddle.clip

        self.inputs = {}
        self.initTestCase()

        self.op_type = "clip"
        self.attrs = {}
        self.attrs['min'] = self.min
        self.attrs['max'] = self.max
        if 'Min' in self.inputs:
            min_v = self.inputs['Min']
        else:
            min_v = self.attrs['min']

        if 'Max' in self.inputs:
            max_v = self.inputs['Max']
        else:
            max_v = self.attrs['max']

        input = np.random.random(self.shape).astype(np.float32)
        input[np.abs(input - min_v) < self.max_relative_error] = 0.5
        input[np.abs(input - max_v) < self.max_relative_error] = 0.5
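        # OpTest represents bfloat16 tensors as uint16 bit patterns.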
        self.inputs['X'] = convert_float_to_uint16(input)
        out = np.clip(input, min_v, max_v)
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def test_check_output(self):
        if paddle.is_compiled_with_cuda():
            place = paddle.CUDAPlace(0)
            paddle.enable_static()
            self.check_output_with_place(place)
            paddle.disable_static()

    def test_check_grad_normal(self):
        if paddle.is_compiled_with_cuda():
            place = paddle.CUDAPlace(0)
            paddle.enable_static()
            self.check_grad_with_place(place, ['X'], 'Out')
            paddle.disable_static()

    def initTestCase(self):
        self.shape = (4, 10, 10)
        self.max = 0.8
        self.min = 0.3
        self.inputs['Max'] = np.array([0.8]).astype(np.float32)
        self.inputs['Min'] = np.array([0.1]).astype(np.float32)


class TestBF16Case1(TestClipBF16Op):
    def initTestCase(self):
        self.shape = (8, 16, 8)
        self.max = 0.7
        self.min = 0.0


class TestBF16Case2(TestClipBF16Op):
    def initTestCase(self):
        self.shape = (8, 16)
        self.max = 1.0
        self.min = 0.0


class TestBF16Case3(TestClipBF16Op):
    def initTestCase(self):
        self.shape = (4, 8, 16)
        self.max = 0.7
        self.min = 0.2


class TestBF16Case4(TestClipBF16Op):
    def initTestCase(self):
        self.shape = (4, 8, 8)
        self.max = 0.7
        self.min = 0.2
        self.inputs['Max'] = np.array([0.8]).astype(np.float32)
        self.inputs['Min'] = np.array([0.3]).astype(np.float32)


class TestBF16Case5(TestClipBF16Op):
    def initTestCase(self):
        self.shape = (4, 8, 16)
        self.max = 0.5
        self.min = 0.5


class TestClipOpError(unittest.TestCase):
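    """Passing a numpy array where a Variable is expected should raise TypeError."""
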
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            input_data = np.random.random((2, 4)).astype("float32")

            def test_Variable():
                paddle.clip(x=input_data, min=-1.0, max=1.0)

            self.assertRaises(TypeError, test_Variable)
        paddle.disable_static()


class TestClipAPI(unittest.TestCase):
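    """Functional tests for paddle.clip against numpy in static graph and dygraph modes."""
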
    def _executed_api(self, x, min=None, max=None):
        return paddle.clip(x, min, max)

    def test_clip(self):
        paddle.enable_static()
        data_shape = [1, 9, 9, 4]
        data = np.random.random(data_shape).astype('float32')
        images = paddle.static.data(
            name='image', shape=data_shape, dtype='float32'
        )
        min = paddle.static.data(name='min', shape=[1], dtype='float32')
        max = paddle.static.data(name='max', shape=[1], dtype='float32')

        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        exe = fluid.Executor(place)

        out_1 = self._executed_api(images, min=min, max=max)
        out_2 = self._executed_api(images, min=0.2, max=0.9)
        out_3 = self._executed_api(images, min=0.3)
        out_4 = self._executed_api(images, max=0.7)
        out_5 = self._executed_api(images, min=min)
        out_6 = self._executed_api(images, max=max)
        out_7 = self._executed_api(images, max=-1.0)
        out_8 = self._executed_api(images)
        out_9 = self._executed_api(
            paddle.cast(images, 'float64'), min=0.2, max=0.9
        )
        out_10 = self._executed_api(
            paddle.cast(images * 10, 'int32'), min=2, max=8
        )
        out_11 = self._executed_api(
            paddle.cast(images * 10, 'int64'), min=2, max=8
        )

        (
            res1,
            res2,
            res3,
            res4,
            res5,
            res6,
            res7,
            res8,
            res9,
            res10,
            res11,
        ) = exe.run(
            fluid.default_main_program(),
            feed={
                "image": data,
                "min": np.array([0.2]).astype('float32'),
                "max": np.array([0.8]).astype('float32'),
            },
            fetch_list=[
                out_1,
                out_2,
                out_3,
                out_4,
                out_5,
                out_6,
                out_7,
                out_8,
                out_9,
                out_10,
                out_11,
            ],
        )

        np.testing.assert_allclose(res1, data.clip(0.2, 0.8), rtol=1e-05)
        np.testing.assert_allclose(res2, data.clip(0.2, 0.9), rtol=1e-05)
        np.testing.assert_allclose(res3, data.clip(min=0.3), rtol=1e-05)
        np.testing.assert_allclose(res4, data.clip(max=0.7), rtol=1e-05)
        np.testing.assert_allclose(res5, data.clip(min=0.2), rtol=1e-05)
        np.testing.assert_allclose(res6, data.clip(max=0.8), rtol=1e-05)
        np.testing.assert_allclose(res7, data.clip(max=-1), rtol=1e-05)
        np.testing.assert_allclose(res8, data, rtol=1e-05)
        np.testing.assert_allclose(
            res9, data.astype(np.float64).clip(0.2, 0.9), rtol=1e-05
        )
        np.testing.assert_allclose(
            res10, (data * 10).astype(np.int32).clip(2, 8), rtol=1e-05
        )
        np.testing.assert_allclose(
            res11, (data * 10).astype(np.int64).clip(2, 8), rtol=1e-05
        )
        paddle.disable_static()

    def test_clip_dygraph(self):
        paddle.disable_static()
        place = (
            fluid.CUDAPlace(0)
            if fluid.core.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        paddle.disable_static(place)
        data_shape = [1, 9, 9, 4]
        data = np.random.random(data_shape).astype('float32')
        images = paddle.to_tensor(data, dtype='float32')
        v_min = paddle.to_tensor(np.array([0.2], dtype=np.float32))
        v_max = paddle.to_tensor(np.array([0.8], dtype=np.float32))
        out_1 = self._executed_api(images, min=0.2, max=0.8)
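        # Recreate `images` before each call: TestInplaceClipAPI reruns these
        # tests with clip_, which mutates its input tensor.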
        images = paddle.to_tensor(data, dtype='float32')
        out_2 = self._executed_api(images, min=0.2, max=0.9)
        images = paddle.to_tensor(data, dtype='float32')
        out_3 = self._executed_api(images, min=v_min, max=v_max)
        out_4 = self._executed_api(
            paddle.cast(images * 10, 'int32'), min=2, max=8
        )
        out_5 = self._executed_api(
            paddle.cast(images * 10, 'int64'), min=2, max=8
        )
        # test with numpy.generic
        out_6 = self._executed_api(images, min=np.abs(0.2), max=np.abs(0.8))
        np.testing.assert_allclose(
            out_1.numpy(), data.clip(0.2, 0.8), rtol=1e-05
        )
        np.testing.assert_allclose(
            out_2.numpy(), data.clip(0.2, 0.9), rtol=1e-05
        )
        np.testing.assert_allclose(
            out_3.numpy(), data.clip(0.2, 0.8), rtol=1e-05
        )
        np.testing.assert_allclose(
            out_4.numpy(), (data * 10).astype(np.int32).clip(2, 8), rtol=1e-05
        )
        np.testing.assert_allclose(
            out_5.numpy(), (data * 10).astype(np.int64).clip(2, 8), rtol=1e-05
        )
        np.testing.assert_allclose(
            out_6.numpy(), data.clip(0.2, 0.8), rtol=1e-05
        )
    def test_clip_dygraph_default_max(self):
        paddle.disable_static()
        x_int32 = paddle.to_tensor([1, 2, 3], dtype="int32")
        x_int64 = paddle.to_tensor([1, 2, 3], dtype="int64")
        x_f32 = paddle.to_tensor([1, 2, 3], dtype="float32")
        egr_out1 = paddle.clip(x_int32, min=1)
        egr_out2 = paddle.clip(x_int64, min=1)
        egr_out3 = paddle.clip(x_f32, min=1)
        x_int32 = paddle.to_tensor([1, 2, 3], dtype="int32")
        x_int64 = paddle.to_tensor([1, 2, 3], dtype="int64")
        x_f32 = paddle.to_tensor([1, 2, 3], dtype="float32")
        out1 = paddle.clip(x_int32, min=1)
        out2 = paddle.clip(x_int64, min=1)
        out3 = paddle.clip(x_f32, min=1)
        np.testing.assert_allclose(out1.numpy(), egr_out1.numpy(), rtol=1e-05)
        np.testing.assert_allclose(out2.numpy(), egr_out2.numpy(), rtol=1e-05)
        np.testing.assert_allclose(out3.numpy(), egr_out3.numpy(), rtol=1e-05)
    def test_errors(self):
        paddle.enable_static()
        x1 = paddle.static.data(name='x1', shape=[1], dtype="int16")
        x2 = paddle.static.data(name='x2', shape=[1], dtype="int8")
        self.assertRaises(TypeError, paddle.clip, x=x1, min=0.2, max=0.8)
        self.assertRaises(TypeError, paddle.clip, x=x2, min=0.2, max=0.8)
        paddle.disable_static()


class TestClipOpFp16(unittest.TestCase):
    def test_fp16(self):
        paddle.enable_static()
        data_shape = [1, 9, 9, 4]
        data = np.random.random(data_shape).astype('float16')

        with paddle.static.program_guard(paddle.static.Program()):
            images = paddle.static.data(
                name='image1', shape=data_shape, dtype='float16'
            )
            min = paddle.static.data(name='min1', shape=[1], dtype='float16')
            max = paddle.static.data(name='max1', shape=[1], dtype='float16')
            out = paddle.clip(images, min, max)
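            # Only exercised on CUDA; the CPU kernel may not support float16.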
            if fluid.core.is_compiled_with_cuda():
                place = paddle.CUDAPlace(0)
                exe = paddle.static.Executor(place)
                res1 = exe.run(
                    feed={
                        "image1": data,
                        "min1": np.array([0.2]).astype('float16'),
                        "max1": np.array([0.8]).astype('float16'),
                    },
                    fetch_list=[out],
                )
        paddle.disable_static()


class TestInplaceClipAPI(TestClipAPI):
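    """Rerun every TestClipAPI case through the inplace Tensor.clip_ API."""
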
    def _executed_api(self, x, min=None, max=None):
        return x.clip_(min, max)


if __name__ == '__main__':
    unittest.main()