#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from scipy.special import expit, erf

from op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.framework import _test_eager_guard

paddle.enable_static()


class TestSqrtOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
            self.assertRaises(TypeError, fluid.layers.sqrt, in1)
            # The input dtype of sqrt op must be float16, float32, float64.
            in2 = fluid.layers.data(name='input2',
                                    shape=[12, 10],
                                    dtype="int32")
            self.assertRaises(TypeError, fluid.layers.sqrt, in2)

            in3 = fluid.layers.data(name='input3',
                                    shape=[12, 10],
                                    dtype="float16")
            fluid.layers.sqrt(x=in3)


class TestActivation(OpTest):
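    # Base OpTest case shared by the activation tests below. It checks the
    # "exp" op by default; subclasses override setUp/op_type, init_dtype()
    # and init_kernel_type() to reuse the output and gradient checks.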

    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_kernel_type()
        self.check_eager = True
        self.python_api = paddle.exp

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_output(check_eager=check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)

    def init_dtype(self):
        self.dtype = np.float64

    def init_kernel_type(self):
        pass


class TestExpm1(TestActivation):

    def setUp(self):
        self.op_type = "expm1"
        self.python_api = paddle.expm1
        self.init_dtype()

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.expm1(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestExpm1API(unittest.TestCase):

    def init_dtype(self):
        self.dtype = 'float64'
        self.shape = [11, 17]

    def setUp(self):
        self.init_dtype()
        self.x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        self.out_ref = np.expm1(self.x)

        self.place = [paddle.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def test_static_api(self):
        paddle.enable_static()

        def run(place):
            with paddle.static.program_guard(paddle.static.Program()):
                X = paddle.fluid.data('X', self.shape, dtype=self.dtype)
                out = paddle.expm1(X)
                exe = paddle.static.Executor(place)
                res = exe.run(feed={'X': self.x}, fetch_list=[out])
            for r in res:
                np.testing.assert_allclose(self.out_ref, r, rtol=1e-05)

        for place in self.place:
            run(place)

    def test_dygraph_api(self):

        def run(place):
            paddle.disable_static(place)
            X = paddle.to_tensor(self.x)
            out = paddle.expm1(X)
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
            paddle.enable_static()

        for place in self.place:
            run(place)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            X = paddle.fluid.data('X', self.shape, dtype='int32')
            self.assertRaises(TypeError, paddle.expm1, X)
        # The input dtype must be float16, float32, float64.


class TestParameter(object):
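    # Mixin used together with TestActivation: it relies on self.op_type to
    # look up the matching paddle.<op> and np.<op> callables via eval() for
    # the name-attribute and dygraph checks.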

    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1])
            data = fluid.layers.data(name="X", shape=[1])
            out = eval("paddle.%s(data, name='Y')" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result, = exe.run(feed={"X": np_x}, fetch_list=[out])
            expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(result, expected, rtol=1e-05)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)


class TestSigmoid(TestActivation):

    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSigmoidBF16(OpTest):

    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [11, 17]).astype(np.float32)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}
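        # bfloat16 data is carried as uint16 bit patterns: the float32
        # reference values are packed with convert_float_to_uint16, and
        # init_dtype() below reports np.uint16 to the OpTest machinery.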

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out')


class TestSilu(TestActivation):

    def setUp(self):
        self.op_type = "silu"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = x / (np.exp(-x) + 1)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSiluAPI(unittest.TestCase):
    # test paddle.nn.Silu, paddle.nn.functional.silu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.silu(x)
            m = paddle.nn.Silu()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.silu(x)
        m = paddle.nn.Silu()
        out2 = m(x)
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.silu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[11, 17],
                                        dtype='int32')
            self.assertRaises(TypeError, F.silu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[11, 17],
                                       dtype='float16')
            F.silu(x_fp16)


class TestLogSigmoid(TestActivation):

    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()

        np.random.seed(2048)
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestLogSigmoidAPI(unittest.TestCase):
    # test paddle.nn.LogSigmoid, paddle.nn.functional.log_sigmoid
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.log_sigmoid(x)
            m = paddle.nn.LogSigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.log_sigmoid(x)
        m = paddle.nn.LogSigmoid()
        out2 = m(x)
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out = paddle.fluid.layers.logsigmoid(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.log_sigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[11, 17],
                                        dtype='int32')
            self.assertRaises(TypeError, F.log_sigmoid, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[11, 17],
                                       dtype='float16')
            F.log_sigmoid(x_fp16)


class TestTanh(TestActivation, TestParameter):

    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()
        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        # TODO: if dtype is float64, the output (Out) differs at CPUPlace
        # depending on whether the op runs inplace, so keep float32 for now.
        self.dtype = np.float32


class TestTanhAPI(unittest.TestCase):
    # test paddle.tanh, paddle.nn.tanh, paddle.nn.functional.tanh
    def setUp(self):
        self.dtype = 'float32'
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()
        self.executed_api()

    def executed_api(self):
        self.tanh = F.tanh

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12], self.dtype)
            out1 = self.tanh(x)
            th = paddle.nn.Tanh()
            out2 = th(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.tanh(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanh(x)
        out2 = paddle.tanh(x)
        th = paddle.nn.Tanh()
        out3 = th(x)
        out_ref = np.tanh(self.x_np)
        for r in [out1, out2, out3]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12], self.dtype)
            out = fluid.layers.tanh(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.tanh(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.tanh, 1)
            # The input dtype must be float16, float32.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, self.tanh, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            self.tanh(x_fp16)


class TestTanhInplaceAPI(TestTanhAPI):
    # test paddle.tanh_
    def executed_api(self):
        self.tanh = paddle.tanh_


class TestAtan(TestActivation, TestParameter):

    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1])
            data = fluid.layers.data(name="X", shape=[1])
            out = paddle.atan(data, name='Y')
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result, = exe.run(feed={"X": np_x}, fetch_list=[out])
            expected = np.arctan(np_x)
            self.assertEqual(result, expected)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)


class TestSinh(TestActivation):

    def setUp(self):
        self.op_type = "sinh"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = fluid.layers.sinh(x).numpy()
            z_expected = np.sinh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            data_x = fluid.layers.data(name="data_x",
                                       shape=test_data_shape,
                                       append_batch_size=False,
                                       dtype="float32")

            pd_sinh_out = fluid.layers.sinh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            np_sinh_res, = exe.run(fluid.default_main_program(),
                                   feed={"data_x": input_x},
                                   fetch_list=[pd_sinh_out])

        expected_res = np.sinh(input_x)
        np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = fluid.layers.sinh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.sinh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.sinh, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.sinh(x_fp16)


class TestCosh(TestActivation):

    def setUp(self):
        self.op_type = "cosh"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.cosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = fluid.layers.cosh(x).numpy()
            z_expected = np.cosh(np_x)
609
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            data_x = fluid.layers.data(name="data_x",
                                       shape=test_data_shape,
                                       append_batch_size=False,
                                       dtype="float32")

            pd_cosh_out = paddle.cosh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            np_cosh_res, = exe.run(fluid.default_main_program(),
                                   feed={"data_x": input_x},
                                   fetch_list=[pd_cosh_out])

        expected_res = np.cosh(input_x)
        np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = fluid.layers.cosh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.cosh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.cosh, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.cosh(x_fp16)


def ref_tanhshrink(x):
    out = x - np.tanh(x)
    return out


class TestTanhshrink(TestActivation):

    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(10, 20, [10, 17]).astype(self.dtype)
        out = ref_tanhshrink(x)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTanhshrinkAPI(unittest.TestCase):
    # test paddle.nn.Tanhshrink, paddle.nn.functional.tanhshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.tanhshrink(x)
            tanhshrink = paddle.nn.Tanhshrink()
            out2 = tanhshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_tanhshrink(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanhshrink(x)
        tanhshrink = paddle.nn.Tanhshrink()
        out2 = tanhshrink(x)
        out_ref = ref_tanhshrink(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.tanh_shrink(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_tanhshrink(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.tanhshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.tanhshrink, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.tanhshrink(x_fp16)


def ref_hardshrink(x, threshold):
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out


class TestHardShrink(TestActivation):

    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()

        self.threshold = 0.5
        self.set_attrs()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype) * 10
        out = ref_hardshrink(x, self.threshold)

        self.attrs = {'threshold': self.threshold}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def set_attrs(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink_threshold_negative(TestHardShrink):

    def set_attrs(self):
        self.threshold = -0.1


class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.hardshrink(x)
            hd = paddle.nn.Hardshrink()
            out2 = hd(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardshrink(x)
        hd = paddle.nn.Hardshrink()
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardshrink(x, 0.6)
        hd = paddle.nn.Hardshrink(0.6)
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = fluid.layers.hard_shrink(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardshrink(self.x_np, 0.5)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.hardshrink, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.hardshrink(x_fp16)


def ref_hardtanh(x, min=-1.0, max=1.0):
    out = np.copy(x)
    out[np.abs(x - min) < 0.005] = min + 0.02
    out[np.abs(x - max) < 0.005] = max + 0.02
    out = np.minimum(np.maximum(x, min), max)
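    # NOTE: the clamp above recomputes out directly from x, so the two nudges
    # away from the kink points are overwritten; the reference output is
    # simply x clipped to [min, max].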
    return out


class TestHardtanhAPI(unittest.TestCase):
    # test paddle.nn.Hardtanh, paddle.nn.functional.hardtanh
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.hardtanh(x)
            m = paddle.nn.Hardtanh()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardtanh(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardtanh(x)
        m = paddle.nn.Hardtanh()
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardtanh(x, -2.0, 2.0)
        m = paddle.nn.Hardtanh(-2.0, 2.0)
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardtanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.hardtanh, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.hardtanh(x_fp16)


def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
        out - threshold)
    return out


class TestSoftshrink(TestActivation):

    def setUp(self):
        self.op_type = "softshrink"
        self.check_eager = True
        self.python_api = paddle.nn.functional.softshrink
        self.init_dtype()

        threshold = 0.8

        np.random.seed(1023)
        x = np.random.uniform(0.25, 10, [10, 12]).astype(self.dtype)
        out = ref_softshrink(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"lambda": threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSoftshrinkAPI(unittest.TestCase):
    # test paddle.nn.Softshrink, paddle.nn.functional.softshrink
    def setUp(self):
        self.threshold = 0.8
        np.random.seed(1024)
        self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softshrink(x, self.threshold)
            softshrink = paddle.nn.Softshrink(self.threshold)
            out2 = softshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softshrink(x, self.threshold)
        softshrink = paddle.nn.Softshrink(self.threshold)
        out2 = softshrink(x)
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.softshrink(x, self.threshold)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_softshrink(self.x_np, self.threshold)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.softshrink, x_int32)
            # The threshold must be no less than zero
            x_fp32 = paddle.fluid.data(name='x_fp32',
                                       shape=[12, 10],
                                       dtype='float32')
            self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.softshrink(x_fp16)


class TestSqrt(TestActivation, TestParameter):

    def setUp(self):
        self.op_type = "sqrt"
        self.python_api = paddle.sqrt
        self.init_dtype()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSqrtBF16(OpTest):

    def setUp(self):
        self.op_type = "sqrt"
        self.python_api = paddle.sqrt
        self.init_dtype()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        out = np.sqrt(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)


class TestRsqrt(TestActivation):

    def setUp(self):
        self.op_type = "rsqrt"
        self.python_api = paddle.rsqrt
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype) * 10
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'],
                        'Out',
                        max_relative_error=0.0005,
                        check_eager=True)


class TestAbs(TestActivation):

    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [4, 25]).astype(self.dtype)
        # Because we set delta = 0.005 in calculating numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
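        # For example, at x = 0.002 the central difference would be
        # (|0.002 + 0.005| - |0.002 - 0.005|) / (2 * 0.005) = 0.4, far from
        # the analytic gradient of 1, so such points are moved away from zero.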
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=False)


class TestCeil(TestActivation):

    def setUp(self):
        self.op_type = "ceil"
        self.check_eager = True
        self.python_api = paddle.ceil
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The same reason as in TestFloor: the gradient is undefined, so the check is skipped.
    def test_check_grad(self):
        pass


class TestFloor(TestActivation):

    def setUp(self):
        self.op_type = "floor"
        self.check_eager = True
        self.python_api = paddle.floor
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of floor (like ceil and round) is undefined; the op returns
    # zero as the gradient while the numeric gradient from numpy is nan, so the
    # gradient check is skipped.
    def test_check_grad(self):
        pass


class TestCos(TestActivation):

    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTan(TestActivation):

    def setUp(self):
        np.random.seed(1024)
        self.op_type = "tan"
        self.init_dtype()
        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

        out = np.tan(self.x_np)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x_np)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out_test = paddle.tan(x)
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', [10, 12], self.dtype)
            out = paddle.tan(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            var = paddle.to_tensor(input_x)
            var.stop_gradient = False
            loss = paddle.tan(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestAcos(TestActivation):

    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSin(TestActivation, TestParameter):

    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin(TestActivation):

    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()

        np.random.seed(2048)
        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcosh(TestActivation):

    def setUp(self):
        self.op_type = "acosh"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(2, 3, [10, 12]).astype(self.dtype)
        out = np.arccosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsinh(TestActivation):

    def setUp(self):
        self.op_type = "asinh"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, [10, 12]).astype(self.dtype)
        out = np.arcsinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAtanh(TestActivation):

    def setUp(self):
        self.op_type = "atanh"
        self.init_dtype()

        np.random.seed(400)
        x = np.random.uniform(-0.9, 0.9, [10, 12]).astype(self.dtype)
        out = np.arctanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRound(TestActivation):

    def setUp(self):
        self.op_type = "round"
        self.check_eager = True
        self.python_api = paddle.round
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        pass


class TestRelu(TestActivation):

    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()

        np.random.seed(1024)
        if self.dtype == np.uint16:
            x = np.random.uniform(-1, 1, [11, 17]).astype(np.float32)
            # The same reason with TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = convert_float_to_uint16(np.maximum(x, 0))
            self.inputs = {'X': convert_float_to_uint16(x)}
        else:
            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            # The same reason with TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = np.maximum(x, 0)
            self.inputs = {'X': x}

        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestReluAPI(unittest.TestCase):
    # test paddle.nn.ReLU, paddle.nn.functional.relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()
        self.executed_api()

    def executed_api(self):
        self.relu = F.relu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.relu(x)
            m = paddle.nn.ReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.maximum(self.x_np, 0)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ReLU()
        out1 = m(x)
        out2 = self.relu(x)
        out_ref = np.maximum(self.x_np, 0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[10, 12],
                                        dtype='int32')
            self.assertRaises(TypeError, self.relu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[10, 12],
                                       dtype='float16')
            self.relu(x_fp16)


class TestReluInplaceAPI(TestReluAPI):
    # test paddle.nn.functional.relu_
    def executed_api(self):
        self.relu = F.relu_


def ref_leaky_relu(x, alpha=0.01):
    out = np.copy(x)
    out[out < 0] *= alpha
    return out


class TestLeakyRelu(TestActivation):

    def get_alpha(self):
        return 0.02

    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()
        alpha = self.get_alpha()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason with TestAbs
1455 1456
        x[np.abs(x) < 0.005] = 0.05
        out = ref_leaky_relu(x, alpha)
A
Adam 已提交
1457

1458
        self.inputs = {'X': x}
A
Adam 已提交
1459
        self.outputs = {'Out': out}
1460
        self.attrs = {'alpha': alpha}
A
Adam 已提交
1461 1462 1463 1464

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1465
        self.check_grad(['X'], 'Out')
1466 1467


1468
class TestLeakyReluAlpha1(TestLeakyRelu):

    def get_alpha(self):
        return 2


class TestLeakyReluAlpha2(TestLeakyRelu):

    def get_alpha(self):
        return -0.01


class TestLeakyReluAlpha3(TestLeakyRelu):

    def get_alpha(self):
        return -2.0


class TestLeakyReluAPI(unittest.TestCase):
    # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu,
    # fluid.layers.leaky_relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.leaky_relu(x)
            m = paddle.nn.LeakyReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_leaky_relu(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.leaky_relu(x)
        m = paddle.nn.LeakyReLU()
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.leaky_relu(x, 0.6)
        m = paddle.nn.LeakyReLU(0.6)
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = fluid.layers.leaky_relu(x, 0.01)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_leaky_relu(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.leaky_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.leaky_relu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.leaky_relu(x_fp16)


def gelu(x, approximate):
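    # Reference GELU: the tanh approximation
    # 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3))) when `approximate`
    # is True, otherwise the exact form 0.5 * x * (1 + erf(x / sqrt(2))).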
    if approximate:
        y_ref = 0.5 * x * (
            1.0 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)


class TestGeluApproximate(TestActivation):

    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        approximate = True
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):

    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        approximate = False
        np.random.seed(2048)
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGELUAPI(unittest.TestCase):
    # test paddle.nn.GELU, paddle.nn.functional.gelu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.gelu(x)
            m = paddle.nn.GELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = gelu(self.x_np, False)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.gelu(x)
        m = paddle.nn.GELU()
        out2 = m(x)
        out_ref = gelu(self.x_np, False)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.gelu(x, True)
        m = paddle.nn.GELU(True)
        out2 = m(x)
        out_ref = gelu(self.x_np, True)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.gelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[11, 17],
                                        dtype='int32')
            self.assertRaises(TypeError, F.gelu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[11, 17],
                                       dtype='float16')
            F.gelu(x_fp16)


class TestBRelu(TestActivation):

    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same with TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestBreluAPI(unittest.TestCase):
    # test paddle.fluid.layers.brelu
    def setUp(self):
        np.random.seed(1024)
        self.t_min = 0.
        self.t_max = 24.
        self.x_np = np.random.uniform(-1, 30, [10, 12]).astype('float32')
        self.out_ref = np.copy(self.x_np)
        self.out_ref[self.out_ref < self.t_min] = self.t_min
        self.out_ref[self.out_ref > self.t_max] = self.t_max
        self.out_ref = self.out_ref.astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_fluid_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', [10, 12])
            out = paddle.fluid.layers.brelu(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            np.testing.assert_allclose(self.out_ref, res[0], rtol=1e-05)

            paddle.disable_static(self.place)
            x = paddle.to_tensor(self.x_np)
            out = paddle.fluid.layers.brelu(x)
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
            paddle.enable_static()

    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.brelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.brelu, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.layers.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            fluid.layers.brelu(x_fp16)


def ref_relu6(x, threshold=6.0):
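    # Reference ReLU6: f(x) = min(max(x, 0), threshold), i.e. ReLU capped at
    # `threshold` (6 by default).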
    out = np.minimum(np.maximum(x, 0), threshold)
    return out


class TestRelu6(TestActivation):

    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()
        self.python_api = paddle.nn.functional.relu6

        np.random.seed(1024)
        x = np.random.uniform(-1, 10, [10, 12]).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_relu6(x)

        self.inputs = {'X': x}
        self.attrs = {'threshold': 6.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestRelu6API(unittest.TestCase):
    # test paddle.nn.ReLU6, paddle.nn.functional.relu6
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.relu6(x)
            relu6 = paddle.nn.ReLU6()
            out2 = relu6(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_relu6(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.relu6(x)
        relu6 = paddle.nn.ReLU6()
        out2 = relu6(x)
        out_ref = ref_relu6(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.relu6(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_relu6(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.relu6, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.relu6, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.relu6(x_fp16)


def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
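    # Reference hard-swish: f(x) = x * min(max(x + offset, 0), threshold) / scale,
    # which reduces to x * relu6(x + 3) / 6 with the default attributes.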
    x_dtype = x.dtype
    if x_dtype == 'float16':
        x = x.astype('float32')
    return (x * np.minimum(np.maximum(x + offset, 0.), threshold) /
            scale).astype(x_dtype)


class TestHardSwish(TestActivation):

    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()
        self.python_api = paddle.nn.functional.hardswish

        np.random.seed(1024)
        x = np.random.uniform(-6, 6, [10, 12]).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        #the same with TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = ref_hardswish(x, threshold, scale, offset)

        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestHardswishAPI(unittest.TestCase):
    # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardswish(x)
            m = paddle.nn.Hardswish()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardswish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor([11648., 11448.])
        out1 = F.hardswish(x)
        m = paddle.nn.Hardswish()
        out2 = m(x)
        out_ref = [11648., 11448.]
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.hard_swish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardswish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.fluid.layers.hard_swish(x)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardswish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.hardswish, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.hardswish(x_fp16)

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_dygraph_api()
            self.test_errors()


class TestSoftRelu(TestActivation):

    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        np.random.seed(4096)
        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason with TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSoftReluOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.soft_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.soft_relu, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.soft_relu(x_fp16)


def elu(x, alpha):
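    # Reference ELU: f(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise.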
    out_ref = np.where(x > 0, x, alpha * (np.exp(x) - 1))
    return out_ref.astype(x.dtype)


class TestELU(TestActivation):

    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-3, 3, [10, 12]).astype(self.dtype)
        alpha = self.get_alpha()
        out = elu(x, alpha)
        # Note: unlike other ReLU extensions, the standard ELU (i.e. alpha = 1) is
        # differentiable at point 0, so modifications like x[np.abs(x) < 0.005] = 0.02
        # can be skipped here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def get_alpha(self):
        return 1.


class TestELUAlpha(TestELU):

    def get_alpha(self):
        return -0.2


class TestELUAPI(unittest.TestCase):
    # test paddle.nn.ELU, paddle.nn.functional.elu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()
        self.executed_api()

    def executed_api(self):
        self.elu = F.elu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.elu(x)
            m = paddle.nn.ELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = elu(self.x_np, 1.0)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.elu(x)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU()
        out2 = m(x)
        out_ref = elu(self.x_np, 1.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.elu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU(0.2)
        out2 = m(x)
        out_ref = elu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.elu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[10, 12],
                                        dtype='int32')
            self.assertRaises(TypeError, self.elu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[10, 12],
                                       dtype='float16')
            self.elu(x_fp16)


class TestELUInplaceAPI(TestELUAPI):
    # test paddle.nn.functional.elu_
    def executed_api(self):
        self.elu = F.elu_

    def test_alpha_error(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        self.assertRaises(Exception, F.elu_, x, -0.2)
        paddle.enable_static()


def celu(x, alpha):
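    # Reference CELU: f(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1)).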
    out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x / alpha) - 1))
    return out_ref.astype(x.dtype)


class TestCELU(TestActivation):

    def setUp(self):
        self.op_type = "celu"
        self.init_dtype()

        self.python_api = paddle.nn.functional.celu
        np.random.seed(1024)
        x = np.random.uniform(-3, 3, [10, 12]).astype(self.dtype)
        alpha = 1.5
        out = celu(x, alpha)
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestCELUAPI(unittest.TestCase):
    # test paddle.nn.CELU, paddle.nn.functional.celu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()
        self.executed_api()

    def executed_api(self):
        self.celu = F.celu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.celu(x, 1.5)
            m = paddle.nn.CELU(1.5)
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = celu(self.x_np, 1.5)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.celu(x, 1.5)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(1.5)
        out2 = m(x)
        out_ref = celu(self.x_np, 1.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.celu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(0.2)
        out2 = m(x)
        out_ref = celu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.celu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[10, 12],
                                        dtype='int32')
            self.assertRaises(TypeError, self.celu, x_int32)
            # The alpha must not be equal to 0
            x_fp32 = paddle.fluid.data(name='x_fp32',
                                       shape=[10, 12],
                                       dtype='float32')
            self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[10, 12],
                                       dtype='float16')
            self.celu(x_fp16)

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_dygraph_api()
            self.test_errors()


class TestReciprocal(TestActivation):

    def setUp(self):
        self.op_type = "reciprocal"
        self.python_api = paddle.reciprocal
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestLog(TestActivation):

    def setUp(self):
        self.op_type = "log"
        self.check_eager = True
        self.python_api = paddle.log
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_error(self):
        in1 = fluid.layers.data(name="in1",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="int32")
        in2 = fluid.layers.data(name="in2",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="int64")

        self.assertRaises(TypeError, fluid.layers.log, in1)
        self.assertRaises(TypeError, fluid.layers.log, in2)


class TestLog2(TestActivation):

    def setUp(self):
        self.op_type = "log2"
        self.check_eager = True
        self.python_api = paddle.log2
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log2(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log2, in1)
        self.assertRaises(TypeError, paddle.log2, in2)

    def test_api(self):
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.static.data(name="data_x",
                                        shape=[11, 17],
                                        dtype="float64")

            out1 = paddle.log2(data_x)
            exe = paddle.static.Executor(place=fluid.CPUPlace())
            exe.run(paddle.static.default_startup_program())
            res1, = exe.run(paddle.static.default_main_program(),
                            feed={"data_x": input_x},
                            fetch_list=[out1])
        expected_res = np.log2(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log2(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log2(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog10(TestActivation):

    def setUp(self):
        self.op_type = "log10"
        self.check_eager = True
        self.python_api = paddle.log10
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log10(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log10, in1)
        self.assertRaises(TypeError, paddle.log10, in2)

    def test_api(self):
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.static.data(name="data_x",
                                        shape=[11, 17],
                                        dtype="float64")

            out1 = paddle.log10(data_x)
            exe = paddle.static.Executor(place=paddle.CPUPlace())
            exe.run(paddle.static.default_startup_program())
            res1, = exe.run(paddle.static.default_main_program(),
                            feed={"data_x": input_x},
                            fetch_list=[out1])
        expected_res = np.log10(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log10(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log10(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog1p(TestActivation):

    def setUp(self):
        self.op_type = "log1p"
        self.check_eager = True
        self.python_api = paddle.log1p
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log1p(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_api(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.layers.data(name="data_x",
                                       shape=[11, 17],
                                       append_batch_size=False,
                                       dtype="float64")

            out1 = paddle.log1p(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            res1, = exe.run(fluid.default_main_program(),
                            feed={"data_x": input_x},
                            fetch_list=[out1])
        expected_res = np.log1p(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
            z = paddle.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestSquare(TestActivation):

    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'],
                        'Out',
                        max_relative_error=0.007,
                        check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSquareBF16(OpTest):

    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        out = np.square(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'],
                                   'Out',
                                   numeric_grad_delta=0.5,
                                   check_eager=True)


class TestPow(TestActivation):

    def setUp(self):
        self.op_type = "pow"
        self.python_api = paddle.pow
        self.check_eager = True
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)


class TestPow_factor_tensor(TestActivation):

    def setUp(self):
        self.op_type = "pow"
        self.check_eager = False
        self.python_api = paddle.pow
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32")
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)

    def test_api(self):
        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = fluid.layers.data(name="x",
                              shape=[11, 17],
                              append_batch_size=False,
                              dtype="float32")
        res = fluid.layers.data(name="res",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="float32")

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = fluid.layers.pow(x, factor=factor_1)
        out_2 = fluid.layers.pow(x, factor=factor_2)
        out_4 = paddle.pow(x, factor_1, name='pow_res')
        out_6 = paddle.pow(x, factor_2)
        self.assertEqual(('pow_res' in out_4.name), True)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res, res_6 = exe.run(
            fluid.default_main_program(),
            feed={"x": input},
            fetch_list=[out_1, out_2, res, out_6])

        assert np.allclose(res_1, np.power(input, 2))
        assert np.allclose(res_2, np.power(input, 3))
        assert np.allclose(res_6, np.power(input, 3))

    def test_error(self):
        in1 = fluid.layers.data(name="in1",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="int32")
        in2 = fluid.layers.data(name="in2",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="int64")
        in3 = fluid.layers.data(name="in3",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="float32")
        in4 = fluid.layers.data(name="in4",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="float64")

        factor_1 = fluid.layers.fill_constant([1], "float64", 3.0)

        self.assertRaises(TypeError, fluid.layers.pow, x=in1, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in2, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in3, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in4, factor=factor_1)


def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
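    # Reference scaled tanh: f(x) = scale_b * tanh(scale_a * x).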
    out = scale_b * np.tanh(x * scale_a)
    return out


class TestSTanh(TestActivation):

    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()
        scale_a = self.get_scale_a()
        scale_b = self.get_scale_b()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        # The same reason with TestAbs
        out = ref_stanh(x, scale_a, scale_b)

        self.inputs = {'X': x}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSTanhScaleA(TestSTanh):

    def get_scale_a(self):
        return 2.0


class TestSTanhScaleB(TestSTanh):

    def get_scale_b(self):
        return 0.5


class TestSTanhAPI(unittest.TestCase):
    # test paddle.nn.stanh
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.scale_a = self.get_scale_a()
        self.scale_b = self.get_scale_b()
        self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out = paddle.stanh(x, self.scale_a, self.scale_b)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.stanh(x, self.scale_a, self.scale_b)
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in [out]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = fluid.layers.stanh(x, self.scale_a, self.scale_b)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, paddle.stanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, paddle.stanh, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            paddle.stanh(x_fp16)


class TestSTanhAPIScaleA(TestSTanhAPI):

    def get_scale_a(self):
        return 2.0


class TestSTanhAPIScaleB(TestSTanhAPI):

    def get_scale_b(self):
        return 0.5


def ref_softplus(x, beta=1, threshold=20):
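    # Reference softplus: f(x) = log(1 + exp(beta * x)) / beta, falling back to
    # the identity once beta * x exceeds `threshold`, for numerical stability.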
    x_beta = beta * x
    out = np.select([x_beta <= threshold, x_beta > threshold],
                    [np.log(1 + np.exp(x_beta)) / beta, x])
    return out


class TestSoftplus(TestActivation):

    def setUp(self):
        self.op_type = "softplus"
        self.python_api = paddle.nn.functional.softplus
        self.init_dtype()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': x}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': out}

        self.check_eager = True

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftplusBF16(OpTest):

    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(np.float32)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.05)


class TestSoftplusAPI(unittest.TestCase):
    # test paddle.nn.Softplus, paddle.nn.functional.softplus
    def setUp(self):
        self.beta = 2
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softplus(x, self.beta, self.threshold)
            softplus = paddle.nn.Softplus(self.beta, self.threshold)
            out2 = softplus(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softplus(x, self.beta, self.threshold)
        softplus = paddle.nn.Softplus(self.beta, self.threshold)
        out2 = softplus(x)
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.softplus(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_softplus(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softplus, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.softplus, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.softplus(x_fp16)


def ref_softsign(x):
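    # Reference softsign: f(x) = x / (1 + |x|).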
    out = np.divide(x, 1 + np.abs(x))
    return out


class TestSoftsign(TestActivation):

    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()
        self.python_api = paddle.nn.functional.softsign

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = ref_softsign(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSoftsignAPI(unittest.TestCase):
    # test paddle.nn.Softsign, paddle.nn.functional.softsign
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softsign(x)
            softsign = paddle.nn.Softsign()
            out2 = softsign(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softsign(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softsign(x)
        softsign = paddle.nn.Softsign()
        out2 = softsign(x)
        out_ref = ref_softsign(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.softsign(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_softsign(self.x_np)
2855
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
2856 2857

    def test_errors(self):
2858
        paddle.enable_static()
2859 2860 2861 2862
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softsign, 1)
            # The input dtype must be float16, float32, float64.
2863 2864 2865
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
2866 2867
            self.assertRaises(TypeError, F.softsign, x_int32)
            # support the input dtype is float16
2868 2869 2870
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
2871 2872 2873
            F.softsign(x_fp16)


2874 2875 2876 2877 2878
def ref_thresholded_relu(x, threshold=1.0):
    out = (x > threshold) * x
    return out
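# ref_thresholded_relu keeps x only where it exceeds the threshold, i.e.
# thresholded_relu(x) = x if x > threshold else 0. Illustrative values only:
# with the default threshold=1.0, an input of [0.5, 2.0, -3.0] should map to
# approximately [0., 2., 0.].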


class TestThresholdedRelu(TestActivation):

    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-20, 20, [10, 12]).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_thresholded_relu(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"threshold": threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedReluAPI(unittest.TestCase):
    # test paddle.nn.ThresholdedReLU, paddle.nn.functional.thresholded_relu
    def setUp(self):
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-20, 20, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.thresholded_relu(x, self.threshold)
            thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
            out2 = thresholded_relu(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.thresholded_relu(x, self.threshold)
        thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
        out2 = thresholded_relu(x)
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.thresholded_relu(x, self.threshold)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.thresholded_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.thresholded_relu, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.thresholded_relu(x_fp16)


def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
    return np.maximum(np.minimum(x * slope + offset, 1.), 0.).astype(x.dtype)
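# ref_hardsigmoid is the piecewise-linear approximation of the sigmoid:
# hardsigmoid(x) = clip(slope * x + offset, 0, 1). With the default
# slope ~ 1/6 and offset = 0.5 it saturates at 0 for x <= -3 and at 1 for
# x >= 3; e.g. (illustrative values only) an input of [-4., 0., 4.] should
# map to approximately [0., 0.5, 1.].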


class TestHardSigmoid(TestActivation):

    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.dtype = 'float64'
        self.slope = 0.166666666666667
        self.offset = 0.5
        self.set_attrs()

        x = np.random.uniform(-5, 5, [10, 12]).astype(self.dtype)
        lower_threshold = -self.offset / self.slope
        upper_threshold = (1. - self.offset) / self.slope

        # Same reason as TestAbs
        delta = 0.005
        x[np.abs(x - lower_threshold) < delta] = lower_threshold - 0.02
        x[np.abs(x - upper_threshold) < delta] = upper_threshold - 0.02

        out = ref_hardsigmoid(x, self.slope, self.offset)

        self.attrs = {'slope': self.slope, 'offset': self.offset}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def set_attrs(self):
        pass


class TestHardSigmoidFP32(TestHardSigmoid):

    def set_attrs(self):
        self.dtype = 'float32'


class TestHardSigmoidSlopeOffset(TestHardSigmoid):

    def set_attrs(self):
        self.slope = 0.2
        self.offset = 0.4


class TestHardsigmoidAPI(unittest.TestCase):
    # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardsigmoid(x)
            m = paddle.nn.Hardsigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardsigmoid(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardsigmoid(x)
        m = paddle.nn.Hardsigmoid()
        out2 = m(x)
        out_ref = ref_hardsigmoid(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.hard_sigmoid(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.fluid.layers.hard_sigmoid(x)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardsigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.hardsigmoid, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.hardsigmoid(x_fp16)


def ref_swish(x):
    out = x * expit(x)
    return out
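# ref_swish is swish with beta fixed to 1, i.e. swish(x) = x * sigmoid(x)
# (scipy's expit is the logistic sigmoid). Illustrative values only:
# ref_swish(0.) is 0., and ref_swish(2.) is approximately 1.76; large positive
# inputs pass through almost unchanged since sigmoid(x) -> 1.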


class TestSwish(TestActivation):

    def setUp(self):
        self.op_type = "swish"
        self.python_api = paddle.nn.functional.swish
        self.init_dtype()
        self.check_eager = True

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = ref_swish(x)
        self.inputs = {'X': x}
        self.attrs = {'beta': 1.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)


class TestSwishAPI(unittest.TestCase):
    # test paddle.nn.Swish, paddle.nn.functional.swish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.swish(x)
            swish = paddle.nn.Swish()
            out2 = swish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_swish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def func_test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.swish(x)
        swish = paddle.nn.Swish()
        out2 = swish(x)
        out_ref = ref_swish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_dygraph_api(self):
        with _test_eager_guard():
            self.func_test_dygraph_api()
        self.func_test_dygraph_api()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.swish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_swish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.swish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.swish, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.swish(x_fp16)


def ref_mish(x, threshold=20.):
    softplus = np.select([x <= threshold, x > threshold],
                         [np.log(1 + np.exp(x)), x])
    return x * np.tanh(softplus)
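# ref_mish computes mish(x) = x * tanh(softplus(x)). The softplus term
# log(1 + exp(x)) is replaced by x itself once x exceeds `threshold`, which
# avoids overflow in exp() for large inputs while changing the value only
# negligibly there. For example, ref_mish(0.) is 0., and for large positive x
# the output approaches x since tanh(softplus(x)) -> 1.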


class TestMish(TestActivation):

    def setUp(self):
        self.op_type = "mish"
        self.python_api = paddle.fluid.layers.nn.mish
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = ref_mish(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestMishAPI(unittest.TestCase):
    # test paddle.nn.Mish, paddle.nn.functional.mish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.mish(x)
            mish = paddle.nn.Mish()
            out2 = mish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_mish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.mish(x)
        mish = paddle.nn.Mish()
        out2 = mish(x)
        out_ref = ref_mish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.mish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_mish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.mish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.mish, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.mish(x_fp16)


#------------------ Test Error Activation----------------------
def create_test_error_class(op_type):

    class TestOpErrors(unittest.TestCase):

        def test_errors(self):
            with program_guard(Program(), Program()):
                op = getattr(fluid.layers, op_type)
                # The input dtype of op_type must be float32, float64.
                in1 = fluid.layers.data(name='input2',
                                        shape=[12, 10],
                                        dtype="int32")
                in2 = fluid.layers.data(name='input3',
                                        shape=[12, 10],
                                        dtype="int64")
                self.assertRaises(TypeError, op, in1)
                self.assertRaises(TypeError, op, in2)

    cls_name = "{0}_{1}".format(op_type, "test_errors")
    TestOpErrors.__name__ = cls_name
    globals()[cls_name] = TestOpErrors
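# Each call below registers a generated TestCase in this module's namespace;
# for example, create_test_error_class('acos') should produce a class named
# "acos_test_errors" that asserts int32/int64 inputs raise TypeError.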


create_test_error_class('acos')
create_test_error_class('asin')
create_test_error_class('atan')
create_test_error_class('ceil')
create_test_error_class('cos')
create_test_error_class('floor')
create_test_error_class('reciprocal')
create_test_error_class('round')
create_test_error_class('rsqrt')
create_test_error_class('sin')
create_test_error_class('sqrt')
create_test_error_class('tanh')
create_test_error_class('tan')
create_test_error_class('acosh')
create_test_error_class('asinh')
create_test_error_class('atanh')


#------------------ Test Cudnn Activation----------------------
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActCudnn(parent):

        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn
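# A sketch of what the factory above generates: for example,
# create_test_act_cudnn_class(TestRelu) registers "TestRelu_cudnn", which
# reruns the parent op test with attrs = {"use_cudnn": True} supplied through
# init_kernel_type().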


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


#------------------ Test Fp16 ----------------------
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):

    @unittest.skipIf(not paddle.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):

        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(place, ['X'],
                                           'Out',
                                           max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16
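# For example, create_test_act_fp16_class(TestRelu) registers "TestRelu_fp16",
# which reruns the parent test with self.dtype = np.float16 and only checks
# output/gradient when the CUDA device actually supports float16, using the
# looser atol/grad_atol tolerances passed to the factory.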


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestExpm1)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestSilu)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhshrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftshrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestTan, grad_atol=0.85)
create_test_act_fp16_class(TestCosh, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestSinh)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestAcosh, grad_atol=0.85)
create_test_act_fp16_class(TestAsinh, grad_atol=0.85)
create_test_act_fp16_class(TestAtanh, grad_atol=0.85)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu, grad_atol=0.85)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestCELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
if core.is_compiled_with_rocm():
    create_test_act_fp16_class(TestLog2, atol=5e-2, grad_atol=0.85)
else:
    create_test_act_fp16_class(TestLog2, atol=5e-2)
create_test_act_fp16_class(TestLog10, atol=5e-2)
create_test_act_fp16_class(TestLog1p, grad_atol=0.9)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish, grad_atol=0.85)
create_test_act_fp16_class(TestHardSwish)
create_test_act_fp16_class(TestMish, grad_atol=0.9)


def create_test_act_bf16_class(parent,
                               atol=1e-2,
                               grad_check=True,
                               grad_atol=0.80):

    @unittest.skipIf(not paddle.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActBF16(parent):

        def init_dtype(self):
            self.dtype = np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            self.check_grad_with_place(place, ['X'],
                                       'Out',
                                       max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "bf16")
    TestActBF16.__name__ = cls_name
    globals()[cls_name] = TestActBF16
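# The bf16 variant stores bfloat16 data as np.uint16 (the raw bit pattern);
# e.g. create_test_act_bf16_class(TestRelu) registers "TestRelu_bf16", which
# runs only on CUDA builds and checks output and gradient on CUDAPlace(0).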


create_test_act_bf16_class(TestRelu)

if __name__ == "__main__":
    unittest.main()