#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
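# Unit tests for Paddle activation operators (exp, sigmoid, tanh, sqrt, relu,
# leaky_relu, ...): OpTest-based forward/gradient checks plus static-graph,
# dygraph and error-handling API tests.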

import unittest

import numpy as np
from scipy.special import expit, erf

from op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.nn.functional as F
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard

paddle.enable_static()


class TestSqrtOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
            self.assertRaises(TypeError, fluid.layers.sqrt, in1)
            # The input dtype of sqrt op must be float16, float32, float64.
            in2 = fluid.layers.data(name='input2',
                                    shape=[12, 10],
                                    dtype="int32")
            self.assertRaises(TypeError, fluid.layers.sqrt, in2)

            in3 = fluid.layers.data(name='input3',
                                    shape=[12, 10],
                                    dtype="float16")
            fluid.layers.sqrt(x=in3)


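# TestActivation is the shared base class: it checks the "exp" op itself, and
# the other activation op tests below reuse its test_check_output and
# test_check_grad by overriding setUp / op_type / init_dtype.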
class TestActivation(OpTest):

    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_kernel_type()
        self.check_eager = True
        self.python_api = paddle.exp

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_output(check_eager=check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)

    def init_dtype(self):
        self.dtype = np.float64

    def init_kernel_type(self):
        pass


class TestExpm1(TestActivation):

    def setUp(self):
        self.op_type = "expm1"
        self.python_api = paddle.expm1
        self.init_dtype()

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.expm1(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestExpm1API(unittest.TestCase):
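    # test paddle.expm1 against np.expm1 in static and dygraph modes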

    def init_dtype(self):
        self.dtype = 'float64'
        self.shape = [11, 17]

    def setUp(self):
        self.init_dtype()
        self.x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        self.out_ref = np.expm1(self.x)

        self.place = [paddle.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def test_static_api(self):
        paddle.enable_static()

        def run(place):
            with paddle.static.program_guard(paddle.static.Program()):
                X = paddle.fluid.data('X', self.shape, dtype=self.dtype)
                out = paddle.expm1(X)
                exe = paddle.static.Executor(place)
                res = exe.run(feed={'X': self.x}, fetch_list=[out])
            for r in res:
                np.testing.assert_allclose(self.out_ref, r, rtol=1e-05)

        for place in self.place:
            run(place)

    def test_dygraph_api(self):

        def run(place):
            paddle.disable_static(place)
            X = paddle.to_tensor(self.x)
            out = paddle.expm1(X)
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
            paddle.enable_static()

        for place in self.place:
            run(place)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input dtype must be float16, float32, float64.
            X = paddle.fluid.data('X', self.shape, dtype='int32')
            self.assertRaises(TypeError, paddle.expm1, X)


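# TestParameter is mixed into some op tests (e.g. TestTanh, TestSqrt, TestAtan,
# TestSin): it looks up paddle.<op_type> / np.<op_type> by name via eval to
# check the name= argument and the dygraph result.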
class TestParameter(object):

    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1])
            data = fluid.layers.data(name="X", shape=[1])
            out = eval("paddle.%s(data, name='Y')" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result, = exe.run(feed={"X": np_x}, fetch_list=[out])
            expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(result, expected, rtol=1e-05)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)


class TestSigmoid(TestActivation):

    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


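# bfloat16 sigmoid test: runs only when compiled with CUDA; bf16 tensors are
# fed as uint16 storage via convert_float_to_uint16.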
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSigmoidBF16(OpTest):

    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [11, 17]).astype(np.float32)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out')


class TestSilu(TestActivation):

    def setUp(self):
        self.op_type = "silu"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = x / (np.exp(-x) + 1)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSiluAPI(unittest.TestCase):
    # test paddle.nn.Silu, paddle.nn.functional.silu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.silu(x)
            m = paddle.nn.Silu()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.silu(x)
        m = paddle.nn.Silu()
        out2 = m(x)
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.silu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[11, 17],
                                        dtype='int32')
            self.assertRaises(TypeError, F.silu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[11, 17],
                                       dtype='float16')
            F.silu(x_fp16)


class TestLogSigmoid(TestActivation):

    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()

        np.random.seed(2048)
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestLogSigmoidAPI(unittest.TestCase):
    # test paddle.nn.LogSigmoid, paddle.nn.functional.log_sigmoid
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.log_sigmoid(x)
            m = paddle.nn.LogSigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.log_sigmoid(x)
        m = paddle.nn.LogSigmoid()
        out2 = m(x)
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out = paddle.fluid.layers.logsigmoid(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.log_sigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[11, 17],
                                        dtype='int32')
            self.assertRaises(TypeError, F.log_sigmoid, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[11, 17],
                                       dtype='float16')
            F.log_sigmoid(x_fp16)


class TestTanh(TestActivation, TestParameter):

    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()
        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        #TODO If dtype is float64, the output (Out) has diff at CPUPlace
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32


class TestTanhAPI(unittest.TestCase):
    # test paddle.tanh, paddle.nn.tanh, paddle.nn.functional.tanh
    def setUp(self):
        self.dtype = 'float32'
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()
        self.executed_api()

    def executed_api(self):
        self.tanh = F.tanh

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12], self.dtype)
            out1 = self.tanh(x)
            th = paddle.nn.Tanh()
            out2 = th(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.tanh(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanh(x)
        out2 = paddle.tanh(x)
        th = paddle.nn.Tanh()
        out3 = th(x)
        out_ref = np.tanh(self.x_np)
        for r in [out1, out2, out3]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12], self.dtype)
            out = fluid.layers.tanh(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.tanh(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.tanh, 1)
            # The input dtype must be float16, float32.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, self.tanh, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            self.tanh(x_fp16)


class TestTanhInplaceAPI(TestTanhAPI):
    # test paddle.tanh_
    def executed_api(self):
        self.tanh = paddle.tanh_


class TestAtan(TestActivation, TestParameter):

    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1])
            data = fluid.layers.data(name="X", shape=[1])
            out = paddle.atan(data, name='Y')
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result, = exe.run(feed={"X": np_x}, fetch_list=[out])
            expected = np.arctan(np_x)
            self.assertEqual(result, expected)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)


class TestSinh(TestActivation):

    def setUp(self):
        self.op_type = "sinh"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = fluid.layers.sinh(x).numpy()
            z_expected = np.sinh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            data_x = fluid.layers.data(name="data_x",
                                       shape=test_data_shape,
                                       append_batch_size=False,
                                       dtype="float32")

            pd_sinh_out = fluid.layers.sinh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            np_sinh_res, = exe.run(fluid.default_main_program(),
                                   feed={"data_x": input_x},
                                   fetch_list=[pd_sinh_out])

        expected_res = np.sinh(input_x)
        np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = fluid.layers.sinh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.sinh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.sinh, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.sinh(x_fp16)


class TestCosh(TestActivation):

    def setUp(self):
        self.op_type = "cosh"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.cosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = fluid.layers.cosh(x).numpy()
            z_expected = np.cosh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            data_x = fluid.layers.data(name="data_x",
                                       shape=test_data_shape,
                                       append_batch_size=False,
                                       dtype="float32")

            pd_cosh_out = paddle.cosh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            np_cosh_res, = exe.run(fluid.default_main_program(),
                                   feed={"data_x": input_x},
                                   fetch_list=[pd_cosh_out])

        expected_res = np.cosh(input_x)
        np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = fluid.layers.cosh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.cosh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.cosh, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.cosh(x_fp16)


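# Numpy reference: tanhshrink(x) = x - tanh(x)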
def ref_tanhshrink(x):
    out = x - np.tanh(x)
    return out


class TestTanhshrink(TestActivation):

    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(10, 20, [10, 17]).astype(self.dtype)
        out = ref_tanhshrink(x)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTanhshrinkAPI(unittest.TestCase):
    # test paddle.nn.Tanhshrink, paddle.nn.functional.tanhshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.tanhshrink(x)
            tanhshrink = paddle.nn.Tanhshrink()
            out2 = tanhshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_tanhshrink(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanhshrink(x)
        tanhshrink = paddle.nn.Tanhshrink()
        out2 = tanhshrink(x)
        out_ref = ref_tanhshrink(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.tanh_shrink(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_tanhshrink(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.tanhshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.tanhshrink, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.tanhshrink(x_fp16)


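# Numpy reference: hardshrink(x) = x if |x| > threshold else 0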
def ref_hardshrink(x, threshold):
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out


class TestHardShrink(TestActivation):

    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()

        self.threshold = 0.5
        self.set_attrs()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype) * 10
        out = ref_hardshrink(x, self.threshold)

        self.attrs = {'threshold': self.threshold}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def set_attrs(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink_threshold_negative(TestHardShrink):

    def set_attrs(self):
        self.threshold = -0.1


class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.hardshrink(x)
            hd = paddle.nn.Hardshrink()
            out2 = hd(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardshrink(x)
        hd = paddle.nn.Hardshrink()
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardshrink(x, 0.6)
        hd = paddle.nn.Hardshrink(0.6)
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = fluid.layers.hard_shrink(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardshrink(self.x_np, 0.5)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.hardshrink, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.hardshrink(x_fp16)


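# Numpy reference: hardtanh(x) = clip(x, min, max); values very close to the
# min/max boundaries are nudged away first, presumably to avoid points where
# the gradient is not well defined.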
def ref_hardtanh(x, min=-1.0, max=1.0):
    out = np.copy(x)
    out[np.abs(x - min) < 0.005] = min + 0.02
    out[np.abs(x - max) < 0.005] = max + 0.02
    out = np.minimum(np.maximum(x, min), max)
    return out


class TestHardtanhAPI(unittest.TestCase):
    # test paddle.nn.Hardtanh, paddle.nn.functional.hardtanh
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.hardtanh(x)
            m = paddle.nn.Hardtanh()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardtanh(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardtanh(x)
        m = paddle.nn.Hardtanh()
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardtanh(x, -2.0, 2.0)
        m = paddle.nn.Hardtanh(-2.0, 2.0)
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardtanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.hardtanh, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.hardtanh(x_fp16)


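# Numpy reference: softshrink(x) = x - threshold if x > threshold,
# x + threshold if x < -threshold, 0 otherwise.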
def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
        out - threshold)
    return out


class TestSoftshrink(TestActivation):

    def setUp(self):
        self.op_type = "softshrink"
        self.check_eager = True
        self.python_api = paddle.nn.functional.softshrink
        self.init_dtype()

        threshold = 0.8

        np.random.seed(1023)
        x = np.random.uniform(0.25, 10, [10, 12]).astype(self.dtype)
        out = ref_softshrink(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"lambda": threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSoftshrinkAPI(unittest.TestCase):
    # test paddle.nn.Softshrink, paddle.nn.functional.softshrink
    def setUp(self):
        self.threshold = 0.8
        np.random.seed(1024)
        self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softshrink(x, self.threshold)
            softshrink = paddle.nn.Softshrink(self.threshold)
            out2 = softshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softshrink(x, self.threshold)
        softshrink = paddle.nn.Softshrink(self.threshold)
        out2 = softshrink(x)
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.softshrink(x, self.threshold)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_softshrink(self.x_np, self.threshold)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.softshrink, x_int32)
            # The threshold must be no less than zero
            x_fp32 = paddle.fluid.data(name='x_fp32',
                                       shape=[12, 10],
                                       dtype='float32')
            self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.softshrink(x_fp16)


class TestSqrt(TestActivation, TestParameter):

    def setUp(self):
        self.op_type = "sqrt"
        self.python_api = paddle.sqrt
        self.init_dtype()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


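# bfloat16 sqrt test: CUDA-only, with inputs/outputs converted to uint16
# storage via convert_float_to_uint16.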
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSqrtBF16(OpTest):

    def setUp(self):
        self.op_type = "sqrt"
        self.python_api = paddle.sqrt
        self.init_dtype()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        out = np.sqrt(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)


class TestRsqrt(TestActivation):

    def setUp(self):
        self.op_type = "rsqrt"
        self.python_api = paddle.rsqrt
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype) * 10
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'],
                        'Out',
                        max_relative_error=0.0005,
                        check_eager=True)


class TestAbs(TestActivation):

    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [4, 25]).astype(self.dtype)
        # Because we set delta = 0.005 in calculating numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=False)


class TestCeil(TestActivation):

    def setUp(self):
        self.op_type = "ceil"
        self.check_eager = True
        self.python_api = paddle.ceil
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The same reason with TestFloor
    def test_check_grad(self):
        pass


class TestFloor(TestActivation):

    def setUp(self):
        self.op_type = "floor"
        self.check_eager = True
        self.python_api = paddle.floor
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # the gradient on floor, ceil, round is undefined.
    # we return zero as gradient, but the numpy return nan
    # The same reason with TestFloor
    def test_check_grad(self):
        pass


class TestCos(TestActivation):

    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTan(TestActivation):

    def setUp(self):
        np.random.seed(1024)
        self.op_type = "tan"
        self.init_dtype()
        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

        out = np.tan(self.x_np)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x_np)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out_test = paddle.tan(x)
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', [10, 12], self.dtype)
            out = paddle.tan(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            var = paddle.to_tensor(input_x)
            var.stop_gradient = False
            loss = paddle.tan(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestAcos(TestActivation):

    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSin(TestActivation, TestParameter):

    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin(TestActivation):

    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()

        np.random.seed(2048)
        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcosh(TestActivation):

    def setUp(self):
        self.op_type = "acosh"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(2, 3, [10, 12]).astype(self.dtype)
        out = np.arccosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsinh(TestActivation):

    def setUp(self):
        self.op_type = "asinh"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, [10, 12]).astype(self.dtype)
        out = np.arcsinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAtanh(TestActivation):

    def setUp(self):
        self.op_type = "atanh"
        self.init_dtype()

        np.random.seed(400)
        x = np.random.uniform(-0.9, 0.9, [10, 12]).astype(self.dtype)
        out = np.arctanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRound(TestActivation):

    def setUp(self):
        self.op_type = "round"
        self.check_eager = True
        self.python_api = paddle.round
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        pass


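# TestRelu prepares either floating-point or uint16 (bfloat16) inputs depending
# on self.dtype; values near zero are nudged to 0.02 so the numeric gradient of
# max(x, 0) stays accurate (same trick as TestAbs).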
class TestRelu(TestActivation):

    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()

        np.random.seed(1024)
        if self.dtype == np.uint16:
            x = np.random.uniform(-1, 1, [11, 17]).astype(np.float32)
            # The same reason with TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = convert_float_to_uint16(np.maximum(x, 0))
            self.inputs = {'X': convert_float_to_uint16(x)}
        else:
            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            # The same reason with TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = np.maximum(x, 0)
            self.inputs = {'X': x}

        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestReluAPI(unittest.TestCase):
    # test paddle.nn.ReLU, paddle.nn.functional.relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()
        self.executed_api()

    def executed_api(self):
        self.relu = F.relu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.relu(x)
            m = paddle.nn.ReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.maximum(self.x_np, 0)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ReLU()
        out1 = m(x)
        out2 = self.relu(x)
        out_ref = np.maximum(self.x_np, 0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[10, 12],
                                        dtype='int32')
            self.assertRaises(TypeError, self.relu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[10, 12],
                                       dtype='float16')
            self.relu(x_fp16)


class TestReluInplaceAPI(TestReluAPI):
    # test paddle.nn.functional.relu_
    def executed_api(self):
        self.relu = F.relu_


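# Numpy reference: leaky_relu(x) = x if x >= 0 else alpha * x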
def ref_leaky_relu(x, alpha=0.01):
    out = np.copy(x)
    out[out < 0] *= alpha
    return out


A
Adam 已提交
1441
class TestLeakyRelu(TestActivation):
1442

1443 1444 1445
    def get_alpha(self):
        return 0.02

A
Adam 已提交
1446 1447 1448
    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()
1449
        alpha = self.get_alpha()
A
Adam 已提交
1450

1451
        np.random.seed(1024)
A
Adam 已提交
1452 1453
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason with TestAbs
1454 1455
        x[np.abs(x) < 0.005] = 0.05
        out = ref_leaky_relu(x, alpha)

1457
        self.inputs = {'X': x}
        self.outputs = {'Out': out}
1459
        self.attrs = {'alpha': alpha}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1464
        self.check_grad(['X'], 'Out')
1465 1466


1467
class TestLeakyReluAlpha1(TestLeakyRelu):
1468

1469 1470 1471 1472 1473
    def get_alpha(self):
        return 2


class TestLeakyReluAlpha2(TestLeakyRelu):
1474

1475 1476 1477 1478 1479
    def get_alpha(self):
        return -0.01


class TestLeakyReluAlpha3(TestLeakyRelu):
1480

1481 1482 1483 1484 1485 1486 1487 1488
    def get_alpha(self):
        return -2.0


class TestLeakyReluAPI(unittest.TestCase):
    # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu,
    # fluid.layers.leaky_relu
    def setUp(self):
1489
        np.random.seed(1024)
1490
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
1492 1493 1494
            else paddle.CPUPlace()

    def test_static_api(self):
1495
        paddle.enable_static()
1496
        with paddle.static.program_guard(paddle.static.Program()):
1497
            x = paddle.fluid.data('X', [10, 12])
1498 1499 1500 1501 1502 1503 1504
            out1 = F.leaky_relu(x)
            m = paddle.nn.LeakyReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_leaky_relu(self.x_np)
        for r in res:
1505
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
1506 1507 1508

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
1510 1511 1512 1513 1514
        out1 = F.leaky_relu(x)
        m = paddle.nn.LeakyReLU()
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np)
        for r in [out1, out2]:
1515
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1516 1517 1518 1519 1520 1521

        out1 = F.leaky_relu(x, 0.6)
        m = paddle.nn.LeakyReLU(0.6)
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np, 0.6)
        for r in [out1, out2]:
1522
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1523 1524 1525
        paddle.enable_static()

    def test_fluid_api(self):
1526
        paddle.enable_static()
1527 1528 1529 1530 1531 1532
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = fluid.layers.leaky_relu(x, 0.01)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_leaky_relu(self.x_np)
1533
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
1534

1535
    def test_errors(self):
1536
        paddle.enable_static()
1537
        with paddle.static.program_guard(paddle.static.Program()):
1538
            # The input type must be Variable.
1539
            self.assertRaises(TypeError, F.leaky_relu, 1)
1540
            # The input dtype must be float16, float32, float64.
1541 1542 1543
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
1544 1545
            self.assertRaises(TypeError, F.leaky_relu, x_int32)
            # support the input dtype is float16
1546 1547 1548
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
1549
            F.leaky_relu(x_fp16)
1550 1551


def gelu(x, approximate):
    if approximate:
        y_ref = 0.5 * x * (
            1.0 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)
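

# A hedged sketch (illustrative only, not part of the original tests): the tanh
# approximation and the exact erf form computed by gelu() above agree to within
# roughly 1e-3 on moderate inputs, so both test variants exercise nearly the
# same curve.
def _demo_gelu_forms_agree():
    x = np.linspace(-3, 3, 13).astype('float64')
    np.testing.assert_allclose(gelu(x, True), gelu(x, False), atol=1e-3)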


class TestGeluApproximate(TestActivation):
1562

    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
1566
        approximate = True
1567
        np.random.seed(1024)
1568 1569
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

1571
        self.inputs = {'X': x}
1572 1573 1574 1575 1576 1577 1578 1579 1580 1581
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):
1582

1583 1584 1585 1586
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        approximate = False
1587
        np.random.seed(2048)
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
1589
        out = gelu(x, approximate)

1591
        self.inputs = {'X': x}
        self.outputs = {'Out': out}
1593
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1598
        self.check_grad(['X'], 'Out')


1601 1602 1603
class TestGELUAPI(unittest.TestCase):
    # test paddle.nn.GELU, paddle.nn.functional.gelu
    def setUp(self):
1604
        np.random.seed(1024)
1605
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
1607 1608 1609
            else paddle.CPUPlace()

    def test_static_api(self):
1610
        paddle.enable_static()
1611
        with paddle.static.program_guard(paddle.static.Program()):
1612
            x = paddle.fluid.data('X', [11, 17])
1613 1614 1615 1616 1617 1618 1619
            out1 = F.gelu(x)
            m = paddle.nn.GELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = gelu(self.x_np, False)
        for r in res:
1620
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
1621 1622 1623 1624 1625 1626 1627 1628 1629

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.gelu(x)
        m = paddle.nn.GELU()
        out2 = m(x)
        out_ref = gelu(self.x_np, False)
        for r in [out1, out2]:
1630
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1631 1632 1633 1634 1635 1636

        out1 = F.gelu(x, True)
        m = paddle.nn.GELU(True)
        out2 = m(x)
        out_ref = gelu(self.x_np, True)
        for r in [out1, out2]:
1637
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1638 1639 1640
        paddle.enable_static()

    def test_errors(self):
1641
        paddle.enable_static()
1642 1643 1644 1645
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.gelu, 1)
            # The input dtype must be float16, float32, float64.
1646 1647 1648
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[11, 17],
                                        dtype='int32')
1649 1650
            self.assertRaises(TypeError, F.gelu, x_int32)
            # support the input dtype is float16
1651 1652 1653
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[11, 17],
                                       dtype='float16')
1654 1655 1656
            F.gelu(x_fp16)


class TestBRelu(TestActivation):
1658

1659 1660
    def setUp(self):
        self.op_type = "brelu"
1661 1662
        self.init_dtype()

1663
        np.random.seed(1024)
        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same with TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
1670 1671 1672
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max
1673 1674 1675

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}
1677 1678

    def test_check_grad(self):
1679 1680
        if self.dtype == np.float16:
            return
1681
        self.check_grad(['X'], 'Out')
1682

1683

1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694
class TestBreluAPI(unittest.TestCase):
    # test paddle.fluid.layers.brelu
    def setUp(self):
        np.random.seed(1024)
        self.t_min = 0.
        self.t_max = 24.
        self.x_np = np.random.uniform(-1, 30, [10, 12]).astype('float32')
        self.out_ref = np.copy(self.x_np)
        self.out_ref[self.out_ref < self.t_min] = self.t_min
        self.out_ref[self.out_ref > self.t_max] = self.t_max
        self.out_ref = self.out_ref.astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
1696 1697 1698 1699 1700 1701 1702 1703
            else paddle.CPUPlace()

    def test_fluid_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', [10, 12])
            out = paddle.fluid.layers.brelu(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
1704
            np.testing.assert_allclose(self.out_ref, res[0], rtol=1e-05)
1705 1706 1707 1708

            paddle.disable_static(self.place)
            x = paddle.to_tensor(self.x_np)
            out = paddle.fluid.layers.brelu(x)
1709
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
1710 1711
            paddle.enable_static()

1712 1713 1714 1715 1716 1717 1718 1719
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.brelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.brelu, x_int32)
            # support the input dtype is float16
1720 1721 1722
            x_fp16 = fluid.layers.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
1723 1724 1725
            fluid.layers.brelu(x_fp16)


def ref_relu6(x, threshold=6.0):
    out = np.copy(x)
    out[np.abs(x - threshold) < 0.005] = threshold + 0.02
    out = np.minimum(np.maximum(x, 0), threshold)
    return out
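

# A hedged sketch (not part of the original tests): ref_relu6 above is
# numerically the same as clipping to [0, 6]; the helper below is illustrative.
def _demo_ref_relu6_is_clip():
    x = np.array([-2.0, -0.5, 0.0, 3.0, 5.5, 7.0, 9.0])
    np.testing.assert_allclose(ref_relu6(x), np.clip(x, 0.0, 6.0), rtol=1e-6)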


class TestRelu6(TestActivation):
1734

    def setUp(self):
1736
        self.op_type = "relu6"
1737
        self.init_dtype()
1738
        self.python_api = paddle.nn.functional.relu6
1739

1740
        np.random.seed(1024)
        x = np.random.uniform(-1, 10, [10, 12]).astype(self.dtype)
1742
        x[np.abs(x) < 0.005] = 0.02
1743
        out = ref_relu6(x)
1744

1745 1746
        self.inputs = {'X': x}
        self.attrs = {'threshold': 6.0}
1747
        self.outputs = {'Out': out}

1749 1750 1751
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1752
        self.check_grad(['X'], 'Out', check_eager=True)
1753 1754


1755 1756 1757
class TestRelu6API(unittest.TestCase):
    # test paddle.nn.ReLU6, paddle.nn.functional.relu6
    def setUp(self):
1758
        np.random.seed(1024)
1759 1760
        self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
1762 1763 1764
            else paddle.CPUPlace()

    def test_static_api(self):
1765
        paddle.enable_static()
1766
        with paddle.static.program_guard(paddle.static.Program()):
1767
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
1768 1769 1770 1771 1772 1773 1774
            out1 = F.relu6(x)
            relu6 = paddle.nn.ReLU6()
            out2 = relu6(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_relu6(self.x_np)
        for r in res:
1775
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
1776 1777 1778 1779 1780 1781 1782 1783 1784

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.relu6(x)
        relu6 = paddle.nn.ReLU6()
        out2 = relu6(x)
        out_ref = ref_relu6(self.x_np)
        for r in [out1, out2]:
1785
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1786 1787 1788
        paddle.enable_static()

    def test_fluid_api(self):
1789
        paddle.enable_static()
1790 1791 1792 1793 1794 1795
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.relu6(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_relu6(self.x_np)
1796
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
1797

1798
    def test_errors(self):
1799
        paddle.enable_static()
1800
        with paddle.static.program_guard(paddle.static.Program()):
1801
            # The input type must be Variable.
1802
            self.assertRaises(TypeError, F.relu6, 1)
1803
            # The input dtype must be float16, float32, float64.
1804 1805 1806
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
1807
            self.assertRaises(TypeError, F.relu6, x_int32)
1808
            # support the input dtype is float16
1809 1810 1811
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
1812
            F.relu6(x_fp16)
1813 1814


def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
    x_dtype = x.dtype
    if x_dtype == 'float16':
        x_dtype = 'float16'
        x = x.astype('float32')
    return (x * np.minimum(np.maximum(x + offset, 0.), threshold) /
            scale).astype(x_dtype)
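

# A hedged sketch (illustrative only): with the default attributes,
# ref_hardswish is piecewise -- 0 below x = -offset, x itself above
# threshold - offset, and x * (x + offset) / scale in between.
def _demo_ref_hardswish_regions():
    np.testing.assert_allclose(ref_hardswish(np.array([-4.0])), [0.0], atol=1e-12)
    np.testing.assert_allclose(ref_hardswish(np.array([4.0])), [4.0], rtol=1e-6)
    np.testing.assert_allclose(ref_hardswish(np.array([1.0])), [4.0 / 6.0], rtol=1e-6)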


class TestHardSwish(TestActivation):
1825

    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()
1829
        self.python_api = paddle.nn.functional.hardswish

1831
        np.random.seed(1024)
        x = np.random.uniform(-6, 6, [10, 12]).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # The same reason with TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
1839
        out = ref_hardswish(x, threshold, scale, offset)

1841
        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def test_check_grad(self):
1846 1847 1848 1849
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


1852 1853 1854 1855
class TestHardswishAPI(unittest.TestCase):
    # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
1857 1858 1859 1860
            else paddle.CPUPlace()

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
1861
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
1862 1863 1864 1865 1866 1867 1868
            out1 = F.hardswish(x)
            m = paddle.nn.Hardswish()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardswish(self.x_np)
        for r in res:
1869
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
1870 1871 1872

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor([11648., 11448.])
1874 1875 1876
        out1 = F.hardswish(x)
        m = paddle.nn.Hardswish()
        out2 = m(x)
        out_ref = [11648., 11448.]
1878
        for r in [out1, out2]:
1879
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1880
        paddle.enable_static()
1881 1882 1883 1884 1885 1886 1887 1888

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.hard_swish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardswish(self.x_np)
1889
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
1890 1891 1892 1893

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.fluid.layers.hard_swish(x)
1894
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
1895 1896 1897 1898
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
1899
            # The input type must be Variable.
1900
            self.assertRaises(TypeError, F.hardswish, 1)
1901
            # The input dtype must be float16, float32, float64.
1902 1903 1904
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
1905
            self.assertRaises(TypeError, F.hardswish, x_int32)
1906
            # support the input dtype is float16
1907 1908 1909
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
1910
            F.hardswish(x_fp16)
1911

1912 1913 1914 1915 1916
    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_dygraph_api()
            self.test_errors()

1917

class TestSoftRelu(TestActivation):
1919

1920 1921
    def setUp(self):
        self.op_type = "soft_relu"
1922 1923
        self.init_dtype()

1924
        np.random.seed(4096)
1925
        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason with TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
1930 1931 1932
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
1933 1934 1935 1936 1937
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}
1938 1939

    def test_check_grad(self):
1940 1941
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)
1943

1944

1945
class TestSoftReluOpError(unittest.TestCase):
1946

1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.soft_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.soft_relu, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.soft_relu(x_fp16)


def elu(x, alpha):
    out_ref = np.where(x > 0, x, alpha * (np.exp(x) - 1))
    return out_ref.astype(x.dtype)
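

# A hedged sketch tied to the note in TestELU below: with alpha == 1 the two
# ELU branches meet at 0 with matching value and slope, so the near-zero
# perturbation used elsewhere is unnecessary. Finite differences are
# illustrative only.
def _demo_elu_smooth_at_zero(eps=1e-6):
    left = (elu(np.array([0.0]), 1.0) - elu(np.array([-eps]), 1.0)) / eps
    right = (elu(np.array([eps]), 1.0) - elu(np.array([0.0]), 1.0)) / eps
    np.testing.assert_allclose(left, right, atol=1e-3)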


class TestELU(TestActivation):
1965

1966 1967
    def setUp(self):
        self.op_type = "elu"
1968 1969
        self.init_dtype()

1970
        np.random.seed(1024)
        x = np.random.uniform(-3, 3, [10, 12]).astype(self.dtype)
        alpha = self.get_alpha()
1973
        out = elu(x, alpha)
1974 1975 1976 1977
        # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
1978
        self.outputs = {'Out': out}
1979 1980

    def test_check_grad(self):
1981 1982
        if self.dtype == np.float16:
            return
1983
        self.check_grad(['X'], 'Out')
1984

    def get_alpha(self):
        return 1.


class TestELUAlpha(TestELU):
1990

    def get_alpha(self):
        return -0.2

1994

1995 1996 1997
class TestELUAPI(unittest.TestCase):
    # test paddle.nn.ELU, paddle.nn.functional.elu
    def setUp(self):
1998
        np.random.seed(1024)
1999
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
2001
            else paddle.CPUPlace()
2002 2003 2004 2005
        self.executed_api()

    def executed_api(self):
        self.elu = F.elu
2006 2007

    def test_static_api(self):
2008
        paddle.enable_static()
2009
        with paddle.static.program_guard(paddle.static.Program()):
2010
            x = paddle.fluid.data('X', [10, 12])
2011
            out1 = self.elu(x)
2012 2013 2014 2015 2016 2017
            m = paddle.nn.ELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = elu(self.x_np, 1.0)
        for r in res:
2018
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2019 2020 2021 2022

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
2023 2024
        out1 = self.elu(x)
        x = paddle.to_tensor(self.x_np)
2025 2026 2027 2028
        m = paddle.nn.ELU()
        out2 = m(x)
        out_ref = elu(self.x_np, 1.0)
        for r in [out1, out2]:
2029
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2030

2031 2032
        out1 = self.elu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
2033 2034 2035 2036
        m = paddle.nn.ELU(0.2)
        out2 = m(x)
        out_ref = elu(self.x_np, 0.2)
        for r in [out1, out2]:
2037
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2038 2039
        paddle.enable_static()

2040
    def test_errors(self):
2041
        paddle.enable_static()
2042 2043
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
2044
            self.assertRaises(TypeError, self.elu, 1)
2045
            # The input dtype must be float16, float32, float64.
2046 2047 2048
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[10, 12],
                                        dtype='int32')
2049
            self.assertRaises(TypeError, self.elu, x_int32)
2050
            # support the input dtype is float16
2051 2052 2053
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[10, 12],
                                       dtype='float16')
2054 2055 2056
            self.elu(x_fp16)


class TestELUInplaceAPI(TestELUAPI):
    # test paddle.nn.functional.elu_
    def executed_api(self):
        self.elu = F.elu_

    def test_alpha_error(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        self.assertRaises(Exception, F.elu_, x, -0.2)
        paddle.enable_static()


def celu(x, alpha):
    out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x / alpha) - 1))
    return out_ref.astype(x.dtype)
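

# A hedged sketch (illustrative only): with alpha == 1 the celu reference above
# reduces to the elu reference defined earlier, so the two test families cover
# the same curve at that setting.
def _demo_celu_matches_elu_at_alpha_one():
    x = np.linspace(-3, 3, 7).astype('float32')
    np.testing.assert_allclose(celu(x, 1.0), elu(x, 1.0), rtol=1e-6)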


class TestCELU(TestActivation):
2075

2076 2077 2078 2079
    def setUp(self):
        self.op_type = "celu"
        self.init_dtype()

2080
        self.python_api = paddle.nn.functional.celu
2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091
        np.random.seed(1024)
        x = np.random.uniform(-3, 3, [10, 12]).astype(self.dtype)
        alpha = 1.5
        out = celu(x, alpha)
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
2092
        self.check_grad(['X'], 'Out', check_eager=True)
2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117


class TestCELUAPI(unittest.TestCase):
    # test paddle.nn.CELU, paddle.nn.functional.celu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()
        self.executed_api()

    def executed_api(self):
        self.celu = F.celu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.celu(x, 1.5)
            m = paddle.nn.CELU(1.5)
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = celu(self.x_np, 1.5)
        for r in res:
2118
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2119 2120 2121 2122 2123 2124 2125 2126 2127 2128

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.celu(x, 1.5)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(1.5)
        out2 = m(x)
        out_ref = celu(self.x_np, 1.5)
        for r in [out1, out2]:
2129
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2130 2131 2132 2133 2134 2135 2136

        out1 = self.celu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(0.2)
        out2 = m(x)
        out_ref = celu(self.x_np, 0.2)
        for r in [out1, out2]:
2137
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2138 2139 2140 2141 2142 2143 2144 2145
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.celu, 1)
            # The input dtype must be float16, float32, float64.
2146 2147 2148
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[10, 12],
                                        dtype='int32')
2149 2150
            self.assertRaises(TypeError, self.celu, x_int32)
            # The alpha must be not equal 0
2151 2152 2153
            x_fp32 = paddle.fluid.data(name='x_fp32',
                                       shape=[10, 12],
                                       dtype='float32')
2154 2155
            self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0)
            # support the input dtype is float16
2156 2157 2158
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[10, 12],
                                       dtype='float16')
2159 2160
            self.celu(x_fp16)

2161 2162 2163 2164 2165
    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_dygraph_api()
            self.test_errors()

2166

class TestReciprocal(TestActivation):
2168

    def setUp(self):
        self.op_type = "reciprocal"
2171
        self.python_api = paddle.reciprocal
2172 2173
        self.init_dtype()

2174
        np.random.seed(1024)
2175 2176 2177 2178 2179
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
2182 2183
        if self.dtype == np.float16:
            return
2184 2185 2186 2187
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestLog(TestActivation):
2191

    def setUp(self):
        self.op_type = "log"
2194 2195
        self.check_eager = True
        self.python_api = paddle.log
2196 2197
        self.init_dtype()

2198
        np.random.seed(1024)
2199 2200 2201 2202 2203
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
2206 2207
        if self.dtype == np.float16:
            return
2208
        self.check_grad(['X'], 'Out', check_eager=True)

2210
    def test_error(self):
2211 2212 2213 2214 2215 2216 2217 2218
        in1 = fluid.layers.data(name="in1",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="int32")
        in2 = fluid.layers.data(name="in2",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="int64")
2219 2220 2221 2222

        self.assertRaises(TypeError, fluid.layers.log, in1)
        self.assertRaises(TypeError, fluid.layers.log, in2)

2223

class TestLog2(TestActivation):
2225

    def setUp(self):
        self.op_type = "log2"
2228 2229
        self.check_eager = True
        self.python_api = paddle.log2
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log2(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
2241
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log2, in1)
        self.assertRaises(TypeError, paddle.log2, in2)

    def test_api(self):
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
2254 2255 2256
            data_x = paddle.static.data(name="data_x",
                                        shape=[11, 17],
                                        dtype="float64")

            out1 = paddle.log2(data_x)
            exe = paddle.static.Executor(place=fluid.CPUPlace())
            exe.run(paddle.static.default_startup_program())
2261 2262 2263
            res1, = exe.run(paddle.static.default_main_program(),
                            feed={"data_x": input_x},
                            fetch_list=[out1])
        expected_res = np.log2(input_x)
2265
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log2(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log2(np_x))
2274
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog10(TestActivation):
2278

    def setUp(self):
        self.op_type = "log10"
2281 2282
        self.check_eager = True
        self.python_api = paddle.log10
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log10(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
2294
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log10, in1)
        self.assertRaises(TypeError, paddle.log10, in2)

    def test_api(self):
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
2307 2308 2309
            data_x = paddle.static.data(name="data_x",
                                        shape=[11, 17],
                                        dtype="float64")

            out1 = paddle.log10(data_x)
            exe = paddle.static.Executor(place=paddle.CPUPlace())
            exe.run(paddle.static.default_startup_program())
2314 2315 2316
            res1, = exe.run(paddle.static.default_main_program(),
                            feed={"data_x": input_x},
                            fetch_list=[out1])
        expected_res = np.log10(input_x)
2318
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log10(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log10(np_x))
2327
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


2330
class TestLog1p(TestActivation):
2331

2332 2333
    def setUp(self):
        self.op_type = "log1p"
2334 2335
        self.check_eager = True
        self.python_api = paddle.log1p
2336 2337
        self.init_dtype()

2338
        np.random.seed(1024)
2339 2340 2341 2342 2343 2344 2345 2346 2347
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log1p(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
2348
        self.check_grad(['X'], 'Out', check_eager=True)
2349 2350 2351 2352

    def test_api(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
2353 2354 2355 2356
            data_x = fluid.layers.data(name="data_x",
                                       shape=[11, 17],
                                       append_batch_size=False,
                                       dtype="float64")
2357 2358 2359 2360

            out1 = paddle.log1p(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
2361 2362 2363
            res1, = exe.run(fluid.default_main_program(),
                            feed={"data_x": input_x},
                            fetch_list=[out1])
2364
        expected_res = np.log1p(input_x)
2365
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)
2366 2367 2368 2369 2370 2371 2372 2373

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
            z = paddle.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
2374
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
2375 2376


class TestSquare(TestActivation):
2378

    def setUp(self):
        self.op_type = "square"
2381
        self.python_api = paddle.square
2382 2383
        self.init_dtype()

2384
        np.random.seed(1024)
2385 2386 2387 2388 2389
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
2392 2393
        if self.dtype == np.float16:
            return
2394 2395 2396 2397
        self.check_grad(['X'],
                        'Out',
                        max_relative_error=0.007,
                        check_eager=True)
2398 2399 2400

    def test_check_output(self):
        self.check_output(check_eager=True)

2402

2403 2404 2405
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSquareBF16(OpTest):
2406

2407 2408
    def setUp(self):
        self.op_type = "square"
2409
        self.python_api = paddle.square
2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        out = np.square(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
2426
        self.check_output_with_place(place, check_eager=True)
2427 2428 2429

    def test_check_grad(self):
        place = core.CUDAPlace(0)
2430 2431 2432 2433
        self.check_grad_with_place(place, ['X'],
                                   'Out',
                                   numeric_grad_delta=0.5,
                                   check_eager=True)
2434 2435


class TestPow(TestActivation):
2437

2438 2439
    def setUp(self):
        self.op_type = "pow"
2440
        self.python_api = paddle.pow
2441
        self.check_eager = True
2442 2443
        self.init_dtype()

2444
        np.random.seed(1024)
2445 2446 2447 2448
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
2450
        self.outputs = {'Out': out}
2451

2452 2453 2454
    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

2455
    def test_check_grad(self):
2456 2457
        if self.dtype == np.float16:
            return
2458
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)
2459

2460

2461
class TestPow_factor_tensor(TestActivation):
2462

2463 2464
    def setUp(self):
        self.op_type = "pow"
2465 2466
        self.check_eager = False
        self.python_api = paddle.pow
2467 2468
        self.init_dtype()

2469
        np.random.seed(1024)
2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32")
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
2482
        self.check_output(check_eager=self.check_eager)
2483 2484 2485 2486

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
2487
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)
2488 2489 2490

    def test_api(self):
        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
2491 2492 2493 2494 2495 2496 2497 2498
        x = fluid.layers.data(name="x",
                              shape=[11, 17],
                              append_batch_size=False,
                              dtype="float32")
        res = fluid.layers.data(name="res",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="float32")
2499 2500 2501 2502 2503

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = fluid.layers.pow(x, factor=factor_1)
        out_2 = fluid.layers.pow(x, factor=factor_2)
2504 2505 2506
        out_4 = paddle.pow(x, factor_1, name='pow_res')
        out_6 = paddle.pow(x, factor_2)
        self.assertEqual(('pow_res' in out_4.name), True)
2507 2508

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res, res_6 = exe.run(
2510 2511
            fluid.default_main_program(),
            feed={"x": input},
            fetch_list=[out_1, out_2, res, out_6])
2513

2514 2515 2516
        assert np.allclose(res_1, np.power(input, 2))
        assert np.allclose(res_2, np.power(input, 3))
        assert np.allclose(res_6, np.power(input, 3))
2517

2518
    def test_error(self):
2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534
        in1 = fluid.layers.data(name="in1",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="int32")
        in2 = fluid.layers.data(name="in2",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="int64")
        in3 = fluid.layers.data(name="in3",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="float32")
        in4 = fluid.layers.data(name="in4",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="float64")
2535 2536 2537 2538 2539 2540 2541 2542

        factor_1 = fluid.layers.fill_constant([1], "float64", 3.0)

        self.assertRaises(TypeError, fluid.layers.pow, x=in1, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in2, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in3, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in4, factor=factor_1)

2543

def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
    out = scale_b * np.tanh(x * scale_a)
    return out
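

# A hedged sketch (illustrative only): ref_stanh is just a scaled tanh, so it
# passes through the origin and saturates towards +/- scale_b for large inputs.
def _demo_ref_stanh_saturation():
    np.testing.assert_allclose(ref_stanh(np.array([0.0])), [0.0], atol=1e-12)
    np.testing.assert_allclose(ref_stanh(np.array([100.0])), [1.7159], rtol=1e-6)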


class TestSTanh(TestActivation):
2550

2551 2552 2553 2554 2555 2556
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

2557 2558
    def setUp(self):
        self.op_type = "stanh"
2559
        self.init_dtype()
2560 2561
        scale_a = self.get_scale_a()
        scale_b = self.get_scale_b()
2562

2563
        np.random.seed(1024)
2564
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
2565 2566
        # The same reason with TestAbs
        out = ref_stanh(x, scale_a, scale_b)
2567

2568
        self.inputs = {'X': x}
2569
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
2570
        self.outputs = {'Out': out}
2571

    def test_check_grad(self):
2573 2574
        if self.dtype == np.float16:
            return
2575
        self.check_grad(['X'], 'Out')

2577

2578
class TestSTanhScaleA(TestSTanh):
2579

2580 2581 2582 2583 2584
    def get_scale_a(self):
        return 2.0


class TestSTanhScaleB(TestSTanh):
2585

2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614
    def get_scale_b(self):
        return 0.5


class TestSTanhAPI(unittest.TestCase):
    # test paddle.nn.stanh
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.scale_a = self.get_scale_a()
        self.scale_b = self.get_scale_b()
        self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out = paddle.stanh(x, self.scale_a, self.scale_b)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in res:
2615
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2616 2617 2618 2619 2620 2621 2622

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.stanh(x, self.scale_a, self.scale_b)
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in [out]:
2623
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2624 2625 2626 2627 2628 2629 2630 2631 2632 2633
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = fluid.layers.stanh(x, self.scale_a, self.scale_b)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
2634
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
2635

2636
    def test_errors(self):
2637 2638
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
2639
            # The input type must be Variable.
2640
            self.assertRaises(TypeError, paddle.stanh, 1)
2641
            # The input dtype must be float16, float32, float64.
2642 2643 2644
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
2645
            self.assertRaises(TypeError, paddle.stanh, x_int32)
2646
            # support the input dtype is float16
2647 2648 2649
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
2650 2651 2652 2653
            paddle.stanh(x_fp16)


class TestSTanhAPIScaleA(TestSTanhAPI):
2654

2655 2656 2657 2658 2659
    def get_scale_a(self):
        return 2.0


class TestSTanhAPIScaleB(TestSTanhAPI):
2660

2661 2662
    def get_scale_b(self):
        return 0.5
2663 2664


def ref_softplus(x, beta=1, threshold=20):
    x_beta = beta * x
    out = np.select([x_beta <= threshold, x_beta > threshold],
                    [np.log(1 + np.exp(x_beta)) / beta, x])
    return out
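

# A hedged sketch (illustrative only): above the threshold ref_softplus returns
# x unchanged, mirroring the operator's overflow guard; below it, the usual
# log(1 + exp(beta * x)) / beta formula applies.
def _demo_ref_softplus_threshold():
    out = ref_softplus(np.array([30.0, 0.0]), beta=1, threshold=20)
    np.testing.assert_allclose(out, [30.0, np.log(2.0)], rtol=1e-6)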


class TestSoftplus(TestActivation):
2673

    def setUp(self):
        self.op_type = "softplus"
        self.python_api = paddle.nn.functional.softplus
2677 2678
        self.init_dtype()

2679 2680
        beta = 2
        threshold = 15
2681

2682
        np.random.seed(1024)
2683 2684 2685 2686
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': x}
        self.attrs = {'beta': beta, "threshold": threshold}
2687
        self.outputs = {'Out': out}

        self.check_eager = True

    def test_check_grad(self):
2692 2693
        if self.dtype == np.float16:
            return
        check_eager = getattr(self, 'check_eager', False)
        self.check_grad(['X'], 'Out', check_eager=check_eager)

2698

2699 2700 2701
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftplusBF16(OpTest):
2702

2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(np.float32)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.05)


2729 2730 2731 2732 2733
class TestSoftplusAPI(unittest.TestCase):
    # test paddle.nn.Softplus, paddle.nn.functional.softplus
    def setUp(self):
        self.beta = 2
        self.threshold = 15
2734
        np.random.seed(1024)
2735
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
2737 2738 2739
            else paddle.CPUPlace()

    def test_static_api(self):
2740
        paddle.enable_static()
2741
        with paddle.static.program_guard(paddle.static.Program()):
2742
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
2743 2744 2745 2746 2747 2748 2749
            out1 = F.softplus(x, self.beta, self.threshold)
            softplus = paddle.nn.Softplus(self.beta, self.threshold)
            out2 = softplus(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in res:
2750
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2751 2752 2753 2754 2755 2756 2757 2758 2759

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softplus(x, self.beta, self.threshold)
        softplus = paddle.nn.Softplus(self.beta, self.threshold)
        out2 = softplus(x)
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in [out1, out2]:
2760
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2761 2762 2763
        paddle.enable_static()

    def test_fluid_api(self):
2764
        paddle.enable_static()
2765 2766 2767 2768 2769 2770
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.softplus(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_softplus(self.x_np)
2771
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
2772 2773

    def test_errors(self):
2774
        paddle.enable_static()
2775 2776 2777 2778
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softplus, 1)
            # The input dtype must be float16, float32, float64.
2779 2780 2781
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
2782 2783
            self.assertRaises(TypeError, F.softplus, x_int32)
            # support the input dtype is float16
2784 2785 2786
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.softplus(x_fp16)


def ref_softsign(x):
    out = np.divide(x, 1 + np.abs(x))
    return out
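

# A hedged sketch (illustrative only): ref_softsign maps the real line into
# (-1, 1), reaching +/-0.5 at +/-1 and approaching the bounds only in the limit.
def _demo_ref_softsign_bounds():
    x = np.array([-1.0, 0.0, 1.0, 9.0])
    np.testing.assert_allclose(ref_softsign(x), [-0.5, 0.0, 0.5, 0.9], rtol=1e-6)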


class TestSoftsign(TestActivation):
2796

2797 2798
    def setUp(self):
        self.op_type = "softsign"
2799
        self.init_dtype()
2800
        self.python_api = paddle.nn.functional.softsign
2801

2802
        np.random.seed(1024)
2803 2804 2805
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = ref_softsign(x)
        self.inputs = {'X': x}
2806
        self.outputs = {'Out': out}
2807 2808

    def test_check_grad(self):
2809 2810
        if self.dtype == np.float16:
            return
2811
        self.check_grad(['X'], 'Out', check_eager=True)
2812 2813


2814 2815 2816
class TestSoftsignAPI(unittest.TestCase):
    # test paddle.nn.Softsign, paddle.nn.functional.softsign
    def setUp(self):
2817
        np.random.seed(1024)
2818
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
2820 2821 2822
            else paddle.CPUPlace()

    def test_static_api(self):
2823
        paddle.enable_static()
2824
        with paddle.static.program_guard(paddle.static.Program()):
2825
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
2826 2827 2828 2829 2830 2831 2832
            out1 = F.softsign(x)
            softsign = paddle.nn.Softsign()
            out2 = softsign(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softsign(self.x_np)
        for r in res:
2833
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2834 2835 2836 2837 2838 2839 2840 2841 2842

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softsign(x)
        softsign = paddle.nn.Softsign()
        out2 = softsign(x)
        out_ref = ref_softsign(self.x_np)
        for r in [out1, out2]:
2843
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2844 2845 2846
        paddle.enable_static()

    def test_fluid_api(self):
2847
        paddle.enable_static()
2848 2849 2850 2851 2852 2853
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.softsign(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_softsign(self.x_np)
2854
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
2855 2856

    def test_errors(self):
2857
        paddle.enable_static()
2858 2859 2860 2861
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softsign, 1)
            # The input dtype must be float16, float32, float64.
2862 2863 2864
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
2865 2866
            self.assertRaises(TypeError, F.softsign, x_int32)
            # support the input dtype is float16
2867 2868 2869
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
2870 2871 2872
            F.softsign(x_fp16)


def ref_thresholded_relu(x, threshold=1.0):
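    # Reference result: pass x through where x > threshold, output 0 elsewhere.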
    out = (x > threshold) * x
    return out


class TestThresholdedRelu(TestActivation):

    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-20, 20, [10, 12]).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_thresholded_relu(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"threshold": threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedReluAPI(unittest.TestCase):
    # test paddle.nn.ThresholdedReLU, paddle.nn.functional.thresholded_relu
    def setUp(self):
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-20, 20, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.thresholded_relu(x, self.threshold)
            thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
            out2 = thresholded_relu(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.thresholded_relu(x, self.threshold)
        thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
        out2 = thresholded_relu(x)
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.thresholded_relu(x, self.threshold)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.thresholded_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.thresholded_relu, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.thresholded_relu(x_fp16)


def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
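    # Reference result: clip(slope * x + offset, 0, 1), cast back to the input dtype.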
    return np.maximum(np.minimum(x * slope + offset, 1.), 0.).astype(x.dtype)


class TestHardSigmoid(TestActivation):

    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.dtype = 'float64'
        self.slope = 0.166666666666667
        self.offset = 0.5
        self.set_attrs()

        x = np.random.uniform(-5, 5, [10, 12]).astype(self.dtype)
        lower_threshold = -self.offset / self.slope
        upper_threshold = (1. - self.offset) / self.slope

        # Same reason as TestAbs: keep inputs away from the two kink points,
        # where the numeric gradient used by the gradient check is inaccurate.
        delta = 0.005
        x[np.abs(x - lower_threshold) < delta] = lower_threshold - 0.02
        x[np.abs(x - upper_threshold) < delta] = upper_threshold - 0.02

        out = ref_hardsigmoid(x, self.slope, self.offset)

        self.attrs = {'slope': self.slope, 'offset': self.offset}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def set_attrs(self):
        pass


class TestHardSigmoidFP32(TestHardSigmoid):

    def set_attrs(self):
        self.dtype = 'float32'


class TestHardSigmoidSlopeOffset(TestHardSigmoid):

    def set_attrs(self):
        self.slope = 0.2
        self.offset = 0.4


class TestHardsigmoidAPI(unittest.TestCase):
    # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardsigmoid(x)
            m = paddle.nn.Hardsigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardsigmoid(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardsigmoid(x)
        m = paddle.nn.Hardsigmoid()
        out2 = m(x)
        out_ref = ref_hardsigmoid(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.hard_sigmoid(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.fluid.layers.hard_sigmoid(x)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardsigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.hardsigmoid, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.hardsigmoid(x_fp16)


def ref_swish(x):
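    # Reference result: swish(x) = x * sigmoid(x); expit is scipy's sigmoid.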
    out = x * expit(x)
    return out


class TestSwish(TestActivation):

    def setUp(self):
        self.op_type = "swish"
        self.python_api = paddle.nn.functional.swish
        self.init_dtype()
        self.check_eager = True

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = ref_swish(x)
        self.inputs = {'X': x}
        self.attrs = {'beta': 1.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)


class TestSwishAPI(unittest.TestCase):
    # test paddle.nn.Swish, paddle.nn.functional.swish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.swish(x)
            swish = paddle.nn.Swish()
            out2 = swish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_swish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def func_test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.swish(x)
        swish = paddle.nn.Swish()
        out2 = swish(x)
        out_ref = ref_swish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_dygraph_api(self):
        with _test_eager_guard():
            self.func_test_dygraph_api()
        self.func_test_dygraph_api()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.swish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_swish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.swish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.swish, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.swish(x_fp16)


def ref_mish(x, threshold=20.):
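    # Reference result: mish(x) = x * tanh(softplus(x)); softplus falls back to
    # x itself when x > threshold so the exponential does not overflow.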
    softplus = np.select([x <= threshold, x > threshold],
                         [np.log(1 + np.exp(x)), x])
    return x * np.tanh(softplus)


class TestMish(TestActivation):

    def setUp(self):
        self.op_type = "mish"
        self.python_api = paddle.fluid.layers.nn.mish
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = ref_mish(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestMishAPI(unittest.TestCase):
    # test paddle.nn.Mish, paddle.nn.functional.mish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.mish(x)
            mish = paddle.nn.Mish()
            out2 = mish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_mish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.mish(x)
        mish = paddle.nn.Mish()
        out2 = mish(x)
        out_ref = ref_mish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.mish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_mish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.mish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.mish, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.mish(x_fp16)


#------------------ Test Error Activation----------------------
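# For each op name registered below, generate a TestCase checking that the
# corresponding fluid layer rejects int32/int64 inputs with a TypeError.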
def create_test_error_class(op_type):

    class TestOpErrors(unittest.TestCase):

        def test_errors(self):
            with program_guard(Program(), Program()):
                op = getattr(fluid.layers, op_type)
                # The input dtype of op_type must be float32, float64.
                in1 = fluid.layers.data(name='input2',
                                        shape=[12, 10],
                                        dtype="int32")
                in2 = fluid.layers.data(name='input3',
                                        shape=[12, 10],
                                        dtype="int64")
                self.assertRaises(TypeError, op, in1)
                self.assertRaises(TypeError, op, in2)

    cls_name = "{0}_{1}".format(op_type, "test_errors")
    TestOpErrors.__name__ = cls_name
    globals()[cls_name] = TestOpErrors


create_test_error_class('acos')
create_test_error_class('asin')
create_test_error_class('atan')
create_test_error_class('ceil')
create_test_error_class('cos')
create_test_error_class('floor')
create_test_error_class('reciprocal')
create_test_error_class('round')
create_test_error_class('rsqrt')
create_test_error_class('sin')
create_test_error_class('sqrt')
create_test_error_class('tanh')
create_test_error_class('tan')
create_test_error_class('acosh')
create_test_error_class('asinh')
create_test_error_class('atanh')


#------------------ Test Cudnn Activation----------------------
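# Generate a subclass of an activation test that runs the op with use_cudnn=True.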
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActCudnn(parent):

        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


#------------------ Test Fp16 ----------------------
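# Generate a float16 subclass of an activation test; the checks run only when
# the CUDA place reports float16 support.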
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):

    @unittest.skipIf(not paddle.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):

        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(place, ['X'],
                                           'Out',
                                           max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


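# Register a float16 variant for each activation test below.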
create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestExpm1)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestSilu)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhshrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftshrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestTan, grad_atol=0.85)
create_test_act_fp16_class(TestCosh, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestSinh)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestAcosh, grad_atol=0.85)
create_test_act_fp16_class(TestAsinh, grad_atol=0.85)
create_test_act_fp16_class(TestAtanh, grad_atol=0.85)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu, grad_atol=0.85)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestCELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
if core.is_compiled_with_rocm():
    create_test_act_fp16_class(TestLog2, atol=5e-2, grad_atol=0.85)
else:
    create_test_act_fp16_class(TestLog2, atol=5e-2)
create_test_act_fp16_class(TestLog10, atol=5e-2)
create_test_act_fp16_class(TestLog1p, grad_atol=0.9)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish, grad_atol=0.85)
create_test_act_fp16_class(TestHardSwish)
create_test_act_fp16_class(TestMish, grad_atol=0.9)


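#------------------ Test Bf16 ----------------------
# Generate a bfloat16 (stored as uint16) subclass of an activation test that
# runs on CUDA.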
def create_test_act_bf16_class(parent,
                               atol=1e-2,
                               grad_check=True,
                               grad_atol=0.80):

    @unittest.skipIf(not paddle.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActBF16(parent):

        def init_dtype(self):
            self.dtype = np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            self.check_grad_with_place(place, ['X'],
                                       'Out',
                                       max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "bf16")
    TestActBF16.__name__ = cls_name
    globals()[cls_name] = TestActBF16


create_test_act_bf16_class(TestRelu)

if __name__ == "__main__":
    unittest.main()