#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from op_test import OpTest, convert_float_to_uint16
from scipy.special import erf, expit

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard

paddle.enable_static()


class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
            self.assertRaises(TypeError, paddle.sqrt, in1)
            # The input dtype of sqrt op must be float16, float32, float64.
            in2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, paddle.sqrt, in2)

            in3 = fluid.layers.data(
                name='input3', shape=[12, 10], dtype="float16"
            )
            paddle.sqrt(x=in3)


class TestActivation(OpTest):
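    """Base class shared by the activation op tests; it exercises the exp op.

    Subclasses reuse test_check_output/test_check_grad and customize the op
    under test by overriding setUp together with init_dtype, init_shape and
    init_kernel_type.
    """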
    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_shape()
        self.init_kernel_type()
        self.check_eager = True
        self.python_api = paddle.exp

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
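        # Ops opt in to the eager-mode check by setting self.check_eager in setUp.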
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_output(check_eager=check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)

    def init_dtype(self):
        self.dtype = np.float64

    def init_shape(self):
        self.shape = [11, 17]

    def init_kernel_type(self):
        pass


class TestActivation_ZeroDim(TestActivation):
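    # Same checks as TestActivation, but on a 0-D (scalar) tensor.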
    def init_shape(self):
        self.shape = []


class TestExpm1(TestActivation):
    def setUp(self):
        self.op_type = "expm1"
        self.python_api = paddle.expm1
        self.init_dtype()
        self.init_shape()

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.expm1(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestExpm1_ZeroDim(TestExpm1):
    def init_shape(self):
        self.shape = []


class TestExpm1API(unittest.TestCase):
    def init_dtype(self):
        self.dtype = 'float64'
        self.shape = [11, 17]

    def setUp(self):
        self.init_dtype()
        self.x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        self.out_ref = np.expm1(self.x)

        self.place = [paddle.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def test_static_api(self):
        paddle.enable_static()

        def run(place):
            with paddle.static.program_guard(paddle.static.Program()):
                X = paddle.fluid.data('X', self.shape, dtype=self.dtype)
                out = paddle.expm1(X)
                exe = paddle.static.Executor(place)
                res = exe.run(feed={'X': self.x}, fetch_list=[out])
            for r in res:
                np.testing.assert_allclose(self.out_ref, r, rtol=1e-05)

        for place in self.place:
            run(place)

    def test_dygraph_api(self):
        def run(place):
            paddle.disable_static(place)
            X = paddle.to_tensor(self.x)
            out = paddle.expm1(X)
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
            paddle.enable_static()

        for place in self.place:
            run(place)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            X = paddle.fluid.data('X', self.shape, dtype='int32')
            self.assertRaises(TypeError, paddle.expm1, X)
        # The input dtype must be float16, float32, float64.


class TestParameter:
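    # Mixin with extra name/dygraph checks; it resolves paddle.<op_type> and
    # np.<op_type> via eval, so it only suits ops whose name matches a NumPy
    # function (sqrt, tanh, sin, ...).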
    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1])
            data = fluid.layers.data(name="X", shape=[1])
            out = eval("paddle.%s(data, name='Y')" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
            expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(result, expected, rtol=1e-05)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestSigmoid_ZeroDim(TestSigmoid):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSigmoidBF16(OpTest):
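    # bfloat16 variant: inputs and expected outputs are converted with
    # convert_float_to_uint16, since bf16 data is carried in uint16 arrays here.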
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out')


'''
class TestSigmoidBF16_ZeroDim(TestSigmoidBF16):

    def init_shape(self):
        self.shape = []
'''


class TestSilu(TestActivation):
    def setUp(self):
        self.op_type = "silu"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = x / (np.exp(-x) + 1)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSilu_ZeroDim(TestSilu):
    def init_shape(self):
        self.shape = []


class TestSiluAPI(unittest.TestCase):
    # test paddle.nn.Silu, paddle.nn.functional.silu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.silu(x)
            m = paddle.nn.Silu()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.silu(x)
        m = paddle.nn.Silu()
        out2 = m(x)
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.silu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
            self.assertRaises(TypeError, F.silu, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
            F.silu(x_fp16)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestLogSigmoid_ZeroDim(TestLogSigmoid):
    def init_shape(self):
        self.shape = []


class TestLogSigmoidAPI(unittest.TestCase):
    # test paddle.nn.LogSigmoid, paddle.nn.functional.log_sigmoid
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.log_sigmoid(x)
            m = paddle.nn.LogSigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.log_sigmoid(x)
        m = paddle.nn.LogSigmoid()
        out2 = m(x)
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.log_sigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
            self.assertRaises(TypeError, F.log_sigmoid, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
            F.log_sigmoid(x_fp16)


class TestTanh(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        # TODO If dtype is float64, the output (Out) has diff at CPUPlace
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32


class TestTanh_ZeroDim(TestTanh):
    def init_shape(self):
        self.shape = []


class TestTanhAPI(unittest.TestCase):
    # test paddle.tanh, paddle.nn.tanh, paddle.nn.functional.tanh
    def setUp(self):
        self.dtype = 'float32'
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.tanh = F.tanh

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12], self.dtype)
            out1 = self.tanh(x)
            th = paddle.nn.Tanh()
            out2 = th(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.tanh(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanh(x)
        out2 = paddle.tanh(x)
        th = paddle.nn.Tanh()
        out3 = th(x)
        out_ref = np.tanh(self.x_np)
        for r in [out1, out2, out3]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.tanh, 1)
            # The input dtype must be float16, float32.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, self.tanh, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            self.tanh(x_fp16)


class TestTanhInplaceAPI(TestTanhAPI):
    # test paddle.tanh_
    def executed_api(self):
        self.tanh = paddle.tanh_


class TestAtan(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1])
            data = fluid.layers.data(name="X", shape=[1])
            out = paddle.atan(data, name='Y')
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
            expected = np.arctan(np_x)
            self.assertEqual(result, expected)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)


class TestAtan_ZeroDim(TestAtan):
    def init_shape(self):
        self.shape = []


class TestSinh(TestActivation):
    def setUp(self):
        self.op_type = "sinh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSinh_ZeroDim(TestSinh):
    def init_shape(self):
        self.shape = []


class TestSinhAPI(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.sinh(x).numpy()
            z_expected = np.sinh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            data_x = fluid.layers.data(
                name="data_x",
                shape=test_data_shape,
                append_batch_size=False,
                dtype="float32",
            )

            pd_sinh_out = paddle.sinh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            (np_sinh_res,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[pd_sinh_out],
            )

        expected_res = np.sinh(input_x)
        np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = paddle.sinh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, paddle.sinh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, paddle.sinh, x_int32)
            # the float16 input dtype is supported
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            paddle.sinh(x_fp16)


class TestCosh(TestActivation):
    def setUp(self):
        self.op_type = "cosh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.cosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCosh_ZeroDim(TestCosh):
    def init_shape(self):
        self.shape = []


class TestCoshAPI(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.cosh(x).numpy()
            z_expected = np.cosh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            data_x = fluid.layers.data(
                name="data_x",
                shape=test_data_shape,
                append_batch_size=False,
                dtype="float32",
            )

            pd_cosh_out = paddle.cosh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            (np_cosh_res,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[pd_cosh_out],
            )

        expected_res = np.cosh(input_x)
        np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = paddle.cosh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, paddle.cosh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, paddle.cosh, x_int32)
            # the float16 input dtype is supported
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            paddle.cosh(x_fp16)


def ref_tanhshrink(x):
    out = x - np.tanh(x)
    return out


class TestTanhshrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(10, 20, self.shape).astype(self.dtype)
        out = ref_tanhshrink(x)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTanhshrink_ZeroDim(TestTanhshrink):
    def init_shape(self):
        self.shape = []


class TestTanhshrinkAPI(unittest.TestCase):
    # test paddle.nn.Tanhshrink, paddle.nn.functional.tanhshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.tanhshrink(x)
            tanhshrink = paddle.nn.Tanhshrink()
            out2 = tanhshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_tanhshrink(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanhshrink(x)
        tanhshrink = paddle.nn.Tanhshrink()
        out2 = tanhshrink(x)
        out_ref = ref_tanhshrink(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.tanhshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.tanhshrink, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.tanhshrink(x_fp16)


def ref_hardshrink(x, threshold):
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()
        self.init_shape()

        self.threshold = 0.5
        self.set_attrs()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) * 10
        out = ref_hardshrink(x, self.threshold)

        self.attrs = {'threshold': self.threshold}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink_threshold_negative(TestHardShrink):
    def set_attrs(self):
        self.threshold = -0.1


'''
class TestHardShrink_ZeroDim(TestHardShrink):

    def init_shape(self):
        self.shape = []
'''


class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.hardshrink(x)
            hd = paddle.nn.Hardshrink()
            out2 = hd(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardshrink(x)
        hd = paddle.nn.Hardshrink()
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardshrink(x, 0.6)
        hd = paddle.nn.Hardshrink(0.6)
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardshrink, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardshrink(x_fp16)


def ref_hardtanh(x, min=-1.0, max=1.0):
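    # NumPy reference for hardtanh: the final assignment clips x into
    # [min, max] and overrides the two in-place edits that precede it.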
    out = np.copy(x)
    out[np.abs(x - min) < 0.005] = min + 0.02
    out[np.abs(x - max) < 0.005] = max + 0.02
    out = np.minimum(np.maximum(x, min), max)
    return out


class TestHardtanhAPI(unittest.TestCase):
    # test paddle.nn.Hardtanh, paddle.nn.functional.hardtanh
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.hardtanh(x)
            m = paddle.nn.Hardtanh()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardtanh(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardtanh(x)
        m = paddle.nn.Hardtanh()
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardtanh(x, -2.0, 2.0)
        m = paddle.nn.Hardtanh(-2.0, 2.0)
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardtanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardtanh, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardtanh(x_fp16)


def ref_softshrink(x, threshold=0.5):
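    # softshrink reference: values outside [-threshold, threshold] are shifted
    # toward zero by threshold; everything inside the band becomes 0.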
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
        out - threshold
    )
    return out


class TestSoftshrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.check_eager = True
        self.python_api = paddle.nn.functional.softshrink
        self.init_dtype()
        self.init_shape()

        threshold = 0.8

        np.random.seed(1023)
        x = np.random.uniform(0.25, 10, self.shape).astype(self.dtype)
        out = ref_softshrink(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"lambda": threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSoftshrink_ZeroDim(TestSoftshrink):
    def init_shape(self):
        self.shape = []


class TestSoftshrinkAPI(unittest.TestCase):
    # test paddle.nn.Softshrink, paddle.nn.functional.softshrink
    def setUp(self):
        self.threshold = 0.8
        np.random.seed(1024)
        self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softshrink(x, self.threshold)
            softshrink = paddle.nn.Softshrink(self.threshold)
            out2 = softshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softshrink(x, self.threshold)
        softshrink = paddle.nn.Softshrink(self.threshold)
        out2 = softshrink(x)
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.softshrink, x_int32)
            # The threshold must be no less than zero
            x_fp32 = paddle.fluid.data(
                name='x_fp32', shape=[12, 10], dtype='float32'
            )
            self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.softshrink(x_fp16)


class TestSqrt(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestSqrt_ZeroDim(TestSqrt):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSqrtBF16(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
        self.python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(np.float32)
        out = np.sqrt(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.python_api = paddle.rsqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) * 10
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.0005, check_eager=True
        )


'''
class TestRsqrt_ZeroDim(TestRsqrt):

    def init_shape(self):
        self.shape = []
'''


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # Because we set delta = 0.005 in calculating numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [4, 25]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=False)


class TestAbs_ZeroDim(TestAbs):
    def init_shape(self):
        self.shape = []


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.check_eager = True
        self.python_api = paddle.ceil
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    # The same reason as in TestFloor
    def test_check_grad(self):
        pass


class TestCeil_ZeroDim(TestCeil):
    def init_shape(self):
        self.shape = []


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.check_eager = True
        self.python_api = paddle.floor
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    # the gradient on floor, ceil, round is undefined.
    # we return zero as the gradient, but numpy returns nan
    # so the gradient check is skipped here as well
    def test_check_grad(self):
        pass


class TestFloor_ZeroDim(TestFloor):
    def init_shape(self):
        self.shape = []


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCos_ZeroDim(TestCos):
    def init_shape(self):
        self.shape = []


class TestTan(TestActivation):
    def setUp(self):
        np.random.seed(1024)
        self.op_type = "tan"
        self.init_dtype()
        self.init_shape()

        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

        out = np.tan(self.x_np)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x_np)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTan_ZeroDim(TestTan):
    def init_shape(self):
        self.shape = []


class TestTanAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(1024)
        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out_test = paddle.tan(x)
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', [11, 17], self.dtype)
            out = paddle.tan(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = paddle.to_tensor(input_x)
            var.stop_gradient = False
            loss = paddle.tan(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos_ZeroDim(TestAcos):
    def init_shape(self):
        self.shape = []


class TestSin(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSin_ZeroDim(TestSin):
    def init_shape(self):
        self.shape = []


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin_ZeroDim(TestAsin):
    def init_shape(self):
        self.shape = []


class TestAcosh(TestActivation):
    def setUp(self):
        self.op_type = "acosh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(2, 3, self.shape).astype(self.dtype)
        out = np.arccosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcosh_ZeroDim(TestAcosh):
    def init_shape(self):
        self.shape = []


class TestAsinh(TestActivation):
    def setUp(self):
        self.op_type = "asinh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.arcsinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsinh_ZeroDim(TestAsinh):
    def init_shape(self):
        self.shape = []


class TestAtanh(TestActivation):
    def setUp(self):
        self.op_type = "atanh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(400)
        x = np.random.uniform(-0.9, 0.9, self.shape).astype(self.dtype)
        out = np.arctanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAtanh_ZeroDim(TestAtanh):
    def init_shape(self):
        self.shape = []


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.check_eager = True
        self.python_api = paddle.round
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        pass


class TestRound_ZeroDim(TestRound):
    def init_shape(self):
        self.shape = []


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        if self.dtype == np.uint16:
            x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
            # The same reason with TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = convert_float_to_uint16(np.maximum(x, 0))
            self.inputs = {'X': convert_float_to_uint16(x)}
        else:
            x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            # The same reason with TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = np.maximum(x, 0)
            self.inputs = {'X': x}

        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRelu_ZeroDim(TestRelu):
    def init_shape(self):
        self.shape = []


class TestReluAPI(unittest.TestCase):
    # test paddle.nn.ReLU, paddle.nn.functional.relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
1584
            else paddle.CPUPlace()
1585
        )
1586 1587 1588 1589
        self.executed_api()

    def executed_api(self):
        self.relu = F.relu
1590 1591

    def test_static_api(self):
1592
        paddle.enable_static()
1593
        with paddle.static.program_guard(paddle.static.Program()):
1594
            x = paddle.fluid.data('X', [10, 12])
1595
            out1 = self.relu(x)
1596 1597 1598 1599 1600 1601
            m = paddle.nn.ReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.maximum(self.x_np, 0)
        for r in res:
1602
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
1603 1604 1605 1606 1607

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ReLU()
1608 1609
        out1 = m(x)
        out2 = self.relu(x)
1610 1611
        out_ref = np.maximum(self.x_np, 0)
        for r in [out1, out2]:
1612
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1613 1614
        paddle.enable_static()

1615
    def test_errors(self):
1616
        paddle.enable_static()
1617
        with paddle.static.program_guard(paddle.static.Program()):
1618
            # The input type must be Variable.
1619
            self.assertRaises(TypeError, self.relu, 1)
1620
            # The input dtype must be float16, float32, float64.
1621 1622 1623
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
1624
            self.assertRaises(TypeError, self.relu, x_int32)
1625
            # support the input dtype is float16
1626 1627 1628
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
1629 1630 1631 1632 1633 1634 1635
            self.relu(x_fp16)


class TestReluInplaceAPI(TestReluAPI):
    # test paddle.nn.functional.relu_
    def executed_api(self):
        self.relu = F.relu_
1636 1637


1638 1639 1640 1641 1642 1643
def ref_leaky_relu(x, alpha=0.01):
    out = np.copy(x)
    out[out < 0] *= alpha
    return out
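
# NOTE: illustrative reference only. leaky_relu keeps positive inputs
# unchanged and scales negatives by `alpha`:
#   leaky_relu(x) = x if x >= 0 else alpha * x
# e.g. ref_leaky_relu(np.array([-2.0, 3.0]), alpha=0.1) -> [-0.2, 3.0]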


class TestLeakyRelu(TestActivation):
    def get_alpha(self):
        return 0.02

    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()
        self.init_shape()
        alpha = self.get_alpha()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.05
        out = ref_leaky_relu(x, alpha)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {'alpha': alpha}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLeakyReluAlpha1(TestLeakyRelu):
    def get_alpha(self):
        return 2


class TestLeakyReluAlpha2(TestLeakyRelu):
    def get_alpha(self):
        return -0.01


class TestLeakyReluAlpha3(TestLeakyRelu):
    def get_alpha(self):
        return -2.0


class TestLeakyRelu_ZeroDim(TestLeakyRelu):
    def init_shape(self):
        self.shape = []


class TestLeakyReluAPI(unittest.TestCase):
    # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu,
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.leaky_relu(x)
            m = paddle.nn.LeakyReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_leaky_relu(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.leaky_relu(x)
        m = paddle.nn.LeakyReLU()
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.leaky_relu(x, 0.6)
        m = paddle.nn.LeakyReLU(0.6)
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.leaky_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.leaky_relu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.leaky_relu(x_fp16)


def gelu(x, approximate):
    if approximate:
        y_ref = (
            0.5
            * x
            * (
                1.0
                + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))
            )
        )
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)
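
# NOTE: reference formulas only. Exact GELU is
#   gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
# and approximate=True selects the tanh approximation
#   0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3))),
# matching the 'approximate' attribute checked by the tests below.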


class TestGeluApproximate(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        self.init_shape()
        approximate = True
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        self.init_shape()
        approximate = False
        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu_ZeroDim(TestGelu):
    def init_shape(self):
        self.shape = []


class TestGELUAPI(unittest.TestCase):
    # test paddle.nn.GELU, paddle.nn.functional.gelu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.gelu(x)
            m = paddle.nn.GELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = gelu(self.x_np, False)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.gelu(x)
        m = paddle.nn.GELU()
        out2 = m(x)
        out_ref = gelu(self.x_np, False)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.gelu(x, True)
        m = paddle.nn.GELU(True)
        out2 = m(x)
        out_ref = gelu(self.x_np, True)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.gelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
            self.assertRaises(TypeError, F.gelu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
            F.gelu(x_fp16)


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same with TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


def ref_relu6(x, threshold=6.0):
    out = np.copy(x)
    out[np.abs(x - threshold) < 0.005] = threshold + 0.02
    out = np.minimum(np.maximum(x, 0), threshold)
    return out
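
# NOTE: illustrative only. relu6 clips ReLU at the threshold (6 by default):
#   relu6(x) = min(max(x, 0), threshold)
# e.g. ref_relu6(np.array([-1.0, 3.0, 8.0])) -> [0.0, 3.0, 6.0]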


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.relu6

        np.random.seed(1024)
        x = np.random.uniform(-1, 10, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_relu6(x)

        self.inputs = {'X': x}
        self.attrs = {'threshold': 6.0}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestRelu6_ZeroDim(TestRelu6):
    def init_shape(self):
        self.shape = []


class TestRelu6API(unittest.TestCase):
    # test paddle.nn.ReLU6, paddle.nn.functional.relu6
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.relu6(x)
            relu6 = paddle.nn.ReLU6()
            out2 = relu6(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_relu6(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.relu6(x)
        relu6 = paddle.nn.ReLU6()
        out2 = relu6(x)
        out_ref = ref_relu6(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.relu6(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_relu6(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.relu6, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.relu6, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.relu6(x_fp16)


def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
    x_dtype = x.dtype
    if x_dtype == 'float16':
        x_dtype = 'float16'
        x = x.astype('float32')
    return (
        x * np.minimum(np.maximum(x + offset, 0.0), threshold) / scale
    ).astype(x_dtype)
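
# NOTE: illustrative only. hard_swish is a piecewise-linear approximation of
# swish: hard_swish(x) = x * min(max(x + offset, 0), threshold) / scale,
# which with the defaults (threshold=6, scale=6, offset=3) equals
#   x * relu6(x + 3) / 6.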


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.hardswish

        np.random.seed(1024)
        x = np.random.uniform(-6, 6, self.shape).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # the same with TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = ref_hardswish(x, threshold, scale, offset)

        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestHardSwish_ZeroDim(TestHardSwish):
    def init_shape(self):
        self.shape = []


class TestHardswishAPI(unittest.TestCase):
    # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardswish(x)
            m = paddle.nn.Hardswish()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardswish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor([11648.0, 11448.0])
        out1 = F.hardswish(x)
        m = paddle.nn.Hardswish()
        out2 = m(x)
        out_ref = [11648.0, 11448.0]
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.hard_swish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardswish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.fluid.layers.hard_swish(x)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardswish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardswish, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardswish(x_fp16)

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_dygraph_api()
            self.test_errors()


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        np.random.seed(4096)
        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason with TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


def elu(x, alpha):
    out_ref = np.where(x > 0, x, alpha * (np.exp(x) - 1))
    return out_ref.astype(x.dtype)
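
# NOTE: illustrative only. ELU is the identity for x > 0 and
#   alpha * (exp(x) - 1) for x <= 0,
# so for alpha = 1 it saturates towards -1 for very negative inputs,
# e.g. elu(np.array([-10.0]), 1.0) ~= [-1.0].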


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = self.get_alpha()
        out = elu(x, alpha)
        # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def get_alpha(self):
        return 1.0


class TestELUAlpha(TestELU):
    def get_alpha(self):
        return -0.2


class TestELU_ZeroDim(TestELU):
    def init_shape(self):
        self.shape = []


class TestELUAPI(unittest.TestCase):
    # test paddle.nn.ELU, paddle.nn.functional.elu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.elu = F.elu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.elu(x)
            m = paddle.nn.ELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = elu(self.x_np, 1.0)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.elu(x)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU()
        out2 = m(x)
        out_ref = elu(self.x_np, 1.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.elu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU(0.2)
        out2 = m(x)
        out_ref = elu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.elu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
            self.assertRaises(TypeError, self.elu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
            self.elu(x_fp16)


class TestELUInplaceAPI(TestELUAPI):
    # test paddle.nn.functional.elu_
    def executed_api(self):
        self.elu = F.elu_

    def test_alpha_error(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        self.assertRaises(Exception, F.elu_, x, -0.2)
        paddle.enable_static()


def celu(x, alpha):
    out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x / alpha) - 1))
    return out_ref.astype(x.dtype)
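
# NOTE: illustrative only. CELU generalizes ELU by also scaling the input of
# the exponential on the negative branch:
#   celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1)),
# which is why alpha == 0 is rejected (division by zero) in the API tests.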


class TestCELU(TestActivation):
    def setUp(self):
        self.op_type = "celu"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.celu
        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = 1.5
        out = celu(x, alpha)
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestCELU_ZeroDim(TestCELU):
    def init_shape(self):
        self.shape = []


class TestCELUAPI(unittest.TestCase):
    # test paddle.nn.CELU, paddle.nn.functional.celu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.celu = F.celu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.celu(x, 1.5)
            m = paddle.nn.CELU(1.5)
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = celu(self.x_np, 1.5)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.celu(x, 1.5)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(1.5)
        out2 = m(x)
        out_ref = celu(self.x_np, 1.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.celu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(0.2)
        out2 = m(x)
        out_ref = celu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.celu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
            self.assertRaises(TypeError, self.celu, x_int32)
            # The alpha must be not equal 0
            x_fp32 = paddle.fluid.data(
                name='x_fp32', shape=[10, 12], dtype='float32'
            )
            self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
            self.celu(x_fp16)

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_dygraph_api()
            self.test_errors()


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.python_api = paddle.reciprocal
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestReciprocal_ZeroDim(TestReciprocal):
    def init_shape(self):
        self.shape = []


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.check_eager = True
        self.python_api = paddle.log
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_error(self):
        in1 = fluid.layers.data(
            name="in1", shape=[11, 17], append_batch_size=False, dtype="int32"
        )
        in2 = fluid.layers.data(
            name="in2", shape=[11, 17], append_batch_size=False, dtype="int64"
        )

        self.assertRaises(TypeError, paddle.log, in1)
        self.assertRaises(TypeError, paddle.log, in2)


class TestLog_ZeroDim(TestLog):
    def init_shape(self):
        self.shape = []


class TestLog2(TestActivation):
    def setUp(self):
        self.op_type = "log2"
        self.check_eager = True
        self.python_api = paddle.log2
        self.init_dtype()
        self.init_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log2(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log2, in1)
        self.assertRaises(TypeError, paddle.log2, in2)

    def test_api(self):
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.static.data(
                name="data_x", shape=[11, 17], dtype="float64"
            )

            out1 = paddle.log2(data_x)
            exe = paddle.static.Executor(place=fluid.CPUPlace())
            exe.run(paddle.static.default_startup_program())
            (res1,) = exe.run(
                paddle.static.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
        expected_res = np.log2(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log2(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log2(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog2_ZeroDim(TestLog2):
    def init_shape(self):
        self.shape = []


class TestLog10(TestActivation):
    def setUp(self):
        self.op_type = "log10"
        self.check_eager = True
        self.python_api = paddle.log10
        self.init_dtype()
        self.init_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log10(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestLog10_ZeroDim(TestLog10):
    def init_shape(self):
        self.shape = []


class TestLog10API(unittest.TestCase):
    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log10, in1)
        self.assertRaises(TypeError, paddle.log10, in2)

    def test_api(self):
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.static.data(
                name="data_x", shape=[11, 17], dtype="float64"
            )

            out1 = paddle.log10(data_x)
            exe = paddle.static.Executor(place=paddle.CPUPlace())
            exe.run(paddle.static.default_startup_program())
            (res1,) = exe.run(
                paddle.static.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
        expected_res = np.log10(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log10(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log10(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog1p(TestActivation):
    def setUp(self):
        self.op_type = "log1p"
        self.check_eager = True
        self.python_api = paddle.log1p
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log1p(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestLog1p_ZeroDim(TestLog1p):
    def init_shape(self):
        self.shape = []


class TestLog1pAPI(unittest.TestCase):
    def test_api(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.layers.data(
                name="data_x",
                shape=[11, 17],
                append_batch_size=False,
                dtype="float64",
            )

            out1 = paddle.log1p(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            (res1,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
        expected_res = np.log1p(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
            z = paddle.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.007, check_eager=True
        )

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestSquare_ZeroDim(TestSquare):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSquareBF16(OpTest):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        out = np.square(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', numeric_grad_delta=0.5, check_eager=True
        )


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.python_api = paddle.pow
        self.check_eager = True
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)


class TestPow_ZeroDim(TestPow):
    def init_shape(self):
        self.shape = []


class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.check_eager = False
        self.python_api = paddle.pow
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32"),
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)

    def test_api(self):
        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = fluid.layers.data(
            name="x", shape=[11, 17], append_batch_size=False, dtype="float32"
        )
        res = fluid.layers.data(
            name="res", shape=[11, 17], append_batch_size=False, dtype="float32"
        )

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = paddle.pow(x, factor_1)
        out_2 = paddle.pow(x, factor_2)
        out_4 = paddle.pow(x, factor_1, name='pow_res')
        out_6 = paddle.pow(x, factor_2)
        self.assertEqual(('pow_res' in out_4.name), True)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res, res_6 = exe.run(
            fluid.default_main_program(),
            feed={"x": input},
            fetch_list=[out_1, out_2, res, out_6],
        )

        assert np.allclose(res_1, np.power(input, 2))
        assert np.allclose(res_2, np.power(input, 3))
        assert np.allclose(res_6, np.power(input, 3))


def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
    out = scale_b * np.tanh(x * scale_a)
    return out
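
# NOTE: illustrative only. stanh is a scaled tanh:
#   stanh(x) = scale_b * tanh(scale_a * x),
# with the defaults scale_a=0.67 and scale_b=1.7159 used above.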


class TestSTanh(TestActivation):
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()
        self.init_shape()

        scale_a = self.get_scale_a()
        scale_b = self.get_scale_b()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        # The same reason with TestAbs
        out = ref_stanh(x, scale_a, scale_b)

        self.inputs = {'X': x}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSTanhScaleA(TestSTanh):
    def get_scale_a(self):
        return 2.0


class TestSTanhScaleB(TestSTanh):
    def get_scale_b(self):
        return 0.5


class TestSTanh_ZeroDim(TestSTanh):
    def init_shape(self):
        self.shape = []


class TestSTanhAPI(unittest.TestCase):
    # test paddle.nn.stanh
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.scale_a = self.get_scale_a()
        self.scale_b = self.get_scale_b()
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out = paddle.stanh(x, self.scale_a, self.scale_b)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.stanh(x, self.scale_a, self.scale_b)
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in [out]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = paddle.stanh(x, self.scale_a, self.scale_b)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, paddle.stanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, paddle.stanh, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            paddle.stanh(x_fp16)


class TestSTanhAPIScaleA(TestSTanhAPI):
    def get_scale_a(self):
        return 2.0


class TestSTanhAPIScaleB(TestSTanhAPI):
    def get_scale_b(self):
        return 0.5


def ref_softplus(x, beta=1, threshold=20):
    x_beta = beta * x
    out = np.select(
        [x_beta <= threshold, x_beta > threshold],
        [np.log(1 + np.exp(x_beta)) / beta, x],
    )
    return out
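
# NOTE: illustrative only. softplus with beta/threshold is
#   softplus(x) = log(1 + exp(beta * x)) / beta   if beta * x <= threshold
#   softplus(x) = x                               otherwise,
# where the linear branch avoids overflow of exp() for large inputs.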


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.python_api = paddle.nn.functional.softplus
        self.init_dtype()
        self.init_shape()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': x}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': out}

        self.check_eager = True

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)


class TestSoftplus_ZeroDim(TestSoftplus):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSoftplusBF16(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(np.float32)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.05)


class TestSoftplusAPI(unittest.TestCase):
    # test paddle.nn.Softplus, paddle.nn.functional.softplus
    def setUp(self):
        self.beta = 2
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softplus(x, self.beta, self.threshold)
            softplus = paddle.nn.Softplus(self.beta, self.threshold)
            out2 = softplus(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softplus(x, self.beta, self.threshold)
        softplus = paddle.nn.Softplus(self.beta, self.threshold)
        out2 = softplus(x)
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softplus, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.softplus, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.softplus(x_fp16)


def ref_softsign(x):
    out = np.divide(x, 1 + np.abs(x))
    return out
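
# NOTE: illustrative only. softsign squashes inputs smoothly into (-1, 1):
#   softsign(x) = x / (1 + |x|)
# e.g. ref_softsign(np.array([-3.0, 1.0])) -> [-0.75, 0.5]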


C
chengduo 已提交
3018
class TestSoftsign(TestActivation):
3019 3020
    def setUp(self):
        self.op_type = "softsign"
3021
        self.init_dtype()
3022 3023
        self.init_shape()

3024
        self.python_api = paddle.nn.functional.softsign
3025

3026
        np.random.seed(1024)
3027
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
3028 3029
        out = ref_softsign(x)
        self.inputs = {'X': x}
3030
        self.outputs = {'Out': out}
3031

3032 3033 3034
    def init_shape(self):
        self.shape = [10, 12]

3035
    def test_check_grad(self):
3036 3037
        if self.dtype == np.float16:
            return
3038
        self.check_grad(['X'], 'Out', check_eager=True)
3039 3040


3041 3042 3043 3044 3045
class TestSoftsign_ZeroDim(TestSoftsign):
    def init_shape(self):
        self.shape = []


3046 3047 3048
class TestSoftsignAPI(unittest.TestCase):
    # test paddle.nn.Softsign, paddle.nn.functional.softsign
    def setUp(self):
3049
        np.random.seed(1024)
3050
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
3051 3052 3053
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
3054
            else paddle.CPUPlace()
3055
        )
3056 3057

    def test_static_api(self):
3058
        paddle.enable_static()
3059
        with paddle.static.program_guard(paddle.static.Program()):
3060
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
3061 3062 3063 3064 3065 3066 3067
            out1 = F.softsign(x)
            softsign = paddle.nn.Softsign()
            out2 = softsign(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softsign(self.x_np)
        for r in res:
3068
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
3069 3070 3071 3072 3073 3074 3075 3076 3077

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softsign(x)
        softsign = paddle.nn.Softsign()
        out2 = softsign(x)
        out_ref = ref_softsign(self.x_np)
        for r in [out1, out2]:
3078
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
3079 3080 3081
        paddle.enable_static()

    def test_errors(self):
3082
        paddle.enable_static()
3083 3084 3085 3086
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softsign, 1)
            # The input dtype must be float16, float32, float64.
3087 3088 3089
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
3090 3091
            self.assertRaises(TypeError, F.softsign, x_int32)
            # support the input dtype is float16
3092 3093 3094
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
3095 3096 3097
            F.softsign(x_fp16)


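# NumPy reference: thresholded_relu keeps x where x > threshold and is 0 elsewhere.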
def ref_thresholded_relu(x, threshold=1.0):
    out = (x > threshold) * x
    return out


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()
        self.init_shape()

        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-20, 20, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_thresholded_relu(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"threshold": threshold}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu_ZeroDim(TestThresholdedRelu):
    def init_shape(self):
        self.shape = []


class TestThresholdedReluAPI(unittest.TestCase):
    # test paddle.nn.ThresholdedReLU, paddle.nn.functional.thresholded_relu
    def setUp(self):
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-20, 20, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.thresholded_relu(x, self.threshold)
            thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
            out2 = thresholded_relu(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.thresholded_relu(x, self.threshold)
        thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
        out2 = thresholded_relu(x)
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.thresholded_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.thresholded_relu, x_int32)
            # support the float16 input dtype
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.thresholded_relu(x_fp16)


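# NumPy reference: hard_sigmoid(x) = clip(slope * x + offset, 0, 1), cast to x's dtype.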
def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
    return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.dtype = 'float64'
        self.slope = 0.166666666666667
        self.offset = 0.5
        self.set_attrs()
        self.init_shape()

        x = np.random.uniform(-5, 5, self.shape).astype(self.dtype)
        lower_threshold = -self.offset / self.slope
        upper_threshold = (1.0 - self.offset) / self.slope

        # Same reason as TestAbs: keep x away from the two kink points,
        # where the gradient is discontinuous.
        delta = 0.005
        x[np.abs(x - lower_threshold) < delta] = lower_threshold - 0.02
        x[np.abs(x - upper_threshold) < delta] = upper_threshold - 0.02

        out = ref_hardsigmoid(x, self.slope, self.offset)

        self.attrs = {'slope': self.slope, 'offset': self.offset}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass


class TestHardSigmoidFP32(TestHardSigmoid):
    def set_attrs(self):
        self.dtype = 'float32'


class TestHardSigmoidSlopeOffset(TestHardSigmoid):
    def set_attrs(self):
        self.slope = 0.2
        self.offset = 0.4


class TestHardSigmoid_ZeroDim(TestHardSigmoid):
    def init_shape(self):
        self.shape = []


class TestHardsigmoidAPI(unittest.TestCase):
    # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardsigmoid(x)
            m = paddle.nn.Hardsigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardsigmoid(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardsigmoid(x)
        m = paddle.nn.Hardsigmoid()
        out2 = m(x)
        out_ref = ref_hardsigmoid(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardsigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardsigmoid, x_int32)
            # support the float16 input dtype
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardsigmoid(x_fp16)


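# NumPy/SciPy reference: swish(x) = x * sigmoid(x) for beta = 1
# (expit is the logistic sigmoid).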
def ref_swish(x):
    out = x * expit(x)
    return out


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.python_api = paddle.nn.functional.swish
        self.init_dtype()
        self.init_shape()

        self.check_eager = True

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_swish(x)
        self.inputs = {'X': x}
        self.attrs = {'beta': 1.0}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)


class TestSwish_ZeroDim(TestSwish):
    def init_shape(self):
        self.shape = []


class TestSwishAPI(unittest.TestCase):
    # test paddle.nn.Swish, paddle.nn.functional.swish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.swish(x)
            swish = paddle.nn.Swish()
            out2 = swish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_swish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def func_test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.swish(x)
        swish = paddle.nn.Swish()
        out2 = swish(x)
        out_ref = ref_swish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_dygraph_api(self):
        with _test_eager_guard():
            self.func_test_dygraph_api()
        self.func_test_dygraph_api()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.swish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_swish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.swish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.swish, x_int32)
            # support the float16 input dtype
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.swish(x_fp16)


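# NumPy reference: mish(x) = x * tanh(softplus(x)); softplus falls back to x when
# x > threshold, presumably to avoid overflow in np.exp for large inputs.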
def ref_mish(x, threshold=20.0):
    softplus = np.select(
        [x <= threshold, x > threshold], [np.log(1 + np.exp(x)), x]
    )
    return x * np.tanh(softplus)


class TestMish(TestActivation):
    def setUp(self):
        self.op_type = "mish"
        self.python_api = paddle.fluid.layers.nn.mish
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_mish(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestMish_ZeroDim(TestMish):
    def init_shape(self):
        self.shape = []


class TestMishAPI(unittest.TestCase):
    # test paddle.nn.Mish, paddle.nn.functional.mish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.mish(x)
            mish = paddle.nn.Mish()
            out2 = mish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_mish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.mish(x)
        mish = paddle.nn.Mish()
        out2 = mish(x)
        out_ref = ref_mish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.mish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_mish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.mish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.mish, x_int32)
            # support the float16 input dtype
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.mish(x_fp16)


# ------------------ Test Cudnn Activation ----------------------
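# Builds a <ParentTest>_cudnn subclass that reruns the parent activation test with
# the use_cudnn attribute set, and registers it in globals() so unittest picks it up.
# The class is skipped when Paddle is built without CUDA.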
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


# ------------------ Test Fp16 ----------------------
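# Builds a <ParentTest>_fp16 subclass that reruns the parent test in float16 on
# CUDAPlace(0) with relaxed tolerances (atol / grad_atol); the gradient check can
# be disabled via grad_check, and the checks are no-ops when the device does not
# support float16.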
def create_test_act_fp16_class(
    parent, atol=1e-3, grad_check=True, grad_atol=0.80
):
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol
                )

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


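# Each call below registers a float16 variant (e.g. TestSoftsign_fp16) of an
# existing activation test in this module's globals() so unittest discovers it.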
create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestExpm1)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestSilu)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhshrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftshrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestTan, grad_atol=0.85)
create_test_act_fp16_class(TestCosh, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestSinh)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestAcosh, grad_atol=0.85)
create_test_act_fp16_class(TestAsinh, grad_atol=0.85)
create_test_act_fp16_class(TestAtanh, grad_atol=0.85)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu, grad_atol=0.85)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestCELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
if core.is_compiled_with_rocm():
    create_test_act_fp16_class(TestLog2, atol=5e-2, grad_atol=0.85)
else:
    create_test_act_fp16_class(TestLog2, atol=5e-2)
create_test_act_fp16_class(TestLog10, atol=5e-2)
create_test_act_fp16_class(TestLog1p, grad_atol=0.9)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish, grad_atol=0.85)
create_test_act_fp16_class(TestHardSwish)
create_test_act_fp16_class(TestMish, grad_atol=0.9)


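# Same pattern for bfloat16: the generated <ParentTest>_bf16 classes use np.uint16
# (OpTest's storage dtype for bfloat16) and always run on CUDAPlace(0).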
def create_test_act_bf16_class(
    parent, atol=1e-2, grad_check=True, grad_atol=0.80
):
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActBF16(parent):
        def init_dtype(self):
            self.dtype = np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, ['X'], 'Out', max_relative_error=grad_atol
            )

    cls_name = "{0}_{1}".format(parent.__name__, "bf16")
    TestActBF16.__name__ = cls_name
    globals()[cls_name] = TestActBF16


create_test_act_bf16_class(TestRelu)
create_test_act_bf16_class(TestAbs)

if __name__ == "__main__":
    unittest.main()