test_activation_op.py 133.8 KB
Newer Older
1
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
D
dzhwinter 已提交
2
#
D
dzhwinter 已提交
3 4 5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
D
dzhwinter 已提交
6
#
D
dzhwinter 已提交
7
#     http://www.apache.org/licenses/LICENSE-2.0
D
dzhwinter 已提交
8
#
D
dzhwinter 已提交
9 10 11 12 13 14
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

15
import os
Q
qijun 已提交
16
import unittest
17
import warnings
J
joejiong 已提交
18

Q
qijun 已提交
19
import numpy as np
20
from eager_op_test import OpTest, convert_float_to_uint16
21 22
from scipy.special import erf, expit

23
import paddle
24
import paddle.nn.functional as F
25 26
from paddle import fluid, static
from paddle.fluid import Program, core, program_guard
27
from paddle.fluid.layer_helper import LayerHelper
Q
qijun 已提交
28 29


30
class TestSqrtOpError(unittest.TestCase):
Z
Zhaolong Xing 已提交
31
    def test_errors(self):
32
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
33 34 35 36 37 38 39 40 41 42 43 44 45 46
            with program_guard(Program(), Program()):
                # The input type of sqrt op must be Variable or numpy.ndarray.
                in1 = 1
                self.assertRaises(TypeError, paddle.sqrt, in1)
                # The input dtype of sqrt op must be float16, float32, float64.
                in2 = paddle.static.data(
                    name='input2', shape=[-1, 12, 10], dtype="int32"
                )
                self.assertRaises(TypeError, paddle.sqrt, in2)

                in3 = paddle.static.data(
                    name='input3', shape=[-1, 12, 10], dtype="float16"
                )
                paddle.sqrt(x=in3)
Z
Zhaolong Xing 已提交
47 48


C
chengduo 已提交
49
class TestActivation(OpTest):
Q
qijun 已提交
50 51
    def setUp(self):
        self.op_type = "exp"
52
        self.init_dtype()
53
        self.init_shape()
54
        self.init_kernel_type()
55
        self.if_enable_cinn()
C
chentianyu03 已提交
56
        self.python_api = paddle.exp
57
        self.public_python_api = paddle.exp
58

59
        np.random.seed(2049)
60
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
61 62 63 64
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
Q
qijun 已提交
65

66 67
        self.convert_input_output()

Q
qijun 已提交
68
    def test_check_output(self):
W
wanghuancoder 已提交
69
        self.check_output()
Q
qijun 已提交
70 71

    def test_check_grad(self):
72 73
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
74 75 76 77
        self.check_grad(
            ['X'],
            'Out',
        )
Q
qijun 已提交
78

79
    def init_dtype(self):
80
        self.dtype = np.float64
81

82 83 84
    def init_shape(self):
        self.shape = [11, 17]

85 86 87
    def init_kernel_type(self):
        pass

88 89 90
    def convert_input_output(self):
        pass

91 92 93
    def if_enable_cinn(self):
        pass

Q
qijun 已提交
94

95 96 97 98 99
class TestActivation_ZeroDim(TestActivation):
    def init_shape(self):
        self.shape = []


100
class TestExpFp32_Prim(OpTest):
101 102 103 104 105 106
    def setUp(self):
        self.op_type = "exp"
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.exp
107
        self.public_python_api = paddle.exp
108 109 110 111 112 113 114

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
115
        self.if_enable_cinn()
116 117 118 119 120 121 122 123 124 125 126 127 128

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)

    def init_dtype(self):
        self.dtype = np.float32

    def init_shape(self):
        self.shape = [12, 17]

129
    def if_enable_cinn(self):
130
        pass
131 132


133
class TestExpFp64_Prim(TestExpFp32_Prim):
134 135 136 137
    def init_dtype(self):
        self.dtype = np.float64


138
class TestExpPrim_ZeroDim(TestExpFp32_Prim):
139 140 141
    def init_shape(self):
        self.shape = []

142
    def if_enable_cinn(self):
143 144 145
        self.enable_cinn = False


R
ronnywang 已提交
146 147 148
class TestExpm1(TestActivation):
    def setUp(self):
        self.op_type = "expm1"
149
        self.python_api = paddle.expm1
R
ronnywang 已提交
150
        self.init_dtype()
151
        self.init_shape()
R
ronnywang 已提交
152 153

        np.random.seed(2049)
154
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
R
ronnywang 已提交
155 156 157 158
        out = np.expm1(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
159
        self.convert_input_output()
R
ronnywang 已提交
160 161

    def test_check_grad(self):
W
wanghuancoder 已提交
162
        self.check_grad(['X'], 'Out')
163 164

    def test_check_output(self):
W
wanghuancoder 已提交
165
        self.check_output()
R
ronnywang 已提交
166 167


168 169 170 171 172
class TestExpm1_ZeroDim(TestExpm1):
    def init_shape(self):
        self.shape = []


R
ronnywang 已提交
173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188
class TestExpm1API(unittest.TestCase):
    def init_dtype(self):
        self.dtype = 'float64'
        self.shape = [11, 17]

    def setUp(self):
        self.init_dtype()
        self.x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        self.out_ref = np.expm1(self.x)

        self.place = [paddle.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def test_static_api(self):
        def run(place):
189
            with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
190
                with paddle.static.program_guard(paddle.static.Program()):
191
                    X = paddle.static.data('X', self.shape, dtype=self.dtype)
W
wanghuancoder 已提交
192 193 194
                    out = paddle.expm1(X)
                    exe = paddle.static.Executor(place)
                    res = exe.run(feed={'X': self.x})
R
ronnywang 已提交
195
            for r in res:
196
                np.testing.assert_allclose(self.out_ref, r, rtol=1e-05)
R
ronnywang 已提交
197 198 199 200 201 202 203 204

        for place in self.place:
            run(place)

    def test_dygraph_api(self):
        def run(place):
            X = paddle.to_tensor(self.x)
            out = paddle.expm1(X)
205
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
R
ronnywang 已提交
206 207 208 209 210

        for place in self.place:
            run(place)

    def test_errors(self):
211
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
212
            with paddle.static.program_guard(paddle.static.Program()):
213
                X = paddle.static.data('X', self.shape, dtype='int32')
W
wanghuancoder 已提交
214
                self.assertRaises(TypeError, paddle.expm1, X)
R
ronnywang 已提交
215 216 217
        # The input dtype must be float16, float32, float64.


218
class TestParameter:
219
    def test_out_name(self):
220
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
221 222 223 224 225 226 227 228 229 230 231
            with fluid.program_guard(fluid.Program()):
                np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
                data = paddle.static.data(
                    name="X", shape=[-1, 1], dtype="float32"
                )
                out = eval("paddle.%s(data, name='Y')" % self.op_type)
                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
                expected = eval("np.%s(np_x)" % self.op_type)
                np.testing.assert_allclose(result, expected, rtol=1e-05)
232 233 234 235 236 237 238

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
239
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)
240 241


C
chengduo 已提交
242
class TestSigmoid(TestActivation):
Q
qijun 已提交
243 244
    def setUp(self):
        self.op_type = "sigmoid"
Z
zxcd 已提交
245 246
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.sigmoid
247
        self.public_python_api = paddle.nn.functional.sigmoid
248
        self.init_dtype()
249
        self.init_shape()
250
        self.if_enable_cinn()
251
        np.random.seed(1024)
252
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
253 254 255 256
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
Q
qijun 已提交
257

258 259
        self.convert_input_output()

260 261 262
    def init_dtype(self):
        self.dtype = np.float32

263 264 265
    def if_enable_cinn(self):
        pass

266
    def test_check_grad(self):
267 268
        if self.dtype == np.float16:
            return
Z
zxcd 已提交
269
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_prim=True)
270

271

272 273 274 275
class TestSigmoid_ZeroDim(TestSigmoid):
    def init_shape(self):
        self.shape = []

276 277 278
    def if_enable_cinn(self):
        self.enable_cinn = False

279

280
@unittest.skipIf(
R
ronnywang 已提交
281 282
    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
    "core is not compiled with CUDA",
283
)
284 285 286
class TestSigmoidBF16(OpTest):
    def setUp(self):
        self.op_type = "sigmoid"
Z
zxcd 已提交
287 288
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.sigmoid
289
        self.public_python_api = paddle.nn.functional.sigmoid
290
        self.init_dtype()
291
        self.init_shape()
292
        self.if_enable_cinn()
293
        np.random.seed(1024)
294
        x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
295 296 297 298 299 300 301 302 303 304
        out = 1 / (1 + np.exp(-x))

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

305 306 307
    def init_shape(self):
        self.shape = [11, 17]

308 309 310
    def if_enable_cinn(self):
        self.enable_cinn = False

311 312
    def test_check_output(self):
        place = core.CUDAPlace(0)
313
        # elementwise_pow doesn't support bfloat16, skip check_prim here.
314
        self.check_output_with_place(place, check_prim=True)
315 316 317

    def test_check_grad(self):
        place = core.CUDAPlace(0)
318
        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)
319 320


321 322 323 324 325 326 327 328
'''
class TestSigmoidBF16_ZeroDim(TestSigmoidBF16):

    def init_shape(self):
        self.shape = []
'''


M
minghaoBD 已提交
329 330 331
class TestSilu(TestActivation):
    def setUp(self):
        self.op_type = "silu"
Z
zxcd 已提交
332 333
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.silu
334
        self.public_python_api = paddle.nn.functional.silu
M
minghaoBD 已提交
335
        self.init_dtype()
336
        self.init_shape()
337
        self.if_enable_cinn()
M
minghaoBD 已提交
338 339

        np.random.seed(1024)
340
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
M
minghaoBD 已提交
341
        out = x / (np.exp(-x) + 1)
342
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
M
minghaoBD 已提交
343 344
        self.outputs = {'Out': out}

345 346
        self.convert_input_output()

M
minghaoBD 已提交
347 348 349
    def init_dtype(self):
        self.dtype = np.float32

350
    def if_enable_cinn(self):
351 352
        pass

M
minghaoBD 已提交
353
    def test_check_grad(self):
Z
zxcd 已提交
354
        self.check_grad(['X'], 'Out', check_prim=True)
M
minghaoBD 已提交
355 356


357 358 359
class TestSilu_ZeroDim(TestSilu):
    def init_shape(self):
        self.shape = []
Z
zxcd 已提交
360

361
    def if_enable_cinn(self):
362
        self.enable_cinn = False
Z
zxcd 已提交
363 364


M
minghaoBD 已提交
365 366 367 368
class TestSiluAPI(unittest.TestCase):
    # test paddle.nn.Silu, paddle.nn.functional.silu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
369 370 371
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
M
minghaoBD 已提交
372
            else paddle.CPUPlace()
373
        )
M
minghaoBD 已提交
374 375

    def test_static_api(self):
376
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
377
            with paddle.static.program_guard(paddle.static.Program()):
378
                x = paddle.static.data('X', [11, 17])
W
wanghuancoder 已提交
379 380 381 382 383 384 385 386
                out1 = F.silu(x)
                m = paddle.nn.Silu()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = self.x_np / (1 + np.exp(-self.x_np))
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
M
minghaoBD 已提交
387 388 389 390 391 392 393 394

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.silu(x)
        m = paddle.nn.Silu()
        out2 = m(x)
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in [out1, out2]:
395
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
M
minghaoBD 已提交
396 397

    def test_errors(self):
398
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
399 400 401 402
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.silu, 1)
                # The input dtype must be float16, float32, float64.
403
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
404 405 406 407
                    name='x_int32', shape=[11, 17], dtype='int32'
                )
                self.assertRaises(TypeError, F.silu, x_int32)
                # support the input dtype is float16
408
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
409 410 411
                    name='x_fp16', shape=[11, 17], dtype='float16'
                )
                F.silu(x_fp16)
M
minghaoBD 已提交
412 413


C
chengduo 已提交
414
class TestLogSigmoid(TestActivation):
415 416
    def setUp(self):
        self.op_type = "logsigmoid"
W
wanghuancoder 已提交
417
        self.python_api = paddle.nn.functional.log_sigmoid
418
        self.init_dtype()
419
        self.init_shape()
420

421
        np.random.seed(2048)
422
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
423
        out = np.log(1 / (1 + np.exp(-x)))
424
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
425
        self.outputs = {'Out': out}
426

427 428
        self.convert_input_output()

429
    def test_check_grad(self):
430 431
        if self.dtype == np.float16:
            return
F
fengjiayi 已提交
432
        self.check_grad(['X'], 'Out', max_relative_error=0.008)
433 434


435 436 437 438 439
class TestLogSigmoid_ZeroDim(TestLogSigmoid):
    def init_shape(self):
        self.shape = []


440
class TestLogSigmoidAPI(unittest.TestCase):
441
    # test paddle.nn.LogSigmoid, paddle.nn.functional.log_sigmoid
442
    def setUp(self):
443
        np.random.seed(1024)
444
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
445 446 447
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
448
            else paddle.CPUPlace()
449
        )
450 451

    def test_static_api(self):
452
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
453
            with paddle.static.program_guard(paddle.static.Program()):
454
                x = paddle.static.data('X', [11, 17])
W
wanghuancoder 已提交
455 456 457 458 459 460 461 462
                out1 = F.log_sigmoid(x)
                m = paddle.nn.LogSigmoid()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
463 464 465

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
466
        out1 = F.log_sigmoid(x)
467 468 469 470
        m = paddle.nn.LogSigmoid()
        out2 = m(x)
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in [out1, out2]:
471
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
472 473

    def test_errors(self):
474
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
475 476 477 478
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.log_sigmoid, 1)
                # The input dtype must be float16, float32, float64.
479
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
480 481 482 483
                    name='x_int32', shape=[11, 17], dtype='int32'
                )
                self.assertRaises(TypeError, F.log_sigmoid, x_int32)
                # support the input dtype is float16
484
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
485 486 487
                    name='x_fp16', shape=[11, 17], dtype='float16'
                )
                F.log_sigmoid(x_fp16)
488 489


490
class TestTanh(TestActivation, TestParameter):
491 492
    def setUp(self):
        self.op_type = "tanh"
493
        self.prim_op_type = "prim"
W
wanghuancoder 已提交
494
        self.python_api = paddle.tanh
495
        self.public_python_api = paddle.tanh
496
        self.init_dtype()
497
        self.init_shape()
498
        self.if_enable_cinn()
499

500
        np.random.seed(1024)
501
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
502 503 504
        out = np.tanh(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
505
        self.convert_input_output()
506 507

    def test_check_grad(self):
508 509
        if self.dtype == np.float16:
            return
510
        self.check_grad(['X'], 'Out', check_prim=True)
511

512
    def init_dtype(self):
513
        # TODO If dtype is float64, the output (Out) has diff at CPUPlace
514 515 516 517
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32

518 519 520
    def if_enable_cinn(self):
        pass

521

522 523 524 525
class TestTanh_ZeroDim(TestTanh):
    def init_shape(self):
        self.shape = []

526 527 528
    def if_enable_cinn(self):
        self.enable_cinn = False

529

W
WangXi 已提交
530 531 532 533
class TestTanhAPI(unittest.TestCase):
    # test paddle.tanh, paddle.nn.tanh, paddle.nn.functional.tanh
    def setUp(self):
        self.dtype = 'float32'
534
        np.random.seed(1024)
W
WangXi 已提交
535
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
536 537 538
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
W
WangXi 已提交
539
            else paddle.CPUPlace()
540
        )
541 542 543 544
        self.executed_api()

    def executed_api(self):
        self.tanh = F.tanh
W
WangXi 已提交
545 546

    def test_static_api(self):
547
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
548
            with paddle.static.program_guard(paddle.static.Program()):
549
                x = paddle.static.data('X', [10, 12], self.dtype)
W
wanghuancoder 已提交
550 551 552 553 554 555 556 557
                out1 = self.tanh(x)
                th = paddle.nn.Tanh()
                out2 = th(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = np.tanh(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
W
WangXi 已提交
558 559

    def test_dygraph_api(self):
Z
Zhou Wei 已提交
560
        x = paddle.to_tensor(self.x_np)
W
WangXi 已提交
561 562 563 564 565 566
        out1 = F.tanh(x)
        out2 = paddle.tanh(x)
        th = paddle.nn.Tanh()
        out3 = th(x)
        out_ref = np.tanh(self.x_np)
        for r in [out1, out2, out3]:
567
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
W
WangXi 已提交
568 569

    def test_errors(self):
570
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
571 572 573 574
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.tanh, 1)
                # The input dtype must be float16, float32.
575
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
576 577 578 579
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, self.tanh, x_int32)
                # support the input dtype is float16
580
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
581 582 583
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                self.tanh(x_fp16)
584 585 586 587 588 589


class TestTanhInplaceAPI(TestTanhAPI):
    # test paddle.tanh_
    def executed_api(self):
        self.tanh = paddle.tanh_
W
WangXi 已提交
590 591


592
class TestAtan(TestActivation, TestParameter):
593 594
    def setUp(self):
        self.op_type = "atan"
W
wanghuancoder 已提交
595
        self.python_api = paddle.atan
596
        self.init_dtype()
597
        self.init_shape()
598

599
        np.random.seed(1024)
600
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
601 602 603 604
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
605
        self.convert_input_output()
606 607 608 609

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
610
        self.check_grad(['X'], 'Out')
611

W
WuHaobo 已提交
612
    def test_out_name(self):
613
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
614 615 616 617 618 619 620 621 622 623 624
            with fluid.program_guard(fluid.Program()):
                np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
                data = paddle.static.data(
                    name="X", shape=[-1, 1], dtype="float32"
                )
                out = paddle.atan(data, name='Y')
                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
                expected = np.arctan(np_x)
                self.assertEqual(result, expected)
W
WuHaobo 已提交
625

626 627 628 629 630 631 632 633
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)

634

635
class TestAtan_ZeroDim(TestAtan):
636 637 638 639
    def init_shape(self):
        self.shape = []


640 641 642
class TestSinh(TestActivation):
    def setUp(self):
        self.op_type = "sinh"
W
wanghuancoder 已提交
643
        self.python_api = paddle.sinh
644
        self.init_dtype()
645
        self.init_shape()
646

647
        np.random.seed(1024)
648
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
649 650 651 652
        out = np.sinh(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

653 654
        self.convert_input_output()

655 656 657 658 659
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

660 661 662 663 664 665 666

class TestSinh_ZeroDim(TestSinh):
    def init_shape(self):
        self.shape = []


class TestSinhAPI(unittest.TestCase):
667 668 669 670
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
671
            z = paddle.sinh(x).numpy()
672
            z_expected = np.sinh(np_x)
673
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)
674 675

    def test_api(self):
676
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698
            test_data_shape = [11, 17]
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                    "float32"
                )
                data_x = paddle.static.data(
                    name="data_x",
                    shape=test_data_shape,
                    dtype="float32",
                )

                pd_sinh_out = paddle.sinh(data_x)
                exe = fluid.Executor(place=fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                (np_sinh_res,) = exe.run(
                    fluid.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[pd_sinh_out],
                )

            expected_res = np.sinh(input_x)
            np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)
699 700 701 702

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
703 704 705
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
706 707
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
708
            loss = paddle.sinh(var)
709 710 711 712 713 714 715
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):
    def test_errors(self):
716
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
717 718 719 720
            with program_guard(Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, paddle.sinh, 1)
                # The input dtype must be float16, float32, float64.
721
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
722 723 724 725
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, paddle.sinh, x_int32)
                # support the input dtype is float16
726
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
727 728 729
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                paddle.sinh(x_fp16)
730 731 732 733 734


class TestCosh(TestActivation):
    def setUp(self):
        self.op_type = "cosh"
W
wanghuancoder 已提交
735
        self.python_api = paddle.cosh
736
        self.init_dtype()
737
        self.init_shape()
738

739
        np.random.seed(1024)
740
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
741 742 743 744
        out = np.cosh(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

745 746
        self.convert_input_output()

747 748 749 750 751
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

752 753 754 755 756 757 758

class TestCosh_ZeroDim(TestCosh):
    def init_shape(self):
        self.shape = []


class TestCoshAPI(unittest.TestCase):
759 760 761 762
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
763
            z = paddle.cosh(x).numpy()
764
            z_expected = np.cosh(np_x)
765
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)
766 767

    def test_api(self):
768
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790
            test_data_shape = [11, 17]
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                    "float32"
                )
                data_x = paddle.static.data(
                    name="data_x",
                    shape=test_data_shape,
                    dtype="float32",
                )

                pd_cosh_out = paddle.cosh(data_x)
                exe = fluid.Executor(place=fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                (np_cosh_res,) = exe.run(
                    fluid.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[pd_cosh_out],
                )

            expected_res = np.cosh(input_x)
            np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)
791 792 793 794

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
795 796 797
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
798 799
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
800
            loss = paddle.cosh(var)
801 802 803 804 805 806 807
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):
    def test_errors(self):
808
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
809 810 811 812
            with program_guard(Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, paddle.cosh, 1)
                # The input dtype must be float16, float32, float64.
813
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
814 815 816 817
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, paddle.cosh, x_int32)
                # support the input dtype is float16
818
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
819 820 821
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                paddle.cosh(x_fp16)
822 823


824 825 826 827 828 829
def ref_tanhshrink(x):
    out = x - np.tanh(x)
    return out


class TestTanhshrink(TestActivation):
K
Kavya Srinet 已提交
830 831
    def setUp(self):
        self.op_type = "tanh_shrink"
W
wanghuancoder 已提交
832
        self.python_api = paddle.nn.functional.tanhshrink
833
        self.init_dtype()
834
        self.init_shape()
835

836
        np.random.seed(1024)
837
        x = np.random.uniform(10, 20, self.shape).astype(self.dtype)
838
        out = ref_tanhshrink(x)
839
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
840
        self.outputs = {'Out': out}
K
Kavya Srinet 已提交
841

842 843
        self.convert_input_output()

K
Kavya Srinet 已提交
844
    def test_check_grad(self):
845 846
        if self.dtype == np.float16:
            return
847
        self.check_grad(['X'], 'Out')
K
Kavya Srinet 已提交
848

849

850 851 852 853 854
class TestTanhshrink_ZeroDim(TestTanhshrink):
    def init_shape(self):
        self.shape = []


855 856 857
class TestTanhshrinkAPI(unittest.TestCase):
    # test paddle.nn.Tanhshrink, paddle.nn.functional.tanhshrink
    def setUp(self):
858
        np.random.seed(1024)
859
        self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
860 861 862
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
863
            else paddle.CPUPlace()
864
        )
865 866

    def test_static_api(self):
867
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
868
            with paddle.static.program_guard(paddle.static.Program()):
869
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
W
wanghuancoder 已提交
870 871 872 873 874 875 876 877
                out1 = F.tanhshrink(x)
                tanhshrink = paddle.nn.Tanhshrink()
                out2 = tanhshrink(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_tanhshrink(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
878 879 880 881 882 883 884 885

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanhshrink(x)
        tanhshrink = paddle.nn.Tanhshrink()
        out2 = tanhshrink(x)
        out_ref = ref_tanhshrink(self.x_np)
        for r in [out1, out2]:
886
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
887 888

    def test_errors(self):
889
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
890 891 892 893
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.tanhshrink, 1)
                # The input dtype must be float16, float32, float64.
894
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
895 896 897 898
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.tanhshrink, x_int32)
                # support the input dtype is float16
899
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
900 901 902
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.tanhshrink(x_fp16)
903 904


905 906 907 908 909 910
def ref_hardshrink(x, threshold):
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out


C
chengduo 已提交
911
class TestHardShrink(TestActivation):
912 913
    def setUp(self):
        self.op_type = "hard_shrink"
W
wanghuancoder 已提交
914
        self.python_api = paddle.nn.functional.hardshrink
915
        self.init_dtype()
916
        self.init_shape()
917

918 919
        self.threshold = 0.5
        self.set_attrs()
920
        np.random.seed(1024)
921
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) * 10
922
        out = ref_hardshrink(x, self.threshold)
923 924
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
925

926
        self.attrs = {'threshold': self.threshold}
927 928

        self.convert_input_output()
929

930 931 932
    def init_shape(self):
        self.shape = [10, 12]

933 934 935
    def set_attrs(self):
        pass

936
    def test_check_grad(self):
937 938
        if self.dtype == np.float16:
            return
939
        self.check_grad(['X'], 'Out')
940 941


942 943 944 945 946
class TestHardShrink_threshold_negative(TestHardShrink):
    def set_attrs(self):
        self.threshold = -0.1


947 948 949 950 951 952 953 954
'''
class TestHardShrink_ZeroDim(TestHardShrink):

    def init_shape(self):
        self.shape = []
'''


955 956 957
class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
958
        np.random.seed(1024)
959
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
960 961 962
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
963
            else paddle.CPUPlace()
964
        )
965 966

    def test_static_api(self):
967
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
968
            with paddle.static.program_guard(paddle.static.Program()):
969
                x = paddle.static.data('X', [10, 12], dtype="float32")
W
wanghuancoder 已提交
970 971 972 973 974 975 976 977
                out1 = F.hardshrink(x)
                hd = paddle.nn.Hardshrink()
                out2 = hd(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardshrink(self.x_np, 0.5)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
978 979

    def test_dygraph_api(self):
Z
Zhou Wei 已提交
980
        x = paddle.to_tensor(self.x_np)
981 982 983 984 985
        out1 = F.hardshrink(x)
        hd = paddle.nn.Hardshrink()
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in [out1, out2]:
986
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
987 988 989 990 991 992

        out1 = F.hardshrink(x, 0.6)
        hd = paddle.nn.Hardshrink(0.6)
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.6)
        for r in [out1, out2]:
993
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
994

995
    def test_errors(self):
996
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
997 998 999 1000
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardshrink, 1)
                # The input dtype must be float16, float32, float64.
1001
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
1002 1003 1004 1005
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardshrink, x_int32)
                # support the input dtype is float16
1006
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
1007 1008 1009
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardshrink(x_fp16)
1010 1011


1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022
def ref_hardtanh(x, min=-1.0, max=1.0):
    out = np.copy(x)
    out[np.abs(x - min) < 0.005] = min + 0.02
    out[np.abs(x - max) < 0.005] = max + 0.02
    out = np.minimum(np.maximum(x, min), max)
    return out


class TestHardtanhAPI(unittest.TestCase):
    # test paddle.nn.Hardtanh, paddle.nn.functional.hardtanh
    def setUp(self):
1023
        np.random.seed(1024)
1024
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
1025 1026 1027
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
1028
            else paddle.CPUPlace()
1029
        )
1030 1031

    def test_static_api(self):
1032
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
1033
            with paddle.static.program_guard(paddle.static.Program()):
1034
                x = paddle.static.data('X', [10, 12], dtype="float32")
W
wanghuancoder 已提交
1035 1036 1037 1038 1039 1040 1041 1042
                out1 = F.hardtanh(x)
                m = paddle.nn.Hardtanh()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardtanh(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
1043 1044

    def test_dygraph_api(self):
Z
Zhou Wei 已提交
1045
        x = paddle.to_tensor(self.x_np)
1046 1047 1048 1049 1050
        out1 = F.hardtanh(x)
        m = paddle.nn.Hardtanh()
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np)
        for r in [out1, out2]:
1051
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1052 1053 1054 1055 1056 1057

        out1 = F.hardtanh(x, -2.0, 2.0)
        m = paddle.nn.Hardtanh(-2.0, 2.0)
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
        for r in [out1, out2]:
1058
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1059 1060

    def test_errors(self):
1061
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
1062 1063 1064 1065
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardtanh, 1)
                # The input dtype must be float16, float32, float64.
1066
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
1067 1068 1069 1070
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardtanh, x_int32)
                # support the input dtype is float16
1071
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
1072 1073 1074
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardtanh(x_fp16)
1075 1076


1077 1078 1079
def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
1080 1081
        out - threshold
    )
1082 1083 1084 1085
    return out


class TestSoftshrink(TestActivation):
1086 1087
    def setUp(self):
        self.op_type = "softshrink"
1088
        self.python_api = paddle.nn.functional.softshrink
1089
        self.init_dtype()
1090
        self.init_shape()
1091

1092
        threshold = 0.8
1093

1094
        np.random.seed(1023)
1095
        x = np.random.uniform(0.25, 10, self.shape).astype(self.dtype)
1096
        out = ref_softshrink(x, threshold)
1097 1098

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
1099
        self.outputs = {'Out': out}
1100

1101 1102
        self.attrs = {"lambda": threshold}

1103
    def test_check_grad(self):
1104 1105
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
1106
        self.check_grad(['X'], 'Out')
1107

1108

1109 1110 1111 1112 1113
class TestSoftshrink_ZeroDim(TestSoftshrink):
    def init_shape(self):
        self.shape = []


1114 1115 1116 1117
class TestSoftshrinkAPI(unittest.TestCase):
    # test paddle.nn.Softshrink, paddle.nn.functional.softshrink
    def setUp(self):
        self.threshold = 0.8
1118
        np.random.seed(1024)
1119
        self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
1120 1121 1122
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
1123
            else paddle.CPUPlace()
1124
        )
1125 1126

    def test_static_api(self):
1127
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
1128
            with paddle.static.program_guard(paddle.static.Program()):
1129
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
W
wanghuancoder 已提交
1130 1131 1132 1133 1134 1135 1136 1137
                out1 = F.softshrink(x, self.threshold)
                softshrink = paddle.nn.Softshrink(self.threshold)
                out2 = softshrink(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softshrink(self.x_np, self.threshold)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
1138 1139 1140 1141 1142 1143 1144 1145

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.softshrink(x, self.threshold)
        softshrink = paddle.nn.Softshrink(self.threshold)
        out2 = softshrink(x)
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in [out1, out2]:
1146
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1147

1148
    def test_errors(self):
1149
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
1150 1151 1152 1153
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.softshrink, 1)
                # The input dtype must be float16, float32, float64.
1154
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
1155 1156 1157 1158
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.softshrink, x_int32)
                # The threshold must be no less than zero
1159
                x_fp32 = paddle.static.data(
W
wanghuancoder 已提交
1160 1161 1162 1163
                    name='x_fp32', shape=[12, 10], dtype='float32'
                )
                self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
                # support the input dtype is float16
1164
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
1165 1166 1167
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.softshrink(x_fp16)
1168 1169


1170
class TestSqrt(TestActivation, TestParameter):
1171 1172
    def setUp(self):
        self.op_type = "sqrt"
1173
        self.prim_op_type = "prim"
1174
        self.python_api = paddle.sqrt
1175 1176
        self.public_python_api = paddle.sqrt

1177
        self.init_dtype()
1178
        self.init_shape()
1179
        self.if_enable_cinn()
1180

1181
        np.random.seed(1023)
1182
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
1183 1184 1185 1186
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1187
        self.convert_input_output()
1188

1189 1190 1191
    def if_enable_cinn(self):
        pass

1192
    def test_check_grad(self):
1193 1194
        if self.dtype == np.float16:
            return
1195
        self.check_grad(['X'], 'Out', check_prim=True)
1196 1197

    def test_check_output(self):
W
wanghuancoder 已提交
1198
        self.check_output()
1199

1200

1201 1202 1203 1204 1205
class TestSqrtPrimFp32(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
1206
        self.public_python_api = paddle.sqrt
1207 1208
        self.init_dtype()
        self.init_shape()
1209
        self.if_enable_cinn()
1210 1211 1212 1213 1214 1215 1216 1217 1218 1219
        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
1220
        self.check_grad(['X'], 'Out', check_prim=True)
1221 1222

    def test_check_output(self):
W
wanghuancoder 已提交
1223
        self.check_output()
1224 1225 1226 1227

    def init_dtype(self):
        self.dtype = np.float32

1228 1229 1230
    def if_enable_cinn(self):
        pass

1231

1232 1233 1234 1235
class TestSqrt_ZeroDim(TestSqrt):
    def init_shape(self):
        self.shape = []

1236
    def if_enable_cinn(self):
1237
        self.enable_cinn = False
1238 1239


1240
@unittest.skipIf(
R
ronnywang 已提交
1241 1242
    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
    "core is not compiled with CUDA",
1243
)
1244 1245 1246
class TestSqrtBF16(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
1247
        self.prim_op_type = "prim"
1248
        self.python_api = paddle.sqrt
1249
        self.public_python_api = paddle.sqrt
1250
        self.init_dtype()
1251
        self.init_shape()
1252
        self.if_enable_cinn()
1253 1254

        np.random.seed(1023)
1255
        x = np.random.uniform(0.1, 1, self.shape).astype(np.float32)
1256 1257 1258 1259 1260 1261 1262 1263 1264 1265
        out = np.sqrt(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

1266 1267 1268
    def init_shape(self):
        self.shape = [11, 17]

1269 1270 1271
    def if_enable_cinn(self):
        self.enable_cinn = False

1272 1273
    def test_check_output(self):
        place = core.CUDAPlace(0)
W
wanghuancoder 已提交
1274
        self.check_output_with_place(place)
1275 1276 1277

    def test_check_grad(self):
        place = core.CUDAPlace(0)
W
wanghuancoder 已提交
1278
        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)
1279 1280


M
mhy-666 已提交
1281 1282 1283 1284 1285
class TestSqrtComp(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "comp"
        self.python_api = paddle.sqrt
1286
        self.public_python_api = paddle.sqrt
M
mhy-666 已提交
1287 1288
        self.init_dtype()
        self.init_shape()
1289
        self.if_enable_cinn()
M
mhy-666 已提交
1290 1291 1292 1293 1294 1295 1296

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1297
        self.convert_input_output()
1298 1299 1300

    def if_enable_cinn(self):
        pass
M
mhy-666 已提交
1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_dygraph=True, check_prim=True)

    def test_check_output(self):
        self.check_output(check_dygraph=True, check_prim=True)


class TestSqrtCompFp32(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "comp"
        self.python_api = paddle.sqrt
1316
        self.public_python_api = paddle.sqrt
M
mhy-666 已提交
1317 1318
        self.init_dtype()
        self.init_shape()
1319
        self.if_enable_cinn()
M
mhy-666 已提交
1320 1321 1322 1323 1324 1325
        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1326 1327 1328

    def if_enable_cinn(self):
        pass
M
mhy-666 已提交
1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_dygraph=True, check_prim=True)

    def test_check_output(self):
        self.check_output(check_dygraph=True, check_prim=True)

    def init_dtype(self):
        self.dtype = np.float32


Z
zhoukunsheng 已提交
1342 1343 1344
class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
1345
        self.prim_op_type = "comp"
Z
zyfncg 已提交
1346
        self.python_api = paddle.rsqrt
1347
        self.public_python_api = paddle.rsqrt
Z
zhoukunsheng 已提交
1348
        self.init_dtype()
1349
        self.init_shape()
1350
        self.if_enable_cinn()
Z
zhoukunsheng 已提交
1351

1352
        np.random.seed(1024)
1353
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
Z
zhoukunsheng 已提交
1354 1355 1356 1357
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1358
        self.convert_input_output()
Z
zhoukunsheng 已提交
1359

1360 1361 1362
    def init_shape(self):
        self.shape = [10, 12]

1363 1364 1365
    def if_enable_cinn(self):
        pass

1366 1367 1368
    def test_check_output(self):
        self.check_output(check_prim=True)

Z
zhoukunsheng 已提交
1369 1370 1371
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1372 1373 1374 1375 1376 1377
        self.check_grad(
            ['X'],
            'Out',
            max_relative_error=0.0005,
            check_prim=True,
        )
Z
zhoukunsheng 已提交
1378 1379


1380 1381 1382
class TestRsqrt_ZeroDim(TestRsqrt):
    def init_shape(self):
        self.shape = []
1383 1384 1385

    def if_enable_cinn(self):
        self.enable_cinn = False
1386 1387


C
chengduo 已提交
1388
class TestAbs(TestActivation):
1389 1390
    def setUp(self):
        self.op_type = "abs"
1391 1392
        self.prim_op_type = "prim"
        self.python_api = paddle.abs
1393
        self.public_python_api = paddle.abs
1394
        self.init_dtype()
1395
        self.init_shape()
1396
        self.if_enable_cinn()
1397

1398
        np.random.seed(1024)
1399
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
C
chengduo 已提交
1400
        # Because we set delta = 0.005 in calculating numeric gradient,
Q
qijun 已提交
1401
        # if x is too small, such as 0.002, x_neg will be -0.003
C
chengduo 已提交
1402
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
Q
qijun 已提交
1403 1404
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
1405 1406 1407 1408
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1409
        self.convert_input_output()
1410

1411 1412 1413
    def init_shape(self):
        self.shape = [4, 25]

1414 1415 1416
    def if_enable_cinn(self):
        pass

1417
    def test_check_grad(self):
1418 1419
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
1420
        self.check_grad(['X'], 'Out', check_prim=True)
1421

1422

1423 1424 1425 1426
class TestAbs_ZeroDim(TestAbs):
    def init_shape(self):
        self.shape = []

1427 1428 1429
    def if_enable_cinn(self):
        self.enable_cinn = False

1430

C
chengduo 已提交
1431
class TestCeil(TestActivation):
D
dzhwinter 已提交
1432 1433
    def setUp(self):
        self.op_type = "ceil"
1434
        self.python_api = paddle.ceil
1435
        self.init_dtype()
1436
        self.init_shape()
1437

1438
        np.random.seed(1024)
1439
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1440 1441 1442 1443
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1444
        self.convert_input_output()
D
dzhwinter 已提交
1445

1446 1447 1448
    def init_shape(self):
        self.shape = [10, 12]

D
dzhwinter 已提交
1449
    # The same reason with TestFloor
C
chengduo 已提交
1450
    def test_check_grad(self):
1451 1452 1453
        pass


1454 1455 1456 1457 1458
class TestCeil_ZeroDim(TestCeil):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
1459
class TestFloor(TestActivation):
D
dzhwinter 已提交
1460 1461
    def setUp(self):
        self.op_type = "floor"
1462
        self.prim_op_type = "prim"
1463
        self.python_api = paddle.floor
1464
        self.public_python_api = paddle.floor
1465
        self.init_dtype()
1466
        self.init_shape()
1467
        self.if_enable_cinn()
1468

1469
        np.random.seed(1024)
1470
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1471 1472 1473 1474
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1475
        self.convert_input_output()
D
dzhwinter 已提交
1476

1477 1478 1479
    def init_shape(self):
        self.shape = [10, 12]

1480 1481 1482
    def if_enable_cinn(self):
        pass

D
dzhwinter 已提交
1483
    # the gradient on floor, ceil, round is undefined.
1484
    # we return zero as gradient, but the numpy return nan
C
chengduo 已提交
1485 1486
    # The same reason with TestFloor
    def test_check_grad(self):
1487 1488
        pass

1489
    def test_check_grad_for_prim(self):
1490 1491 1492 1493
        # the gradient on floor, ceil, round is undefined.
        # we return zero as gradient, but the numpy return nan.
        # for prim, we compare result with eager python api,
        # so, we use only_prim flag to express we only test prim.
1494 1495 1496 1497 1498 1499 1500 1501
        if core.is_compiled_with_cuda():
            self.check_grad_with_place(
                paddle.CUDAPlace(0),
                ['X'],
                'Out',
                check_prim=True,
                only_check_prim=True,
            )
1502 1503


1504
class TestFloor_ZeroDim(TestFloor):
1505 1506 1507
    def init_shape(self):
        self.shape = []

1508 1509
    def if_enable_cinn(self):
        self.enable_cinn = False
1510 1511


C
chengduo 已提交
1512
class TestCos(TestActivation):
C
add cos  
chengduoZH 已提交
1513 1514
    def setUp(self):
        self.op_type = "cos"
W
wanghuancoder 已提交
1515
        self.python_api = paddle.cos
1516 1517
        self.public_python_api = paddle.cos
        self.prim_op_type = "prim"
1518
        self.init_dtype()
1519
        self.init_shape()
1520 1521
        # prim not support now
        self.enable_cinn = False
1522

1523
        np.random.seed(1024)
1524
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1525 1526 1527
        out = np.cos(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1528
        self.convert_input_output()
C
add sin  
chengduoZH 已提交
1529

1530 1531 1532
    def init_shape(self):
        self.shape = [10, 12]

C
add sin  
chengduoZH 已提交
1533
    def test_check_grad(self):
1534 1535
        if self.dtype == np.float16:
            return
1536
        self.check_grad(['X'], 'Out', check_prim=True)
C
add sin  
chengduoZH 已提交
1537

1538

1539 1540 1541 1542 1543
class TestCos_ZeroDim(TestCos):
    def init_shape(self):
        self.shape = []


J
joejiong 已提交
1544 1545 1546 1547
class TestTan(TestActivation):
    def setUp(self):
        np.random.seed(1024)
        self.op_type = "tan"
W
wanghuancoder 已提交
1548
        self.python_api = paddle.tan
J
joejiong 已提交
1549
        self.init_dtype()
1550 1551
        self.init_shape()

J
joejiong 已提交
1552
        self.dtype = 'float32'
1553
        self.x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1554 1555 1556
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
J
joejiong 已提交
1557
            else paddle.CPUPlace()
1558
        )
J
joejiong 已提交
1559 1560 1561 1562 1563

        out = np.tan(self.x_np)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x_np)}
        self.outputs = {'Out': out}
1564
        self.convert_input_output()
J
joejiong 已提交
1565

1566 1567 1568
    def init_shape(self):
        self.shape = [10, 12]

J
joejiong 已提交
1569 1570 1571 1572 1573
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584

class TestTan_ZeroDim(TestTan):
    def init_shape(self):
        self.shape = []


class TestTanAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(1024)
        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
1585 1586 1587
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
1588
            else paddle.CPUPlace()
1589
        )
1590

J
joejiong 已提交
1591 1592 1593 1594
    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out_test = paddle.tan(x)
        out_ref = np.tan(self.x_np)
1595
        np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)
J
joejiong 已提交
1596 1597

    def test_static_api(self):
1598
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
1599 1600 1601 1602 1603 1604 1605
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [11, 17], self.dtype)
                out = paddle.tan(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = np.tan(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
J
joejiong 已提交
1606 1607 1608 1609

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
1610 1611 1612
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
J
joejiong 已提交
1613 1614 1615 1616 1617 1618 1619 1620
            var = paddle.to_tensor(input_x)
            var.stop_gradient = False
            loss = paddle.tan(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


1621 1622 1623
class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
W
wanghuancoder 已提交
1624
        self.python_api = paddle.acos
1625
        self.init_dtype()
1626
        self.init_shape()
1627

1628
        np.random.seed(1024)
1629
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
1630 1631 1632 1633
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1634
        self.convert_input_output()
1635

1636 1637 1638
    def init_shape(self):
        self.shape = [10, 12]

1639 1640 1641
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1642
        self.check_grad(['X'], 'Out')
1643 1644


1645 1646 1647 1648 1649
class TestAcos_ZeroDim(TestAcos):
    def init_shape(self):
        self.shape = []


1650
class TestSin(TestActivation, TestParameter):
C
add sin  
chengduoZH 已提交
1651 1652
    def setUp(self):
        self.op_type = "sin"
W
wanghuancoder 已提交
1653
        self.python_api = paddle.sin
1654 1655
        self.public_python_api = paddle.sin
        self.prim_op_type = "prim"
1656
        self.init_dtype()
1657
        self.init_shape()
1658 1659
        # prim not support now
        self.enable_cinn = False
1660

1661
        np.random.seed(1024)
1662
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1663 1664 1665
        out = np.sin(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1666
        self.convert_input_output()
C
add cos  
chengduoZH 已提交
1667

1668 1669 1670
    def init_shape(self):
        self.shape = [10, 12]

C
add cos  
chengduoZH 已提交
1671
    def test_check_grad(self):
1672 1673
        if self.dtype == np.float16:
            return
1674
        self.check_grad(['X'], 'Out', check_prim=True)
C
add cos  
chengduoZH 已提交
1675 1676


1677 1678 1679 1680 1681
class TestSin_ZeroDim(TestSin):
    def init_shape(self):
        self.shape = []


1682 1683 1684
class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
W
wanghuancoder 已提交
1685
        self.python_api = paddle.asin
1686
        self.init_dtype()
1687
        self.init_shape()
1688

1689
        np.random.seed(2048)
1690
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
1691 1692 1693 1694
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1695
        self.convert_input_output()
1696

1697 1698 1699
    def init_shape(self):
        self.shape = [10, 12]

1700 1701 1702
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1703
        self.check_grad(['X'], 'Out')
1704 1705


1706 1707 1708 1709 1710
class TestAsin_ZeroDim(TestAsin):
    def init_shape(self):
        self.shape = []


X
xiaoting 已提交
1711 1712 1713
class TestAcosh(TestActivation):
    def setUp(self):
        self.op_type = "acosh"
W
wanghuancoder 已提交
1714
        self.python_api = paddle.acosh
X
xiaoting 已提交
1715
        self.init_dtype()
1716
        self.init_shape()
X
xiaoting 已提交
1717 1718

        np.random.seed(1024)
1719
        x = np.random.uniform(2, 3, self.shape).astype(self.dtype)
X
xiaoting 已提交
1720 1721 1722 1723
        out = np.arccosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1724
        self.convert_input_output()
X
xiaoting 已提交
1725

1726 1727 1728
    def init_shape(self):
        self.shape = [10, 12]

X
xiaoting 已提交
1729 1730 1731 1732 1733 1734
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


1735 1736 1737 1738 1739
class TestAcosh_ZeroDim(TestAcosh):
    def init_shape(self):
        self.shape = []


X
xiaoting 已提交
1740 1741 1742
class TestAsinh(TestActivation):
    def setUp(self):
        self.op_type = "asinh"
W
wanghuancoder 已提交
1743
        self.python_api = paddle.asinh
X
xiaoting 已提交
1744
        self.init_dtype()
1745
        self.init_shape()
X
xiaoting 已提交
1746 1747

        np.random.seed(1024)
1748
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
X
xiaoting 已提交
1749 1750 1751 1752
        out = np.arcsinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1753
        self.convert_input_output()
X
xiaoting 已提交
1754

1755 1756 1757
    def init_shape(self):
        self.shape = [10, 12]

X
xiaoting 已提交
1758 1759 1760 1761 1762 1763
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


1764 1765 1766 1767 1768
class TestAsinh_ZeroDim(TestAsinh):
    def init_shape(self):
        self.shape = []


X
xiaoting 已提交
1769 1770 1771
class TestAtanh(TestActivation):
    def setUp(self):
        self.op_type = "atanh"
W
wanghuancoder 已提交
1772
        self.python_api = paddle.atanh
X
xiaoting 已提交
1773
        self.init_dtype()
1774
        self.init_shape()
X
xiaoting 已提交
1775 1776

        np.random.seed(400)
1777
        x = np.random.uniform(-0.9, 0.9, self.shape).astype(self.dtype)
X
xiaoting 已提交
1778 1779 1780 1781
        out = np.arctanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1782
        self.convert_input_output()
X
xiaoting 已提交
1783

1784 1785 1786
    def init_shape(self):
        self.shape = [10, 12]

X
xiaoting 已提交
1787 1788 1789 1790 1791 1792
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


1793 1794 1795 1796 1797
class TestAtanh_ZeroDim(TestAtanh):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
1798
class TestRound(TestActivation):
D
dzhwinter 已提交
1799 1800
    def setUp(self):
        self.op_type = "round"
1801
        self.python_api = paddle.round
1802
        self.init_dtype()
1803
        self.init_shape()
1804

1805
        np.random.seed(1024)
1806
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1807 1808 1809 1810
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1811
        self.convert_input_output()
D
dzhwinter 已提交
1812

1813 1814 1815
    def init_shape(self):
        self.shape = [10, 12]

C
chengduo 已提交
1816
    def test_check_grad(self):
1817 1818 1819
        pass


1820 1821 1822 1823 1824
class TestRound_ZeroDim(TestRound):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
1825
class TestRelu(TestActivation):
1826
    def setUp(self):
Q
qijun 已提交
1827
        self.op_type = "relu"
K
Kang Zhao 已提交
1828 1829
        self.python_api = paddle.nn.functional.relu
        self.prim_op_type = "comp"
1830
        self.public_python_api = paddle.nn.functional.relu
K
Kexin Zhao 已提交
1831
        self.init_dtype()
1832
        self.init_shape()
1833
        self.if_enable_cinn()
K
Kexin Zhao 已提交
1834

1835
        np.random.seed(1024)
1836 1837 1838 1839 1840
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)
        self.inputs = {'X': x}
K
Kexin Zhao 已提交
1841 1842

        self.outputs = {'Out': out}
1843
        self.convert_input_output()
1844 1845

    def test_check_grad(self):
K
Kexin Zhao 已提交
1846 1847
        if self.dtype == np.float16:
            return
K
Kang Zhao 已提交
1848 1849 1850 1851 1852
        self.check_grad(['X'], 'Out', check_prim=True)

    def test_check_output(self):
        self.check_output(check_prim=True)

1853 1854
    def if_enable_cinn(self):
        pass
A
Adam 已提交
1855 1856


1857 1858 1859 1860
class TestRelu_ZeroDim(TestRelu):
    def init_shape(self):
        self.shape = []

1861
    def if_enable_cinn(self):
K
Kang Zhao 已提交
1862 1863
        self.enable_cinn = False

1864

1865 1866 1867
class TestReluAPI(unittest.TestCase):
    # test paddle.nn.ReLU, paddle.nn.functional.relu
    def setUp(self):
1868
        np.random.seed(1024)
1869
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
1870 1871 1872
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
1873
            else paddle.CPUPlace()
1874
        )
1875 1876 1877 1878
        self.executed_api()

    def executed_api(self):
        self.relu = F.relu
1879 1880

    def test_static_api(self):
1881
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
1882
            with paddle.static.program_guard(paddle.static.Program()):
1883
                x = paddle.static.data('X', [10, 12], dtype="float32")
W
wanghuancoder 已提交
1884 1885 1886 1887 1888 1889 1890 1891
                out1 = self.relu(x)
                m = paddle.nn.ReLU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = np.maximum(self.x_np, 0)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
1892 1893 1894 1895

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ReLU()
1896 1897
        out1 = m(x)
        out2 = self.relu(x)
1898 1899
        out_ref = np.maximum(self.x_np, 0)
        for r in [out1, out2]:
1900
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1901

1902
    def test_errors(self):
1903 1904
        with paddle.fluid.framework._static_guard():
            with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
1905 1906 1907 1908
                with paddle.static.program_guard(paddle.static.Program()):
                    # The input type must be Variable.
                    self.assertRaises(TypeError, self.relu, 1)
                    # The input dtype must be float16, float32, float64.
1909
                    x_int32 = paddle.static.data(
W
wanghuancoder 已提交
1910 1911 1912 1913
                        name='x_int32', shape=[10, 12], dtype='int32'
                    )
                    self.assertRaises(TypeError, self.relu, x_int32)
                    # support the input dtype is float16
1914
                    x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
1915 1916 1917
                        name='x_fp16', shape=[10, 12], dtype='float16'
                    )
                    self.relu(x_fp16)
1918 1919 1920 1921 1922 1923


class TestReluInplaceAPI(TestReluAPI):
    # test paddle.nn.functional.relu_
    def executed_api(self):
        self.relu = F.relu_
1924 1925


1926 1927 1928 1929 1930 1931
def ref_leaky_relu(x, alpha=0.01):
    out = np.copy(x)
    out[out < 0] *= alpha
    return out


A
Adam 已提交
1932
class TestLeakyRelu(TestActivation):
1933 1934 1935
    def get_alpha(self):
        return 0.02

A
Adam 已提交
1936 1937
    def setUp(self):
        self.op_type = "leaky_relu"
W
wanghuancoder 已提交
1938
        self.python_api = paddle.nn.functional.leaky_relu
1939 1940
        self.public_python_api = paddle.nn.functional.leaky_relu
        self.prim_op_type = "comp"
A
Adam 已提交
1941
        self.init_dtype()
1942
        self.init_shape()
1943
        self.if_enable_cinn()
1944
        alpha = self.get_alpha()
A
Adam 已提交
1945

1946
        np.random.seed(1024)
1947
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
A
Adam 已提交
1948
        # The same reason with TestAbs
1949 1950
        x[np.abs(x) < 0.005] = 0.05
        out = ref_leaky_relu(x, alpha)
A
Adam 已提交
1951

1952
        self.inputs = {'X': x}
A
Adam 已提交
1953
        self.outputs = {'Out': out}
1954
        self.attrs = {'alpha': alpha}
1955
        self.convert_input_output()
A
Adam 已提交
1956

1957 1958 1959
    def if_enable_cinn(self):
        pass

1960 1961 1962
    def test_check_output(self):
        self.check_output(check_prim=True)

A
Adam 已提交
1963 1964 1965
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1966
        self.check_grad(['X'], 'Out', check_prim=True)
1967 1968


1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983
class TestLeakyReluAlpha1(TestLeakyRelu):
    def get_alpha(self):
        return 2


class TestLeakyReluAlpha2(TestLeakyRelu):
    def get_alpha(self):
        return -0.01


class TestLeakyReluAlpha3(TestLeakyRelu):
    def get_alpha(self):
        return -2.0


1984 1985 1986 1987
class TestLeakyRelu_ZeroDim(TestLeakyRelu):
    def init_shape(self):
        self.shape = []

1988
    def if_enable_cinn(self):
1989 1990
        self.enable_cinn = False

1991

1992 1993 1994
class TestLeakyReluAPI(unittest.TestCase):
    # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu,
    def setUp(self):
1995
        np.random.seed(1024)
1996
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
1997 1998 1999
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
2000
            else paddle.CPUPlace()
2001
        )
2002 2003

    def test_static_api(self):
2004
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2005
            with paddle.static.program_guard(paddle.static.Program()):
2006
                x = paddle.static.data('X', [10, 12], dtype="float32")
W
wanghuancoder 已提交
2007 2008 2009 2010 2011 2012 2013 2014
                out1 = F.leaky_relu(x)
                m = paddle.nn.LeakyReLU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_leaky_relu(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2015 2016

    def test_dygraph_api(self):
Z
Zhou Wei 已提交
2017
        x = paddle.to_tensor(self.x_np)
2018 2019 2020 2021 2022
        out1 = F.leaky_relu(x)
        m = paddle.nn.LeakyReLU()
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np)
        for r in [out1, out2]:
2023
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2024 2025 2026 2027 2028 2029

        out1 = F.leaky_relu(x, 0.6)
        m = paddle.nn.LeakyReLU(0.6)
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np, 0.6)
        for r in [out1, out2]:
2030
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2031

2032
    def test_errors(self):
2033
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2034 2035 2036 2037
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.leaky_relu, 1)
                # The input dtype must be float16, float32, float64.
2038
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
2039 2040 2041 2042
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.leaky_relu, x_int32)
                # support the input dtype is float16
2043
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
2044 2045 2046
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.leaky_relu(x_fp16)
2047 2048


2049 2050
def gelu(x, approximate):
    if approximate:
2051 2052 2053 2054 2055 2056 2057 2058
        y_ref = (
            0.5
            * x
            * (
                1.0
                + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))
            )
        )
2059 2060 2061 2062 2063 2064
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)


class TestGeluApproximate(TestActivation):
C
Clementine 已提交
2065 2066
    def setUp(self):
        self.op_type = "gelu"
2067 2068
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.gelu
2069
        self.public_python_api = paddle.nn.functional.gelu
C
Clementine 已提交
2070
        self.init_dtype()
2071
        self.init_shape()
2072
        approximate = True
2073
        np.random.seed(1024)
2074
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
2075
        out = gelu(x, approximate)
C
Clementine 已提交
2076

2077
        self.inputs = {'X': x}
2078 2079 2080
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

2081 2082 2083 2084
        # The backward decomposite of gelu is inconsistent with raw kernel on
        # cpu device, lower threshold to support 1e-8 for pass the unittest
        self.rev_comp_rtol = 1e-8
        self.rev_comp_atol = 1e-8
2085 2086 2087
        # Cumulative error occurs between comp and cinn, so that we also set cinn_rtol to 1e-8 as rev_comp_rtol = 1e-8
        self.cinn_rtol = 1e-8
        self.cinn_atol = 1e-8
C
cxxly 已提交
2088

2089 2090 2091
    def test_check_output(self):
        self.check_output(check_prim=True)

2092 2093 2094
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
2095
        self.check_grad(['X'], 'Out', check_prim=True)
2096 2097 2098 2099 2100


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
2101 2102
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.gelu
2103
        self.public_python_api = paddle.nn.functional.gelu
2104
        self.init_dtype()
2105
        self.init_shape()
2106
        approximate = False
2107
        np.random.seed(2048)
2108
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
2109
        out = gelu(x, approximate)
2110
        self.if_enable_cinn()
C
Clementine 已提交
2111

2112
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
C
Clementine 已提交
2113
        self.outputs = {'Out': out}
2114
        self.convert_input_output()
2115
        self.attrs = {"approximate": approximate}
2116 2117 2118 2119
        # The backward decomposite of gelu is inconsistent with raw kernel on
        # cpu, lower threshold to support 1e-8 for pass the unittest
        self.rev_comp_rtol = 1e-8
        self.rev_comp_atol = 1e-8
2120 2121 2122
        # Cumulative error occurs between comp and cinn, so that we also set cinn_rtol to 1e-8 as rev_comp_rtol = 1e-8
        self.cinn_rtol = 1e-8
        self.cinn_atol = 1e-8
C
Clementine 已提交
2123

2124
    def if_enable_cinn(self):
2125
        pass
2126 2127 2128 2129

    def test_check_output(self):
        self.check_output(check_prim=True)

C
Clementine 已提交
2130 2131 2132
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
2133
        self.check_grad(['X'], 'Out', check_prim=True)
C
Clementine 已提交
2134 2135


2136 2137 2138 2139
class TestGelu_ZeroDim(TestGelu):
    def init_shape(self):
        self.shape = []

2140 2141 2142
    def if_enable_cinn(self):
        self.enable_cinn = False

2143

2144 2145 2146
class TestGELUAPI(unittest.TestCase):
    # test paddle.nn.GELU, paddle.nn.functional.gelu
    def setUp(self):
2147
        np.random.seed(1024)
2148
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
2149 2150 2151
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
2152
            else paddle.CPUPlace()
2153
        )
C
cxxly 已提交
2154 2155
        self.enable_cinn = False

2156 2157 2158 2159
        # The backward decomposite of gelu is inconsistent with raw kernel on
        # cpu, lower threshold to support 1e-8 for pass the unittest
        self.rev_comp_rtol = 1e-8
        self.rev_comp_atol = 1e-8
2160 2161

    def test_static_api(self):
2162
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2163
            with paddle.static.program_guard(paddle.static.Program()):
2164
                x = paddle.static.data('X', [11, 17], dtype="float32")
W
wanghuancoder 已提交
2165 2166 2167 2168 2169 2170 2171 2172
                out1 = F.gelu(x)
                m = paddle.nn.GELU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = gelu(self.x_np, False)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2173 2174 2175 2176 2177 2178 2179 2180

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.gelu(x)
        m = paddle.nn.GELU()
        out2 = m(x)
        out_ref = gelu(self.x_np, False)
        for r in [out1, out2]:
2181
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2182 2183 2184 2185 2186 2187

        out1 = F.gelu(x, True)
        m = paddle.nn.GELU(True)
        out2 = m(x)
        out_ref = gelu(self.x_np, True)
        for r in [out1, out2]:
2188
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2189 2190

    def test_errors(self):
2191
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2192 2193 2194 2195
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.gelu, 1)
                # The input dtype must be float16, float32, float64.
2196
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
2197 2198 2199 2200
                    name='x_int32', shape=[11, 17], dtype='int32'
                )
                self.assertRaises(TypeError, F.gelu, x_int32)
                # support the input dtype is float16
2201
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
2202 2203 2204
                    name='x_fp16', shape=[11, 17], dtype='float16'
                )
                F.gelu(x_fp16)
2205 2206


C
chengduo 已提交
2207
class TestBRelu(TestActivation):
2208 2209
    def setUp(self):
        self.op_type = "brelu"
W
wanghuancoder 已提交
2210
        self.python_api = paddle.nn.functional.hardtanh
2211 2212
        self.init_dtype()

2213
        np.random.seed(1024)
Z
zhupengyang 已提交
2214
        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
Y
Yang Yang(Tony) 已提交
2215 2216
        t_min = 1.0
        t_max = 4.0
Q
qijun 已提交
2217 2218
        # The same with TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
Q
qijun 已提交
2219
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
2220 2221 2222
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max
2223 2224

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
F
fengjiayi 已提交
2225
        self.outputs = {'Out': t}
2226 2227
        self.convert_input_output()
        self.attrs = {'t_min': t_min, 't_max': t_max}
2228 2229

    def test_check_grad(self):
2230 2231
        if self.dtype == np.float16:
            return
2232
        self.check_grad(['X'], 'Out')
2233

2234

2235 2236 2237 2238 2239 2240 2241
def ref_relu6(x, threshold=6.0):
    out = np.copy(x)
    out[np.abs(x - threshold) < 0.005] = threshold + 0.02
    out = np.minimum(np.maximum(x, 0), threshold)
    return out


C
chengduo 已提交
2242
class TestRelu6(TestActivation):
K
Kavya Srinet 已提交
2243
    def setUp(self):
2244
        self.op_type = "relu6"
2245
        self.init_dtype()
2246
        self.init_shape()
2247
        self.python_api = paddle.nn.functional.relu6
2248

2249
        np.random.seed(1024)
2250
        x = np.random.uniform(-1, 10, self.shape).astype(self.dtype)
2251
        x[np.abs(x) < 0.005] = 0.02
2252
        out = ref_relu6(x)
2253

2254
        self.attrs = {'threshold': 6.0}
2255 2256

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
2257
        self.outputs = {'Out': out}
2258
        self.convert_input_output()
K
Kavya Srinet 已提交
2259

2260 2261 2262
    def init_shape(self):
        self.shape = [10, 12]

2263 2264 2265
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
2266
        self.check_grad(['X'], 'Out')
2267 2268


2269 2270 2271 2272 2273
class TestRelu6_ZeroDim(TestRelu6):
    def init_shape(self):
        self.shape = []


2274 2275 2276
class TestRelu6API(unittest.TestCase):
    # test paddle.nn.ReLU6, paddle.nn.functional.relu6
    def setUp(self):
2277
        np.random.seed(1024)
2278 2279
        self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
2280 2281 2282
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
2283
            else paddle.CPUPlace()
2284
        )
2285 2286

    def test_static_api(self):
2287
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2288
            with paddle.static.program_guard(paddle.static.Program()):
2289
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
W
wanghuancoder 已提交
2290 2291 2292 2293 2294 2295 2296 2297
                out1 = F.relu6(x)
                relu6 = paddle.nn.ReLU6()
                out2 = relu6(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_relu6(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2298 2299 2300 2301 2302 2303 2304 2305

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.relu6(x)
        relu6 = paddle.nn.ReLU6()
        out2 = relu6(x)
        out_ref = ref_relu6(self.x_np)
        for r in [out1, out2]:
2306
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2307 2308

    def test_fluid_api(self):
2309
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2310
            with fluid.program_guard(fluid.Program()):
2311
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
W
wanghuancoder 已提交
2312 2313 2314 2315 2316
                out = paddle.nn.functional.relu6(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_relu6(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
2317

2318
    def test_errors(self):
2319
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2320 2321 2322 2323
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.relu6, 1)
                # The input dtype must be float16, float32, float64.
2324
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
2325 2326 2327 2328
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.relu6, x_int32)
                # support the input dtype is float16
2329
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
2330 2331 2332
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.relu6(x_fp16)
2333 2334


2335 2336
class TestRelu6APIWarnings(unittest.TestCase):
    def test_warnings(self):
2337
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359
            with warnings.catch_warnings(record=True) as context:
                warnings.simplefilter("always")

                helper = LayerHelper("relu6")
                data = paddle.static.data(
                    name='data', shape=[None, 3, 32, 32], dtype='float32'
                )
                out = helper.create_variable_for_type_inference(
                    dtype=data.dtype
                )
                os.environ['FLAGS_print_extra_attrs'] = "1"
                helper.append_op(
                    type="relu6",
                    inputs={'X': data},
                    outputs={'Out': out},
                    attrs={'threshold': 6.0},
                )
                self.assertTrue(
                    "op relu6 use extra_attr: threshold"
                    in str(context[-1].message)
                )
                os.environ['FLAGS_print_extra_attrs'] = "0"
2360 2361


2362
def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
Z
Zhang Ting 已提交
2363 2364 2365 2366
    x_dtype = x.dtype
    if x_dtype == 'float16':
        x_dtype = 'float16'
        x = x.astype('float32')
2367 2368 2369
    return (
        x * np.minimum(np.maximum(x + offset, 0.0), threshold) / scale
    ).astype(x_dtype)
2370 2371


H
huangjun12 已提交
2372 2373 2374 2375
class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()
2376
        self.init_shape()
R
Roc 已提交
2377
        self.prim_op_type = "comp"
2378
        self.python_api = paddle.nn.functional.hardswish
2379
        self.public_python_api = paddle.nn.functional.hardswish
J
jakpiase 已提交
2380

2381
        np.random.seed(1024)
2382
        x = np.random.uniform(-6, 6, self.shape).astype(self.dtype)
H
huangjun12 已提交
2383 2384 2385
        threshold = 6.0
        scale = 6.0
        offset = 3.0
2386
        # the same with TestAbs
H
huangjun12 已提交
2387 2388
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
2389
        out = ref_hardswish(x, threshold, scale, offset)
H
huangjun12 已提交
2390

2391
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
H
huangjun12 已提交
2392
        self.outputs = {'Out': out}
2393 2394
        self.convert_input_output()
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
R
Roc 已提交
2395
        self.enable_cinn = False
H
huangjun12 已提交
2396

2397 2398 2399
    def init_shape(self):
        self.shape = [10, 12]

2400 2401 2402
    def if_only_check_prim(self):
        return False

H
huangjun12 已提交
2403
    def test_check_grad(self):
2404 2405 2406 2407 2408 2409
        self.check_grad(
            ['X'],
            'Out',
            check_prim=True,
            only_check_prim=self.if_only_check_prim(),
        )
2410 2411

    def test_check_output(self):
W
wanghuancoder 已提交
2412
        self.check_output(check_prim=True)
H
huangjun12 已提交
2413 2414


2415
class TestHardSwish_ZeroDim(TestHardSwish):
R
Roc 已提交
2416 2417 2418 2419 2420 2421 2422 2423
    def setUp(self):
        super().setUp()
        self.enable_cinn = False

    def init_shape(self):
        self.shape = []


2424 2425 2426 2427
class TestHardswishAPI(unittest.TestCase):
    # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
2428 2429 2430
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
2431
            else paddle.CPUPlace()
2432
        )
2433 2434

    def test_static_api(self):
2435
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2436
            with paddle.static.program_guard(paddle.static.Program()):
2437
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
W
wanghuancoder 已提交
2438 2439 2440 2441 2442 2443 2444 2445
                out1 = F.hardswish(x)
                m = paddle.nn.Hardswish()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardswish(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2446 2447

    def test_dygraph_api(self):
2448
        x = paddle.to_tensor([11648.0, 11448.0])
2449 2450 2451
        out1 = F.hardswish(x)
        m = paddle.nn.Hardswish()
        out2 = m(x)
2452
        out_ref = [11648.0, 11448.0]
2453
        for r in [out1, out2]:
2454
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2455 2456

    def test_fluid_api(self):
2457
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2458
            with fluid.program_guard(fluid.Program()):
2459
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
W
wanghuancoder 已提交
2460 2461 2462 2463 2464 2465
                out = paddle.nn.functional.hardswish(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_hardswish(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

2466
        x = paddle.to_tensor(self.x_np)
2467
        out = paddle.nn.functional.hardswish(x)
2468
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
2469 2470

    def test_errors(self):
2471
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2472 2473 2474 2475
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardswish, 1)
                # The input dtype must be float16, float32, float64.
2476
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
2477 2478 2479 2480
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardswish, x_int32)
                # support the input dtype is float16
2481
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
2482 2483 2484
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardswish(x_fp16)
2485 2486


C
chengduo 已提交
2487
class TestSoftRelu(TestActivation):
2488 2489
    def setUp(self):
        self.op_type = "soft_relu"
2490 2491
        self.init_dtype()

2492
        np.random.seed(4096)
2493
        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
Y
Yang Yang(Tony) 已提交
2494
        threshold = 2.0
Q
qijun 已提交
2495 2496
        # The same reason with TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
Z
zhupengyang 已提交
2497
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
2498 2499 2500
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
2501
        out = np.log(np.exp(t) + 1)
2502 2503 2504

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
2505 2506
        self.convert_input_output()
        self.attrs = {'threshold': threshold}
2507

2508 2509 2510
    def test_check_output(self):
        self.check_output(check_dygraph=False)

2511
    def test_check_grad(self):
2512 2513
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
2514 2515 2516
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.02, check_dygraph=False
        )
2517

2518

2519
def elu(x, alpha):
Z
zhupengyang 已提交
2520
    out_ref = np.where(x > 0, x, alpha * (np.exp(x) - 1))
2521 2522 2523
    return out_ref.astype(x.dtype)


C
chengduo 已提交
2524
class TestELU(TestActivation):
2525 2526
    def setUp(self):
        self.op_type = "elu"
2527
        self.init_dtype()
2528
        self.init_shape()
W
wanghuancoder 已提交
2529
        self.python_api = paddle.nn.functional.elu
2530

2531
        np.random.seed(1024)
2532
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
Z
zhupengyang 已提交
2533
        alpha = self.get_alpha()
2534
        out = elu(x, alpha)
2535 2536
        # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
2537 2538

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
2539
        self.outputs = {'Out': out}
2540 2541
        self.convert_input_output()
        self.attrs = {'alpha': alpha}
2542

2543 2544 2545
    def init_shape(self):
        self.shape = [10, 12]

2546
    def test_check_grad(self):
2547 2548
        if self.dtype == np.float16:
            return
2549
        self.check_grad(['X'], 'Out')
2550

Z
zhupengyang 已提交
2551
    def get_alpha(self):
2552
        return 1.0
Z
zhupengyang 已提交
2553 2554 2555 2556 2557 2558


class TestELUAlpha(TestELU):
    def get_alpha(self):
        return -0.2

2559

2560 2561 2562 2563 2564
class TestELU_ZeroDim(TestELU):
    def init_shape(self):
        self.shape = []


2565 2566 2567
class TestELUAPI(unittest.TestCase):
    # test paddle.nn.ELU, paddle.nn.functional.elu
    def setUp(self):
2568
        np.random.seed(1024)
2569
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
2570 2571 2572
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
2573
            else paddle.CPUPlace()
2574
        )
2575 2576 2577 2578
        self.executed_api()

    def executed_api(self):
        self.elu = F.elu
2579 2580

    def test_static_api(self):
2581
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2582
            with paddle.static.program_guard(paddle.static.Program()):
2583
                x = paddle.static.data('X', [10, 12], dtype="float32")
W
wanghuancoder 已提交
2584 2585 2586 2587 2588 2589 2590 2591
                out1 = self.elu(x)
                m = paddle.nn.ELU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = elu(self.x_np, 1.0)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2592 2593 2594

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
2595 2596
        out1 = self.elu(x)
        x = paddle.to_tensor(self.x_np)
2597 2598 2599 2600
        m = paddle.nn.ELU()
        out2 = m(x)
        out_ref = elu(self.x_np, 1.0)
        for r in [out1, out2]:
2601
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2602

2603 2604
        out1 = self.elu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
2605 2606 2607 2608
        m = paddle.nn.ELU(0.2)
        out2 = m(x)
        out_ref = elu(self.x_np, 0.2)
        for r in [out1, out2]:
2609
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2610

2611
    def test_errors(self):
2612
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2613 2614 2615 2616
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.elu, 1)
                # The input dtype must be float16, float32, float64.
2617
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
2618 2619 2620 2621
                    name='x_int32', shape=[10, 12], dtype='int32'
                )
                self.assertRaises(TypeError, self.elu, x_int32)
                # support the input dtype is float16
2622
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
2623 2624 2625
                    name='x_fp16', shape=[10, 12], dtype='float16'
                )
                self.elu(x_fp16)
2626 2627


Z
zhupengyang 已提交
2628 2629 2630 2631 2632 2633 2634 2635 2636 2637
class TestELUInplaceAPI(TestELUAPI):
    # test paddle.nn.functional.elu_
    def executed_api(self):
        self.elu = F.elu_

    def test_alpha_error(self):
        x = paddle.to_tensor(self.x_np)
        self.assertRaises(Exception, F.elu_, x, -0.2)


2638 2639 2640 2641 2642 2643 2644 2645 2646
def celu(x, alpha):
    out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x / alpha) - 1))
    return out_ref.astype(x.dtype)


class TestCELU(TestActivation):
    def setUp(self):
        self.op_type = "celu"
        self.init_dtype()
2647
        self.init_shape()
2648

2649
        self.python_api = paddle.nn.functional.celu
2650
        np.random.seed(1024)
2651
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
2652 2653
        alpha = 1.5
        out = celu(x, alpha)
2654 2655

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
2656
        self.outputs = {'Out': out}
2657 2658
        self.convert_input_output()
        self.attrs = {'alpha': alpha}
2659

2660 2661 2662
    def init_shape(self):
        self.shape = [10, 12]

2663 2664 2665
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
2666
        self.check_grad(['X'], 'Out')
2667 2668


2669 2670 2671 2672 2673
class TestCELU_ZeroDim(TestCELU):
    def init_shape(self):
        self.shape = []


2674 2675 2676 2677 2678
class TestCELUAPI(unittest.TestCase):
    # test paddle.nn.CELU, paddle.nn.functional.celu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
2679 2680 2681
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
2682
            else paddle.CPUPlace()
2683
        )
2684 2685 2686 2687 2688 2689
        self.executed_api()

    def executed_api(self):
        self.celu = F.celu

    def test_static_api(self):
2690
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2691
            with paddle.static.program_guard(paddle.static.Program()):
2692
                x = paddle.static.data('X', [10, 12], dtype="float32")
W
wanghuancoder 已提交
2693 2694 2695 2696 2697 2698 2699 2700
                out1 = self.celu(x, 1.5)
                m = paddle.nn.CELU(1.5)
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = celu(self.x_np, 1.5)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2701 2702 2703 2704 2705 2706 2707 2708 2709

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = self.celu(x, 1.5)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(1.5)
        out2 = m(x)
        out_ref = celu(self.x_np, 1.5)
        for r in [out1, out2]:
2710
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2711 2712 2713 2714 2715 2716 2717

        out1 = self.celu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(0.2)
        out2 = m(x)
        out_ref = celu(self.x_np, 0.2)
        for r in [out1, out2]:
2718
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2719 2720

    def test_errors(self):
2721
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2722 2723 2724 2725
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.celu, 1)
                # The input dtype must be float16, float32, float64.
2726
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
2727 2728 2729 2730
                    name='x_int32', shape=[10, 12], dtype='int32'
                )
                self.assertRaises(TypeError, self.celu, x_int32)
                # The alpha must be not equal 0
2731
                x_fp32 = paddle.static.data(
W
wanghuancoder 已提交
2732 2733 2734 2735
                    name='x_fp32', shape=[10, 12], dtype='float32'
                )
                self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0)
                # support the input dtype is float16
2736
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
2737 2738 2739
                    name='x_fp16', shape=[10, 12], dtype='float16'
                )
                self.celu(x_fp16)
2740 2741


C
chengduo 已提交
2742
class TestReciprocal(TestActivation):
Q
qijun 已提交
2743 2744
    def setUp(self):
        self.op_type = "reciprocal"
2745
        self.python_api = paddle.reciprocal
2746
        self.init_dtype()
2747
        self.init_shape()
2748

2749
        np.random.seed(1024)
2750
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
2751 2752 2753 2754
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
2755
        self.convert_input_output()
Q
qijun 已提交
2756 2757

    def test_check_grad(self):
2758 2759
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
2760
        self.check_grad(['X'], 'Out', max_relative_error=0.01)
2761 2762

    def test_check_output(self):
W
wanghuancoder 已提交
2763
        self.check_output()
Q
qijun 已提交
2764 2765


2766 2767 2768 2769 2770
class TestReciprocal_ZeroDim(TestReciprocal):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
2771
class TestLog(TestActivation):
Q
qijun 已提交
2772 2773
    def setUp(self):
        self.op_type = "log"
2774
        self.prim_op_type = "prim"
2775
        self.python_api = paddle.log
2776
        self.public_python_api = paddle.log
2777
        self.init_dtype()
2778
        self.init_shape()
2779
        self.if_enable_cinn()
2780

2781
        np.random.seed(1024)
2782
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
2783 2784 2785 2786
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
2787
        self.convert_input_output()
Q
qijun 已提交
2788

2789 2790 2791
    def if_enable_cinn(self):
        pass

Q
qijun 已提交
2792
    def test_check_grad(self):
2793 2794
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
2795
        self.check_grad(['X'], 'Out', check_prim=True)
Q
qijun 已提交
2796

2797
    def test_error(self):
2798 2799
        with paddle.fluid.framework._static_guard():
            with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2800 2801 2802 2803 2804 2805
                in1 = paddle.static.data(
                    name="in1", shape=[11, 17], dtype="int32"
                )
                in2 = paddle.static.data(
                    name="in2", shape=[11, 17], dtype="int64"
                )
2806

W
wanghuancoder 已提交
2807 2808
                self.assertRaises(TypeError, paddle.log, in1)
                self.assertRaises(TypeError, paddle.log, in2)
2809

2810

2811 2812
class Test_Log_Op_Fp16(unittest.TestCase):
    def test_api_fp16(self):
2813
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2814 2815 2816 2817 2818 2819 2820 2821 2822 2823
            with static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                x = [[2, 3, 4], [7, 8, 9]]
                x = paddle.to_tensor(x, dtype='float16')
                out = paddle.log(x)
                if core.is_compiled_with_cuda():
                    place = paddle.CUDAPlace(0)
                    exe = paddle.static.Executor(place)
                    (res,) = exe.run(fetch_list=[out])
2824 2825


2826 2827 2828 2829
class TestLog_ZeroDim(TestLog):
    def init_shape(self):
        self.shape = []

2830 2831 2832
    def if_enable_cinn(self):
        self.enable_cinn = False

2833

J
joejiong 已提交
2834 2835 2836
class TestLog2(TestActivation):
    def setUp(self):
        self.op_type = "log2"
2837
        self.python_api = paddle.log2
J
joejiong 已提交
2838
        self.init_dtype()
2839
        self.init_shape()
J
joejiong 已提交
2840

2841
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
J
joejiong 已提交
2842 2843 2844 2845
        out = np.log2(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
2846
        self.convert_input_output()
J
joejiong 已提交
2847 2848 2849 2850

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
2851
        self.check_grad(['X'], 'Out')
J
joejiong 已提交
2852 2853

    def test_error(self):
2854
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2855 2856
            in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
            in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")
J
joejiong 已提交
2857

W
wanghuancoder 已提交
2858 2859
            self.assertRaises(TypeError, paddle.log2, in1)
            self.assertRaises(TypeError, paddle.log2, in2)
J
joejiong 已提交
2860 2861

    def test_api(self):
2862
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880
            with paddle.static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                data_x = paddle.static.data(
                    name="data_x", shape=[11, 17], dtype="float64"
                )

                out1 = paddle.log2(data_x)
                exe = paddle.static.Executor(place=fluid.CPUPlace())
                exe.run(paddle.static.default_startup_program())
                (res1,) = exe.run(
                    paddle.static.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[out1],
                )
            expected_res = np.log2(input_x)
            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)
J
joejiong 已提交
2881 2882 2883 2884 2885 2886 2887 2888

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log2(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log2(np_x))
2889
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
J
joejiong 已提交
2890 2891


2892 2893 2894 2895 2896
class TestLog2_ZeroDim(TestLog2):
    def init_shape(self):
        self.shape = []


J
joejiong 已提交
2897 2898 2899
class TestLog10(TestActivation):
    def setUp(self):
        self.op_type = "log10"
2900
        self.python_api = paddle.log10
J
joejiong 已提交
2901
        self.init_dtype()
2902
        self.init_shape()
J
joejiong 已提交
2903

2904
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
J
joejiong 已提交
2905 2906 2907 2908
        out = np.log10(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
2909
        self.convert_input_output()
J
joejiong 已提交
2910 2911 2912 2913

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
2914
        self.check_grad(['X'], 'Out')
J
joejiong 已提交
2915

2916 2917 2918 2919 2920 2921 2922

class TestLog10_ZeroDim(TestLog10):
    def init_shape(self):
        self.shape = []


class TestLog10API(unittest.TestCase):
J
joejiong 已提交
2923
    def test_error(self):
2924
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2925 2926
            in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
            in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")
J
joejiong 已提交
2927

W
wanghuancoder 已提交
2928 2929
            self.assertRaises(TypeError, paddle.log10, in1)
            self.assertRaises(TypeError, paddle.log10, in2)
J
joejiong 已提交
2930 2931

    def test_api(self):
2932
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950
            with paddle.static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                data_x = paddle.static.data(
                    name="data_x", shape=[11, 17], dtype="float64"
                )

                out1 = paddle.log10(data_x)
                exe = paddle.static.Executor(place=paddle.CPUPlace())
                exe.run(paddle.static.default_startup_program())
                (res1,) = exe.run(
                    paddle.static.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[out1],
                )
            expected_res = np.log10(input_x)
            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)
J
joejiong 已提交
2951 2952 2953 2954 2955 2956 2957 2958

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log10(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log10(np_x))
2959
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
J
joejiong 已提交
2960 2961


2962 2963 2964
class TestLog1p(TestActivation):
    def setUp(self):
        self.op_type = "log1p"
2965
        self.python_api = paddle.log1p
2966
        self.init_dtype()
2967
        self.init_shape()
2968

2969
        np.random.seed(1024)
2970
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
2971 2972 2973 2974
        out = np.log1p(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
2975
        self.convert_input_output()
2976 2977 2978 2979

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
2980
        self.check_grad(['X'], 'Out')
2981

2982

2983 2984
class Test_Log1p_Op_Fp16(unittest.TestCase):
    def test_api_fp16(self):
2985
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2986 2987 2988 2989 2990 2991 2992 2993 2994 2995
            with static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                x = [[2, 3, 4], [7, 8, 9]]
                x = paddle.to_tensor(x, dtype='float16')
                out = paddle.log1p(x)
                if core.is_compiled_with_cuda():
                    place = paddle.CUDAPlace(0)
                    exe = paddle.static.Executor(place)
                    (res,) = exe.run(fetch_list=[out])
2996 2997


2998 2999 3000 3001 3002 3003
class TestLog1p_ZeroDim(TestLog1p):
    def init_shape(self):
        self.shape = []


class TestLog1pAPI(unittest.TestCase):
3004
    def test_api(self):
3005
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                data_x = paddle.static.data(
                    name="data_x",
                    shape=[11, 17],
                    dtype="float64",
                )

                out1 = paddle.log1p(data_x)
                exe = fluid.Executor(place=fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                (res1,) = exe.run(
                    fluid.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[out1],
                )
            expected_res = np.log1p(input_x)
            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)
3024 3025 3026 3027 3028 3029 3030 3031

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
            z = paddle.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
3032
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
3033 3034


C
chengduo 已提交
3035
class TestSquare(TestActivation):
Q
qijun 已提交
3036 3037
    def setUp(self):
        self.op_type = "square"
3038
        self.python_api = paddle.square
3039
        self.init_dtype()
3040
        self.init_shape()
3041

3042
        np.random.seed(1024)
3043
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
3044 3045 3046 3047
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
3048
        self.convert_input_output()
Q
qijun 已提交
3049 3050

    def test_check_grad(self):
3051 3052
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
3053
        self.check_grad(['X'], 'Out', max_relative_error=0.007)
3054 3055

    def test_check_output(self):
W
wanghuancoder 已提交
3056
        self.check_output()
Q
qijun 已提交
3057

3058

3059 3060 3061 3062 3063
class TestSquare_ZeroDim(TestSquare):
    def init_shape(self):
        self.shape = []


3064
@unittest.skipIf(
R
ronnywang 已提交
3065 3066
    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
    "core is not compiled with CUDA",
3067
)
3068 3069 3070
class TestSquareBF16(OpTest):
    def setUp(self):
        self.op_type = "square"
3071
        self.python_api = paddle.square
3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        out = np.square(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
W
wanghuancoder 已提交
3088
        self.check_output_with_place(place)
3089 3090 3091

    def test_check_grad(self):
        place = core.CUDAPlace(0)
W
wanghuancoder 已提交
3092
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.5)
3093 3094


C
chengduo 已提交
3095
class TestPow(TestActivation):
3096 3097
    def setUp(self):
        self.op_type = "pow"
3098
        self.prim_op_type = "comp"
3099
        self.python_api = paddle.pow
3100
        self.public_python_api = paddle.pow
3101
        self.init_dtype()
3102
        self.init_shape()
3103
        self.if_enable_cinn()
3104

3105
        np.random.seed(1024)
3106
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
3107 3108 3109 3110
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
3111 3112
        self.attrs = {'factor': 3.0}
        self.convert_input_output()
3113

3114 3115 3116
    def if_enable_cinn(self):
        pass

3117
    def test_check_output(self):
3118
        self.check_output(check_prim=True)
3119

3120
    def test_check_grad(self):
3121 3122
        if self.dtype == np.float16:
            return
3123
        self.check_grad(['X'], 'Out', check_prim=True)
3124

3125

3126 3127 3128 3129
class TestPow_ZeroDim(TestPow):
    def init_shape(self):
        self.shape = []

3130
    def if_enable_cinn(self):
3131 3132
        self.enable_cinn = False

3133

3134 3135 3136
class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
3137
        self.python_api = paddle.pow
3138
        self.enable_cinn = False
3139 3140
        self.init_dtype()

3141
        np.random.seed(1024)
3142 3143 3144 3145 3146
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
W
wanghuancoder 已提交
3147
            'FactorTensor': np.array([3.0]).astype(self.dtype),
3148 3149 3150 3151 3152 3153
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
W
wanghuancoder 已提交
3154
        self.check_output()
3155 3156 3157 3158

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
3159
        self.check_grad(['X'], 'Out')
3160 3161

    def test_api(self):
3162
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3163 3164 3165 3166 3167 3168 3169
            input = np.random.uniform(1, 2, [11, 17]).astype("float32")
            x = paddle.static.data(name="x", shape=[11, 17], dtype="float32")
            res = paddle.static.data(
                name="res", shape=[11, 17], dtype="float32"
            )

            factor_1 = 2.0
3170
            factor_2 = paddle.tensor.fill_constant([1], "float32", 3.0)
W
wanghuancoder 已提交
3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182
            out_1 = paddle.pow(x, factor_1)
            out_2 = paddle.pow(x, factor_2)
            out_4 = paddle.pow(x, factor_1, name='pow_res')
            out_6 = paddle.pow(x, factor_2)
            self.assertEqual(('pow_res' in out_4.name), True)

            exe = fluid.Executor(place=fluid.CPUPlace())
            res_1, res_2, res, res_6 = exe.run(
                fluid.default_main_program(),
                feed={"x": input},
                fetch_list=[out_1, out_2, res, out_6],
            )
3183

W
wanghuancoder 已提交
3184 3185 3186
            assert np.allclose(res_1, np.power(input, 2))
            assert np.allclose(res_2, np.power(input, 3))
            assert np.allclose(res_6, np.power(input, 3))
3187 3188


3189 3190 3191 3192 3193
def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
    out = scale_b * np.tanh(x * scale_a)
    return out


C
chengduo 已提交
3194
class TestSTanh(TestActivation):
3195 3196 3197 3198 3199 3200
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

3201 3202
    def setUp(self):
        self.op_type = "stanh"
W
wanghuancoder 已提交
3203
        self.python_api = paddle.stanh
3204
        self.init_dtype()
3205 3206
        self.init_shape()

3207 3208
        scale_a = self.get_scale_a()
        scale_b = self.get_scale_b()
3209

3210
        np.random.seed(1024)
3211
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
3212 3213
        # The same reason with TestAbs
        out = ref_stanh(x, scale_a, scale_b)
3214

3215
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
3216
        self.outputs = {'Out': out}
3217 3218
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.convert_input_output()
3219

Q
qijun 已提交
3220
    def test_check_grad(self):
3221 3222
        if self.dtype == np.float16:
            return
3223
        self.check_grad(['X'], 'Out')
Q
qijun 已提交
3224

3225

3226 3227 3228 3229 3230 3231 3232 3233 3234 3235
class TestSTanhScaleA(TestSTanh):
    def get_scale_a(self):
        return 2.0


class TestSTanhScaleB(TestSTanh):
    def get_scale_b(self):
        return 0.5


3236 3237 3238 3239 3240
class TestSTanh_ZeroDim(TestSTanh):
    def init_shape(self):
        self.shape = []


3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253
class TestSTanhAPI(unittest.TestCase):
    # test paddle.nn.stanh
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.scale_a = self.get_scale_a()
        self.scale_b = self.get_scale_b()
3254 3255 3256
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
3257
            else paddle.CPUPlace()
3258
        )
3259 3260

    def test_static_api(self):
3261
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3262
            with paddle.static.program_guard(paddle.static.Program()):
3263
                x = paddle.static.data('X', [10, 12])
W
wanghuancoder 已提交
3264 3265 3266 3267 3268 3269
                out = paddle.stanh(x, self.scale_a, self.scale_b)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
3270 3271 3272 3273 3274 3275

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out = paddle.stanh(x, self.scale_a, self.scale_b)
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in [out]:
3276
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
3277 3278

    def test_fluid_api(self):
3279
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3280
            with fluid.program_guard(fluid.Program()):
3281
                x = paddle.static.data('X', [10, 12], dtype="float32")
W
wanghuancoder 已提交
3282 3283 3284 3285 3286
                out = paddle.stanh(x, self.scale_a, self.scale_b)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
3287

3288
    def test_errors(self):
3289
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3290 3291 3292 3293
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, paddle.stanh, 1)
                # The input dtype must be float16, float32, float64.
3294
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
3295 3296 3297 3298
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, paddle.stanh, x_int32)
                # support the input dtype is float16
3299
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
3300 3301 3302
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                paddle.stanh(x_fp16)
3303 3304 3305 3306 3307 3308 3309 3310 3311 3312


class TestSTanhAPIScaleA(TestSTanhAPI):
    def get_scale_a(self):
        return 2.0


class TestSTanhAPIScaleB(TestSTanhAPI):
    def get_scale_b(self):
        return 0.5
3313 3314


3315 3316
def ref_softplus(x, beta=1, threshold=20):
    x_beta = beta * x
3317 3318 3319 3320
    out = np.select(
        [x_beta <= threshold, x_beta > threshold],
        [np.log(1 + np.exp(x_beta)) / beta, x],
    )
3321 3322 3323
    return out


C
chengduo 已提交
3324
class TestSoftplus(TestActivation):
K
kexinzhao 已提交
3325 3326
    def setUp(self):
        self.op_type = "softplus"
W
Wang Bojun 已提交
3327
        self.python_api = paddle.nn.functional.softplus
3328
        self.init_dtype()
3329
        self.init_shape()
3330

3331 3332
        beta = 2
        threshold = 15
3333

3334
        np.random.seed(1024)
3335
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
3336 3337 3338
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': x}
        self.attrs = {'beta': beta, "threshold": threshold}
3339
        self.outputs = {'Out': out}
K
kexinzhao 已提交
3340

3341 3342 3343
    def init_shape(self):
        self.shape = [10, 12]

K
kexinzhao 已提交
3344
    def test_check_grad(self):
3345 3346
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
3347
        self.check_grad(['X'], 'Out')
K
kexinzhao 已提交
3348

3349

3350 3351 3352 3353 3354
class TestSoftplus_ZeroDim(TestSoftplus):
    def init_shape(self):
        self.shape = []


3355
@unittest.skipIf(
R
ronnywang 已提交
3356 3357
    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
    "core is not compiled with CUDA",
3358
)
3359 3360 3361 3362
class TestSoftplusBF16(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
W
wanghuancoder 已提交
3363
        self.python_api = paddle.nn.functional.softplus
3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(np.float32)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.05)


3387 3388 3389 3390 3391
class TestSoftplusAPI(unittest.TestCase):
    # test paddle.nn.Softplus, paddle.nn.functional.softplus
    def setUp(self):
        self.beta = 2
        self.threshold = 15
3392
        np.random.seed(1024)
3393
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
3394 3395 3396
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
3397
            else paddle.CPUPlace()
3398
        )
3399 3400

    def test_static_api(self):
3401
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3402
            with paddle.static.program_guard(paddle.static.Program()):
3403
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
W
wanghuancoder 已提交
3404 3405 3406 3407 3408 3409 3410 3411
                out1 = F.softplus(x, self.beta, self.threshold)
                softplus = paddle.nn.Softplus(self.beta, self.threshold)
                out2 = softplus(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
3412 3413 3414 3415 3416 3417 3418 3419

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.softplus(x, self.beta, self.threshold)
        softplus = paddle.nn.Softplus(self.beta, self.threshold)
        out2 = softplus(x)
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in [out1, out2]:
3420
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
3421 3422

    def test_errors(self):
3423
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3424 3425 3426 3427
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.softplus, 1)
                # The input dtype must be float16, float32, float64.
3428
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
3429 3430 3431 3432
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.softplus, x_int32)
                # support the input dtype is float16
3433
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
3434 3435 3436
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.softplus(x_fp16)
3437 3438 3439 3440 3441 3442 3443


def ref_softsign(x):
    out = np.divide(x, 1 + np.abs(x))
    return out


C
chengduo 已提交
3444
class TestSoftsign(TestActivation):
3445 3446
    def setUp(self):
        self.op_type = "softsign"
3447
        self.init_dtype()
3448 3449
        self.init_shape()

3450
        self.python_api = paddle.nn.functional.softsign
3451

3452
        np.random.seed(1024)
3453
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
3454
        out = ref_softsign(x)
3455 3456

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
3457
        self.outputs = {'Out': out}
3458
        self.convert_input_output()
3459

3460 3461 3462
    def init_shape(self):
        self.shape = [10, 12]

3463
    def test_check_grad(self):
3464 3465
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
3466
        self.check_grad(['X'], 'Out')
3467 3468


3469 3470 3471 3472 3473
class TestSoftsign_ZeroDim(TestSoftsign):
    def init_shape(self):
        self.shape = []


3474 3475 3476
class TestSoftsignAPI(unittest.TestCase):
    # test paddle.nn.Softsign, paddle.nn.functional.softsign
    def setUp(self):
3477
        np.random.seed(1024)
3478
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
3479 3480 3481
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
3482
            else paddle.CPUPlace()
3483
        )
3484 3485

    def test_static_api(self):
3486
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3487
            with paddle.static.program_guard(paddle.static.Program()):
3488
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
W
wanghuancoder 已提交
3489 3490 3491 3492 3493 3494 3495 3496
                out1 = F.softsign(x)
                softsign = paddle.nn.Softsign()
                out2 = softsign(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softsign(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
3497 3498 3499 3500 3501 3502 3503 3504

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.softsign(x)
        softsign = paddle.nn.Softsign()
        out2 = softsign(x)
        out_ref = ref_softsign(self.x_np)
        for r in [out1, out2]:
3505
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
3506 3507

    def test_errors(self):
3508
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3509 3510 3511 3512
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.softsign, 1)
                # The input dtype must be float16, float32, float64.
3513
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
3514 3515 3516 3517
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.softsign, x_int32)
                # support the input dtype is float16
3518
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
3519 3520 3521
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.softsign(x_fp16)
3522 3523


3524 3525 3526 3527 3528
def ref_thresholded_relu(x, threshold=1.0):
    out = (x > threshold) * x
    return out


C
chengduo 已提交
3529
class TestThresholdedRelu(TestActivation):
3530 3531
    def setUp(self):
        self.op_type = "thresholded_relu"
3532
        self.init_dtype()
3533
        self.init_shape()
W
wanghuancoder 已提交
3534
        self.python_api = paddle.nn.functional.thresholded_relu
3535

3536
        threshold = 15
3537

3538
        np.random.seed(1024)
3539
        x = np.random.uniform(-20, 20, self.shape).astype(self.dtype)
3540 3541
        x[np.abs(x) < 0.005] = 0.02
        out = ref_thresholded_relu(x, threshold)
3542 3543

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
3544
        self.outputs = {'Out': out}
3545 3546
        self.attrs = {"threshold": threshold}
        self.convert_input_output()
3547

3548 3549 3550
    def init_shape(self):
        self.shape = [10, 12]

3551
    def test_check_grad(self):
3552 3553
        if self.dtype == np.float16:
            return
3554
        self.check_grad(['X'], 'Out')
3555 3556


3557 3558 3559 3560 3561
class TestThresholdedRelu_ZeroDim(TestThresholdedRelu):
    def init_shape(self):
        self.shape = []


3562 3563 3564 3565 3566 3567 3568
class TestThresholdedReluAPI(unittest.TestCase):
    # test paddle.nn.ThresholdedReLU, paddle.nn.functional.thresholded_relu
    def setUp(self):
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-20, 20, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
3569 3570 3571
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
3572
            else paddle.CPUPlace()
3573
        )
3574 3575

    def test_static_api(self):
3576
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3577
            with paddle.static.program_guard(paddle.static.Program()):
3578
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
W
wanghuancoder 已提交
3579 3580 3581 3582 3583 3584 3585 3586
                out1 = F.thresholded_relu(x, self.threshold)
                thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
                out2 = thresholded_relu(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_thresholded_relu(self.x_np, self.threshold)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
3587 3588

    def test_dygraph_api(self):
3589
        paddle.disable_static()
3590 3591 3592 3593 3594 3595
        x = paddle.to_tensor(self.x_np)
        out1 = F.thresholded_relu(x, self.threshold)
        thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
        out2 = thresholded_relu(x)
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in [out1, out2]:
3596
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
3597

3598
    def test_errors(self):
3599
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3600 3601 3602 3603
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.thresholded_relu, 1)
                # The input dtype must be float16, float32, float64.
3604
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
3605 3606 3607 3608
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.thresholded_relu, x_int32)
                # support the input dtype is float16
3609
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
3610 3611 3612
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.thresholded_relu(x_fp16)
3613 3614


3615
def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
3616
    return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype)
3617 3618


C
chengduo 已提交
3619
class TestHardSigmoid(TestActivation):
3620 3621
    def setUp(self):
        self.op_type = "hard_sigmoid"
3622 3623 3624 3625
        self.dtype = 'float64'
        self.slope = 0.166666666666667
        self.offset = 0.5
        self.set_attrs()
3626
        self.init_shape()
W
wanghuancoder 已提交
3627
        self.python_api = paddle.nn.functional.hardsigmoid
3628

3629
        x = np.random.uniform(-5, 5, self.shape).astype(self.dtype)
3630
        lower_threshold = -self.offset / self.slope
3631
        upper_threshold = (1.0 - self.offset) / self.slope
Z
zhupengyang 已提交
3632

3633
        # Same reason as TestAbs
3634 3635 3636
        delta = 0.005
        x[np.abs(x - lower_threshold) < delta] = lower_threshold - 0.02
        x[np.abs(x - upper_threshold) < delta] = upper_threshold - 0.02
3637

3638
        out = ref_hardsigmoid(x, self.slope, self.offset)
3639

3640
        self.attrs = {'slope': self.slope, 'offset': self.offset}
3641 3642

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
3643
        self.outputs = {'Out': out}
3644
        self.convert_input_output()
3645

3646 3647 3648
    def init_shape(self):
        self.shape = [10, 12]

3649 3650
    def set_attrs(self):
        pass
3651

3652

3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663
class TestHardSigmoidFP32(TestHardSigmoid):
    def set_attrs(self):
        self.dtype = 'float32'


class TestHardSigmoidSlopeOffset(TestHardSigmoid):
    def set_attrs(self):
        self.slope = 0.2
        self.offset = 0.4


3664 3665 3666 3667 3668
class TestHardSigmoid_ZeroDim(TestHardSigmoid):
    def init_shape(self):
        self.shape = []


3669 3670 3671 3672
class TestHardsigmoidAPI(unittest.TestCase):
    # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
3673 3674 3675
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
3676
            else paddle.CPUPlace()
3677
        )
3678 3679

    def test_static_api(self):
3680
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3681 3682 3683 3684 3685 3686 3687 3688 3689 3690
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.hardsigmoid(x)
                m = paddle.nn.Hardsigmoid()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardsigmoid(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
3691 3692 3693 3694 3695 3696 3697 3698

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardsigmoid(x)
        m = paddle.nn.Hardsigmoid()
        out2 = m(x)
        out_ref = ref_hardsigmoid(self.x_np)
        for r in [out1, out2]:
3699
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
3700 3701

    def test_fluid_api(self):
3702
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3703
            with fluid.program_guard(fluid.Program()):
3704
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
W
wanghuancoder 已提交
3705 3706 3707 3708 3709 3710
                out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

3711
        paddle.disable_static(self.place)
3712
        x = paddle.to_tensor(self.x_np)
3713
        out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
3714
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
3715 3716

    def test_errors(self):
3717
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3718 3719 3720 3721
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardsigmoid, 1)
                # The input dtype must be float16, float32, float64.
3722
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
3723 3724 3725 3726
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardsigmoid, x_int32)
                # support the input dtype is float16
3727
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
3728 3729 3730
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardsigmoid(x_fp16)
3731 3732


3733 3734 3735 3736 3737
def ref_swish(x):
    out = x * expit(x)
    return out


C
chengduo 已提交
3738
class TestSwish(TestActivation):
A
Abhinav Arora 已提交
3739 3740
    def setUp(self):
        self.op_type = "swish"
3741
        self.python_api = paddle.nn.functional.swish
3742
        self.init_dtype()
3743 3744
        self.init_shape()

3745
        np.random.seed(1024)
3746
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
3747
        out = ref_swish(x)
3748 3749

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
3750
        self.outputs = {'Out': out}
3751 3752
        self.attrs = {'beta': 1.0}
        self.convert_input_output()
A
Abhinav Arora 已提交
3753

3754 3755 3756
    def init_shape(self):
        self.shape = [10, 12]

A
Abhinav Arora 已提交
3757
    def test_check_grad(self):
3758 3759
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
3760 3761 3762 3763
        self.check_grad(
            ['X'],
            'Out',
        )
3764

A
Abhinav Arora 已提交
3765

3766 3767 3768 3769 3770
class TestSwish_ZeroDim(TestSwish):
    def init_shape(self):
        self.shape = []


3771 3772 3773 3774 3775
class TestSwishAPI(unittest.TestCase):
    # test paddle.nn.Swish, paddle.nn.functional.swish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
3776 3777 3778
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
3779
            else paddle.CPUPlace()
3780
        )
3781 3782

    def test_static_api(self):
3783
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3784 3785 3786 3787 3788 3789 3790 3791 3792 3793
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.swish(x)
                swish = paddle.nn.Swish()
                out2 = swish(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_swish(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
3794

3795
    def test_dygraph_api(self):
3796 3797 3798 3799 3800 3801
        x = paddle.to_tensor(self.x_np)
        out1 = F.swish(x)
        swish = paddle.nn.Swish()
        out2 = swish(x)
        out_ref = ref_swish(self.x_np)
        for r in [out1, out2]:
3802
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
3803 3804

    def test_fluid_api(self):
3805
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3806
            with fluid.program_guard(fluid.Program()):
3807
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
W
wanghuancoder 已提交
3808 3809 3810 3811 3812
                out = paddle.nn.functional.swish(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_swish(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
3813

3814
    def test_errors(self):
3815
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3816 3817 3818 3819
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.swish, 1)
                # The input dtype must be float16, float32, float64.
3820
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
3821 3822 3823 3824
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.swish, x_int32)
                # support the input dtype is float16
3825
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
3826 3827 3828
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.swish(x_fp16)
3829 3830


3831 3832 3833 3834
def ref_mish(x, threshold=20.0):
    softplus = np.select(
        [x <= threshold, x > threshold], [np.log(1 + np.exp(x)), x]
    )
3835 3836 3837 3838 3839 3840
    return x * np.tanh(softplus)


class TestMish(TestActivation):
    def setUp(self):
        self.op_type = "mish"
3841
        self.python_api = paddle.nn.functional.mish
3842
        self.init_dtype()
3843
        self.init_shape()
3844 3845

        np.random.seed(1024)
3846
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
3847
        out = ref_mish(x)
3848 3849

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
3850
        self.outputs = {'Out': out}
3851
        self.convert_input_output()
3852

3853 3854 3855
    def init_shape(self):
        self.shape = [10, 12]

3856
    def test_check_output(self):
W
wanghuancoder 已提交
3857
        self.check_output()
3858

3859 3860 3861
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
3862
        self.check_grad(['X'], 'Out')
3863 3864


3865 3866 3867 3868 3869
class TestMish_ZeroDim(TestMish):
    def init_shape(self):
        self.shape = []


3870 3871 3872 3873 3874
class TestMishAPI(unittest.TestCase):
    # test paddle.nn.Mish, paddle.nn.functional.mish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
3875 3876 3877
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
3878
            else paddle.CPUPlace()
3879
        )
3880 3881

    def test_static_api(self):
3882
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3883 3884 3885 3886 3887 3888 3889 3890 3891 3892
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.mish(x)
                mish = paddle.nn.Mish()
                out2 = mish(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_mish(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
3893 3894 3895 3896 3897 3898 3899 3900

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.mish(x)
        mish = paddle.nn.Mish()
        out2 = mish(x)
        out_ref = ref_mish(self.x_np)
        for r in [out1, out2]:
3901
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
3902 3903

    def test_fluid_api(self):
3904
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3905
            with fluid.program_guard(fluid.Program()):
3906
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
W
wanghuancoder 已提交
3907 3908 3909 3910 3911
                out = paddle.nn.functional.mish(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_mish(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
3912 3913

    def test_errors(self):
3914
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
3915 3916 3917 3918
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.mish, 1)
                # The input dtype must be float16, float32, float64.
3919
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
3920 3921 3922 3923
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.mish, x_int32)
                # support the input dtype is float16
3924
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
3925 3926 3927
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.mish(x_fp16)
3928 3929


3930
# ------------------ Test Cudnn Activation----------------------
3931
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
3932 3933 3934
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
3935 3936 3937 3938
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

3939
    cls_name = "{}_{}".format(parent.__name__, "cudnn")
3940 3941 3942 3943 3944 3945 3946 3947 3948 3949
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


3950 3951
# ------------------ Test Fp16 ----------------------
def create_test_act_fp16_class(
3952 3953 3954
    parent,
    atol=1e-3,
    grad_check=True,
3955
    check_dygraph=True,
3956
    check_prim=False,
3957
    enable_cinn=False,
3958
    grad_atol=1e-2,
3959
    **kwargs
3960 3961 3962 3963
):
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
C
chengduo 已提交
3964
    class TestActFp16(parent):
3965 3966 3967 3968 3969
        def setUp(self):
            super().setUp()
            for k, v in kwargs.items():
                setattr(self, k, v)

C
chengduo 已提交
3970 3971
        def init_dtype(self):
            self.dtype = np.float16
3972

3973
        def if_enable_cinn(self):
3974 3975
            self.enable_cinn = enable_cinn

C
chengduo 已提交
3976
        def test_check_output(self):
3977
            place = core.CUDAPlace(0)
C
chengduo 已提交
3978 3979
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
3980
                self.check_output_with_place(
3981 3982 3983 3984
                    place,
                    atol=atol,
                    check_dygraph=check_dygraph,
                    check_prim=check_prim,
3985
                )
3986

C
chengduo 已提交
3987 3988 3989 3990
        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
3991
                self.check_grad_with_place(
3992 3993 3994
                    place,
                    ['X'],
                    'Out',
3995
                    check_dygraph=check_dygraph,
3996 3997
                    check_prim=check_prim,
                    max_relative_error=grad_atol,
3998
                )
C
chengduo 已提交
3999

4000
    cls_name = "{}_{}".format(parent.__name__, "FP16OP")
C
chengduo 已提交
4001 4002 4003 4004
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


4005
create_test_act_fp16_class(TestActivation)
R
ronnywang 已提交
4006
create_test_act_fp16_class(TestExpm1)
4007 4008
create_test_act_fp16_class(TestSigmoid, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestSilu, check_prim=True, enable_cinn=True)
C
chengduo 已提交
4009
create_test_act_fp16_class(TestLogSigmoid)
4010
create_test_act_fp16_class(TestTanh, check_prim=True, enable_cinn=True)
4011
create_test_act_fp16_class(TestTanhshrink)
C
chengduo 已提交
4012
create_test_act_fp16_class(TestHardShrink)
4013
create_test_act_fp16_class(TestSoftshrink)
4014 4015 4016
create_test_act_fp16_class(TestSqrt, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestSqrtComp, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestAbs, check_prim=True, enable_cinn=True)
C
chengduo 已提交
4017
create_test_act_fp16_class(TestCeil, grad_check=False)
4018 4019 4020
create_test_act_fp16_class(
    TestFloor, check_prim=True, grad_check=False, enable_cinn=True
)
4021 4022 4023 4024
create_test_act_fp16_class(TestCos)
create_test_act_fp16_class(TestTan)
create_test_act_fp16_class(TestCosh)
create_test_act_fp16_class(TestAcos)
C
chengduo 已提交
4025
create_test_act_fp16_class(TestSin)
4026
create_test_act_fp16_class(TestSinh)
4027 4028
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
4029 4030 4031
create_test_act_fp16_class(TestAcosh)
create_test_act_fp16_class(TestAsinh)
create_test_act_fp16_class(TestAtanh)
C
chengduo 已提交
4032
create_test_act_fp16_class(TestRound, grad_check=False)
4033
create_test_act_fp16_class(TestRelu, check_prim=True, enable_cinn=True)
4034 4035 4036
create_test_act_fp16_class(
    TestGelu,
    check_prim=True,
4037
    enable_cinn=True,
4038 4039
    rev_comp_rtol=1e-3,
    rev_comp_atol=1e-3,
4040 4041
    cinn_rtol=1e-3,
    cinn_atol=1e-3,
4042
)
C
chengduo 已提交
4043 4044
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
4045
create_test_act_fp16_class(TestSoftRelu, check_dygraph=False)
C
chengduo 已提交
4046
create_test_act_fp16_class(TestELU)
4047
create_test_act_fp16_class(TestCELU)
C
chengduo 已提交
4048
create_test_act_fp16_class(TestReciprocal)
4049
create_test_act_fp16_class(TestLog, check_prim=True)
4050
if core.is_compiled_with_rocm():
4051
    create_test_act_fp16_class(TestLog2)
4052
else:
4053 4054 4055
    create_test_act_fp16_class(TestLog2)
create_test_act_fp16_class(TestLog10)
create_test_act_fp16_class(TestLog1p)
C
chengduo 已提交
4056
create_test_act_fp16_class(TestSquare)
4057 4058 4059
create_test_act_fp16_class(TestPow, check_prim=True)
create_test_act_fp16_class(TestPow_factor_tensor)
create_test_act_fp16_class(TestSTanh)
C
chengduo 已提交
4060 4061 4062 4063
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
4064
create_test_act_fp16_class(TestSwish)
4065
create_test_act_fp16_class(TestHardSwish, check_prim=True)
4066
create_test_act_fp16_class(TestMish)
4067
create_test_act_fp16_class(TestLeakyRelu, check_prim=True, enable_cinn=True)
4068
create_test_act_fp16_class(
4069
    TestLeakyReluAlpha1, check_prim=True, enable_cinn=True
4070
)
4071 4072 4073 4074 4075 4076 4077 4078
create_test_act_fp16_class(
    TestLeakyReluAlpha2, check_prim=True, enable_cinn=True
)
create_test_act_fp16_class(
    TestLeakyReluAlpha3, check_prim=True, enable_cinn=True
)
create_test_act_fp16_class(TestLeakyRelu_ZeroDim, check_prim=True)
create_test_act_fp16_class(TestRsqrt, check_prim=True, enable_cinn=True)
A
Abhinav Arora 已提交
4079

4080

4081
def create_test_act_bf16_class(
4082 4083 4084 4085 4086
    parent,
    atol=1e-2,
    grad_check=True,
    check_dygraph=True,
    check_prim=False,
4087
    enable_cinn=False,
4088 4089
    grad_atol=1e-2,
    **kwargs
4090 4091
):
    @unittest.skipIf(
4092 4093 4094
        not core.is_compiled_with_cuda()
        or not core.is_bfloat16_supported(core.CUDAPlace(0)),
        "core is not compiled with CUDA and do not support bfloat16",
4095
    )
4096
    class TestActBF16(parent):
4097 4098 4099 4100 4101
        def setUp(self):
            super().setUp()
            for k, v in kwargs.items():
                setattr(self, k, v)

4102
        def init_dtype(self):
4103 4104
            self.dtype = np.float32

4105 4106 4107
        def if_enable_cinn(self):
            self.enable_cinn = enable_cinn

4108 4109 4110
        def convert_input_output(self):
            self.inputs = {'X': convert_float_to_uint16(self.inputs['X'])}
            self.outputs = {'Out': convert_float_to_uint16(self.outputs['Out'])}
4111 4112 4113 4114
            self.dtype = np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
4115 4116 4117
            self.check_output_with_place(
                place, atol=atol, check_prim=check_prim
            )
4118 4119 4120

        def test_check_grad(self):
            place = core.CUDAPlace(0)
4121 4122
            if grad_check:
                self.check_grad_with_place(
4123 4124 4125 4126 4127
                    place,
                    ['X'],
                    'Out',
                    max_relative_error=grad_atol,
                    check_prim=check_prim,
4128
                )
4129

4130
    cls_name = "{}_{}".format(parent.__name__, "BF16OP")
4131 4132 4133 4134
    TestActBF16.__name__ = cls_name
    globals()[cls_name] = TestActBF16


4135
create_test_act_bf16_class(TestActivation)
4136 4137 4138 4139
create_test_act_bf16_class(TestExpm1)
create_test_act_bf16_class(TestSigmoid, check_prim=True)
create_test_act_bf16_class(TestSilu, check_prim=True)
create_test_act_bf16_class(TestLogSigmoid)
4140
create_test_act_bf16_class(TestTanh, check_prim=True)
4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166
create_test_act_bf16_class(TestTanhshrink)
create_test_act_bf16_class(TestHardShrink)
create_test_act_bf16_class(TestSoftshrink)
create_test_act_bf16_class(TestSqrt, check_prim=True)
create_test_act_bf16_class(TestSqrtComp, check_prim=True)
create_test_act_bf16_class(TestAbs, check_prim=True)
create_test_act_bf16_class(TestCeil, grad_check=False)
create_test_act_bf16_class(TestFloor, grad_check=False, check_prim=True)
create_test_act_bf16_class(TestCos)
create_test_act_bf16_class(TestTan)
create_test_act_bf16_class(TestCosh)
create_test_act_bf16_class(TestAcos)
create_test_act_bf16_class(TestSin)
create_test_act_bf16_class(TestSinh)
create_test_act_bf16_class(TestAsin)
create_test_act_bf16_class(TestAtan)
create_test_act_bf16_class(TestAcosh)
create_test_act_bf16_class(TestAsinh)
create_test_act_bf16_class(TestAtanh)
create_test_act_bf16_class(TestRound, grad_check=False)
create_test_act_bf16_class(TestRelu, check_prim=True)
create_test_act_bf16_class(
    TestGelu,
    check_prim=True,
    rev_comp_rtol=1e-2,
    rev_comp_atol=1e-2,
4167 4168
    cinn_rtol=1e-2,
    cinn_atol=1e-2,
4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193
)
create_test_act_bf16_class(TestBRelu)
create_test_act_bf16_class(TestRelu6)
create_test_act_bf16_class(TestSoftRelu, check_dygraph=False)
create_test_act_bf16_class(TestELU)
create_test_act_bf16_class(TestCELU)
create_test_act_bf16_class(TestReciprocal)
create_test_act_bf16_class(TestLog, check_prim=True)
if core.is_compiled_with_rocm():
    create_test_act_bf16_class(TestLog2)
else:
    create_test_act_bf16_class(TestLog2)
create_test_act_bf16_class(TestLog10)
create_test_act_bf16_class(TestLog1p)
create_test_act_bf16_class(TestSquare)
create_test_act_bf16_class(TestPow, check_prim=True)
create_test_act_bf16_class(TestPow_factor_tensor)
create_test_act_bf16_class(TestSTanh)
create_test_act_bf16_class(TestSoftplus)
create_test_act_bf16_class(TestSoftsign)
create_test_act_bf16_class(TestThresholdedRelu)
create_test_act_bf16_class(TestHardSigmoid)
create_test_act_bf16_class(TestSwish)
create_test_act_bf16_class(TestHardSwish, check_prim=True)
create_test_act_bf16_class(TestMish)
4194 4195 4196 4197 4198 4199
create_test_act_bf16_class(TestLeakyRelu, check_prim=True)
create_test_act_bf16_class(TestLeakyReluAlpha1, check_prim=True)
create_test_act_bf16_class(TestLeakyReluAlpha2, check_prim=True)
create_test_act_bf16_class(TestLeakyReluAlpha3, check_prim=True)
create_test_act_bf16_class(TestLeakyRelu_ZeroDim, check_prim=True)
create_test_act_bf16_class(TestRsqrt, check_prim=True)
4200

Q
qijun 已提交
4201 4202
if __name__ == "__main__":
    unittest.main()