#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit, erf
import paddle
import paddle.fluid as fluid
import paddle.nn as nn
import paddle.nn.functional as functional
from paddle.fluid import compiler, Program, program_guard


class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
            self.assertRaises(TypeError, fluid.layers.sqrt, in1)
            # The input dtype of sqrt op must be float16, float32, float64.
            in2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.sqrt, in2)

            in3 = fluid.layers.data(
                name='input3', shape=[12, 10], dtype="float16")
            fluid.layers.sqrt(x=in3)


class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_kernel_type()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        self.dtype = np.float64

    def init_kernel_type(self):
        pass


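# Helper mixin for several tests below. It assumes `self.op_type` names both a
# paddle.* API and a numpy function of the same name, and dispatches to them
# via eval() so the out=/name=/dygraph checks need not be repeated per operator.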
class TestParameter(object):
    def test_out(self):
        with fluid.program_guard(fluid.Program()):
            data = fluid.layers.data(name="X", shape=[1])
            out = eval("paddle.%s(data, out=data)" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result = exe.run(feed={"X": np.array([0.1])},
                             fetch_list=[data, out])
            self.assertEqual(result[0], result[1])

    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            data = fluid.layers.data(name="X", shape=[1])
            out = eval("paddle.%s(data, name='Y', out=data)" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result = exe.run(feed={"X": np.array([0.1])},
                             fetch_list=[data, out])
            self.assertEqual(result[0], result[1])

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            self.assertEqual(z, z_expected)


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        #TODO If dtype is float64, the output (Out) has diff at CPUPlace
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32


class TestAtan(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)


class TestTanhShrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
        out = x - np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()

        threshold = 0.5
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype) * 10
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0

        self.attrs = {'lambda': threshold}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftShrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.init_dtype()

        lambda_val = 0.1
        x = np.random.uniform(0.25, 10, [10, 12]).astype(self.dtype)
        out = np.copy(x)
        out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
            out - lambda_val)

        self.attrs = {'lambda': lambda_val}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSqrt(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype) * 10
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.0005)


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 25]).astype(self.dtype)
        # Because we set delta = 0.005 in calculating numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The same reason with TestFloor
    def test_check_grad(self):
        pass


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # the gradient on floor, ceil, round is undefined.
    # we return zero as gradient, but the numpy return nan
    # The same reason with TestFloor
    def test_check_grad(self):
        pass


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()

        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSin(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()

        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        pass


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestReluOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.sqrt, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.relu, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.layers.data(
                name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.relu(x_fp16)


class TestLeakyRelu(TestActivation):
    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0.02 * x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLeakyReluOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.leaky_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.leaky_relu, x_int32)
            # support the input dtype is float32
            x_fp16 = fluid.layers.data(
                name='x_fp16', shape=[12, 10], dtype='float32')
            fluid.layers.leaky_relu(x_fp16)


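# Reference implementation shared by the gelu tests below: the exact erf-based
# form, or the tanh approximation when `approximate` is True.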
def gelu(x, approximate):
    if approximate:
        y_ref = 0.5 * x * (1.0 + np.tanh(
            np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)


class TestGeluApproximate(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        approximate = True
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        approximate = False
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same with TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestBReluOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.brelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.brelu, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.layers.data(
                name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.brelu(x_fp16)


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()

        x = np.random.uniform(-1, 10, [10, 12]).astype(self.dtype)
        threshold = 6.0
        # The same with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        out = np.minimum(np.maximum(x, 0), threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()

        x = np.random.uniform(-6, 6, [10, 12]).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        #the same with TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = x * np.minimum(np.maximum(x + offset, 0), threshold) / scale

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason with TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [10, 12]).astype(self.dtype)
        alpha = 1.
        out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestELUOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of elu_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.elu, x1)
            # The input dtype of elu_op must be float16 float32 or float64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.elu, x2)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32")
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_api(self):
        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = fluid.layers.data(
            name="x", shape=[11, 17], append_batch_size=False, dtype="float32")
        res = fluid.layers.data(
            name="res",
            shape=[11, 17],
            append_batch_size=False,
            dtype="float32")

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = fluid.layers.pow(x, factor=factor_1)
        out_2 = fluid.layers.pow(x, factor=factor_2)
        out_3 = paddle.pow(x, factor_1, out=res)
        out_4 = paddle.pow(x, factor_1, name='pow_res')
        out_5 = paddle.pow(x, factor_1, out=res, name='pow_res')
        out_6 = paddle.pow(x, factor_2)
        self.assertEqual(('pow_res' in out_4.name), True)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res_3, res, res_6 = exe.run(
            fluid.default_main_program(),
            feed={"x": input},
            fetch_list=[out_1, out_2, out_3, res, out_6])

        assert np.array_equal(res_1, np.power(input, 2))
        assert np.array_equal(res_2, np.power(input, 3))
        assert np.array_equal(res_3, res)
        assert np.array_equal(res_6, np.power(input, 3))


class TestSTanh(TestActivation):
    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
        out = scale_b * np.tanh(x * scale_a)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.dtype = np.float64

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 + np.exp(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.divide(x, 1 + np.abs(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 0.25
        self.delta = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.delta] = threshold + 0.2
        out = (X > threshold) * X

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.init_dtype()

        X = np.random.uniform(-5, 5, [10, 12]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope

        self.delta = 0.005

        # Same reason as TestAbs
        X[(X - lower_threshold) < self.delta] = lower_threshold - 0.02
        X[(X - upper_threshold) < self.delta] = upper_threshold + 0.02

        temp = X * slope + offset
        out = np.maximum(0.0, np.minimum(1.0, temp))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.init_dtype()

        X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
        out = X * expit(beta * X)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


#------------------ Test Cudnn Activation----------------------
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


#------------------ Test Fp16 ----------------------
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16
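    # The renamed subclass is published in the module globals above so that
    # unittest discovers it as a separate "<Parent>_fp16" test case.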


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhShrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftShrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish)


class TestNNReluAPI(unittest.TestCase):
    def setUp(self):
        self.init_data()

    def init_data(self):
        self.x_shape = [10, 12]
        self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)
        self.y = self.ref_forward(self.x)

    def ref_forward(self, x):
        return np.maximum(x, 0)

    def ref_backward(self, y, dy):
        y_t = y.copy()
        y_t[y_t > 0] = 1
        return y_t * dy

    def check_api(self, place=fluid.CPUPlace(), inplace=False):
        main_program = Program()
        myrelu = nn.ReLU(inplace)
        with fluid.program_guard(main_program):
            x = fluid.data(name='x', shape=self.x_shape)
            x.stop_gradient = False
            y = myrelu(x)
            fluid.backward.append_backward(fluid.layers.mean(y))
        exe = fluid.Executor(place)
        out = exe.run(main_program,
                      feed={'x': self.x},
                      fetch_list=[y, y.grad_name, x.grad_name])
        self.assertTrue(np.allclose(out[0], self.y))
        self.assertTrue(np.allclose(out[2], self.ref_backward(self.y, out[1])))

        with fluid.dygraph.guard(place):
            x = fluid.dygraph.to_variable(self.x)
            y = myrelu(x)
        self.assertTrue(np.allclose(y.numpy(), self.y))

    def test_check_api(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            for inplace in [True, False]:
                self.check_api(place, inplace)


class TestNNFunctionalReluAPI(unittest.TestCase):
    def setUp(self):
        self.init_data()

    def init_data(self):
        self.x_shape = [10, 12]
        self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)
        self.y = self.ref_forward(self.x)

    def ref_forward(self, x):
        return np.maximum(x, 0)

    def test_check_api(self):
        main_program = Program()
        with fluid.program_guard(main_program):
            x = fluid.data(name='x', shape=self.x_shape)
            y = functional.relu(x)
        exe = fluid.Executor(fluid.CPUPlace())
        out = exe.run(main_program, feed={'x': self.x}, fetch_list=[y])
        self.assertTrue(np.allclose(out[0], self.y))


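# A typical way to run a single case from this file during development
# (assuming an importable Paddle build and SciPy are installed) is, e.g.:
#
#     python -m unittest test_activation_op.TestTanh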
if __name__ == "__main__":
    unittest.main()