#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit, erf
import paddle
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard


class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
            self.assertRaises(TypeError, fluid.layers.sqrt, in1)
            # The input dtype of sqrt op must be float16, float32, float64.
            in2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.sqrt, in2)

            in3 = fluid.layers.data(
                name='input3', shape=[12, 10], dtype="float16")
            fluid.layers.sqrt(x=in3)


class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_kernel_type()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        self.dtype = np.float64

    def init_kernel_type(self):
        pass


class TestParameter(object):
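    # Mixin shared by several activation tests below: it exercises the
    # corresponding paddle.<op_type> Python API (the out= and name= arguments
    # and dygraph mode), building the call with eval() from self.op_type.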
    def test_out(self):
        with fluid.program_guard(fluid.Program()):
            data = fluid.layers.data(name="X", shape=[1])
            out = eval("paddle.%s(data, out=data)" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result = exe.run(feed={"X": np.array([0.1])},
                             fetch_list=[data, out])
            self.assertEqual(result[0], result[1])

    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            data = fluid.layers.data(name="X", shape=[1])
            out = eval("paddle.%s(data, name='Y', out=data)" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result = exe.run(feed={"X": np.array([0.1])},
                             fetch_list=[data, out])
            self.assertEqual(result[0], result[1])

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            self.assertEqual(z, z_expected)


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        # TODO: If dtype is float64, the output (Out) differs at CPUPlace
        # depending on whether inplace is used, so keep dtype as float32
        # for now.
        self.dtype = np.float32


class TestAtan(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)


class TestTanhShrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
        out = x - np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()

        threshold = 0.5
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype) * 10
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0

        self.attrs = {'lambda': threshold}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftShrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.init_dtype()

        lambda_val = 0.1
        x = np.random.uniform(0.25, 10, [10, 12]).astype(self.dtype)
        out = np.copy(x)
        out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
            out - lambda_val)

        self.attrs = {'lambda': lambda_val}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSqrt(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype) * 10
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.0005)


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 25]).astype(self.dtype)
        # Because we set delta = 0.005 in calculating numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
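        # With delta = 0.005 the central difference straddles the kink of
        # abs() whenever |x| < delta, e.g. for x = 0.002 it yields
        # (|0.007| - |-0.003|) / (2 * 0.005) = 0.4 instead of the true
        # gradient 1.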
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # Gradient checking is skipped for the same reason as TestFloor below.
    def test_check_grad(self):
        pass


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of floor, ceil and round is undefined at integer points;
    # the op returns zero as the gradient while numpy's numeric gradient is
    # nan, so the gradient check is skipped (the same reason as TestCeil).
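    # (floor is piecewise constant, so its analytic gradient is zero almost
    # everywhere, while a finite difference taken across an integer jump is
    # on the order of 1 / (2 * delta); hence test_check_grad below is a no-op.)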
    def test_check_grad(self):
        pass


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()

        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSin(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()

        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        pass


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLeakyRelu(TestActivation):
    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0.02 * x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


def gelu(x, approximate):
    if approximate:
        y_ref = 0.5 * x * (1.0 + np.tanh(
            np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)
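# Note: the approximate=True branch is the tanh-based approximation of GELU
# from Hendrycks & Gimpel (2016); approximate=False is the exact form
# 0.5 * x * (1 + erf(x / sqrt(2))).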


class TestGeluApproximate(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        approximate = True
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        approximate = False
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same reason as in TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestBReluOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.brelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.brelu, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.layers.data(
                name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.brelu(x_fp16)


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()

        x = np.random.uniform(-1, 10, [10, 12]).astype(self.dtype)
        threshold = 6.0
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        out = np.minimum(np.maximum(x, 0), threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()

        x = np.random.uniform(-6, 6, [10, 12]).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # The same reason as in TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = x * np.minimum(np.maximum(x + offset, 0), threshold) / scale
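        # i.e. hard_swish(x) = x * relu6(x + offset) / scale, which with
        # threshold=6, scale=6 and offset=3 is the usual hard-swish.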

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason as in TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [10, 12]).astype(self.dtype)
        alpha = 1.
        out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
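        # (the derivative is 1 for x > 0 and alpha * exp(x) for x < 0, so with
        # alpha = 1 both one-sided derivatives at 0 equal 1.)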
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestELUOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of elu_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.elu, x1)
            # The input dtype of elu_op must be float16 float32 or float64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.elu, x2)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32")
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_api(self):
        import paddle
        import paddle.fluid as fluid

        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = fluid.layers.data(
            name="x", shape=[11, 17], append_batch_size=False, dtype="float32")
        res = fluid.layers.data(
            name="res",
            shape=[11, 17],
            append_batch_size=False,
            dtype="float32")

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = fluid.layers.pow(x, factor=factor_1)
        out_2 = fluid.layers.pow(x, factor=factor_2)
        out_3 = paddle.pow(x, factor_1, out=res)
        out_4 = paddle.pow(x, factor_1, name='pow_res')
        out_5 = paddle.pow(x, factor_1, out=res, name='pow_res')
        out_6 = paddle.pow(x, factor_2)
        self.assertEqual(('pow_res' in out_4.name), True)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res_3, res, res_6 = exe.run(
            fluid.default_main_program(),
            feed={"x": input},
            fetch_list=[out_1, out_2, out_3, res, out_6])

        assert np.array_equal(res_1, np.power(input, 2))
        assert np.array_equal(res_2, np.power(input, 3))
        assert np.array_equal(res_3, res)
        assert np.array_equal(res_6, np.power(input, 3))


class TestSTanh(TestActivation):
    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
        out = scale_b * np.tanh(x * scale_a)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.dtype = np.float64

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 + np.exp(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.divide(x, 1 + np.abs(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 0.25
        self.delta = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.delta] = threshold + 0.2
        out = (X > threshold) * X

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.init_dtype()

        X = np.random.uniform(-5, 5, [10, 12]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope

        self.delta = 0.005

        # Same reason as TestAbs
        X[(X - lower_threshold) < self.delta] = lower_threshold - 0.02
        X[(X - upper_threshold) < self.delta] = upper_threshold + 0.02

        temp = X * slope + offset
        out = np.maximum(0.0, np.minimum(1.0, temp))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.init_dtype()

        X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
        out = X * expit(beta * X)
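        # swish(x) = x * sigmoid(beta * x); scipy's expit is the logistic
        # sigmoid 1 / (1 + exp(-z)).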

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


#------------------ Test Cudnn Activation----------------------
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn
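    # Registering the generated subclass in globals() under a new name
    # (e.g. TestRelu_cudnn) lets unittest discover it; the subclass reruns
    # the parent test with use_cudnn=True passed to the op.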


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


#------------------ Test Fp16 ----------------------
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16
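    # Each call below stamps out a <Parent>_fp16 variant that reruns the
    # parent test in float16 on CUDA (skipped when fp16 is unsupported);
    # atol loosens the output tolerance, grad_check=False skips the gradient
    # check, and grad_atol is the max_relative_error used for that check.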


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhShrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftShrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish)

if __name__ == "__main__":
    unittest.main()