#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit, erf
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard


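# TestActivation is the base class shared by every activation test in this
# file: setUp builds a NumPy reference for the op named in self.op_type
# (np.exp here), test_check_output compares the operator against it, and
# test_check_grad runs a numeric gradient check. Subclasses override setUp
# with their own reference implementation, while init_dtype and
# init_kernel_type are hooks used by the fp16 and cuDNN test factories at
# the bottom of this file.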
class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.dtype = np.float32
        self.init_dtype()
        self.init_kernel_type()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        self.dtype = np.float32

    def init_kernel_type(self):
        pass


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(TestActivation):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAtan(TestActivation):
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestTanhShrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
        out = x - np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()

        threshold = 0.5
        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0

        self.attrs = {'lambda': threshold}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.005)


class TestSoftShrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.init_dtype()

        lambda_val = 0.1
        x = np.random.uniform(0.25, 10, [4, 4]).astype(self.dtype)
        out = np.copy(x)
        out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
            out - lambda_val)

        self.attrs = {'lambda': lambda_val}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSqrt(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [2, 3]).astype(self.dtype)
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.0005)


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        # Because delta = 0.005 is used when computing the numeric gradient,
        # an x too close to zero (e.g. 0.002) gives x_neg = -0.003 and
        # x_pos = 0.007, which straddle the non-differentiable point at zero
        # and make the numeric gradient inaccurate, so such values are avoided.
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The same reason as in TestFloor: the gradient is undefined, so the
    # gradient check is skipped.
    def test_check_grad(self):
        pass


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of floor, ceil, and round is undefined; the op returns zero
    # as the gradient while the numpy reference would give nan, so the gradient
    # check is skipped.
    def test_check_grad(self):
        pass


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSin(TestActivation):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        pass


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as in TestAbs.
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestLeakyRelu(TestActivation):
    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as in TestAbs.
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0.02 * x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
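        # Reference is the exact (erf-based) GELU: 0.5 * x * (1 + erf(x / sqrt(2))).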
        out = 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same reason as in TestAbs.
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 10]).astype(self.dtype)
        threshold = 6.0
        # The same reason as in TestAbs.
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        out = np.minimum(np.maximum(x, 0), threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()

        x = np.random.uniform(-6, 6, [4, 4]).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # The same reason as in TestAbs.
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
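        # Reference: hard_swish(x) = x * min(max(x + offset, 0), threshold) / scale.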
        out = x * np.minimum(np.maximum(x + offset, 0), threshold) / scale

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason as in TestAbs.
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold + 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        alpha = 1.
        out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        # Note: unlike other ReLU variants, the standard ELU (alpha = 1) is
        # differentiable at 0, so adjustments like x[np.abs(x) < 0.005] = 0.02
        # are not needed here.
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


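# TestELUOpError checks input validation rather than numerics: a non-Variable
# input or an integer Variable passed to fluid.layers.elu should raise
# TypeError.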
class TestELUOpError(OpTest):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of elu_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.elu, x1)
            # The input dtype of elu_op must be float16 float32 or float64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.elu, x2)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32")
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)

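    # test_api exercises both ways of specifying the exponent: a Python float
    # passed as the factor attribute and a tensor built with fill_constant.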
    def test_api(self):
        import paddle.fluid as fluid

        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = fluid.layers.data(
            name="x", shape=[11, 17], append_batch_size=False, dtype="float32")

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = fluid.layers.pow(x, factor=factor_1)
        out_2 = fluid.layers.pow(x, factor=factor_2)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2 = exe.run(fluid.default_main_program(),
                               feed={"x": input},
                               fetch_list=[out_1, out_2])

        assert np.array_equal(res_1, np.power(input, 2))
        assert np.array_equal(res_2, np.power(input, 3))


class TestSTanh(TestActivation):
    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
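        # Reference: stanh(x) = scale_b * tanh(scale_a * x).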
        out = scale_b * np.tanh(x * scale_a)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.dtype = np.float64

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 + np.exp(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.divide(x, 1 + np.abs(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 0.25
        self.relative_error = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.relative_error] = threshold + 0.2
        out = (X > threshold) * X

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=self.relative_error)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.init_dtype()

        self.relative_error = 0.002

        X = np.random.uniform(-5, 5, [2, 2]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope
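        # lower_threshold and upper_threshold are the points where
        # slope * x + offset crosses 0 and 1, i.e. the kinks of hard_sigmoid.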

        # Same reason as TestAbs
        X[np.abs(X - lower_threshold) < self.relative_error] = \
            lower_threshold + 0.2
        X[np.abs(X - upper_threshold) < self.relative_error] = \
            upper_threshold - 0.2

        temp = X * slope + offset
        out = np.maximum(0.0, np.minimum(1.0, temp))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.002)


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.init_dtype()

        X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
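        # Reference: swish(x) = x * sigmoid(beta * x); expit is the logistic sigmoid.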
        out = X * expit(beta * X)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


#------------------ Test Cudnn Activation----------------------
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


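# create_test_act_cudnn_class derives a "<Parent>_cudnn" variant of an existing
# test: it overrides the init_kernel_type hook to request the cuDNN kernel via
# the use_cudnn attribute, and registers the new class in globals() so that
# unittest discovers it, e.g. TestRelu below becomes TestRelu_cudnn.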
create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


#------------------ Test Fp16 ----------------------
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


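# create_test_act_fp16_class mirrors the cuDNN factory for float16: the derived
# "<Parent>_fp16" class overrides init_dtype, checks outputs with the given
# atol, and, when grad_check is True, checks gradients with grad_atol as the
# max_relative_error; both checks are silently skipped if the device does not
# support float16. A sketch of registering a hypothetical new activation test
# with both factories (TestMyAct is illustrative, not an op in this file):
#
#     create_test_act_cudnn_class(TestMyAct)
#     create_test_act_fp16_class(TestMyAct, atol=1e-3, grad_atol=0.85)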
create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhShrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftShrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish)

if __name__ == "__main__":
    unittest.main()