#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit


class TestExp(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.exp(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSigmoid(OpTest):
    def setUp(self):
        self.op_type = "sigmoid"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': 1 / (1 + np.exp(-self.inputs['X']))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestLogSigmoid(OpTest):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.inputs = {
            'X': np.random.uniform(-1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.log(1 / (1 + np.exp(-self.inputs['X'])))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(OpTest):
    def setUp(self):
        self.op_type = "tanh"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.tanh(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestTanhShrink(OpTest):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [10, 17]).astype("float32")
        }
        self.outputs = {'Out': self.inputs['X'] - np.tanh(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestHardShrink(OpTest):
    def setUp(self):
        self.op_type = "hard_shrink"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        threshold = 0.5

        self.inputs = {'X': x}
        self.attrs = {'lambda': threshold}

        t = np.copy(x)
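        # Zero out entries with |x| <= lambda; hard_shrink passes the rest through unchanged.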
        t[(t >= -threshold) & (t <= threshold)] = 0
        self.outputs = {'Out': t}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.005)


class TestSoftShrink(OpTest):
    def setUp(self):
        self.op_type = "softshrink"
        lambda_val = 0.1
        self.attrs = {'lambda': lambda_val}
        self.inputs = {
            'X': np.random.uniform(0.25, 10, [4, 4]).astype("float32")
        }
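        # softshrink reference: y = x - lambda for x > lambda, x + lambda for
        # x < -lambda, and 0 otherwise.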
        y = np.copy(self.inputs['X'])
        y = (y < -lambda_val) * (y + lambda_val) + (y > lambda_val) * (
            y - lambda_val)
        self.outputs = {'Out': y}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSqrt(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.sqrt(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAbs(OpTest):
    def setUp(self):
        self.op_type = "abs"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        # Because we set delta = 0.005 when calculating the numeric gradient,
        # if x is too small (e.g. 0.002), x_neg becomes -0.003 and x_pos
        # becomes 0.007, so the numeric gradient is inaccurate.
        # We should avoid this case.
        x[np.abs(x) < 0.005] = 0.02
        self.inputs = {'X': x}
        self.outputs = {'Out': np.abs(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestCeil(OpTest):
    def setUp(self):
        self.op_type = "ceil"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.ceil(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestFloor(OpTest):
    def setUp(self):
        self.op_type = "floor"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.floor(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestRound(OpTest):
    def setUp(self):
        self.op_type = "round"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.round(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestRelu(OpTest):
    def setUp(self):
        self.op_type = "relu"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Relu(TestRelu):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
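        # fp16 output is only checked on a CUDA place that supports float16,
        # and with a looser absolute tolerance; TestRelu.test_check_grad
        # returns early for float16, so no gradient check runs here.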
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestBRelu(OpTest):
    def setUp(self):
        self.op_type = "brelu"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        t_min = 1.0
        t_max = 4.0
        # The same reason as in TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02

        self.inputs = {'X': x}
        self.attrs = {'t_min': t_min, 't_max': t_max}
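        # brelu reference: clip x into [t_min, t_max].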
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max
        self.outputs = {'Out': t}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestRelu6(OpTest):
    def setUp(self):
        self.op_type = "relu6"
        x = np.random.uniform(-1, 1, [4, 10]).astype("float32")
        threshold = 6.0
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02

        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold}
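        # relu6 reference: min(max(x, 0), threshold).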
        self.outputs = {
            'Out': np.minimum(np.maximum(self.inputs['X'], 0), threshold)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSoftRelu(OpTest):
    def setUp(self):
        self.op_type = "soft_relu"
        x = np.random.uniform(-3, 3, [4, 4]).astype("float32")
        threshold = 2.0
        # The same reason as in TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold + 0.02
        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold}
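        # soft_relu reference: softplus of x clipped into [-threshold, threshold].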
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        self.outputs = {'Out': np.log((np.exp(t) + 1))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELU(OpTest):
    def setUp(self):
        self.op_type = "elu"
        x = np.random.uniform(-3, 3, [4, 4]).astype("float32")
        alpha = 1.
        # Note: unlike other ReLU extensions, the standard ELU (i.e. alpha = 1)
        # is differentiable at 0, so we can skip perturbations such as
        # x[np.abs(x) < 0.005] = 0.02 here.
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
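        # elu reference: x for x > 0, alpha * (exp(x) - 1) for x <= 0.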
        self.outputs = {
            'Out': np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestReciprocal(OpTest):
    def setUp(self):
        self.op_type = "reciprocal"
        self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")}
        self.outputs = {'Out': np.reciprocal(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(OpTest):
    def setUp(self):
        self.op_type = "log"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.log(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSquare(OpTest):
    def setUp(self):
        self.op_type = "square"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.square(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(OpTest):
    def setUp(self):
        self.op_type = "pow"
        self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': np.power(self.inputs['X'], 3)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSTanh(OpTest):
    def setUp(self):
        self.op_type = "stanh"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
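        # stanh reference: scale_b * tanh(scale_a * x).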
        self.outputs = {'Out': scale_b * np.tanh(self.inputs['X'] * scale_a)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftplus(OpTest):
    def setUp(self):
        self.op_type = "softplus"
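        # Note: this test uses float64 inputs, unlike the float32 used elsewhere in this file.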
        self.inputs = {
            'X': np.random.uniform(-1, 1, [11, 17]).astype("float64")
        }
        self.outputs = {'Out': np.log(1 + np.exp(self.inputs['X']))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftsign(OpTest):
    def setUp(self):
        self.op_type = "softsign"
        self.inputs = {
            'X': np.random.uniform(-1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {
            'Out': np.divide(self.inputs['X'], 1 + np.abs(self.inputs['X']))
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestThresholdedRelu(OpTest):
    def setUp(self):
        self.op_type = "thresholded_relu"
        threshold = 0.25
        self.relative_error = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype("float32")

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.relative_error] = threshold + 0.2

        self.inputs = {'X': X}
        self.attrs = {'threshold': threshold}
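        # thresholded_relu reference: x where x > threshold, 0 elsewhere.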
        self.outputs = {'Out': (X > threshold) * X}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=self.relative_error)


class TestHardSigmoid(OpTest):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.relative_error = 0.002

        X = np.random.uniform(-5, 5, [2, 2]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope
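        # The reference output below is slope * x + offset clipped to [0, 1]; it
        # saturates at 0 below lower_threshold and at 1 above upper_threshold.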

        self.inputs = {'X': X}
        # Same reason as TestAbs
        X[np.abs(X - lower_threshold) < self.relative_error] = \
            lower_threshold + 0.2
        X[np.abs(X - upper_threshold) < self.relative_error] = \
            upper_threshold - 0.2

        temp = X * slope + offset
        self.outputs = {'Out': np.maximum(0.0, np.minimum(1.0, temp))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.002)


class TestSwish(OpTest):
    def setUp(self):
        self.op_type = "swish"
        X = np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        self.inputs = {'X': X}
        self.attrs = {'beta': 2.3}
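        # swish reference: x * sigmoid(beta * x), with sigmoid computed via scipy's expit.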
        self.outputs = {'Out': X * expit(self.attrs['beta'] * X)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


#--------------------test MKLDNN--------------------
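# These cases mirror the element-wise tests above but use 4-D inputs and set
# self.use_mkldnn = True, which is intended to route them to the MKLDNN kernels.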
class TestMKLDNNRelu(OpTest):
    def setUp(self):
        self.op_type = "relu"
        x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32")
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        self.inputs = {'X': x}
        self.outputs = {'Out': np.maximum(self.inputs['X'], 0)}
        self.use_mkldnn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestMKLDNNTanh(OpTest):
    def setUp(self):
        self.op_type = "tanh"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32")
        }
        self.outputs = {'Out': np.tanh(self.inputs['X'])}
        self.use_mkldnn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestMKLDNNSqrt(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32")
        }
        self.outputs = {'Out': np.sqrt(self.inputs['X'])}
        self.use_mkldnn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestMKLDNNAbs(OpTest):
    def setUp(self):
        self.op_type = "abs"
        x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32")
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        self.inputs = {'X': x}
        self.outputs = {'Out': np.abs(self.inputs['X'])}
        self.use_mkldnn = True

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


if __name__ == "__main__":
    unittest.main()