#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit
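
# Each test below builds a NumPy reference for its activation in setUp();
# OpTest then compares the operator's forward output with that reference and
# checks the analytic gradient against a numerically estimated one.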


class TestExp(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.exp(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSigmoid(OpTest):
    def setUp(self):
        self.op_type = "sigmoid"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': 1 / (1 + np.exp(-self.inputs['X']))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestLogSigmoid(OpTest):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.inputs = {
            'X': np.random.uniform(-1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.log(1 / (1 + np.exp(-self.inputs['X'])))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(OpTest):
    def setUp(self):
        self.op_type = "tanh"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.tanh(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestTanhShrink(OpTest):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [10, 17]).astype("float32")
        }
        self.outputs = {'Out': self.inputs['X'] - np.tanh(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestHardShrink(OpTest):
    def setUp(self):
        self.op_type = "hard_shrink"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        threshold = 0.5

        self.inputs = {'X': x}
        self.attrs = {'lambda': threshold}

        t = np.copy(x)
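        # hard_shrink: zero out entries inside [-lambda, lambda], keep the rest.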
        t[(t >= -threshold) & (t <= threshold)] = 0
        self.outputs = {'Out': t}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.005)


class TestSoftShrink(OpTest):
    def setUp(self):
        self.op_type = "softshrink"
        lambda_val = 0.1
        self.attrs = {'lambda': lambda_val}
        self.inputs = {
            'X': np.random.uniform(0.25, 10, [4, 4]).astype("float32")
        }
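        # softshrink: shift x toward zero by lambda; values in [-lambda, lambda] map to 0.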
        y = np.copy(self.inputs['X'])
        y = (y < -lambda_val) * (y + lambda_val) + (y > lambda_val) * (
            y - lambda_val)
        self.outputs = {'Out': y}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSqrt(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.sqrt(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAbs(OpTest):
    def setUp(self):
        self.op_type = "abs"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        # Because we set delta = 0.005 when calculating the numeric gradient,
        # if x is too small (e.g. 0.002), x_neg will be -0.003 and x_pos will
        # be 0.007, so the numeric gradient is inaccurate. We should avoid this.
        x[np.abs(x) < 0.005] = 0.02
        self.inputs = {'X': x}
        self.outputs = {'Out': np.abs(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestCeil(OpTest):
    def setUp(self):
        self.op_type = "ceil"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.ceil(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestFloor(OpTest):
    def setUp(self):
        self.op_type = "floor"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.floor(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestCos(OpTest):
    def setUp(self):
        self.op_type = "cos"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.cos(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSin(OpTest):
    def setUp(self):
        self.op_type = "sin"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.sin(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestRound(OpTest):
    def setUp(self):
        self.op_type = "round"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.round(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestRelu(OpTest):
    def setUp(self):
        self.op_type = "relu"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as in TestAbs.
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
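        # Gradient checking is skipped for float16; only the float32 path
        # verifies gradients.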
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Relu(TestRelu):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
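        # FP16 output is only checked on a CUDA place that supports float16,
        # and with a looser absolute tolerance.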
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestBRelu(OpTest):
    def setUp(self):
        self.op_type = "brelu"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        t_min = 1.0
        t_max = 4.0
        # The same reason as in TestAbs.
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02

        self.inputs = {'X': x}
        self.attrs = {'t_min': t_min, 't_max': t_max}
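        # brelu (bounded relu): clip x into [t_min, t_max].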
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max
        self.outputs = {'Out': t}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestRelu6(OpTest):
    def setUp(self):
        self.op_type = "relu6"
        x = np.random.uniform(-1, 1, [4, 10]).astype("float32")
        threshold = 6.0
        # The same reason as in TestAbs.
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02

        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold}
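        # relu6: min(max(x, 0), threshold), with threshold = 6.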
        self.outputs = {
            'Out': np.minimum(np.maximum(self.inputs['X'], 0), threshold)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSoftRelu(OpTest):
    def setUp(self):
        self.op_type = "soft_relu"
        x = np.random.uniform(-3, 3, [4, 4]).astype("float32")
        threshold = 2.0
        # The same reason as in TestAbs.
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold + 0.02
        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold}
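        # soft_relu: log(1 + exp(x)) with x first clipped to [-threshold, threshold].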
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        self.outputs = {'Out': np.log((np.exp(t) + 1))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELU(OpTest):
    def setUp(self):
        self.op_type = "elu"
        x = np.random.uniform(-3, 3, [4, 4]).astype("float32")
        alpha = 1.
        # Note: unlike other ReLU extensions, the standard ELU (i.e. alpha = 1)
        # is differentiable at 0, so adjustments like x[np.abs(x) < 0.005] = 0.02 are not needed here.
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
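        # ELU: x for x > 0, alpha * (exp(x) - 1) for x <= 0.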
        self.outputs = {
            'Out': np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestReciprocal(OpTest):
    def setUp(self):
        self.op_type = "reciprocal"
        self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")}
        self.outputs = {'Out': np.reciprocal(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(OpTest):
    def setUp(self):
        self.op_type = "log"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.log(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSquare(OpTest):
    def setUp(self):
        self.op_type = "square"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {'Out': np.square(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(OpTest):
    def setUp(self):
        self.op_type = "pow"
        self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': np.power(self.inputs['X'], 3)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSTanh(OpTest):
    def setUp(self):
        self.op_type = "stanh"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
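        # stanh (scaled tanh): scale_b * tanh(scale_a * x); the constants above
        # are the classic 1.7159 * tanh(2/3 * x) parameterization.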
        self.outputs = {'Out': scale_b * np.tanh(self.inputs['X'] * scale_a)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftplus(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.inputs = {
            'X': np.random.uniform(-1, 1, [11, 17]).astype("float64")
        }
        self.outputs = {'Out': np.log(1 + np.exp(self.inputs['X']))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftsign(OpTest):
    def setUp(self):
        self.op_type = "softsign"
        self.inputs = {
            'X': np.random.uniform(-1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {
            'Out': np.divide(self.inputs['X'], 1 + np.abs(self.inputs['X']))
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestThresholdedRelu(OpTest):
    def setUp(self):
        self.op_type = "thresholded_relu"
        threshold = 0.25
        self.relative_error = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype("float32")

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.relative_error] = threshold + 0.2

        self.inputs = {'X': X}
        self.attrs = {'threshold': threshold}
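        # thresholded_relu: pass x through where x > threshold, otherwise output 0.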
        self.outputs = {'Out': (X > threshold) * X}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=self.relative_error)


class TestHardSigmoid(OpTest):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.relative_error = 0.002

        X = np.random.uniform(-5, 5, [2, 2]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope
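        # hard_sigmoid: clip(slope * x + offset, 0, 1); it saturates at 0 below
        # lower_threshold and at 1 above upper_threshold.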

        self.inputs = {'X': X}
        # Same reason as TestAbs
        X[np.abs(X - lower_threshold) < self.relative_error] = \
            lower_threshold + 0.2
        X[np.abs(X - upper_threshold) < self.relative_error] = \
            upper_threshold - 0.2

        temp = X * slope + offset
        self.outputs = {'Out': np.maximum(0.0, np.minimum(1.0, temp))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.002)


class TestSwish(OpTest):
    def setUp(self):
        self.op_type = "swish"
        X = np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        self.inputs = {'X': X}
        self.attrs = {'beta': 2.3}
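        # swish: x * sigmoid(beta * x); expit is SciPy's sigmoid.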
        self.outputs = {'Out': X * expit(self.attrs['beta'] * X)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


#--------------------test MKLDNN--------------------
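# The subclasses below reuse the NumPy references defined in the tests above
# and only set use_mkldnn=True, so the same forward and gradient checks are
# run against the MKL-DNN kernels.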
class TestMKLDNNReluDim2(TestRelu):
    def setUp(self):
        super(TestMKLDNNReluDim2, self).setUp()

        self.attrs = {"use_mkldnn": True}


class TestMKLDNNTanhDim2(TestTanh):
    def setUp(self):
        super(TestMKLDNNTanhDim2, self).setUp()

        self.attrs = {"use_mkldnn": True}


class TestMKLDNNSqrtDim2(TestSqrt):
    def setUp(self):
        super(TestMKLDNNSqrtDim2, self).setUp()

        self.attrs = {"use_mkldnn": True}


class TestMKLDNNAbsDim2(TestAbs):
    def setUp(self):
        super(TestMKLDNNAbsDim2, self).setUp()

        self.attrs = {"use_mkldnn": True}


class TestMKLDNNReluDim4(TestRelu):
    def setUp(self):
        super(TestMKLDNNReluDim4, self).setUp()

        x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32")
        # The same reason as in TestAbs.
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNTanhDim4(TestTanh):
    def setUp(self):
        super(TestMKLDNNTanhDim4, self).setUp()

        self.inputs = {
            'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32")
        }
        self.outputs = {'Out': np.tanh(self.inputs['X'])}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNSqrtDim4(TestSqrt):
    def setUp(self):
        super(TestMKLDNNSqrtDim4, self).setUp()

        self.inputs = {
            'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32")
        }
        self.outputs = {'Out': np.sqrt(self.inputs['X'])}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNAbsDim4(TestAbs):
    def setUp(self):
        super(TestMKLDNNAbsDim4, self).setUp()

        x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32")
        # The same reason as in TestAbs.
        x[np.abs(x) < 0.005] = 0.02
        self.inputs = {'X': x}
        self.outputs = {'Out': np.abs(self.inputs['X'])}
        self.attrs = {"use_mkldnn": True}


if __name__ == "__main__":
    unittest.main()