Unverified commit 83dd7e47, authored by houj04, committed by GitHub

update unittests for activation ops on xpu test=kunlun (#39677)

* update unittests for activation ops on xpu. test=kunlun

* update input data range. test=kunlun

* update input data range. test=kunlun
Parent 45dd4a5f
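
The refactor replaces the per-op @unittest.skipIf(...) test classes with XPUOpTestWrapper subclasses that are expanded into one concrete test class per data type reported by the XPU runtime, via get_xpu_op_support_types and create_test_class (see the updated test file below). A rough, standalone sketch of that expansion idea, using hypothetical helper names rather than Paddle's actual implementation:

import unittest

import numpy as np


def register_per_dtype(base, dtypes, namespace):
    # Hypothetical helper: create and register one concrete TestCase
    # subclass per dtype, mirroring what create_test_class does for each
    # supported type.
    for dt in dtypes:
        name = '%s_%s' % (base.__name__, np.dtype(dt).name)
        namespace[name] = type(name, (base,), {'in_type': dt})


class ExpCaseBase(unittest.TestCase):
    in_type = np.float32  # overridden in each generated subclass

    def test_exp(self):
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.in_type)
        out = np.exp(x)
        self.assertEqual(out.dtype, x.dtype)
        self.assertEqual(out.shape, x.shape)


register_per_dtype(ExpCaseBase, [np.float32, np.float16], globals())

Running unittest discovery on a module registered this way picks up ExpCaseBase_float32 and ExpCaseBase_float16 as separate cases (in addition to the float32 base class itself).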
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,32 +14,31 @@
from __future__ import print_function
import unittest
import numpy as np
import sys
sys.path.append("..")

import paddle

from op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()


class TestActivationOPBase(XPUOpTest):
    def setUp(self):
        self.place = paddle.XPUPlace(0)
        self.init_dtype()
        self.set_case()

    def set_case(self):
        self.op_type = 'exp'

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)
        self.attrs = {'use_xpu': True}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
@@ -48,79 +47,87 @@
        self.dtype = np.float32

    def test_check_output(self):
        self.check_output_with_place(self.place)

    def test_check_grad(self):
        self.check_grad_with_place(self.place, ['X'], 'Out')


class XPUTestExpOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'exp'
        self.use_dynamic_create_class = False

    class XPUTestExp(TestActivationOPBase):
        def set_case(self):
            self.op_type = 'exp'
            self.dtype = self.in_type

            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            out = np.exp(x)
            self.attrs = {'use_xpu': True}
            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('exp')
for stype in support_types:
    create_test_class(globals(), XPUTestExpOP, stype)


class XPUTestSigmoidOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'sigmoid'
        self.use_dynamic_create_class = False

    class XPUTestSigmoid(TestActivationOPBase):
        def set_case(self):
            self.op_type = "sigmoid"
            self.dtype = self.in_type

            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            out = 1 / (1 + np.exp(-x))
            self.attrs = {'use_xpu': True}
            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('sigmoid')
for stype in support_types:
    create_test_class(globals(), XPUTestSigmoidOP, stype)


class XPUTestTanhOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'tanh'
        self.use_dynamic_create_class = False

    class XPUTestTanh(TestActivationOPBase):
        def set_case(self):
            self.op_type = "tanh"
            self.dtype = self.in_type

            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            out = np.tanh(x)
            self.attrs = {'use_xpu': True}
            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('tanh')
for stype in support_types:
    create_test_class(globals(), XPUTestTanhOP, stype)


class XPUTestSqrtOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'sqrt'
        self.use_dynamic_create_class = False

    class XPUTestSqrt(TestActivationOPBase):
        def set_case(self):
            self.op_type = "sqrt"
            self.dtype = self.in_type

            x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
            out = np.sqrt(x)
@@ -129,18 +136,21 @@
            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('sqrt')
for stype in support_types:
    create_test_class(globals(), XPUTestSqrtOP, stype)


class XPUTestAbsOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'abs'
        self.use_dynamic_create_class = False

    class XPUTestAbs(TestActivationOPBase):
        def set_case(self):
            self.op_type = "abs"
            self.dtype = self.in_type

            x = np.random.uniform(-1, 1, [4, 25]).astype(self.dtype)
            # Because we set delta = 0.005 in calculating numeric gradient,
@@ -154,18 +164,21 @@
            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('abs')
for stype in support_types:
    create_test_class(globals(), XPUTestAbsOP, stype)


class XPUTestReluOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'relu'
        self.use_dynamic_create_class = False

    class XPUTestRelu(TestActivationOPBase):
        def set_case(self):
            self.op_type = "relu"
            self.dtype = self.in_type

            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            # The same reason with TestAbs
@@ -176,36 +189,22 @@
            self.inputs = {'X': x}
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('relu')
for stype in support_types:
    create_test_class(globals(), XPUTestReluOP, stype)


class XPUTestGeluOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'gelu'
        self.use_dynamic_create_class = False

    class XPUTestGelu(TestActivationOPBase):
        def set_case(self):
            self.op_type = "gelu"
            self.dtype = self.in_type

            approximate = False
            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            out = gelu(x, approximate)
@@ -214,16 +213,14 @@
            self.outputs = {'Out': out}
            self.attrs = {"approximate": approximate, 'use_xpu': True}


support_types = get_xpu_op_support_types('gelu')
for stype in support_types:
    create_test_class(globals(), XPUTestGeluOP, stype)


def gelu(x, approximate):
    from scipy.special import erf
    if approximate:
        y_ref = 0.5 * x * (1.0 + np.tanh(
            np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
@@ -232,12 +229,16 @@
    return y_ref.astype(x.dtype)


class XPUTestHardSwishOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'hard_swish'
        self.use_dynamic_create_class = False

    class XPUTestHardSwish(TestActivationOPBase):
        def set_case(self):
            self.op_type = "hard_swish"
            self.dtype = self.in_type

            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            offset = 3.0
            threshold = 6.0
@@ -248,10 +249,10 @@
            self.outputs = {'Out': out}
            self.attrs = {'use_xpu': True}


support_types = get_xpu_op_support_types('hard_swish')
for stype in support_types:
    create_test_class(globals(), XPUTestHardSwishOP, stype)
def hard_swish(x, offset, threshold, scale):
@@ -259,12 +260,15 @@
    return y_ref.astype(x.dtype)


class XPUTestLogOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'log'
        self.use_dynamic_create_class = False

    class XPUTestLog(TestActivationOPBase):
        def set_case(self):
            self.op_type = "log"
            self.dtype = self.in_type

            x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
            out = np.log(x)
@@ -274,32 +278,43 @@
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('log')
for stype in support_types:
    create_test_class(globals(), XPUTestLogOP, stype)


class XPUTestSquareOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'square'
        self.use_dynamic_create_class = False

    class XPUTestSquare(TestActivationOPBase):
        def set_case(self):
            self.op_type = "square"
            self.dtype = self.in_type

            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            out = np.square(x)
            self.attrs = {'use_xpu': True}
            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('square')
for stype in support_types:
    create_test_class(globals(), XPUTestSquareOP, stype)


class XPUTestPowOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'pow'
        self.use_dynamic_create_class = False

    class XPUTestPow(TestActivationOPBase):
        def set_case(self):
            self.op_type = "pow"
            self.dtype = self.in_type

            x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
            out = np.power(x, 3)
@@ -309,12 +324,21 @@
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('pow')
for stype in support_types:
    create_test_class(globals(), XPUTestPowOP, stype)


class XPUTestLeakyReluOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'leaky_relu'
        self.use_dynamic_create_class = False

    class XPUTestLeakyRelu(TestActivationOPBase):
        def set_case(self):
            self.op_type = "leaky_relu"
            self.dtype = self.in_type

            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            alpha = np.random.uniform(
                0,
@@ -325,10 +349,10 @@
            self.outputs = {'Out': out}
            self.attrs = {'use_xpu': True, 'alpha': alpha}


support_types = get_xpu_op_support_types('leaky_relu')
for stype in support_types:
    create_test_class(globals(), XPUTestLeakyReluOP, stype)


def leaky_relu(x, alpha):
@@ -339,10 +363,15 @@
    return y_ref.astype(x.dtype)


class XPUTestReciprocalOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'reciprocal'
        self.use_dynamic_create_class = False

    class XPUTestReciprocal(TestActivationOPBase):
        def set_case(self):
            self.op_type = "reciprocal"
            self.dtype = self.in_type

            np.random.seed(1024)
            x = np.random.uniform(1, 2, [1111, 1117]).astype(self.dtype)
@@ -352,20 +381,23 @@
            self.outputs = {'Out': out}
            self.attrs = {'use_xpu': True}


support_types = get_xpu_op_support_types('reciprocal')
for stype in support_types:
    create_test_class(globals(), XPUTestReciprocalOP, stype)


class XPUTestSoftPlusOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'softplus'
        self.use_dynamic_create_class = False

    class XPUTestSoftPlusBase(TestActivationOPBase):
        def set_case(self):
            self.op_type = "softplus"
            self.dtype = self.in_type
            self.init_config()

            beta = np.random.uniform(0, 1)
            threshold = np.random.uniform(0, 1)
            out = ref_softplus(self.x, beta, threshold)
@@ -377,31 +409,24 @@
        def init_config(self):
            self.x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

    class XPUTestSoftPlus2(XPUTestSoftPlusBase):
        def init_config(self):
            self.x = np.random.uniform(-2, 2, [1024, 8]).astype(self.dtype)

    class XPUTestSoftPlus3(XPUTestSoftPlusBase):
        def init_config(self):
            self.x = np.random.uniform(-2, 2,
                                       [4, 512, 15, 15]).astype(self.dtype)

    class XPUTestSoftPlus4(XPUTestSoftPlusBase):
        def init_config(self):
            self.x = np.random.uniform(-2, 2,
                                       [4, 256, 22, 22]).astype(self.dtype)


support_types = get_xpu_op_support_types('softplus')
for stype in support_types:
    create_test_class(globals(), XPUTestSoftPlusOP, stype)


def ref_softplus(x, beta=1, threshold=20):
@@ -412,5 +437,4 @@

if __name__ == "__main__":
    unittest.main()