Unverified commit d3352b99, authored by zxcd, committed by GitHub

add sigmoid composite rule (#50827)

* add sigmoid composite rule

* add python api

* fix code style.

* add check_prim=True

* add sigmoid fp16 unit test.

* fix code style.

* rm bf16 check_prim

* fix code style.
Parent 90144a9a
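For background (an annotation, not part of the original commit message): a composite (prim) rule rewrites a fused operator into primitive ops so the prim framework and compiler backends can process it uniformly; that is what this PR registers for sigmoid. A minimal self-contained sketch of the registration pattern, using a hypothetical mini-registry in place of Paddle's REGISTER_COMPOSITE:

import math

COMPOSITE_RULES = {}  # hypothetical stand-in for Paddle's composite registry

def register_composite(op_name):
    def wrap(fn):
        COMPOSITE_RULES[op_name] = fn
        return fn
    return wrap

@register_composite('sigmoid')
def sigmoid_composite(x):
    # sigmoid decomposed into primitives: negate, exp, add, divide
    return 1 / (1 + math.exp(-x))

print(COMPOSITE_RULES['sigmoid'](0.0))  # 0.5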
@@ -259,6 +259,9 @@ class TestParameter:
 class TestSigmoid(TestActivation):
     def setUp(self):
         self.op_type = "sigmoid"
+        self.prim_op_type = "comp"
+        self.enable_cinn = False
+        self.python_api = paddle.nn.functional.sigmoid
         self.init_dtype()
         self.init_shape()
@@ -275,7 +278,7 @@ class TestSigmoid(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out', max_relative_error=0.01)
+        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_prim=True)


 class TestSigmoid_ZeroDim(TestSigmoid):
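An aside on check_prim=True (not stated in the diff): the grad check now also exercises the gradient derived from the composite rule, and for sigmoid the analytic gradient is sigmoid(x) * (1 - sigmoid(x)). A standalone numeric sanity check of that identity:

import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

x = np.random.uniform(-1, 1, 8)
eps = 1e-6
numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)  # central difference
analytic = sigmoid(x) * (1 - sigmoid(x))
np.testing.assert_allclose(numeric, analytic, rtol=1e-4)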
@@ -283,12 +286,45 @@ class TestSigmoid_ZeroDim(TestSigmoid):
         self.shape = []


+class TestSigmoidFP16(TestActivation):
+    def setUp(self):
+        self.op_type = "sigmoid"
+        self.prim_op_type = "comp"
+        self.enable_cinn = False
+        self.only_prim = True
+        self.python_api = paddle.nn.functional.sigmoid
+        self.init_dtype()
+        self.init_shape()
+
+        np.random.seed(1024)
+        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
+        out = 1 / (1 + np.exp(-x))
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+
+    def init_dtype(self):
+        self.dtype = np.float16
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_prim=True)
+
+    def test_check_output(self):
+        check_eager = False
+        if hasattr(self, 'check_eager'):
+            check_eager = self.check_eager
+        self.check_output(check_eager=check_eager, check_prim=True)
+
+
 @unittest.skipIf(
     not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
 )
 class TestSigmoidBF16(OpTest):
     def setUp(self):
         self.op_type = "sigmoid"
+        self.prim_op_type = "comp"
+        self.enable_cinn = False
         self.python_api = paddle.nn.functional.sigmoid
         self.init_dtype()
         self.init_shape()
@@ -309,6 +345,7 @@ class TestSigmoidBF16(OpTest):
     def test_check_output(self):
         place = core.CUDAPlace(0)
+        # elementwise_pow does not support bfloat16, so skip check_prim=True.
         self.check_output_with_place(place)

     def test_check_grad(self):
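Why elementwise_pow matters for a rule built only from exp, add, and divide (an inference from the comment above, not stated in the PR): the prim lowering expresses the division 1 / sum_temp through a pow primitive (sum_temp ** -1), which has no bfloat16 kernel. The underlying rewrite is easy to confirm:

import numpy as np

u = np.array([1.5, 2.0, 4.0], dtype=np.float32)
# division rewritten as an elementwise pow, as the prim lowering does
np.testing.assert_allclose(1 / u, np.power(u, -1.0))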
@@ -3733,6 +3770,7 @@ def create_test_act_fp16_class(
 create_test_act_fp16_class(TestActivation)
 create_test_act_fp16_class(TestExpm1)
 create_test_act_fp16_class(TestSigmoid)
+create_test_act_fp16_class(TestSigmoidFP16)
 create_test_act_fp16_class(TestSilu)
 create_test_act_fp16_class(TestSiluFP16)
 create_test_act_fp16_class(TestLogSigmoid)
...
@@ -286,6 +286,17 @@ def hard_swish_composite(x):
     return res


+@REGISTER_COMPOSITE('sigmoid')
+def sigmoid_composite(x):
+    """
+    define composite rule of op sigmoid
+    res = 1 / (1 + exp(-x))
+    """
+    sum_temp = 1 + exp(-x)
+    res = 1 / sum_temp
+    return res
+
+
 @REGISTER_COMPOSITE('silu')
 def silu_composite(x):
     """
...
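A quick standalone check that the rule just registered matches Paddle's fused sigmoid kernel (uses only public paddle APIs; this is not one of the PR's tests):

import numpy as np
import paddle

x = paddle.to_tensor(np.random.uniform(-1, 1, [4, 8]).astype('float32'))
fused = paddle.nn.functional.sigmoid(x)
composite = 1 / (1 + paddle.exp(-x))  # same decomposition as sigmoid_composite
np.testing.assert_allclose(fused.numpy(), composite.numpy(), rtol=1e-6)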
@@ -569,6 +569,7 @@ def exp(x, name=None):
             'float64',
             'complex64',
             'complex128',
+            'uint16',
         ],
         'exp',
     )
@@ -808,7 +809,7 @@ def sigmoid(x, name=None):
         return _C_ops.sigmoid(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'sigmoid'
+            x, 'x', ['float16', 'float32', 'float64', 'uint16'], 'sigmoid'
         )
         helper = LayerHelper('sigmoid', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
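The 'uint16' entries added to the exp and sigmoid dtype whitelists exist because Paddle carries bfloat16 data under a uint16 dtype in these checks. A numpy illustration of that bit-level packing (helper name is mine; truncation rather than round-to-nearest):

import numpy as np

def bf16_bits_as_uint16(a):
    # keep the high 16 bits of the float32 representation
    return (a.astype(np.float32).view(np.uint32) >> 16).astype(np.uint16)

x = np.array([1.0, -0.5], dtype=np.float32)
print(bf16_bits_as_uint16(x))  # [16256 48896]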
...