diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index c7f08436dab449d7136c8b8c7f264d13dda07482..a11d8fae5b4cde9a863e84fbc6876eba223e0437 100755
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -259,6 +259,9 @@ class TestParameter:
 class TestSigmoid(TestActivation):
     def setUp(self):
         self.op_type = "sigmoid"
+        self.prim_op_type = "comp"
+        self.enable_cinn = False
+        self.python_api = paddle.nn.functional.sigmoid
         self.init_dtype()
         self.init_shape()
 
@@ -275,7 +278,7 @@ class TestSigmoid(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out', max_relative_error=0.01)
+        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_prim=True)
 
 
 class TestSigmoid_ZeroDim(TestSigmoid):
@@ -283,12 +286,45 @@ class TestSigmoid_ZeroDim(TestSigmoid):
         self.shape = []
 
 
+class TestSigmoidFP16(TestActivation):
+    def setUp(self):
+        self.op_type = "sigmoid"
+        self.prim_op_type = "comp"
+        self.enable_cinn = False
+        self.only_prim = True
+        self.python_api = paddle.nn.functional.sigmoid
+        self.init_dtype()
+        self.init_shape()
+
+        np.random.seed(1024)
+        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
+        out = 1 / (1 + np.exp(-x))
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+
+    def init_dtype(self):
+        self.dtype = np.float16
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_prim=True)
+
+    def test_check_output(self):
+        check_eager = False
+        if hasattr(self, 'check_eager'):
+            check_eager = self.check_eager
+        self.check_output(check_eager=check_eager, check_prim=True)
+
+
 @unittest.skipIf(
     not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
 )
 class TestSigmoidBF16(OpTest):
     def setUp(self):
         self.op_type = "sigmoid"
+        self.prim_op_type = "comp"
+        self.enable_cinn = False
+        self.python_api = paddle.nn.functional.sigmoid
         self.init_dtype()
         self.init_shape()
 
@@ -309,6 +345,7 @@ class TestSigmoidBF16(OpTest):
 
     def test_check_output(self):
         place = core.CUDAPlace(0)
+        # elementwise_pow does not support bfloat16, so skip check_prim=True.
         self.check_output_with_place(place)
 
     def test_check_grad(self):
@@ -3733,6 +3770,7 @@ def create_test_act_fp16_class(
 create_test_act_fp16_class(TestActivation)
 create_test_act_fp16_class(TestExpm1)
 create_test_act_fp16_class(TestSigmoid)
+create_test_act_fp16_class(TestSigmoidFP16)
 create_test_act_fp16_class(TestSilu)
 create_test_act_fp16_class(TestSiluFP16)
 create_test_act_fp16_class(TestLogSigmoid)
diff --git a/python/paddle/incubate/autograd/composite_rules.py b/python/paddle/incubate/autograd/composite_rules.py
index 97dc9e183f32c77dba7d78409f84975395042fef..86a9cc9bbfaf424c7cfb545dbb91330879a7da5b 100644
--- a/python/paddle/incubate/autograd/composite_rules.py
+++ b/python/paddle/incubate/autograd/composite_rules.py
@@ -286,6 +286,17 @@ def hard_swish_composite(x):
     return res
 
 
+@REGISTER_COMPOSITE('sigmoid')
+def sigmoid_composite(x):
+    """
+    define composite rule of op sigmoid
+    res = 1 / (1 + exp(-x))
+    """
+    sum_temp = 1 + exp(-x)
+    res = 1 / sum_temp
+    return res
+
+
 @REGISTER_COMPOSITE('silu')
 def silu_composite(x):
     """
diff --git a/python/paddle/tensor/ops.py b/python/paddle/tensor/ops.py
index 9ea51d0bca628fb9e483507e2492c67a9a8fb7d5..e83d04f3c02684d00764afd24cde90a501321918 100644
--- a/python/paddle/tensor/ops.py
+++ b/python/paddle/tensor/ops.py
@@ -569,6 +569,7 @@ def exp(x, name=None):
                 'float64',
                 'complex64',
                 'complex128',
+                'uint16',
             ],
             'exp',
         )
@@ -808,7 +809,7 @@ def sigmoid(x, name=None):
         return _C_ops.sigmoid(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'sigmoid'
+            x, 'x', ['float16', 'float32', 'float64', 'uint16'], 'sigmoid'
         )
         helper = LayerHelper('sigmoid', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
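
Note (not part of the patch): a minimal standalone sketch of the numerics behind the new sigmoid_composite rule, assuming only NumPy. It checks that the decomposition 1 / (1 + exp(-x)) agrees with a higher-precision reference in float32 and float16. The helper name sigmoid_composite_ref and the tolerances (1e-6, and 1e-2 mirroring the test's max_relative_error=0.01) are illustrative choices, not values from the patch.

import numpy as np

def sigmoid_composite_ref(x):
    # Mirrors sigmoid_composite above: sum_temp = 1 + exp(-x); res = 1 / sum_temp
    sum_temp = 1 + np.exp(-x)
    return 1 / sum_temp

np.random.seed(1024)
for dtype, rtol in [(np.float32, 1e-6), (np.float16, 1e-2)]:
    x = np.random.uniform(-1, 1, (11, 17)).astype(dtype)
    ref = 1 / (1 + np.exp(-x.astype(np.float64)))  # float64 reference sigmoid
    np.testing.assert_allclose(
        sigmoid_composite_ref(x).astype(np.float64), ref, rtol=rtol, atol=rtol
    )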