From d3352b99c2c054fca9b3ba38fa2e40e7e1bf8e12 Mon Sep 17 00:00:00 2001
From: zxcd <228587199@qq.com>
Date: Fri, 3 Mar 2023 23:17:56 +0800
Subject: [PATCH] add sigmoid composite rule (#50827)

* add sigmoid composite rule

* add python api

* fix code style.

* add check_prim=True

* add sigmoid fp16 unit test.

* fix code style.

* rm bf16 check_prim

* fix code style.
---
 .../tests/unittests/test_activation_op.py | 40 ++++++++++++++++++-
 .../incubate/autograd/composite_rules.py  | 11 +++++
 python/paddle/tensor/ops.py               |  3 +-
 3 files changed, 52 insertions(+), 2 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index c7f08436da..a11d8fae5b 100755
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -259,6 +259,9 @@ class TestParameter:
 class TestSigmoid(TestActivation):
     def setUp(self):
         self.op_type = "sigmoid"
+        self.prim_op_type = "comp"
+        self.enable_cinn = False
+        self.python_api = paddle.nn.functional.sigmoid
         self.init_dtype()
         self.init_shape()
 
@@ -275,7 +278,7 @@ class TestSigmoid(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out', max_relative_error=0.01)
+        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_prim=True)
 
 
 class TestSigmoid_ZeroDim(TestSigmoid):
@@ -283,12 +286,45 @@ class TestSigmoid_ZeroDim(TestSigmoid):
         self.shape = []
 
 
+class TestSigmoidFP16(TestActivation):
+    def setUp(self):
+        self.op_type = "sigmoid"
+        self.prim_op_type = "comp"
+        self.enable_cinn = False
+        self.only_prim = True
+        self.python_api = paddle.nn.functional.sigmoid
+        self.init_dtype()
+        self.init_shape()
+
+        np.random.seed(1024)
+        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
+        out = 1 / (1 + np.exp(-x))
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+
+    def init_dtype(self):
+        self.dtype = np.float16
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_prim=True)
+
+    def test_check_output(self):
+        check_eager = False
+        if hasattr(self, 'check_eager'):
+            check_eager = self.check_eager
+        self.check_output(check_eager=check_eager, check_prim=True)
+
+
 @unittest.skipIf(
     not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
 )
 class TestSigmoidBF16(OpTest):
     def setUp(self):
         self.op_type = "sigmoid"
+        self.prim_op_type = "comp"
+        self.enable_cinn = False
+        self.python_api = paddle.nn.functional.sigmoid
         self.init_dtype()
         self.init_shape()
 
@@ -309,6 +345,7 @@ class TestSigmoidBF16(OpTest):
 
     def test_check_output(self):
         place = core.CUDAPlace(0)
+        # elementwise_pow does not support bfloat16, so check_prim=True is skipped here.
         self.check_output_with_place(place)
 
     def test_check_grad(self):
@@ -3733,6 +3770,7 @@ def create_test_act_fp16_class(
 create_test_act_fp16_class(TestActivation)
 create_test_act_fp16_class(TestExpm1)
 create_test_act_fp16_class(TestSigmoid)
+create_test_act_fp16_class(TestSigmoidFP16)
 create_test_act_fp16_class(TestSilu)
 create_test_act_fp16_class(TestSiluFP16)
 create_test_act_fp16_class(TestLogSigmoid)
diff --git a/python/paddle/incubate/autograd/composite_rules.py b/python/paddle/incubate/autograd/composite_rules.py
index 97dc9e183f..86a9cc9bbf 100644
--- a/python/paddle/incubate/autograd/composite_rules.py
+++ b/python/paddle/incubate/autograd/composite_rules.py
@@ -286,6 +286,17 @@ def hard_swish_composite(x):
     return res
 
 
+@REGISTER_COMPOSITE('sigmoid')
+def sigmoid_composite(x):
+    """
+    define composite rule of op sigmoid
+    res = 1 / (1 + exp(-x))
+    """
+    sum_temp = 1 + exp(-x)
+    res = 1 / sum_temp
+    return res
+
+
 @REGISTER_COMPOSITE('silu')
 def silu_composite(x):
     """
diff --git a/python/paddle/tensor/ops.py b/python/paddle/tensor/ops.py
index 9ea51d0bca..e83d04f3c0 100644
--- a/python/paddle/tensor/ops.py
+++ b/python/paddle/tensor/ops.py
@@ -569,6 +569,7 @@ def exp(x, name=None):
             'float64',
             'complex64',
             'complex128',
+            'uint16',
         ],
         'exp',
     )
@@ -808,7 +809,7 @@ def sigmoid(x, name=None):
         return _C_ops.sigmoid(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'sigmoid'
+            x, 'x', ['float16', 'float32', 'float64', 'uint16'], 'sigmoid'
         )
         helper = LayerHelper('sigmoid', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
--
GitLab
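
Note for reviewers: the registered rule can be sanity-checked numerically outside the prim framework. Below is a minimal standalone sketch, assuming paddle and numpy are installed; the helper name sigmoid_composite_sketch is illustrative only and is not part of this patch. It mirrors the same exp-and-divide decomposition as sigmoid_composite and compares it against paddle.nn.functional.sigmoid:

    # Minimal numerical sketch of the decomposition registered above:
    # sigmoid(x) = 1 / (1 + exp(-x)), built only from exp and division.
    # This mirrors sigmoid_composite; it does not invoke the prim framework.
    import numpy as np
    import paddle

    def sigmoid_composite_sketch(x):
        # Same two steps as the registered composite rule.
        sum_temp = 1 + paddle.exp(-x)
        return 1 / sum_temp

    x = paddle.to_tensor(np.random.uniform(-1, 1, [11, 17]).astype('float32'))
    np.testing.assert_allclose(
        sigmoid_composite_sketch(x).numpy(),
        paddle.nn.functional.sigmoid(x).numpy(),
        rtol=1e-6,
    )
    print("composite decomposition matches paddle.nn.functional.sigmoid")

The [11, 17] shape is arbitrary; this is roughly the comparison that TestSigmoid exercises through check_prim=True, only without the OpTest machinery.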