Unverified · Commit f23665e5 · authored by hong19860320 · committed by GitHub

Refine the doc and unit test for Sigmoid and stanh (#29198)

Parent: b5c63423
...@@ -9520,36 +9520,35 @@ def pow(x, factor=1.0, name=None): ...@@ -9520,36 +9520,35 @@ def pow(x, factor=1.0, name=None):
@templatedoc() @templatedoc()
def stanh(x, scale_a=0.67, scale_b=1.7159, name=None): def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
""" """
stanh activation.
${comment} .. math::
Args:
x(${x_type}): ${x_comment} out = b * \\frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}
scale_a(${scale_a_type}|2.0 / 3.0): ${scale_a_comment}
scale_b(${scale_b_type}|1.7159): ${scale_b_comment} Parameters:
name(str|None): A name for this layer(optional). If set None, the layer x (Tensor): The input Tensor with data type float32, float64.
will be named automatically. scale_a (float, optional): The scale factor a of the input. Default is 0.67.
scale_b (float, optional): The scale factor b of the output. Default is 1.7159.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns: Returns:
output(Tensor): ${out_comment}. A Tensor with the same data type and shape as ``x`` .
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
data = paddle.rand(shape=[3, 3], dtype='float32') x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
output = paddle.stanh(data, scale_a=0.67, scale_b=1.72) out = paddle.stanh(x, scale_a=0.67, scale_b=1.72) # [1.00616539, 1.49927628, 1.65933108, 1.70390463]
print(data)
# [[0.19412413, 0.66871136, 0.77059180],
# [0.89738929, 0.35827777, 0.60592669],
# [0.66346580, 0.78424633, 0.46533889]]
print(output)
# [[0.22245567, 0.72288811, 0.81671900],
# [0.92525512, 0.40512756, 0.66227961],
# [0.71790355, 0.82885355, 0.51953089]]
""" """
if in_dygraph_mode():
return core.ops.stanh(x, 'scale_a', scale_a, 'scale_b', scale_b)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'stanh') check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'stanh')
helper = LayerHelper('stanh', **locals()) helper = LayerHelper('stanh', **locals())
......
...@@ -1906,18 +1906,30 @@ class TestPow_factor_tensor(TestActivation): ...@@ -1906,18 +1906,30 @@ class TestPow_factor_tensor(TestActivation):
self.assertRaises(TypeError, fluid.layers.pow, x=in4, factor=factor_1) self.assertRaises(TypeError, fluid.layers.pow, x=in4, factor=factor_1)
def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
    """NumPy reference for the stanh activation: scale_b * tanh(x * scale_a).

    Defaults match paddle.stanh (scale_a=0.67, scale_b=1.7159).
    """
    return scale_b * np.tanh(x * scale_a)
class TestSTanh(TestActivation): class TestSTanh(TestActivation):
def get_scale_a(self):
return 0.67
def get_scale_b(self):
return 1.7159
def setUp(self): def setUp(self):
self.op_type = "stanh" self.op_type = "stanh"
self.init_dtype() self.init_dtype()
scale_a = self.get_scale_a()
scale_b = self.get_scale_b()
np.random.seed(1024) np.random.seed(1024)
x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
scale_a = 2.0 / 3.0 # The same reason with TestAbs
scale_b = 1.7159 out = ref_stanh(x, scale_a, scale_b)
out = scale_b * np.tanh(x * scale_a)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.inputs = {'X': x}
self.attrs = {'scale_a': scale_a, 'scale_b': scale_b} self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
self.outputs = {'Out': out} self.outputs = {'Out': out}
...@@ -1927,17 +1939,85 @@ class TestSTanh(TestActivation): ...@@ -1927,17 +1939,85 @@ class TestSTanh(TestActivation):
self.check_grad(['X'], 'Out') self.check_grad(['X'], 'Out')
class TestSTanhScaleA(TestSTanh):
    """Run the stanh op test with a non-default input scale factor (scale_a)."""

    def get_scale_a(self):
        return 2.0


class TestSTanhScaleB(TestSTanh):
    """Run the stanh op test with a non-default output scale factor (scale_b)."""

    def get_scale_b(self):
        return 0.5
class TestSTanhAPI(unittest.TestCase):
    """Test paddle.stanh in static, dygraph and (legacy) fluid modes.

    Subclasses override get_scale_a/get_scale_b to cover non-default scales.
    """

    def get_scale_a(self):
        # Input scale factor; default matches paddle.stanh.
        return 0.67

    def get_scale_b(self):
        # Output scale factor; default matches paddle.stanh.
        return 1.7159

    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.scale_a = self.get_scale_a()
        self.scale_b = self.get_scale_b()
        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out = paddle.stanh(x, self.scale_a, self.scale_b)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in res:
            self.assertEqual(np.allclose(out_ref, r), True)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.stanh(x, self.scale_a, self.scale_b)
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in [out]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
        paddle.enable_static()

    def test_fluid_api(self):
        # Legacy fluid.layers.stanh entry point must agree with the reference.
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = fluid.layers.stanh(x, self.scale_a, self.scale_b)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        self.assertEqual(np.allclose(out_ref, res[0]), True)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, paddle.stanh, 1)
            # The input dtype must be float16, float32 or float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, paddle.stanh, x_int32)
            # float16 input is supported and must not raise.
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16')
            paddle.stanh(x_fp16)
class TestSTanhAPIScaleA(TestSTanhAPI):
    """Run the stanh API tests with a non-default input scale factor (scale_a)."""

    def get_scale_a(self):
        return 2.0


class TestSTanhAPIScaleB(TestSTanhAPI):
    """Run the stanh API tests with a non-default output scale factor (scale_b)."""

    def get_scale_b(self):
        return 0.5
def ref_softplus(x, beta=1, threshold=20): def ref_softplus(x, beta=1, threshold=20):
......
...@@ -536,7 +536,7 @@ class Sigmoid(layers.Layer): ...@@ -536,7 +536,7 @@ class Sigmoid(layers.Layer):
.. math:: .. math::
Sigmoid(x) = \frac{1}{1 + e^{-x}} Sigmoid(x) = \\frac{1}{1 + e^{-x}}
Parameters: Parameters:
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
...@@ -551,15 +551,11 @@ class Sigmoid(layers.Layer): ...@@ -551,15 +551,11 @@ class Sigmoid(layers.Layer):
.. code-block:: python .. code-block:: python
import numpy as np
import paddle import paddle
paddle.disable_static()
input_data = np.array([1.0, 2.0, 3.0, 4.0]).astype('float32')
m = paddle.nn.Sigmoid() m = paddle.nn.Sigmoid()
x = paddle.to_tensor(input_data) x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
output = m(x) out = m(x) # [0.7310586, 0.880797, 0.95257413, 0.98201376]
print(output.numpy()) # [0.7310586, 0.880797, 0.95257413, 0.98201376]
""" """
def __init__(self, name=None): def __init__(self, name=None):
......
Markdown is supported.
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register to comment.