Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
机器未来
Paddle
提交
f23665e5
P
Paddle
项目概览
机器未来
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
f23665e5
编写于
11月 30, 2020
作者:
H
hong19860320
提交者:
GitHub
11月 30, 2020
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Refine the doc and unit test for Sigmoid and stanh (#29198)
上级
b5c63423
变更
3
显示空白变更内容
内联
并排
Showing
3 changed files
with
112 additions
and
37 deletions
+112
-37
python/paddle/fluid/layers/nn.py
python/paddle/fluid/layers/nn.py
+18
-19
python/paddle/fluid/tests/unittests/test_activation_op.py
python/paddle/fluid/tests/unittests/test_activation_op.py
+91
-11
python/paddle/nn/layer/activation.py
python/paddle/nn/layer/activation.py
+3
-7
未找到文件。
python/paddle/fluid/layers/nn.py
浏览文件 @
f23665e5
...
...
@@ -9520,36 +9520,35 @@ def pow(x, factor=1.0, name=None):
@templatedoc()
def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
"""
stanh activation.
${comment}
Args:
x(${x_type}): ${x_comment}
scale_a(${scale_a_type}|2.0 / 3.0): ${scale_a_comment}
scale_b(${scale_b_type}|1.7159): ${scale_b_comment}
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
.. math::
out = b * \\frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
scale_a (float, optional): The scale factor a of the input. Default is 0.67.
scale_b (float, optional): The scale factor b of the output. Default is 1.7159.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
output(Tensor): ${out_comment}
.
A Tensor with the same data type and shape as ``x``
.
Examples:
.. code-block:: python
import paddle
data = paddle.rand(shape=[3, 3], dtype='float32')
output = paddle.stanh(data, scale_a=0.67, scale_b=1.72)
print(data)
# [[0.19412413, 0.66871136, 0.77059180],
# [0.89738929, 0.35827777, 0.60592669],
# [0.66346580, 0.78424633, 0.46533889]]
print(output)
# [[0.22245567, 0.72288811, 0.81671900],
# [0.92525512, 0.40512756, 0.66227961],
# [0.71790355, 0.82885355, 0.51953089]]
x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
out = paddle.stanh(x, scale_a=0.67, scale_b=1.72) # [1.00616539, 1.49927628, 1.65933108, 1.70390463]
"""
if in_dygraph_mode():
return core.ops.stanh(x, 'scale_a', scale_a, 'scale_b', scale_b)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'stanh')
helper = LayerHelper('stanh', **locals())
...
...
python/paddle/fluid/tests/unittests/test_activation_op.py
浏览文件 @
f23665e5
...
...
@@ -1906,18 +1906,30 @@ class TestPow_factor_tensor(TestActivation):
self
.
assertRaises
(
TypeError
,
fluid
.
layers
.
pow
,
x
=
in4
,
factor
=
factor_1
)
def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
    """NumPy reference implementation of the scaled-tanh activation.

    Computes ``scale_b * tanh(scale_a * x)`` element-wise, mirroring the
    ``stanh`` operator under test.

    Args:
        x: Input value or ndarray.
        scale_a (float): Scale applied to the input. Default 0.67.
        scale_b (float): Scale applied to the output. Default 1.7159.

    Returns:
        The activated value(s), same shape as ``x``.
    """
    scaled_input = np.multiply(x, scale_a)
    return scale_b * np.tanh(scaled_input)
class
TestSTanh
(
TestActivation
):
def
get_scale_a
(
self
):
return
0.67
def
get_scale_b
(
self
):
return
1.7159
def
setUp
(
self
):
self
.
op_type
=
"stanh"
self
.
init_dtype
()
scale_a
=
self
.
get_scale_a
()
scale_b
=
self
.
get_scale_b
()
np
.
random
.
seed
(
1024
)
x
=
np
.
random
.
uniform
(
0.1
,
1
,
[
11
,
17
]).
astype
(
self
.
dtype
)
scale_a
=
2.0
/
3.0
scale_b
=
1.7159
out
=
scale_b
*
np
.
tanh
(
x
*
scale_a
)
# The same reason with TestAbs
out
=
ref_stanh
(
x
,
scale_a
,
scale_b
)
self
.
inputs
=
{
'X'
:
OpTest
.
np_dtype_to_fluid_dtype
(
x
)
}
self
.
inputs
=
{
'X'
:
x
}
self
.
attrs
=
{
'scale_a'
:
scale_a
,
'scale_b'
:
scale_b
}
self
.
outputs
=
{
'Out'
:
out
}
...
...
@@ -1927,17 +1939,85 @@ class TestSTanh(TestActivation):
self
.
check_grad
([
'X'
],
'Out'
)
class
TestSTanhOpError
(
unittest
.
TestCase
):
class TestSTanhScaleA(TestSTanh):
    """stanh op test using a non-default input scale (scale_a = 2.0)."""

    def get_scale_a(self):
        # Override the base class default (0.67) to cover another scale_a.
        return 2.0
class TestSTanhScaleB(TestSTanh):
    """stanh op test using a non-default output scale (scale_b = 0.5)."""

    def get_scale_b(self):
        # Override the base class default (1.7159) to cover another scale_b.
        return 0.5
class
TestSTanhAPI
(
unittest
.
TestCase
):
# test paddle.nn.stanh
def
get_scale_a
(
self
):
return
0.67
def
get_scale_b
(
self
):
return
1.7159
def
setUp
(
self
):
np
.
random
.
seed
(
1024
)
self
.
x_np
=
np
.
random
.
uniform
(
-
1
,
1
,
[
10
,
12
]).
astype
(
'float32'
)
self
.
scale_a
=
self
.
get_scale_a
()
self
.
scale_b
=
self
.
get_scale_b
()
self
.
place
=
paddle
.
CUDAPlace
(
0
)
if
core
.
is_compiled_with_cuda
()
\
else
paddle
.
CPUPlace
()
def
test_static_api
(
self
):
paddle
.
enable_static
()
with
paddle
.
static
.
program_guard
(
paddle
.
static
.
Program
()):
x
=
paddle
.
fluid
.
data
(
'X'
,
[
10
,
12
])
out
=
paddle
.
stanh
(
x
,
self
.
scale_a
,
self
.
scale_b
)
exe
=
paddle
.
static
.
Executor
(
self
.
place
)
res
=
exe
.
run
(
feed
=
{
'X'
:
self
.
x_np
},
fetch_list
=
[
out
])
out_ref
=
ref_stanh
(
self
.
x_np
,
self
.
scale_a
,
self
.
scale_b
)
for
r
in
res
:
self
.
assertEqual
(
np
.
allclose
(
out_ref
,
r
),
True
)
def
test_dygraph_api
(
self
):
paddle
.
disable_static
(
self
.
place
)
x
=
paddle
.
to_tensor
(
self
.
x_np
)
out
=
paddle
.
stanh
(
x
,
self
.
scale_a
,
self
.
scale_b
)
out_ref
=
ref_stanh
(
self
.
x_np
,
self
.
scale_a
,
self
.
scale_b
)
for
r
in
[
out
]:
self
.
assertEqual
(
np
.
allclose
(
out_ref
,
r
.
numpy
()),
True
)
paddle
.
enable_static
()
def
test_fluid_api
(
self
):
paddle
.
enable_static
()
with
fluid
.
program_guard
(
fluid
.
Program
()):
x
=
fluid
.
data
(
'X'
,
[
10
,
12
])
out
=
fluid
.
layers
.
stanh
(
x
,
self
.
scale_a
,
self
.
scale_b
)
exe
=
fluid
.
Executor
(
self
.
place
)
res
=
exe
.
run
(
feed
=
{
'X'
:
self
.
x_np
},
fetch_list
=
[
out
])
out_ref
=
ref_stanh
(
self
.
x_np
,
self
.
scale_a
,
self
.
scale_b
)
self
.
assertEqual
(
np
.
allclose
(
out_ref
,
res
[
0
]),
True
)
def
test_errors
(
self
):
with
program_guard
(
Program
()):
paddle
.
enable_static
()
with
paddle
.
static
.
program_guard
(
paddle
.
static
.
Program
()):
# The input type must be Variable.
self
.
assertRaises
(
TypeError
,
fluid
.
layers
.
stanh
,
1
)
self
.
assertRaises
(
TypeError
,
paddle
.
stanh
,
1
)
# The input dtype must be float16, float32, float64.
x_int32
=
fluid
.
data
(
name
=
'x_int32'
,
shape
=
[
12
,
10
],
dtype
=
'int32'
)
self
.
assertRaises
(
TypeError
,
fluid
.
layers
.
stanh
,
x_int32
)
x_int32
=
paddle
.
fluid
.
data
(
name
=
'x_int32'
,
shape
=
[
12
,
10
],
dtype
=
'int32'
)
self
.
assertRaises
(
TypeError
,
paddle
.
stanh
,
x_int32
)
# support the input dtype is float16
x_fp16
=
fluid
.
data
(
name
=
'x_fp16'
,
shape
=
[
12
,
10
],
dtype
=
'float16'
)
fluid
.
layers
.
stanh
(
x_fp16
)
x_fp16
=
paddle
.
fluid
.
data
(
name
=
'x_fp16'
,
shape
=
[
12
,
10
],
dtype
=
'float16'
)
paddle
.
stanh
(
x_fp16
)
class TestSTanhAPIScaleA(TestSTanhAPI):
    """paddle.stanh API test with a non-default input scale (scale_a = 2.0)."""

    def get_scale_a(self):
        # Override the base class default (0.67) to cover another scale_a.
        return 2.0
class TestSTanhAPIScaleB(TestSTanhAPI):
    """paddle.stanh API test with a non-default output scale (scale_b = 0.5)."""

    def get_scale_b(self):
        # Override the base class default (1.7159) to cover another scale_b.
        return 0.5
def
ref_softplus
(
x
,
beta
=
1
,
threshold
=
20
):
...
...
python/paddle/nn/layer/activation.py
浏览文件 @
f23665e5
...
...
@@ -536,7 +536,7 @@ class Sigmoid(layers.Layer):
.. math::

    Sigmoid(x) = \frac{1}{1 + e^{-x}}
Parameters:
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
...
...
@@ -551,15 +551,11 @@ class Sigmoid(layers.Layer):
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
input_data = np.array([1.0, 2.0, 3.0, 4.0]).astype('float32')
m = paddle.nn.Sigmoid()
x = paddle.to_tensor(input_data)
output = m(x)
print(output.numpy()) # [0.7310586, 0.880797, 0.95257413, 0.98201376]
x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
out = m(x) # [0.7310586, 0.880797, 0.95257413, 0.98201376]
"""
def
__init__
(
self
,
name
=
None
):
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录