机器未来 / Paddle (forked from PaddlePaddle / Paddle)

Commit 40d193ed (unverified)
Authored by hong19860320 on Aug 20, 2020; committed by GitHub on Aug 20, 2020.
Add the ReLU6, Tanhshrink, SELU, Softplus, Softshrink and Softsign for the api 2.0 (#26376)
Parent: 6e13e86a
Showing 10 changed files with 993 additions and 116 deletions (+993 −116).
Changed files:

  paddle/fluid/operators/activation_op.cc                     +30   −8
  paddle/fluid/operators/activation_op.h                      +27   −13
  python/paddle/fluid/layers/nn.py                            +2    −7
  python/paddle/fluid/layers/ops.py                           +22   −20
  python/paddle/fluid/tests/unittests/test_activation_op.py   +302  −42
  python/paddle/fluid/tests/unittests/test_selu_op.py         +71   −18
  python/paddle/nn/__init__.py                                +6    −0
  python/paddle/nn/functional/__init__.py                     +1    −1
  python/paddle/nn/functional/activation.py                   +280  −7
  python/paddle/nn/layer/activation.py                        +252  −0
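For orientation, here is a minimal sketch of how the 2.0-style APIs added in this commit are meant to be called; the names and the approximate outputs are taken from the docstrings introduced below, and the numbers are only as precise as those docstrings:

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    paddle.disable_static()
    x = paddle.to_tensor(np.array([-1.0, 0.3, 6.5]))

    # functional form and Layer form of the same activation
    y1 = F.relu6(x)            # approx. [0., 0.3, 6.]
    y2 = paddle.nn.ReLU6()(x)  # same result

    # the other activations added here follow the same pattern:
    # F.tanhshrink / paddle.nn.Tanhshrink, F.selu / paddle.nn.SELU,
    # F.softplus / paddle.nn.Softplus, F.softshrink / paddle.nn.Softshrink,
    # F.softsign / paddle.nn.Softsign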
paddle/fluid/operators/activation_op.cc

@@ -317,13 +317,6 @@ $$out = x^2$$
 )DOC";
 
-UNUSED constexpr char SoftplusDoc[] = R"DOC(
-Softplus Activation Operator.
-
-$$out = \ln(1 + e^{x})$$
-
-)DOC";
-
 UNUSED constexpr char SoftsignDoc[] = R"DOC(
 Softsign Activation Operator.
...
@@ -396,6 +389,36 @@ $$out = \max(x, \alpha * x)$$
   }
 };
 
+class SoftplusOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("X",
+             "Input of Softplus operator, an N-D Tensor, with data type "
+             "float32, float64 or float16.");
+    AddOutput("Out",
+              "Output of Softplus operator, a Tensor with shape same as input.");
+    AddAttr<float>("beta", "The value of beta for Softplus.").SetDefault(1.0f);
+    AddAttr<float>("threshold", "The value of threshold for Softplus.")
+        .SetDefault(20.0f);
+    AddAttr<bool>("use_mkldnn",
+                  "(bool, default false) Only used in mkldnn kernel.")
+        .SetDefault(false);
+    AddAttr<bool>("use_cudnn",
+                  "(bool, default false) Only used in cudnn kernel, need install cudnn.")
+        .SetDefault(false);
+    AddComment(R"DOC(
+:strong:`Softplus Activation Operator`
+
+..  math::
+    out = \frac{1}{\beta} * \log(1 + \exp(\beta * x)) \\
+    \text{For numerical stability, the implementation reverts to the linear function when :}\,x \times \beta > threshold.
+
+)DOC");
+  }
+};
+
 class SoftShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
...
@@ -672,7 +695,6 @@ REGISTER_ACTIVATION_OP_MAKER(Reciprocal, ReciprocalDoc);
 REGISTER_ACTIVATION_OP_MAKER(Log, LogDoc);
 REGISTER_ACTIVATION_OP_MAKER(Log1p, Log1pDoc);
 REGISTER_ACTIVATION_OP_MAKER(Square, SquareDoc);
-REGISTER_ACTIVATION_OP_MAKER(Softplus, SoftplusDoc);
 REGISTER_ACTIVATION_OP_MAKER(Softsign, SoftsignDoc);
 
 template <ActBwdOpFwdDeps kDepValue>
...
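The beta/threshold semantics documented by SoftplusOpMaker can be mirrored with a small NumPy sketch (a reference for reading the diff, not the operator implementation; the clamp inside exp only avoids overflow warnings in the branch that gets discarded anyway):

    import numpy as np

    def softplus_ref(x, beta=1.0, threshold=20.0):
        # out = (1/beta) * log(1 + exp(beta * x)), reverting to the identity
        # where beta * x > threshold, as the AddComment doc states.
        x_beta = beta * x
        safe = np.minimum(x_beta, threshold)
        return np.where(x_beta > threshold, x, np.log1p(np.exp(safe)) / beta)

    print(softplus_ref(np.array([-0.4, -0.2, 0.1, 0.3, 50.0]), beta=1.0))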
paddle/fluid/operators/activation_op.h

@@ -975,32 +975,46 @@ struct HardSwishGradFunctor : public BaseActivationFunctor<T> {
   static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
 };
 
-// softplus(x) = log(1 + exp(x))
-// When x is a very large positive number, exp(x) may explode to inf,
-// Using trick below for numerical stability
-// https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
-// Then: softplus(x) = max(x, 0) + log(exp(-max(x, 0)) + exp(x - max(x, 0)))
+// For numerical stability, using the following formula instead of softplus(x) =
+// log(1 + exp(x))
+// softplus(x) = log(1 + exp(beta * x)) / beta when beta * x <= threshold(beta =
+// 1, threshold = 20 by default), otherwise x
 template <typename T>
 struct SoftplusFunctor : public BaseActivationFunctor<T> {
+  float beta;
+  float threshold;
+  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
+    return {{"beta", &beta}, {"threshold", &threshold}};
+  }
+
   template <typename Device, typename X, typename Out>
   void operator()(Device d, X x, Out out) {
-    auto temp = x.cwiseMax(static_cast<T>(0));  // temp = max(x, 0)
-    out.device(d) = temp + (((-temp).exp() + (x - temp).exp()).log());
+    auto x_beta = static_cast<T>(beta) * x;
+    out.device(d) = (x_beta > static_cast<T>(threshold))
+                        .select(x, (static_cast<T>(1) + x_beta.exp()).log() /
+                                       static_cast<T>(beta));
   }
 };
 
-// d(softplus(x))/dx = exp(x) / (1 + exp(x))
-// For numerical stability:
-// d(softplus(x))/dx = exp(x - max(x, 0)) / (exp(-max(x, 0)) +
-//                     exp(x - max(x, 0)))
+// For numerical stability, using the following formula instead of
+// d(softplus(x))/dx = 1 / (1 + exp(-x))
+// d(softplus(x))/dx = 1 / (1 + exp(-beta * x)) when beta * x <= threshold(beta
+// = 1, threshold = 20 by default), otherwise x
 template <typename T>
 struct SoftplusGradFunctor : public BaseActivationFunctor<T> {
+  float beta;
+  float threshold;
+  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
+    return {{"beta", &beta}, {"threshold", &threshold}};
+  }
+
   template <typename Device, typename X, typename Out, typename dOut,
             typename dX>
   void operator()(Device d, X x, Out out, dOut dout, dX dx) {
-    auto temp = x.cwiseMax(static_cast<T>(0));  // temp = max(x, 0)
+    auto x_beta = static_cast<T>(beta) * x;
     dx.device(d) =
-        dout * ((x - temp).exp() / ((-temp).exp() + (x - temp).exp()));
+        (x_beta > static_cast<T>(threshold))
+            .select(dout, dout / (static_cast<T>(1) + (-x_beta).exp()));
   }
 
   static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
...
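A quick NumPy finite-difference check of the rewritten gradient formula d softplus(x)/dx = 1 / (1 + exp(-beta * x)) (illustrative only, with beta = 1 and threshold = 20 so the linear branch is never hit on this range):

    import numpy as np

    def softplus_grad_ref(x, beta=1.0, threshold=20.0):
        x_beta = beta * x
        # slope 1 in the linear region beta * x > threshold, sigmoid otherwise
        return np.where(x_beta > threshold, 1.0, 1.0 / (1.0 + np.exp(-x_beta)))

    x = np.linspace(-3, 3, 7)
    eps = 1e-6
    numeric = (np.log1p(np.exp(x + eps)) - np.log1p(np.exp(x - eps))) / (2 * eps)
    print(np.allclose(numeric, softplus_grad_ref(x), atol=1e-5))  # expected: True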
python/paddle/fluid/layers/nn.py

@@ -8643,11 +8643,9 @@ def relu(x, name=None):
     return out
 
 
+@deprecated(since="2.0.0", update_to="paddle.nn.functional.selu")
 def selu(x, scale=None, alpha=None, name=None):
     """
-    :alias_main: paddle.nn.functional.selu
-    :alias: paddle.nn.functional.selu,paddle.nn.functional.activation.selu
-    :old_api: paddle.fluid.layers.selu
-
     Selu Operator.
...
@@ -9304,12 +9302,9 @@ def elu(x, alpha=1.0, name=None):
     return out
 
 
-@templatedoc()
+@deprecated(since="2.0.0", update_to="paddle.nn.functional.relu6")
 def relu6(x, threshold=6.0, name=None):
     """
-    :alias_main: paddle.nn.functional.relu6
-    :alias: paddle.nn.functional.relu6,paddle.nn.functional.activation.relu6
-    :old_api: paddle.fluid.layers.relu6
-
     ${comment}
...
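The decorator used above comes from paddle.utils. A hedged sketch of how such a redirection is declared; the decorated function below is a hypothetical stand-in, only the decorator call itself mirrors this diff:

    from paddle.utils import deprecated

    @deprecated(since="2.0.0", update_to="paddle.nn.functional.selu")
    def legacy_selu(x, scale=None, alpha=None, name=None):
        # hypothetical placeholder for fluid.layers.selu; calling it should
        # point users at the 2.0 API named in update_to
        pass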
python/paddle/fluid/layers/ops.py

@@ -20,6 +20,8 @@ from ..framework import convert_np_dtype_to_dtype_, Variable
 from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 from paddle.utils import deprecated
 
+__deprecated_func_name__ = {'tanh_shrink': 'tanhshrink', }
+
 __activations_noattr__ = [
     'sigmoid',
     'logsigmoid',
...
@@ -64,14 +66,20 @@ __all__ += __activations_noattr__
 __all__ += __unary_func__
 
 for _OP in set(__activations_noattr__):
+    _new_OP = _OP
+    if _OP in __deprecated_func_name__:
+        _new_OP = __deprecated_func_name__[_OP]
     func = generate_activation_fn(_OP)
     func = deprecated(
-        since="2.0.0", update_to="paddle.nn.functional.%s" % (_OP))(func)
+        since="2.0.0", update_to="paddle.nn.functional.%s" % (_new_OP))(func)
     globals()[_OP] = func
 
 for _OP in set(__unary_func__):
+    _new_OP = _OP
+    if _OP in __deprecated_func_name__:
+        _new_OP = __deprecated_func_name__[_OP]
     func = generate_activation_fn(_OP)
-    func = deprecated(since="2.0.0", update_to="paddle.%s" % (_OP))(func)
+    func = deprecated(since="2.0.0", update_to="paddle.%s" % (_new_OP))(func)
     globals()[_OP] = func
 
 add_sample_code(globals()["sigmoid"], r"""
...
@@ -160,16 +168,14 @@ add_sample_code(globals()["tanh_shrink"], r"""
 Examples:
     .. code-block:: python
 
-        import numpy as np
         import paddle
         import paddle.nn.functional as F
+        import numpy as np
 
         paddle.disable_static()
-        x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.to_variable(x_data)
-        out = F.tanh_shrink(x)
-        print(out.numpy())
-        # [-0.02005104 -0.00262468  0.00033201  0.00868739]
+        x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+        out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
 
 """)
...
@@ -401,16 +407,14 @@ add_sample_code(globals()["softplus"], r"""
 Examples:
     .. code-block:: python
 
-        import numpy as np
         import paddle
         import paddle.nn.functional as F
+        import numpy as np
 
         paddle.disable_static()
-        x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.to_variable(x_data)
-        out = F.softplus(x)
-        print(out.numpy())
-        # [0.51301525 0.59813887 0.74439666 0.85435524]
+        x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+        out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
 
 """)
...
@@ -418,16 +422,14 @@ add_sample_code(globals()["softsign"], r"""
 Examples:
     .. code-block:: python
 
-        import numpy as np
         import paddle
         import paddle.nn.functional as F
+        import numpy as np
 
         paddle.disable_static()
-        x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.to_variable(x_data)
-        out = F.softsign(x)
-        print(out.numpy())
-        # [-0.28571429 -0.16666667  0.09090909  0.23076923]
+        x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+        out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
 
 """)
...
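The renaming logic above is just a dictionary lookup performed before the deprecation target string is formatted; the same pattern in isolation (generic names, not Paddle internals):

    deprecated_func_name = {'tanh_shrink': 'tanhshrink'}

    def update_target(op_name):
        # an old op name maps to its new 2.0 name when a rename exists,
        # otherwise the name is reused unchanged
        new_name = deprecated_func_name.get(op_name, op_name)
        return "paddle.nn.functional.%s" % new_name

    print(update_target('tanh_shrink'))  # paddle.nn.functional.tanhshrink
    print(update_target('softsign'))     # paddle.nn.functional.softsign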
python/paddle/fluid/tests/unittests/test_activation_op.py

@@ -369,15 +369,20 @@ class TestCoshOpError(unittest.TestCase):
             fluid.layers.cosh(x_fp16)
 
 
-class TestTanhShrink(TestActivation):
+def ref_tanhshrink(x):
+    out = x - np.tanh(x)
+    return out
+
+
+class TestTanhshrink(TestActivation):
     def setUp(self):
         self.op_type = "tanh_shrink"
         self.init_dtype()
 
-        x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
-        out = x - np.tanh(x)
+        x = np.random.uniform(10, 20, [10, 17]).astype(self.dtype)
+        out = ref_tanhshrink(x)
 
-        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.inputs = {'X': x}
         self.outputs = {'Out': out}
 
     def test_check_grad(self):
...
@@ -386,6 +391,57 @@ class TestTanhShrink(TestActivation):
         self.check_grad(['X'], 'Out')
 
 
+class TestTanhshrinkAPI(unittest.TestCase):
+    # test paddle.nn.Tanhshrink, paddle.nn.functional.tanhshrink
+    def setUp(self):
+        self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
+        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
+            else paddle.CPUPlace()
+
+    def test_static_api(self):
+        with paddle.static.program_guard(paddle.static.Program()):
+            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
+            out1 = F.tanhshrink(x)
+            tanhshrink = paddle.nn.Tanhshrink()
+            out2 = tanhshrink(x)
+            exe = paddle.static.Executor(self.place)
+            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+        out_ref = ref_tanhshrink(self.x_np)
+        for r in res:
+            self.assertEqual(np.allclose(out_ref, r), True)
+
+    def test_dygraph_api(self):
+        paddle.disable_static(self.place)
+        x = paddle.to_tensor(self.x_np)
+        out1 = F.tanhshrink(x)
+        tanhshrink = paddle.nn.Tanhshrink()
+        out2 = tanhshrink(x)
+        out_ref = ref_tanhshrink(self.x_np)
+        for r in [out1, out2]:
+            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+        paddle.enable_static()
+
+    def test_fluid_api(self):
+        with fluid.program_guard(fluid.Program()):
+            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
+            out = fluid.layers.tanh_shrink(x)
+            exe = fluid.Executor(self.place)
+            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+        out_ref = ref_tanhshrink(self.x_np)
+        self.assertEqual(np.allclose(out_ref, res[0]), True)
+
+    def test_errors(self):
+        with paddle.static.program_guard(paddle.static.Program()):
+            # The input type must be Variable.
+            self.assertRaises(TypeError, F.tanhshrink, 1)
+            # The input dtype must be float16, float32, float64.
+            x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+            self.assertRaises(TypeError, F.tanhshrink, x_int32)
+            # support the input dtype is float16
+            x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+            F.tanhshrink(x_fp16)
+
+
 def ref_hardshrink(x, threshold):
     out = np.copy(x)
     out[(out >= -threshold) & (out <= threshold)] = 0
...
@@ -469,19 +525,24 @@ class TestHardShrinkAPI(unittest.TestCase):
             F.hardshrink(x_fp16)
 
 
-class TestSoftShrink(TestActivation):
+def ref_softshrink(x, threshold=0.5):
+    out = np.copy(x)
+    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
+        out - threshold)
+    return out
+
+
+class TestSoftshrink(TestActivation):
     def setUp(self):
         self.op_type = "softshrink"
         self.init_dtype()
 
-        lambda_val = 0.1
-        x = np.random.uniform(0.25, 10, [10, 12]).astype(self.dtype)
-        out = np.copy(x)
-        out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
-            out - lambda_val)
-
-        self.attrs = {'lambda': lambda_val}
-        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        threshold = 0.8
+        x = np.random.uniform(0.25, 10, [10, 12]).astype(self.dtype)
+        out = ref_softshrink(x, threshold)
+        self.inputs = {'X': x}
+        self.attrs = {"lambda": threshold}
         self.outputs = {'Out': out}
 
     def test_check_grad(self):
...
@@ -490,17 +551,56 @@ class TestSoftShrink(TestActivation):
         self.check_grad(['X'], 'Out')
 
 
-class TestSoftShrinkOpError(unittest.TestCase):
+class TestSoftshrinkAPI(unittest.TestCase):
+    # test paddle.nn.Softshrink, paddle.nn.functional.softshrink
+    def setUp(self):
+        self.threshold = 0.8
+        self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
+        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
+            else paddle.CPUPlace()
+
+    def test_static_api(self):
+        with paddle.static.program_guard(paddle.static.Program()):
+            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
+            out1 = F.softshrink(x, self.threshold)
+            softshrink = paddle.nn.Softshrink(self.threshold)
+            out2 = softshrink(x)
+            exe = paddle.static.Executor(self.place)
+            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+        out_ref = ref_softshrink(self.x_np, self.threshold)
+        for r in res:
+            self.assertEqual(np.allclose(out_ref, r), True)
+
+    def test_dygraph_api(self):
+        paddle.disable_static(self.place)
+        x = paddle.to_tensor(self.x_np)
+        out1 = F.softshrink(x, self.threshold)
+        softshrink = paddle.nn.Softshrink(self.threshold)
+        out2 = softshrink(x)
+        out_ref = ref_softshrink(self.x_np, self.threshold)
+        for r in [out1, out2]:
+            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+        paddle.enable_static()
+
+    def test_fluid_api(self):
+        with fluid.program_guard(fluid.Program()):
+            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
+            out = fluid.layers.softshrink(x, self.threshold)
+            exe = fluid.Executor(self.place)
+            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+        out_ref = ref_softshrink(self.x_np, self.threshold)
+        self.assertEqual(np.allclose(out_ref, res[0]), True)
+
     def test_errors(self):
-        with program_guard(Program()):
+        with paddle.static.program_guard(paddle.static.Program()):
             # The input type must be Variable.
-            self.assertRaises(TypeError, fluid.layers.softshrink, 1)
+            self.assertRaises(TypeError, F.softshrink, 1)
             # The input dtype must be float16, float32, float64.
-            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
-            self.assertRaises(TypeError, fluid.layers.softshrink, x_int32)
+            x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+            self.assertRaises(TypeError, F.softshrink, x_int32)
             # support the input dtype is float16
-            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
-            fluid.layers.softshrink(x_fp16)
+            x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+            F.softshrink(x_fp16)
 
 
 class TestSqrt(TestActivation, TestParameter):
...
@@ -903,20 +1003,24 @@ class TestBReluOpError(unittest.TestCase):
             fluid.layers.brelu(x_fp16)
 
 
+def ref_relu6(x, threshold=6.0):
+    out = np.copy(x)
+    out[np.abs(x - threshold) < 0.005] = threshold + 0.02
+    out = np.minimum(np.maximum(x, 0), threshold)
+    return out
+
+
 class TestRelu6(TestActivation):
     def setUp(self):
         self.op_type = "relu6"
         self.init_dtype()
 
         x = np.random.uniform(-1, 10, [10, 12]).astype(self.dtype)
-        threshold = 6.0
-        # The same with TestAbs
         x[np.abs(x) < 0.005] = 0.02
-        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
-        out = np.minimum(np.maximum(x, 0), threshold)
+        out = ref_relu6(x)
 
-        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
-        self.attrs = {'threshold': threshold}
+        self.inputs = {'X': x}
+        self.attrs = {'threshold': 6.0}
         self.outputs = {'Out': out}
 
     def test_check_grad(self):
...
@@ -925,17 +1029,56 @@ class TestRelu6(TestActivation):
         self.check_grad(['X'], 'Out')
 
 
-class TestRelu6OpError(unittest.TestCase):
+class TestRelu6API(unittest.TestCase):
+    # test paddle.nn.ReLU6, paddle.nn.functional.relu6
+    def setUp(self):
+        self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
+        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
+        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
+            else paddle.CPUPlace()
+
+    def test_static_api(self):
+        with paddle.static.program_guard(paddle.static.Program()):
+            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
+            out1 = F.relu6(x)
+            relu6 = paddle.nn.ReLU6()
+            out2 = relu6(x)
+            exe = paddle.static.Executor(self.place)
+            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+        out_ref = ref_relu6(self.x_np)
+        for r in res:
+            self.assertEqual(np.allclose(out_ref, r), True)
+
+    def test_dygraph_api(self):
+        paddle.disable_static(self.place)
+        x = paddle.to_tensor(self.x_np)
+        out1 = F.relu6(x)
+        relu6 = paddle.nn.ReLU6()
+        out2 = relu6(x)
+        out_ref = ref_relu6(self.x_np)
+        for r in [out1, out2]:
+            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+        paddle.enable_static()
+
+    def test_fluid_api(self):
+        with fluid.program_guard(fluid.Program()):
+            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
+            out = fluid.layers.relu6(x)
+            exe = fluid.Executor(self.place)
+            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+        out_ref = ref_relu6(self.x_np)
+        self.assertEqual(np.allclose(out_ref, res[0]), True)
+
     def test_errors(self):
-        with program_guard(Program()):
+        with paddle.static.program_guard(paddle.static.Program()):
             # The input type must be Variable.
-            self.assertRaises(TypeError, fluid.layers.relu6, 1)
+            self.assertRaises(TypeError, F.relu6, 1)
             # The input dtype must be float16, float32, float64.
-            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
-            self.assertRaises(TypeError, fluid.layers.relu6, x_int32)
+            x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+            self.assertRaises(TypeError, F.relu6, x_int32)
             # support the input dtype is float16
-            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
-            fluid.layers.relu6(x_fp16)
+            x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+            F.relu6(x_fp16)
 
 
 class TestHardSwish(TestActivation):
...
@@ -1318,16 +1461,25 @@ class TestSTanhOpError(unittest.TestCase):
             fluid.layers.stanh(x_fp16)
 
 
+def ref_softplus(x, beta=1, threshold=20):
+    x_beta = beta * x
+    out = np.select([x_beta <= threshold, x_beta > threshold],
+                    [np.log(1 + np.exp(x_beta)) / beta, x])
+    return out
+
+
 class TestSoftplus(TestActivation):
     def setUp(self):
         self.op_type = "softplus"
         self.init_dtype()
-        self.dtype = np.float64
 
-        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
-        out = np.log(1 + np.exp(x))
-
-        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        beta = 2
+        threshold = 15
+        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
+        out = ref_softplus(x, beta, threshold)
+        self.inputs = {'X': x}
+        self.attrs = {'beta': beta, "threshold": threshold}
         self.outputs = {'Out': out}
 
     def test_check_grad(self):
...
@@ -1336,15 +1488,72 @@ class TestSoftplus(TestActivation):
         self.check_grad(['X'], 'Out')
 
 
+class TestSoftplusAPI(unittest.TestCase):
+    # test paddle.nn.Softplus, paddle.nn.functional.softplus
+    def setUp(self):
+        self.beta = 2
+        self.threshold = 15
+        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
+        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
+            else paddle.CPUPlace()
+
+    def test_static_api(self):
+        with paddle.static.program_guard(paddle.static.Program()):
+            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
+            out1 = F.softplus(x, self.beta, self.threshold)
+            softplus = paddle.nn.Softplus(self.beta, self.threshold)
+            out2 = softplus(x)
+            exe = paddle.static.Executor(self.place)
+            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
+        for r in res:
+            self.assertEqual(np.allclose(out_ref, r), True)
+
+    def test_dygraph_api(self):
+        paddle.disable_static(self.place)
+        x = paddle.to_tensor(self.x_np)
+        out1 = F.softplus(x, self.beta, self.threshold)
+        softplus = paddle.nn.Softplus(self.beta, self.threshold)
+        out2 = softplus(x)
+        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
+        for r in [out1, out2]:
+            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+        paddle.enable_static()
+
+    def test_fluid_api(self):
+        with fluid.program_guard(fluid.Program()):
+            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
+            out = fluid.layers.softplus(x)
+            exe = fluid.Executor(self.place)
+            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+        out_ref = ref_softplus(self.x_np)
+        self.assertEqual(np.allclose(out_ref, res[0]), True)
+
+    def test_errors(self):
+        with paddle.static.program_guard(paddle.static.Program()):
+            # The input type must be Variable.
+            self.assertRaises(TypeError, F.softplus, 1)
+            # The input dtype must be float16, float32, float64.
+            x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+            self.assertRaises(TypeError, F.softplus, x_int32)
+            # support the input dtype is float16
+            x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+            F.softplus(x_fp16)
+
+
+def ref_softsign(x):
+    out = np.divide(x, 1 + np.abs(x))
+    return out
+
+
 class TestSoftsign(TestActivation):
     def setUp(self):
         self.op_type = "softsign"
         self.init_dtype()
 
-        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
-        out = np.divide(x, 1 + np.abs(x))
-        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
+        out = ref_softsign(x)
+        self.inputs = {'X': x}
         self.outputs = {'Out': out}
 
     def test_check_grad(self):
...
@@ -1353,6 +1562,57 @@ class TestSoftsign(TestActivation):
         self.check_grad(['X'], 'Out')
 
 
+class TestSoftsignAPI(unittest.TestCase):
+    # test paddle.nn.Softsign, paddle.nn.functional.softsign
+    def setUp(self):
+        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
+        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
+            else paddle.CPUPlace()
+
+    def test_static_api(self):
+        with paddle.static.program_guard(paddle.static.Program()):
+            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
+            out1 = F.softsign(x)
+            softsign = paddle.nn.Softsign()
+            out2 = softsign(x)
+            exe = paddle.static.Executor(self.place)
+            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+        out_ref = ref_softsign(self.x_np)
+        for r in res:
+            self.assertEqual(np.allclose(out_ref, r), True)
+
+    def test_dygraph_api(self):
+        paddle.disable_static(self.place)
+        x = paddle.to_tensor(self.x_np)
+        out1 = F.softsign(x)
+        softsign = paddle.nn.Softsign()
+        out2 = softsign(x)
+        out_ref = ref_softsign(self.x_np)
+        for r in [out1, out2]:
+            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+        paddle.enable_static()
+
+    def test_fluid_api(self):
+        with fluid.program_guard(fluid.Program()):
+            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
+            out = fluid.layers.softsign(x)
+            exe = fluid.Executor(self.place)
+            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+        out_ref = ref_softsign(self.x_np)
+        self.assertEqual(np.allclose(out_ref, res[0]), True)
+
+    def test_errors(self):
+        with paddle.static.program_guard(paddle.static.Program()):
+            # The input type must be Variable.
+            self.assertRaises(TypeError, F.softsign, 1)
+            # The input dtype must be float16, float32, float64.
+            x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+            self.assertRaises(TypeError, F.softsign, x_int32)
+            # support the input dtype is float16
+            x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+            F.softsign(x_fp16)
+
+
 class TestThresholdedRelu(TestActivation):
     def setUp(self):
         self.op_type = "thresholded_relu"
...
@@ -1548,9 +1808,9 @@ create_test_act_fp16_class(TestActivation)
 create_test_act_fp16_class(TestSigmoid)
 create_test_act_fp16_class(TestLogSigmoid)
 create_test_act_fp16_class(TestTanh)
-create_test_act_fp16_class(TestTanhShrink)
+create_test_act_fp16_class(TestTanhshrink)
 create_test_act_fp16_class(TestHardShrink)
-create_test_act_fp16_class(TestSoftShrink)
+create_test_act_fp16_class(TestSoftshrink)
 create_test_act_fp16_class(TestSqrt)
 create_test_act_fp16_class(TestAbs)
 create_test_act_fp16_class(TestCeil, grad_check=False)
...
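The ref_* helpers introduced in this test file are plain NumPy. For instance, ref_relu6 with the default threshold reduces to a clip, which a standalone check confirms (the line that nudges values near the threshold writes into a copy that is immediately overwritten, so it does not affect the result):

    import numpy as np

    def ref_relu6(x, threshold=6.0):
        out = np.copy(x)
        out[np.abs(x - threshold) < 0.005] = threshold + 0.02
        out = np.minimum(np.maximum(x, 0), threshold)
        return out

    x = np.random.uniform(-1, 10, [10, 12])
    print(np.allclose(ref_relu6(x), np.clip(x, 0, 6.0)))  # expected: True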
python/paddle/fluid/tests/unittests/test_selu_op.py

@@ -17,9 +17,26 @@ from __future__ import print_function
 import unittest
 import numpy as np
 import six
+import paddle.fluid.core as core
 from op_test import OpTest
+import paddle
 import paddle.fluid as fluid
-from paddle.fluid import Program, program_guard
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.fluid import compiler, Program, program_guard
+
+
+def ref_selu(x,
+             scale=1.0507009873554804934193349852946,
+             alpha=1.6732632423543772848170429916717):
+    out = np.copy(x)
+    out_flat = out.flatten()
+    for i in range(out_flat.size):
+        if out_flat[i] < 0:
+            out_flat[i] = alpha * np.exp(out_flat[i]) - alpha
+        out_flat[i] = scale * out_flat[i]
+    out = out_flat.reshape(x.shape)
+    return out
 
 
 class SeluTest(OpTest):
...
@@ -39,17 +56,10 @@ class SeluTest(OpTest):
         # zero.
         x[np.abs(x) < 0.005] = 0.02
 
-        x_flat = x.flatten()
-
-        for i in range(x_flat.size):
-            if x_flat[i] < 0:
-                x_flat[i] = alpha * np.exp(x_flat[i]) - alpha
-            x_flat[i] = scale * x_flat[i]
-
-        out_np = x_flat.reshape(self.x_shape)
+        out = ref_selu(x, scale, alpha)
 
         self.inputs = {'X': x}
-        self.outputs = {'Out': out_np}
+        self.outputs = {'Out': out}
 
         self.attrs = {
             'alpha': alpha,
...
@@ -69,17 +79,60 @@ class SeluTest(OpTest):
         self.check_grad(['X'], 'Out')
 
 
-class TestSeluOpError(unittest.TestCase):
+class TestSeluAPI(unittest.TestCase):
+    # test paddle.nn.SELU, paddle.nn.functional.selu
+    def setUp(self):
+        self.scale = 1.5
+        self.alpha = 2.0
+        self.x_np = np.random.normal(size=[3, 5, 5, 10]).astype(np.float64)
+        # Since zero point in selu is not differentiable, avoid randomize
+        # zero.
+        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
+        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
+            else paddle.CPUPlace()
+
+    def test_static_api(self):
+        with paddle.static.program_guard(paddle.static.Program()):
+            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
+            out1 = F.selu(x, self.scale, self.alpha)
+            selu = paddle.nn.SELU(self.scale, self.alpha)
+            out2 = selu(x)
+            exe = paddle.static.Executor(self.place)
+            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+        out_ref = ref_selu(self.x_np, self.scale, self.alpha)
+        for r in res:
+            self.assertEqual(np.allclose(out_ref, r), True)
+
+    def test_dygraph_api(self):
+        paddle.disable_static(self.place)
+        x = paddle.to_tensor(self.x_np)
+        out1 = F.selu(x, self.scale, self.alpha)
+        selu = paddle.nn.SELU(self.scale, self.alpha)
+        out2 = selu(x)
+        out_ref = ref_selu(self.x_np, self.scale, self.alpha)
+        for r in [out1, out2]:
+            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+        paddle.enable_static()
+
+    def test_fluid_api(self):
+        with fluid.program_guard(fluid.Program()):
+            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
+            out = fluid.layers.selu(x, self.scale, self.alpha)
+            exe = fluid.Executor(self.place)
+            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+        out_ref = ref_selu(self.x_np, self.scale, self.alpha)
+        self.assertEqual(np.allclose(out_ref, res[0]), True)
+
     def test_errors(self):
-        with program_guard(Program()):
+        with paddle.static.program_guard(paddle.static.Program()):
             # The input type must be Variable.
-            self.assertRaises(TypeError, fluid.layers.selu, 1)
+            self.assertRaises(TypeError, F.selu, 1)
             # The input dtype must be float16, float32, float64.
-            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
-            self.assertRaises(TypeError, fluid.layers.selu, x_int32)
-            # support the input dtype is float32
-            x_fp32 = fluid.data(name='x_fp32', shape=[12, 10], dtype='float32')
-            fluid.layers.selu(x_fp32)
+            x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+            self.assertRaises(TypeError, F.selu, x_int32)
+            # support the input dtype is float16
+            x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+            F.selu(x_fp16)
 
 
 if __name__ == "__main__":
...
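ref_selu above walks the flattened array element by element; an equivalent vectorized formulation (an illustrative alternative, not the code in this test file) is:

    import numpy as np

    def ref_selu_vec(x,
                     scale=1.0507009873554804934193349852946,
                     alpha=1.6732632423543772848170429916717):
        # scale * x for x >= 0, scale * alpha * (exp(x) - 1) for x < 0
        return scale * np.where(x >= 0, x, alpha * (np.exp(x) - 1))

    print(ref_selu_vec(np.array([-1.0, 0.0, 2.0])))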
python/paddle/nn/__init__.py

@@ -57,10 +57,16 @@ from .layer.activation import GELU
 from .layer.activation import Hardshrink
 # from .layer.activation import PReLU        #DEFINE_ALIAS
 from .layer.activation import ReLU
+from .layer.activation import ReLU6  #DEFINE_ALIAS
+from .layer.activation import SELU  #DEFINE_ALIAS
 from .layer.activation import LeakyReLU  #DEFINE_ALIAS
 from .layer.activation import Sigmoid  #DEFINE_ALIAS
 from .layer.activation import LogSigmoid
 # from .layer.activation import Softmax      #DEFINE_ALIAS
+from .layer.activation import Softplus  #DEFINE_ALIAS
+from .layer.activation import Softshrink  #DEFINE_ALIAS
+from .layer.activation import Softsign  #DEFINE_ALIAS
+from .layer.activation import Tanhshrink  #DEFINE_ALIAS
 from .layer.activation import LogSoftmax  #DEFINE_ALIAS
 from .layer.activation import HSigmoid  #DEFINE_ALIAS
 from .layer.common import BilinearTensorProduct  #DEFINE_ALIAS
...
python/paddle/nn/functional/__init__.py

@@ -47,7 +47,7 @@ from .activation import softplus  #DEFINE_ALIAS
 from .activation import softshrink  #DEFINE_ALIAS
 from .activation import softsign  #DEFINE_ALIAS
 from .activation import swish  #DEFINE_ALIAS
-from .activation import tanh_shrink  #DEFINE_ALIAS
+from .activation import tanhshrink  #DEFINE_ALIAS
 from .activation import thresholded_relu  #DEFINE_ALIAS
 from .activation import log_softmax  #DEFINE_ALIAS
 from .common import dropout  #DEFINE_ALIAS
...
python/paddle/nn/functional/activation.py

@@ -19,15 +19,9 @@ from ...fluid.layers import hard_sigmoid  #DEFINE_ALIAS
 from ...fluid.layers import hard_swish  #DEFINE_ALIAS
 from ...fluid.layers import leaky_relu  #DEFINE_ALIAS
 from ...fluid.layers import maxout  #DEFINE_ALIAS
-from ...fluid.layers import relu6  #DEFINE_ALIAS
-from ...fluid.layers import selu  #DEFINE_ALIAS
 from ...fluid.layers import soft_relu  #DEFINE_ALIAS
-from ...fluid.layers import softplus  #DEFINE_ALIAS
-from ...fluid.layers import softshrink  #DEFINE_ALIAS
-from ...fluid.layers import softsign  #DEFINE_ALIAS
 from ...fluid.layers import swish  #DEFINE_ALIAS
 from ...fluid.layers import sigmoid  #DEFINE_ALIAS
-from ...fluid.layers import tanh_shrink  #DEFINE_ALIAS
 from ...fluid.layers import thresholded_relu  #DEFINE_ALIAS
 
 __all__ = [
...
@@ -53,7 +47,7 @@ __all__ = [
     'softsign',
     'sigmoid',
     'swish',
-    'tanh_shrink',
+    'tanhshrink',
     'thresholded_relu',
     'log_softmax'
 ]
...
@@ -423,6 +417,103 @@ def logsigmoid(x, name=None):
     return out
 
 
+def relu6(x, name=None):
+    """
+    relu6 activation
+
+    .. math::
+
+        \text{relu6}(x) = \min(\max(0,x), 6)
+
+    Args:
+        x (Tensor): The input Tensor with data type float32, float64.
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        A Tensor with the same data type and shape as ``x`` .
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import paddle.nn.functional as F
+            import numpy as np
+
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([-1, 0.3, 6.5]))
+            out = F.relu6(x) # [0, 0.3, 6]
+    """
+    threshold = 6.0
+    if in_dygraph_mode():
+        return core.ops.relu6(x, 'threshold', threshold)
+
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6')
+    helper = LayerHelper('relu6', **locals())
+    out = helper.create_variable_for_type_inference(x.dtype)
+    helper.append_op(
+        type='relu6',
+        inputs={'X': x},
+        outputs={'Out': out},
+        attrs={'threshold': threshold})
+    return out
+
+
+def selu(x,
+         scale=1.0507009873554804934193349852946,
+         alpha=1.6732632423543772848170429916717,
+         name=None):
+    """
+    selu activation
+
+    .. math::
+
+        \text{selu}(x) = scale * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1))), \\
+        with\,alpha=1.6732632423543772848170429916717 and \\
+        scale=1.0507009873554804934193349852946
+
+    Args:
+        x (Tensor): The input Tensor with data type float32, float64.
+        scale (float, optional): The value of scale for selu. Default is 1.0507009873554804934193349852946
+        alpha (float, optional): The value of alpha for selu. Default is 1.6732632423543772848170429916717
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        A Tensor with the same data type and shape as ``x`` .
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import paddle.nn.functional as F
+            import numpy as np
+
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([[0, 1],[2, 3]]))
+            out = F.selu(x) # [[0, 1.050701],[2.101402, 3.152103]]
+    """
+    if in_dygraph_mode():
+        return core.ops.selu(x, 'scale', scale, 'alpha', alpha)
+
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'selu')
+    helper = LayerHelper('selu', **locals())
+    out = helper.create_variable_for_type_inference(x.dtype)
+    helper.append_op(
+        type='selu',
+        inputs={'X': x},
+        outputs={'Out': out},
+        attrs={'scale': scale, 'alpha': alpha})
+    return out
+
+
 def softmax(x, axis=-1, name=None):
     """
     This operator implements the softmax layer. The calculation process is as follows:
...
@@ -539,6 +630,188 @@ def softmax(x, axis=-1, name=None):
     return paddle.fluid.layers.softmax(input=x, axis=axis, name=name)
 
 
+def softplus(x, beta=1, threshold=20, name=None):
+    """
+    softplus activation
+
+    .. math::
+
+        \text{softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x)) \\
+        \text{For numerical stability, the implementation reverts to the linear function when :}\,x \times \beta > threshold.
+
+    Args:
+        x (Tensor): The input Tensor with data type float32, float64.
+        beta (float, optional): The value of beta for softplus. Default is 1
+        threshold (float, optional): The value of threshold for softplus. Default is 20
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        A Tensor with the same data type and shape as ``x`` .
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import paddle.nn.functional as F
+            import numpy as np
+
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+            out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
+    """
+    if in_dygraph_mode():
+        return core.ops.softplus(x, 'beta', beta, 'threshold', threshold)
+
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
+                             'softplus')
+    helper = LayerHelper('softplus', **locals())
+    out = helper.create_variable_for_type_inference(x.dtype)
+    helper.append_op(
+        type='softplus',
+        inputs={'X': x},
+        outputs={'Out': out},
+        attrs={'beta': beta, 'threshold': threshold})
+    return out
+
+
+def softshrink(x, threshold=0.5, name=None):
+    """
+    softshrink activation
+
+    .. math::
+
+        \text{softshrink}(x) = \begin{cases}
+            x - threshold, & \text{ if } x > threshold \\
+            x + threshold, & \text{ if } x < -threshold \\
+            0, & \text{ otherwise }
+        \end{cases}
+
+    Args:
+        x (Tensor): The input Tensor with data type float32, float64.
+        threshold (float, optional): The value of threshold(must be no less than zero) for softplus. Default is 0.5
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        A Tensor with the same data type and shape as ``x`` .
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import paddle.nn.functional as F
+            import numpy as np
+
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
+            out = F.softshrink(x) # [-0.4, 0, 0, 0.3]
+    """
+    if in_dygraph_mode():
+        return core.ops.softshrink(x, 'lambda', threshold)
+
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
+                             'softshrink')
+    helper = LayerHelper('softshrink', **locals())
+    out = helper.create_variable_for_type_inference(x.dtype)
+    helper.append_op(
+        type='softshrink',
+        inputs={'X': x},
+        outputs={'Out': out},
+        attrs={'lambda': threshold})
+    return out
+
+
+def softsign(x, name=None):
+    """
+    softsign activation
+
+    .. math::
+
+        \text{softsign}(x) = \frac{x}{1 + |x|}
+
+    Args:
+        x (Tensor): The input Tensor with data type float32, float64.
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        A Tensor with the same data type and shape as ``x`` .
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import paddle.nn.functional as F
+            import numpy as np
+
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+            out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
+    """
+    if in_dygraph_mode():
+        return core.ops.softsign(x)
+
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
+                             'softsign')
+    helper = LayerHelper('softsign', **locals())
+    out = helper.create_variable_for_type_inference(x.dtype)
+    helper.append_op(type='softsign', inputs={'X': x}, outputs={'Out': out})
+    return out
+
+
+def tanhshrink(x, name=None):
+    """
+    tanhshrink activation
+
+    .. math::
+
+        \text{tanhshrink}(x) = x - \text{tanh}(x)
+
+    Args:
+        x (Tensor): The input Tensor with data type float32, float64.
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        A Tensor with the same data type and shape as ``x`` .
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import paddle.nn.functional as F
+            import numpy as np
+
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+            out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
+    """
+    if in_dygraph_mode():
+        return core.ops.tanh_shrink(x)
+
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
+                             'tanhshrink')
+    helper = LayerHelper('tanh_shrink', **locals())
+    out = helper.create_variable_for_type_inference(x.dtype)
+    helper.append_op(type='tanh_shrink', inputs={'X': x}, outputs={'Out': out})
+    return out
+
+
 def log_softmax(x, axis=-1, dtype=None, name=None):
     """
     This operator implements the log_softmax layer. The calculation process is
...
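Every functional API added in this file follows the same dispatch: call the C++ op directly in dygraph mode, otherwise build the op into the current program through LayerHelper. A usage sketch exercising both paths for softshrink (the expected values are the approximations quoted in its docstring):

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    # dygraph: the in_dygraph_mode() branch calls core.ops.softshrink directly
    paddle.disable_static()
    x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
    print(F.softshrink(x, threshold=0.5).numpy())  # approx. [-0.4, 0., 0., 0.3]

    # static graph: the LayerHelper branch appends a softshrink op to the program
    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program()):
        xs = paddle.data('X', [4], 'float64')
        out = F.softshrink(xs, threshold=0.5)
        exe = paddle.static.Executor(paddle.CPUPlace())
        res = exe.run(feed={'X': np.array([-0.9, -0.2, 0.1, 0.8])}, fetch_list=[out])
        print(res[0])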
python/paddle/nn/layer/activation.py

@@ -20,9 +20,15 @@ __all__ = [
     'Hardshrink',
     # 'PReLU',
     'ReLU',
+    'ReLU6',
+    'SELU',
     'LeakyReLU',
     'Sigmoid',
     # 'Softmax',
+    'Softplus',
+    'Softshrink',
+    'Softsign',
+    'Tanhshrink',
     'LogSigmoid',
     'LogSoftmax',
     'HSigmoid'
...
@@ -351,6 +357,91 @@ class ReLU(layers.Layer):
         return F.relu(x, self._name)
 
 
+class ReLU6(layers.Layer):
+    """
+    ReLU6 Activation
+
+    .. math::
+
+        \text{ReLU6}(x) = \min(\max(0,x), 6)
+
+    Parameters:
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Shape:
+        - input: Tensor with any shape.
+        - output: Tensor with the same shape as input.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import numpy as np
+
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([-1, 0.3, 6.5]))
+            m = paddle.nn.ReLU6()
+            out = m(x) # [0, 0.3, 6]
+    """
+
+    def __init__(self, name=None):
+        super(ReLU6, self).__init__()
+        self._name = name
+
+    def forward(self, x):
+        return F.relu6(x, self._name)
+
+
+class SELU(layers.Layer):
+    """
+    SELU Activation
+
+    .. math::
+
+        \text{SELU}(x) = scale * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1))), \\
+        with\,alpha=1.6732632423543772848170429916717 and \\
+        scale=1.0507009873554804934193349852946
+
+    Parameters:
+        scale (float, optional): The value of scale for SELU. Default is 1.0507009873554804934193349852946
+        alpha (float, optional): The value of alpha for SELU. Default is 1.6732632423543772848170429916717
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Shape:
+        - input: Tensor with any shape.
+        - output: Tensor with the same shape as input.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import numpy as np
+
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([[0, 1],[2, 3]]))
+            m = paddle.nn.SELU()
+            out = m(x) # [[0, 1.050701],[2.101402, 3.152103]]
+    """
+
+    def __init__(self,
+                 scale=1.0507009873554804934193349852946,
+                 alpha=1.6732632423543772848170429916717,
+                 name=None):
+        super(SELU, self).__init__()
+        self._scale = scale
+        self._alpha = alpha
+        self._name = name
+
+    def forward(self, x):
+        return F.selu(x, self._scale, self._alpha, self._name)
+
+
 class LeakyReLU(layers.Layer):
     """
     Leaky ReLU Activation.
...
@@ -431,6 +522,167 @@ class Sigmoid(layers.Layer):
         return F.sigmoid(x, self.name)
 
 
+class Softplus(layers.Layer):
+    """
+    Softplus Activation
+
+    .. math::
+
+        \text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x)) \\
+        \text{For numerical stability, the implementation reverts to the linear function when :}\,x \times \beta > threshold.
+
+    Parameters:
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Shape:
+        - input: Tensor with any shape.
+        - output: Tensor with the same shape as input.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import numpy as np
+
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+            m = paddle.nn.Softplus()
+            out = m(x) # [0.513015, 0.598139, 0.744397, 0.854355]
+    """
+
+    def __init__(self, beta=1, threshold=20, name=None):
+        super(Softplus, self).__init__()
+        self._beta = beta
+        self._threshold = threshold
+        self._name = name
+
+    def forward(self, x):
+        return F.softplus(x, self._beta, self._threshold, self._name)
+
+
+class Softshrink(layers.Layer):
+    """
+    Softshrink Activation
+
+    .. math::
+
+        \text{Softshrink}(x) = \begin{cases}
+            x - threshold, & \text{ if } x > threshold \\
+            x + threshold, & \text{ if } x < -threshold \\
+            0, & \text{ otherwise }
+        \end{cases}
+
+    Parameters:
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Shape:
+        - input: Tensor with any shape.
+        - output: Tensor with the same shape as input.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import numpy as np
+
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
+            m = paddle.nn.Softshrink()
+            out = m(x) # [-0.4, 0, 0, 0.3]
+    """
+
+    def __init__(self, threshold=0.5, name=None):
+        super(Softshrink, self).__init__()
+        self._threshold = threshold
+        self._name = name
+
+    def forward(self, x):
+        return F.softshrink(x, self._threshold, self._name)
+
+
+class Softsign(layers.Layer):
+    """
+    Softsign Activation
+
+    .. math::
+
+        \text{Softsign}(x) = \frac{x}{1 + |x|}
+
+    Parameters:
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Shape:
+        - input: Tensor with any shape.
+        - output: Tensor with the same shape as input.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import numpy as np
+
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+            m = paddle.nn.Softsign()
+            out = m(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
+    """
+
+    def __init__(self, name=None):
+        super(Softsign, self).__init__()
+        self._name = name
+
+    def forward(self, x):
+        return F.softsign(x, self._name)
+
+
+class Tanhshrink(layers.Layer):
+    """
+    Tanhshrink Activation
+
+    .. math::
+
+        \text{Tanhshrink}(x) = x - \text{Tanh}(x)
+
+    Parameters:
+        name (str, optional): Name for the operation (optional, default is None).
+            For more information, please refer to :ref:`api_guide_Name`.
+
+    Shape:
+        - input: Tensor with any shape.
+        - output: Tensor with the same shape as input.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import numpy as np
+
+            paddle.disable_static()
+
+            x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
+            m = paddle.nn.Tanhshrink()
+            out = m(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
+    """
+
+    def __init__(self, name=None):
+        super(Tanhshrink, self).__init__()
+        self._name = name
+
+    def forward(self, x):
+        return F.tanhshrink(x, self._name)
+
+
 class LogSigmoid(layers.Layer):
     """
     LogSigmoid Activation.
...
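Each new Layer class is a thin stateful wrapper: the constructor stores its arguments and forward() delegates to the matching functional API, so the two forms agree. A small check with arbitrarily chosen configuration values:

    import numpy as np
    import paddle

    paddle.disable_static()
    x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))

    layer = paddle.nn.Softplus(beta=2, threshold=15)   # kept as self._beta / self._threshold
    out_layer = layer(x)                               # forward() calls F.softplus(x, 2, 15)
    out_func = paddle.nn.functional.softplus(x, beta=2, threshold=15)
    print(np.allclose(out_layer.numpy(), out_func.numpy()))  # expected: True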