BaiXuePrincess / Paddle, a fork of PaddlePaddle / Paddle (in sync with the upstream project), commit 7a89a0a7
Commit 7a89a0a7 (unverified)
Authored Aug 14, 2020 by zhupengyang; committed via GitHub on Aug 14, 2020

hardshrink and Hardshrink: add class, threshold default 0.5 (#26198)

Parent: 935da32d

Showing 6 changed files with 169 additions and 21 deletions (+169 / -21)
python/paddle/fluid/layers/ops.py  (+1 / -4)
python/paddle/fluid/tests/unittests/test_activation_op.py  (+64 / -14)
python/paddle/nn/__init__.py  (+1 / -0)
python/paddle/nn/functional/__init__.py  (+1 / -1)
python/paddle/nn/functional/activation.py  (+54 / -2)
python/paddle/nn/layer/activation.py  (+48 / -0)
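
For orientation, hard shrinkage passes a value through unchanged when it lies outside [-threshold, threshold] and zeroes it otherwise; with the new default threshold of 0.5, an input of [-1, 0.3, 2.5] maps to [-1, 0, 2.5]. A minimal NumPy sketch of that rule (illustrative only; it mirrors the ref_hardshrink helper added to the tests below, and the function name and array values here are made up):

    import numpy as np

    def hardshrink_ref(x, threshold=0.5):
        # Zero out everything inside [-threshold, threshold], keep the rest.
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0
        return out

    x = np.array([-1.0, 0.3, 2.5], dtype=np.float32)
    print(hardshrink_ref(x))       # [-1.   0.   2.5]
    print(hardshrink_ref(x, 0.6))  # same result here; 0.3 is still inside the band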
python/paddle/fluid/layers/ops.py

@@ -474,6 +474,7 @@ __all__ += ['hard_shrink']
_hard_shrink_ = generate_layer_fn('hard_shrink')


@deprecated(since="2.0.0", update_to="paddle.nn.functional.hardshrink")
def hard_shrink(x, threshold=None):
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hard_shrink')
...
@@ -487,10 +488,6 @@ def hard_shrink(x, threshold=None):
hard_shrink.__doc__ = _hard_shrink_.__doc__ + """
:alias_main: paddle.nn.functional.hard_shrink
:alias: paddle.nn.functional.hard_shrink,paddle.nn.functional.activation.hard_shrink
:old_api: paddle.fluid.layers.hard_shrink

Examples:

    >>> import paddle.fluid as fluid
...
python/paddle/fluid/tests/unittests/test_activation_op.py

@@ -22,7 +22,7 @@ from scipy.special import expit, erf
import paddle
import paddle.fluid as fluid
import paddle.nn as nn
import paddle.nn.functional as functional
import paddle.nn.functional as F
from paddle.fluid import compiler, Program, program_guard
...
@@ -344,6 +344,12 @@ class TestTanhShrink(TestActivation):
        self.check_grad(['X'], 'Out')


def ref_hardshrink(x, threshold):
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
...
@@ -351,11 +357,10 @@ class TestHardShrink(TestActivation):
        threshold = 0.5
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype) * 10
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0
        out = ref_hardshrink(x, threshold)

        self.attrs = {'lambda': threshold}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
...
@@ -364,17 +369,62 @@ class TestHardShrink(TestActivation):
        self.check_grad(['X'], 'Out')


class TestHardShrinkOpError(unittest.TestCase):
class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.data('X', [10, 12])
            out1 = F.hardshrink(x)
            hd = paddle.nn.Hardshrink()
            out2 = hd(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in res:
            self.assertEqual(np.allclose(out_ref, r), True)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_variable(self.x_np)
        out1 = F.hardshrink(x)
        hd = paddle.nn.Hardshrink()
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in [out1, out2]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)

        out1 = F.hardshrink(x, 0.6)
        hd = paddle.nn.Hardshrink(0.6)
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.6)
        for r in [out1, out2]:
            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = fluid.layers.hard_shrink(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardshrink(self.x_np, 0.5)
        self.assertEqual(np.allclose(out_ref, res[0]), True)

    def test_errors(self):
        with program_guard(Program()):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.hard_shrink, 1)
            self.assertRaises(TypeError, F.hardshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.hard_shrink, x_int32)
            x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, F.hardshrink, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.hard_shrink(x_fp16)
            x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
            F.hardshrink(x_fp16)


class TestSoftShrink(TestActivation):
...
@@ -1435,7 +1485,7 @@ class TestNNFunctionalReluAPI(unittest.TestCase):
        main_program = Program()
        with fluid.program_guard(main_program):
            x = fluid.data(name='x', shape=self.x_shape)
            y = functional.relu(x)
            y = F.relu(x)
            exe = fluid.Executor(fluid.CPUPlace())
            out = exe.run(main_program, feed={'x': self.x}, fetch_list=[y])
            self.assertTrue(np.allclose(out[0], self.y))
...
@@ -1501,7 +1551,7 @@ class TestNNFunctionalSigmoidAPI(unittest.TestCase):
        main_program = Program()
        with fluid.program_guard(main_program):
            x = fluid.data(name='x', shape=self.x_shape)
            y = functional.sigmoid(x)
            y = F.sigmoid(x)
            exe = fluid.Executor(fluid.CPUPlace())
            out = exe.run(main_program, feed={'x': self.x}, fetch_list=[y])
            self.assertTrue(np.allclose(out[0], self.y))
...
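
The new TestHardShrinkAPI cases all boil down to comparing framework output against the ref_hardshrink helper above. A standalone sanity check of that reference logic, outside the Paddle test harness (a sketch; the equivalent np.where formulation is mine, not part of the commit):

    import numpy as np

    def ref_hardshrink(x, threshold):
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0
        return out

    x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
    # keep |x| > threshold, zero the rest: same rule written with np.where
    assert np.allclose(ref_hardshrink(x_np, 0.5),
                       np.where(np.abs(x_np) > 0.5, x_np, 0.0))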
python/paddle/nn/__init__.py

@@ -51,6 +51,7 @@ from .decode import beam_search_decode #DEFINE_ALIAS
from .decode import gather_tree #DEFINE_ALIAS
from .input import data #DEFINE_ALIAS
# from .input import Input #DEFINE_ALIAS
from .layer.activation import Hardshrink
# from .layer.activation import PReLU #DEFINE_ALIAS
from .layer.activation import ReLU #DEFINE_ALIAS
from .layer.activation import LeakyReLU #DEFINE_ALIAS
...
python/paddle/nn/functional/__init__.py

@@ -29,7 +29,7 @@ from .activation import brelu #DEFINE_ALIAS
from .activation import elu #DEFINE_ALIAS
from .activation import erf #DEFINE_ALIAS
from .activation import gelu #DEFINE_ALIAS
from .activation import hard_shrink #DEFINE_ALIAS
from .activation import hardshrink #DEFINE_ALIAS
from .activation import hard_sigmoid #DEFINE_ALIAS
from .activation import hard_swish #DEFINE_ALIAS
from .activation import hsigmoid #DEFINE_ALIAS
...
python/paddle/nn/functional/activation.py

@@ -17,7 +17,6 @@ from ...fluid.layers import brelu #DEFINE_ALIAS
from ...fluid.layers import elu #DEFINE_ALIAS
from ...fluid.layers import erf #DEFINE_ALIAS
from ...fluid.layers import gelu #DEFINE_ALIAS
from ...fluid.layers import hard_shrink #DEFINE_ALIAS
from ...fluid.layers import hard_sigmoid #DEFINE_ALIAS
from ...fluid.layers import hard_swish #DEFINE_ALIAS
from ...fluid.layers import leaky_relu #DEFINE_ALIAS
...
@@ -38,7 +37,7 @@ __all__ = [
    'elu',
    'erf',
    'gelu',
    'hard_shrink',
    'hardshrink',
    'hard_sigmoid',
    'hard_swish',
    'hsigmoid',
...
@@ -69,6 +68,59 @@ from ...fluid.data_feeder import check_variable_and_dtype
import paddle


def hardshrink(x, threshold=0.5, name=None):
    """
    hard shrinkage activation

    .. math::

        hardshrink(x)=
            \left\{
            \begin{aligned}
            &x, & & if \ x > threshold \\
            &x, & & if \ x < -threshold \\
            &0, & & if \ others
            \end{aligned}
            \right.

    Args:
        x (Tensor): The input Tensor with data type float32, float64.
        threshold (float, optional): The value of threshold for hardshrink. Default is 0.5.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor with the same data type and shape as ``x``.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F
            import numpy as np

            paddle.disable_static()

            x = paddle.to_variable(np.array([-1, 0.3, 2.5]))
            out = F.hardshrink(x)  # [-1., 0., 2.5]
    """
    if in_dygraph_mode():
        return core.ops.hard_shrink(x, 'threshold', threshold)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hardshrink')
    helper = LayerHelper('hardshrink', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='hard_shrink',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'threshold': threshold})
    return out


def hsigmoid(input,
             label,
             weight,
...
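
To make the effect of the new threshold argument concrete, a short dygraph sketch based on the docstring example above (the input values are illustrative; paddle.to_variable is used only because that is the API the examples in this diff use):

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    paddle.disable_static()
    x = paddle.to_variable(np.array([-1.0, 0.55, 2.5], dtype='float32'))

    print(F.hardshrink(x).numpy())       # threshold=0.5 -> [-1.   0.55  2.5]
    print(F.hardshrink(x, 0.6).numpy())  # threshold=0.6 -> [-1.   0.    2.5]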
python/paddle/nn/layer/activation.py

@@ -15,6 +15,7 @@
# TODO: define activation functions of neural network
__all__ = [
    'Hardshrink',
    # 'PReLU',
    'ReLU',
    'LeakyReLU',
...
@@ -30,6 +31,53 @@ from ...fluid.framework import in_dygraph_mode
from .. import functional


class Hardshrink(layers.Layer):
    """
    Hardshrink Activation

    .. math::

        hardshrink(x)=
            \left\{
            \begin{aligned}
            &x, & & if \ x > threshold \\
            &x, & & if \ x < -threshold \\
            &0, & & if \ others
            \end{aligned}
            \right.

    Parameters:
        threshold (float, optional): The value of threshold for hardshrink. Default is 0.5.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        - input: Tensor with any shape.
        - output: Tensor with the same shape as input.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            paddle.disable_static()

            x = paddle.to_variable(np.array([-1, 0.3, 2.5]))
            m = paddle.nn.Hardshrink()
            out = m(x)  # [-1., 0., 2.5]
    """

    def __init__(self, threshold=0.5, name=None):
        super(Hardshrink, self).__init__()
        self._threshold = threshold
        self._name = name

    def forward(self, x):
        return functional.hardshrink(x, self._threshold, self._name)


class HSigmoid(layers.Layer):
    """
    :alias_main: paddle.nn.HSigmoid
...
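
The layer form is a thin wrapper that stores the threshold and calls functional.hardshrink in forward. A static-graph usage sketch patterned on the new test_static_api case (shapes and feed values are illustrative):

    import numpy as np
    import paddle

    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program()):
        x = paddle.data('X', [10, 12])
        out = paddle.nn.Hardshrink(threshold=0.5)(x)
        exe = paddle.static.Executor(paddle.CPUPlace())
        res = exe.run(feed={'X': np.random.uniform(-1, 1, [10, 12]).astype('float32')},
                      fetch_list=[out])
    print(res[0].shape)  # (10, 12)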