Repository: BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Commit d2e30a2c (unverified)
Authored by Cao Ying on Nov 20, 2017; committed via GitHub on Nov 20, 2017.

Merge pull request #5771 from lcy-seso/softsign

add the softsign activation.

Parents: 9db4d019, fafd3e0f
3 changed files with 51 additions and 5 deletions (+51, -5):

  paddle/gserver/activations/ActivationFunction.cpp        +31  -0
  python/paddle/trainer_config_helpers/activations.py       +15  -2
  python/paddle/v2/fluid/tests/test_sequence_slice_op.py     +5  -3
paddle/gserver/activations/ActivationFunction.cpp

@@ -212,6 +212,37 @@ Error __must_check backward(Argument& act) {
 }
 END_DEFINE_ACTIVATION(sequence_softmax)
 
+/*
+ * @brief SoftSign Activation.
+ * \f[
+ *    f(z) = \frac{z}{1 + |z|}
+ * \f]
+ */
+BEGIN_DEFINE_ACTIVATION(softsign)
+private:
+MatrixPtr denominator_;
+
+Error __must_check forward(Argument& act) {
+  size_t height = act.value->getHeight();
+  size_t width = act.value->getWidth();
+  Matrix::resizeOrCreate(
+      denominator_, height, width, false, useGpu(act.deviceId));
+  denominator_->assign(*act.value);
+  denominator_->abs2();
+  denominator_->add(1.);
+
+  act.value->dotDiv(*act.value, *denominator_);
+  return Error();
+}
+
+Error __must_check backward(Argument& act) {
+  denominator_->square2();
+  denominator_->scalarDiv(*denominator_, 1.);
+  act.grad->dotMul(*act.grad, *denominator_);
+  return Error();
+}
+END_DEFINE_ACTIVATION(softsign)
+
 /**
  * @brief Relu Activation.
  * forward. y = max(0, z)
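As a reference for what the new forward/backward pair computes (this sketch is not part of the commit and uses NumPy instead of Paddle's Matrix API; the helper names are made up): forward() caches the denominator 1 + |z| and divides the activation value by it in place, and backward() scales the incoming gradient by 1 / (1 + |z|)^2, which is f'(z) for f(z) = z / (1 + |z|).

import numpy as np

def softsign_forward(z):
    # Mirrors forward(): denominator_ = 1 + |z|, then y = z / denominator_.
    denominator = 1. + np.abs(z)
    return z / denominator, denominator

def softsign_backward(grad_y, denominator):
    # Mirrors backward(): the cached denominator is squared and inverted,
    # then the incoming gradient is scaled elementwise by 1 / (1 + |z|)^2.
    return grad_y / np.square(denominator)

z = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
y, denom = softsign_forward(z)
grad_z = softsign_backward(np.ones_like(z), denom)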
python/paddle/trainer_config_helpers/activations.py

@@ -17,7 +17,8 @@ __all__ = [
     "IdentityActivation", "LinearActivation", 'SequenceSoftmaxActivation',
     'ExpActivation', "ReluActivation", "BReluActivation", "SoftReluActivation",
     "STanhActivation", "AbsActivation", "SquareActivation", "BaseActivation",
-    "LogActivation", "SqrtActivation", "ReciprocalActivation"
+    "LogActivation", "SqrtActivation", "ReciprocalActivation",
+    "SoftSignActivation"
 ]

@@ -243,8 +244,20 @@ class ReciprocalActivation(BaseActivation):
     Reciprocal Activation.
 
     .. math::
-       f(z) = 1/z
+       f(z) = \\frac{1}{z}
     """
 
     def __init__(self):
         BaseActivation.__init__(self, 'reciprocal', False)
+
+
+class SoftSignActivation(BaseActivation):
+    """
+    SoftSign Activation.
+
+    .. math::
+       f(z)= \\frac{z}{1 + |z|}
+    """
+
+    def __init__(self):
+        BaseActivation.__init__(self, 'softsign', False)
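With the class exported in __all__, the activation can be selected like any other trainer_config_helpers activation. A minimal, hypothetical layer-config sketch (the layer names and sizes below are illustrative assumptions, not part of the commit):

from paddle.trainer_config_helpers import *

# Hypothetical config fragment: a fully connected layer whose output is
# passed through the newly added softsign activation.
data = data_layer(name='input', size=100)
hidden = fc_layer(input=data, size=128, act=SoftSignActivation())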
python/paddle/v2/fluid/tests/test_sequence_slice_op.py
(file mode changed: 100755 → 100644)

@@ -3,6 +3,7 @@ import numpy as np
 import sys
 from op_test import OpTest
 
 class TestSequenceSliceOp(OpTest):
     def set_data(self):
         self.init_test_case()

@@ -13,12 +14,12 @@ class TestSequenceSliceOp(OpTest):
         length = np.array(self.length).astype("int64")
         self.inputs = {'X': (x, lod), 'Offset': offset, 'Length': length}
         outs = []  #np.zeros((100, 3, 2)).astype('float32')
         out_lod = [[0]]
         out_lod_offset = 0
         for i in range(len(offset)):
-            sub_x = x[lod[0][i] + offset[i, 0]: lod[0]
-                [i] + offset[i, 0] + length[i, 0], :]
+            sub_x = x[lod[0][i] + offset[i, 0]: lod[0][i] + offset[i, 0] +
+                      length[i, 0], :]
             out_lod_offset = out_lod_offset + len(sub_x)
             outs.append(sub_x)
             out_lod[0].append(out_lod_offset)

@@ -41,5 +42,6 @@ class TestSequenceSliceOp(OpTest):
     def test_check_grad(self):
         self.check_grad(['X'], 'Out')
 
 if __name__ == '__main__':
     unittest.main()
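For context on what set_data builds (not part of the commit): the loop slices each LoD sequence of x at a per-sequence offset and length to form the expected output. A small, self-contained NumPy sketch of the same arithmetic with made-up inputs:

import numpy as np

# Made-up inputs: x has 7 rows split into two sequences by lod[0] = [0, 3, 7].
x = np.arange(14).reshape(7, 2).astype('float32')
lod = [[0, 3, 7]]
offset = np.array([[1], [2]], dtype='int64')   # slice start within each sequence
length = np.array([[2], [2]], dtype='int64')   # slice length for each sequence

outs, out_lod, out_lod_offset = [], [[0]], 0
for i in range(len(offset)):
    # Same index arithmetic as the test: rows of sequence i starting at its
    # offset and spanning the requested length.
    sub_x = x[lod[0][i] + offset[i, 0]: lod[0][i] + offset[i, 0] +
              length[i, 0], :]
    out_lod_offset = out_lod_offset + len(sub_x)
    outs.append(sub_x)
    out_lod[0].append(out_lod_offset)

expected = np.concatenate(outs, axis=0)   # rows 1-2 and rows 5-6 of x
# out_lod is now [[0, 2, 4]], the LoD of the expected output.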