s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection)
Commit faad8351
Authored Nov 03, 2017 by guosheng
Refine GRU Operator by following comments
Parent: 23a631d4

3 changed files with 12 additions and 57 deletions (+12, -57)
paddle/operators/gru_op.cc                        +10  -9
paddle/operators/math/gru_compute.h                +0  -22
python/paddle/v2/framework/tests/test_gru_op.py    +2  -26
paddle/operators/gru_op.cc
@@ -61,8 +61,6 @@ class GRUOp : public framework::OperatorWithKernel {
     ctx->SetOutputDim("BatchResetHiddenPrev", {input_dims[0], frame_size});
     ctx->SetOutputDim("BatchHidden", {input_dims[0], frame_size});
     ctx->SetOutputDim("Hidden", {input_dims[0], frame_size});
-    // ctx->ShareLoD("Input", "Gate");
-    // ctx->ShareLoD("Input", "ResetHiddenPrev");
     ctx->ShareLoD("Input", "Hidden");
   }
 };
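The hunk above leaves every recurrent output at shape (T, frame_size) and shares the input's LoD with "Hidden" only. A rough sketch of that shape arithmetic, not Paddle code: the packed gate input is (T, 3D) per the operator's documentation below, and the names here are just illustrative.

```python
import numpy as np

# Hypothetical illustration of GRUOp's inferred shapes: "Input" packs the
# three projected gate inputs, so it is (T, 3 * D); the outputs resized in
# the hunk above are each (T, D).
T, D = 9, 4                        # total time steps in the mini-batch, hidden size
input_dims = (T, 3 * D)            # "Input" as documented: (T x 3D)
frame_size = input_dims[1] // 3    # D recovered from the packed width
for name in ("BatchResetHiddenPrev", "BatchHidden", "Hidden"):
    print(name, (input_dims[0], frame_size))   # each is (9, 4)
```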
@@ -72,7 +70,7 @@ class GRUOpMaker : public framework::OpProtoAndCheckerMaker {
   GRUOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("Input",
-             "(LoDTensor) The first input is a LodTensor, which support "
+             "(LoDTensor) The first input is a LodTensor, which supports "
              "variable-time length input sequence. The underlying tensor in "
              "this LoDTenosr is a matrix with shape (T X 3D), where, T is the "
              "total time steps in this mini-batch, D is the hidden size.");
@@ -132,14 +130,17 @@ class GRUOpMaker : public framework::OpProtoAndCheckerMaker {
              "whether to compute reversed GRU.")
         .SetDefault(false);
     AddComment(R"DOC(
-GRUOp implements part calculations of the GRU as following:
+GRU Operator implements part calculations of the complete GRU as following:
 \f[
-update \ gate: u_t = actGate(xu_t + W_u * hidden_prev + bias_u) \\
-reset \ gate: r_t = actGate(xr_t + W_r * hidden_prev + bias_r) \\
-output \ candidate: {h}_t = actNode(xc_t + W_c * dot(r_t, hidden_prev) + bias_c) \\
-output: h_t = dot((1-u_t), hidden_prev) + dot(u_t, {h}_t)
+update \ gate: u_t = actGate(xu_t + W_u * h_{t-1} + b_u) \\
+reset \ gate: r_t = actGate(xr_t + W_r * h_{t-1} + b_r) \\
+output \ candidate: {h}_t = actNode(xc_t + W_c * dot(r_t, h_{t-1}) + b_c) \\
+output: h_t = dot((1 - u_t), h_{t-1}) + dot(u_t, {h}_t)
 \f]
-The rest of GRU can be completed by using FCOp's output as the input of GRUOp.
+@note To implement the complete GRU, fully-connected operator must be used
+before to feed xu, xr and xc as the Input of GRU operator.
 )DOC");
   }
 };
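The revised doc comment writes the recurrence in terms of h_{t-1} and b_* instead of hidden_prev and bias_*. A minimal NumPy sketch of one step, assuming sigmoid for actGate and tanh for actNode (the usual defaults) and reading dot(r_t, h_{t-1}) as an elementwise product; the names and the weight orientation are illustrative, not Paddle's API:

```python
import numpy as np

def sigmoid(x):
    return 1. / (1. + np.exp(-x))

def gru_step(xu, xr, xc, h_prev, W_u, W_r, W_c, b_u, b_r, b_c):
    u = sigmoid(xu + h_prev @ W_u + b_u)             # update gate
    r = sigmoid(xr + h_prev @ W_r + b_r)             # reset gate
    h_cand = np.tanh(xc + (r * h_prev) @ W_c + b_c)  # output candidate
    return (1. - u) * h_prev + u * h_cand            # h_t

D = 4
rng = np.random.default_rng(0)
# xu, xr, xc would come from an upstream fully-connected op, as the @note says.
xu, xr, xc = (rng.standard_normal((1, D)) for _ in range(3))
W_u, W_r, W_c = (rng.standard_normal((D, D)) for _ in range(3))
b = np.zeros((1, D))
h = gru_step(xu, xr, xc, np.zeros((1, D)), W_u, W_r, W_c, b, b, b)
print(h.shape)  # (1, 4)
```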
paddle/operators/math/gru_compute.h
@@ -19,28 +19,6 @@ namespace paddle {
 namespace operators {
 namespace math {
 
-// typedef enum {
-//   HL_ACTIVATION_SIGMOID = 0,
-//   HL_ACTIVATION_RELU = 1,
-//   HL_ACTIVATION_TANH = 2,
-//   HL_ACTIVATION_LINEAR = 3,
-//   HL_ACTIVATION_END
-// } activation_mode_t;
-
-// inline activation_mode_t ActiveType(const std::string &type) {
-//   if (type == "sigmoid") {
-//     return HL_ACTIVATION_SIGMOID;
-//   } else if (type == "relu") {
-//     return HL_ACTIVATION_RELU;
-//   } else if (type == "tanh") {
-//     return HL_ACTIVATION_TANH;
-//   } else if (type == "linear" || type == "") {
-//     return HL_ACTIVATION_LINEAR;
-//   } else {
-//     PADDLE_THROW("Do not support activation type.");
-//   }
-// }
-
 template <typename T>
 struct hl_gru_value {
   T *gateWeight;
python/paddle/v2/framework/tests/test_gru_op.py
@@ -2,31 +2,7 @@ import unittest
 import numpy as np
 import math
 from op_test import OpTest
 
-SIGMOID_THRESHOLD_MIN = -40.0
-SIGMOID_THRESHOLD_MAX = 13.0
-EXP_MAX_INPUT = 40.0
-
-
-def identity(x):
-    return x
-
-
-def sigmoid(x):
-    y = np.copy(x)
-    y[x < SIGMOID_THRESHOLD_MIN] = SIGMOID_THRESHOLD_MIN
-    y[x > SIGMOID_THRESHOLD_MAX] = SIGMOID_THRESHOLD_MAX
-    return 1. / (1. + np.exp(-y))
-
-
-def tanh(x):
-    y = -2. * x
-    y[y > EXP_MAX_INPUT] = EXP_MAX_INPUT
-    return (2. / (1. + np.exp(y))) - 1.
-
-
-def relu(x):
-    return np.maximum(x, 0)
+from test_lstm_op import identity, sigmoid, tanh, relu
 
 
 class TestGRUOp(OpTest):
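The deleted helpers are the same numerically-stabilized activations now imported from test_lstm_op. Their thresholds clamp the exponent before calling np.exp, presumably because float64 exp overflows near exp(709). A quick sketch of the effect, reusing the threshold value from the removed code:

```python
import numpy as np

x = np.array([-1000.0])
with np.errstate(over="warn"):
    naive = 1. / (1. + np.exp(-x))     # np.exp(1000.) overflows to inf -> 0.0
clamped = np.maximum(x, -40.0)         # SIGMOID_THRESHOLD_MIN from the old code
stable = 1. / (1. + np.exp(-clamped))  # ~4.2e-18, no overflow
print(naive, stable)
```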
@@ -108,7 +84,7 @@ class TestGRUOp(OpTest):
         return batch_gate, batch_reset_hidden_prev, hidden
 
     def set_data(self):
-        lod = [[0, 2, 6, 9]]
+        lod = [[0, 2, 6, self.batch_size]]
         self.idx_in_seq_list = self.seq_to_batch(lod, self.is_reverse)
         batch_size = self.batch_size
         frame_size = self.frame_size
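The test now derives the last LoD offset from self.batch_size instead of hard-coding 9. A LoD entry is a list of cumulative sequence offsets, so [[0, 2, 6, 9]] packs three sequences of lengths 2, 4 and 3 into one 9-row tensor; a minimal sketch of that bookkeeping:

```python
# LoD offsets are cumulative: consecutive differences give sequence lengths.
lod = [0, 2, 6, 9]                 # 9 == batch_size in the old hard-coded test
lengths = [b - a for a, b in zip(lod, lod[1:])]
print(lengths)                     # [2, 4, 3] -> three variable-length sequences
```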