Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit 04b5daf9
Authored on Feb 01, 2017 by wangyang59
change the parameter position of gru_step_layer from 1 back to 0
Parent: c1f9cd9d

Showing 6 changed files with 13 additions and 10 deletions (+13 -10)
paddle/gserver/layers/GruStepLayer.cpp                                               +2 -2
paddle/gserver/tests/test_LayerGrad.cpp                                              +2 -2
python/paddle/trainer/config_parser.py                                               +1 -1
python/paddle/trainer_config_helpers/layers.py                                       +4 -1
python/paddle/trainer_config_helpers/tests/configs/protostr/shared_gru.protostr      +2 -2
python/paddle/trainer_config_helpers/tests/configs/protostr/test_rnn_group.protostr  +2 -2
paddle/gserver/layers/GruStepLayer.cpp

@@ -68,8 +68,8 @@ bool GruStepLayer::init(const LayerMap& layerMap,
   if (!Layer::init(layerMap, parameterMap)) return false;
   CHECK_EQ(2U, inputLayers_.size());
 
-  CHECK_EQ(getSize() * getSize() * 3, parameters_[1]->getSize());
-  weight_.reset(new Weight(getSize(), getSize() * 3, parameters_[1]));
+  CHECK_EQ(getSize() * getSize() * 3, parameters_[0]->getSize());
+  weight_.reset(new Weight(getSize(), getSize() * 3, parameters_[0]));
 
   if (biasParameter_.get() != NULL) {
     CHECK_EQ(getSize() * 3, biasParameter_->getSize());
paddle/gserver/tests/test_LayerGrad.cpp

@@ -1404,9 +1404,9 @@ TEST(Layer, GruStepLayer) {
   config.biasSize = 12;
 
   config.inputDefs.push_back(
-      {INPUT_DATA, "layer_0", /* dim= */ 12, /* paraSize= */ 0});
+      {INPUT_DATA, "layer_0", /* dim= */ 12, /* paraSize= */ 48});
   config.inputDefs.push_back(
-      {INPUT_DATA, "layer_1", /* dim= */ 4, /* paraSize= */ 48});
+      {INPUT_DATA, "layer_1", /* dim= */ 4, /* paraSize= */ 0});
   config.layerConfig.add_inputs();
   config.layerConfig.add_inputs();
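Note: the numbers in this test line up with the checks in GruStepLayer::init above. The step layer size is 4, so the GRU weight holds size * size * 3 = 48 values and the bias size * 3 = 12; the commit simply moves the 48-element paraSize onto layer_0 (the size * 3-wide projected input) to match the switch to parameters_[0]. A small sanity check, illustrative Python only, not part of the commit:

    # Worked sizes for TEST(Layer, GruStepLayer), assuming the layer size of 4
    # used in this test; illustrative only, not code from the commit.
    size = 4
    weight_size = size * size * 3  # 48, checked against parameters_[0] in GruStepLayer::init
    bias_size = size * 3           # 12, matches config.biasSize
    input_dim = size * 3           # 12, dim of "layer_0", the projected input
    state_dim = size               # 4, dim of "layer_1", the previous output memory
    assert (weight_size, bias_size, input_dim, state_dim) == (48, 12, 12, 4)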
python/paddle/trainer/config_parser.py

@@ -2996,7 +2996,7 @@ class GruStepLayer(LayerBase):
         config_assert(input_layer1.size == size,
                       'input_layer1.size != layer.size')
         self.config.active_gate_type = active_gate_type
-        self.create_input_parameter(1, size * size * 3, [size, size * 3])
+        self.create_input_parameter(0, size * size * 3, [size, size * 3])
         self.create_bias_parameter(bias, size * 3)
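The index passed to create_input_parameter also determines the name given to an auto-generated parameter, which is why the test_rnn_group.protostr fixture further down renames "...w1" to "...w0". A hedged sketch of that naming pattern (auto_param_name is a hypothetical helper for illustration, not PaddlePaddle API):

    # Hypothetical illustration of the "<layer>.w<input index>" naming visible
    # in the test_rnn_group.protostr diff below; not an actual Paddle function.
    def auto_param_name(layer_name, input_index):
        return "%s.w%d" % (layer_name, input_index)

    layer = "___gru_group_0__@__gru_group_0___recurrent_group"
    print(auto_param_name(layer, 1))  # "...w1" -- parameter slot before this commit
    print(auto_param_name(layer, 0))  # "...w0" -- parameter slot after this commit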
python/paddle/trainer_config_helpers/layers.py

@@ -2706,6 +2706,9 @@ def gru_step_layer(input,
     :param name:
     :param gate_act:
     :param bias_attr:
+    :param param_attr: the parameter_attribute for transforming the output_mem
+                       from previous step. It is instead grouped with input due
+                       to backward model compatibility.
     :param layer_attr:
     :return: LayerOutput object.
     :rtype: LayerOutput

@@ -2716,7 +2719,7 @@ def gru_step_layer(input,
     Layer(
         name=name,
         type=LayerType.GRU_STEP_LAYER,
-        inputs=[input.name, Input(output_mem.name, **param_attr.attr)],
+        inputs=[Input(input.name, **param_attr.attr), output_mem.name],
         bias=ParamAttr.to_bias(bias_attr),
         size=size,
         active_type=act.name,
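As the new docstring notes, param_attr describes the weight that transforms output_mem, but after this commit the parameter is attached to the first input again. A minimal usage sketch, assuming the v1 trainer_config_helpers API shown in this diff; the layer and parameter names ('gru_state', 'gru_param', 'gru_bias') are illustrative, mirroring the shared_gru fixture:

    # A hedged sketch of a GRU step inside a recurrent group, assuming the
    # paddle.trainer_config_helpers API of this commit; names are illustrative.
    from paddle.trainer_config_helpers import *

    def gru_step(projected_input, size):
        # Previous hidden state of the recurrent group.
        state = memory(name='gru_state', size=size)
        # After this commit the named weight ('gru_param') binds to the first
        # input (the size * 3 projection), matching parameters_[0] on the C++ side.
        return gru_step_layer(name='gru_state',
                              input=projected_input,  # size * 3 wide
                              output_mem=state,
                              size=size,
                              param_attr=ParamAttr(name='gru_param'),
                              bias_attr=ParamAttr(name='gru_bias'))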
python/paddle/trainer_config_helpers/tests/configs/protostr/shared_gru.protostr

@@ -51,10 +51,10 @@ layers {
   active_type: "tanh"
   inputs {
     input_layer_name: "__simple_gru_0___transform@__simple_gru_0___recurrent_group"
+    input_parameter_name: "gru_param"
   }
   inputs {
     input_layer_name: "__simple_gru_0__+delay1@__simple_gru_0___recurrent_group"
-    input_parameter_name: "gru_param"
   }
   bias_parameter_name: "gru_bias"
   active_gate_type: "sigmoid"

@@ -105,10 +105,10 @@ layers {
   active_type: "tanh"
   inputs {
     input_layer_name: "__simple_gru_1___transform@__simple_gru_1___recurrent_group"
+    input_parameter_name: "gru_param"
   }
   inputs {
     input_layer_name: "__simple_gru_1__+delay1@__simple_gru_1___recurrent_group"
-    input_parameter_name: "gru_param"
   }
   bias_parameter_name: "gru_bias"
   active_gate_type: "sigmoid"
python/paddle/trainer_config_helpers/tests/configs/protostr/test_rnn_group.protostr

@@ -307,10 +307,10 @@ layers {
   active_type: "tanh"
   inputs {
     input_layer_name: "__mixed_1__@__gru_group_0___recurrent_group"
+    input_parameter_name: "___gru_group_0__@__gru_group_0___recurrent_group.w0"
   }
   inputs {
     input_layer_name: "__gru_group_0__+delay1@__gru_group_0___recurrent_group"
-    input_parameter_name: "___gru_group_0__@__gru_group_0___recurrent_group.w1"
   }
   bias_parameter_name: "___gru_group_0__@__gru_group_0___recurrent_group.wbias"
   active_gate_type: "sigmoid"

@@ -462,7 +462,7 @@ parameters {
   initial_smart: false
 }
 parameters {
-  name: "___gru_group_0__@__gru_group_0___recurrent_group.w1"
+  name: "___gru_group_0__@__gru_group_0___recurrent_group.w0"
   size: 30000
   initial_mean: 0.0
   initial_std: 0.1