Commit 04b5daf9 authored by wangyang59

change the parameter position of gru_step_layer from 1 back to 0

Parent c1f9cd9d
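
For orientation, here is a minimal usage sketch of the intended wiring after this change, assuming the v1 trainer_config_helpers API; the names `hidden_dim`, `gru_step`, and the `ParamAttr` names are illustrative, not taken from this commit. The `param_attr` weight of shape [size, size * 3] is now registered on the first input (the size * 3 transform of the current step's input) rather than on `output_mem`:

```python
from paddle.trainer_config_helpers import *

hidden_dim = 100  # illustrative size

def gru_step(x_transform):
    # x_transform: the current input already projected to 3 * hidden_dim
    # (what input slot 0 of gru_step_layer expects).
    out_mem = memory(name='gru_out', size=hidden_dim)  # previous step's output
    return gru_step_layer(
        name='gru_out',
        input=x_transform,                        # slot 0: now carries param_attr
        output_mem=out_mem,                       # slot 1: previous hidden state
        size=hidden_dim,
        param_attr=ParamAttr(name='gru_param'),   # the [size, size*3] recurrent weight
        bias_attr=ParamAttr(name='gru_bias'))

# gru_step would normally be passed as the step function of a recurrent_group.
```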
@@ -68,8 +68,8 @@ bool GruStepLayer::init(const LayerMap& layerMap,
   if (!Layer::init(layerMap, parameterMap)) return false;
   CHECK_EQ(2U, inputLayers_.size());
-  CHECK_EQ(getSize() * getSize() * 3, parameters_[1]->getSize());
-  weight_.reset(new Weight(getSize(), getSize() * 3, parameters_[1]));
+  CHECK_EQ(getSize() * getSize() * 3, parameters_[0]->getSize());
+  weight_.reset(new Weight(getSize(), getSize() * 3, parameters_[0]));
   if (biasParameter_.get() != NULL) {
     CHECK_EQ(getSize() * 3, biasParameter_->getSize());
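
For context, the single [getSize(), getSize() * 3] weight packs the three recurrent weight blocks of a GRU (update gate, reset gate, candidate) side by side. A shape-only numpy sketch, not PaddlePaddle's kernel, and the block order inside the concatenation is not asserted here:

```python
import numpy as np

size = 4                                   # matches the unit test below
W = np.zeros((size, size * 3))             # one matrix holding three blocks
W_update, W_reset, W_cand = np.split(W, 3, axis=1)
assert W_update.shape == (size, size)      # each block maps h_prev (size) -> size
```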
@@ -1404,9 +1404,9 @@ TEST(Layer, GruStepLayer) {
   config.biasSize = 12;
   config.inputDefs.push_back(
-      {INPUT_DATA, "layer_0", /* dim= */ 12, /* paraSize= */ 0});
+      {INPUT_DATA, "layer_0", /* dim= */ 12, /* paraSize= */ 48});
   config.inputDefs.push_back(
-      {INPUT_DATA, "layer_1", /* dim= */ 4, /* paraSize= */ 48});
+      {INPUT_DATA, "layer_1", /* dim= */ 4, /* paraSize= */ 0});
   config.layerConfig.add_inputs();
   config.layerConfig.add_inputs();
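
The numbers in this test follow from a hidden size of 4 (inferred from layer_1's dim): layer_0 carries the 3 * size = 12 wide transform, and the weight moved to input 0 has size * size * 3 = 48 elements. A quick sanity check:

```python
size = 4                         # inferred from layer_1's dim above
assert 3 * size == 12            # dim of layer_0 and config.biasSize
assert size * size * 3 == 48     # paraSize of the weight, now bound to input 0
```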
@@ -2996,7 +2996,7 @@ class GruStepLayer(LayerBase):
         config_assert(input_layer1.size == size,
                       'input_layer1.size != layer.size')
         self.config.active_gate_type = active_gate_type
-        self.create_input_parameter(1, size * size * 3, [size, size * 3])
+        self.create_input_parameter(0, size * size * 3, [size, size * 3])
         self.create_bias_parameter(bias, size * 3)
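
The input index passed to create_input_parameter also determines the auto-generated parameter name when param_attr gives no explicit one, which is why the generated parameter in the protostr hunks below is renamed from the .w1 suffix to .w0. A hypothetical helper illustrating the assumed naming pattern (the real logic lives in config_parser.py):

```python
def default_input_param_name(layer_name, input_index):
    # Assumed pattern, matching names such as
    # "___gru_group_0__@__gru_group_0___recurrent_group.w0" below.
    return "_%s.w%d" % (layer_name, input_index)
```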
@@ -2706,6 +2706,9 @@ def gru_step_layer(input,
     :param name:
     :param gate_act:
     :param bias_attr:
+    :param param_attr: the parameter attribute for the weight that transforms
+                       the output_mem from the previous step. It is grouped with
+                       input instead, for backward model compatibility.
     :param layer_attr:
     :return: LayerOutput object.
     :rtype: LayerOutput
@@ -2716,7 +2719,7 @@ def gru_step_layer(input,
     Layer(
         name=name,
         type=LayerType.GRU_STEP_LAYER,
-        inputs=[input.name, Input(output_mem.name, **param_attr.attr)],
+        inputs=[Input(input.name, **param_attr.attr), output_mem.name],
         bias=ParamAttr.to_bias(bias_attr),
         size=size,
         active_type=act.name,
@@ -51,10 +51,10 @@ layers {
   active_type: "tanh"
   inputs {
     input_layer_name: "__simple_gru_0___transform@__simple_gru_0___recurrent_group"
+    input_parameter_name: "gru_param"
   }
   inputs {
     input_layer_name: "__simple_gru_0__+delay1@__simple_gru_0___recurrent_group"
-    input_parameter_name: "gru_param"
   }
   bias_parameter_name: "gru_bias"
   active_gate_type: "sigmoid"
@@ -105,10 +105,10 @@ layers {
   active_type: "tanh"
   inputs {
     input_layer_name: "__simple_gru_1___transform@__simple_gru_1___recurrent_group"
+    input_parameter_name: "gru_param"
   }
   inputs {
     input_layer_name: "__simple_gru_1__+delay1@__simple_gru_1___recurrent_group"
-    input_parameter_name: "gru_param"
   }
   bias_parameter_name: "gru_bias"
   active_gate_type: "sigmoid"
@@ -307,10 +307,10 @@ layers {
   active_type: "tanh"
   inputs {
     input_layer_name: "__mixed_1__@__gru_group_0___recurrent_group"
+    input_parameter_name: "___gru_group_0__@__gru_group_0___recurrent_group.w0"
   }
   inputs {
     input_layer_name: "__gru_group_0__+delay1@__gru_group_0___recurrent_group"
-    input_parameter_name: "___gru_group_0__@__gru_group_0___recurrent_group.w1"
   }
   bias_parameter_name: "___gru_group_0__@__gru_group_0___recurrent_group.wbias"
   active_gate_type: "sigmoid"
@@ -462,7 +462,7 @@ parameters {
   initial_smart: false
 }
 parameters {
-  name: "___gru_group_0__@__gru_group_0___recurrent_group.w1"
+  name: "___gru_group_0__@__gru_group_0___recurrent_group.w0"
   size: 30000
   initial_mean: 0.0
   initial_std: 0.1