diff --git a/paddle/gserver/layers/GruStepLayer.cpp b/paddle/gserver/layers/GruStepLayer.cpp
index ce692c490881c9330b8113185bbec98b8dfd63e0..4a1006aa941f396c233a0cecfc38228f1f9fafe1 100644
--- a/paddle/gserver/layers/GruStepLayer.cpp
+++ b/paddle/gserver/layers/GruStepLayer.cpp
@@ -68,8 +68,8 @@ bool GruStepLayer::init(const LayerMap& layerMap,
   if (!Layer::init(layerMap, parameterMap)) return false;
   CHECK_EQ(2U, inputLayers_.size());
 
-  CHECK_EQ(getSize() * getSize() * 3, parameters_[1]->getSize());
-  weight_.reset(new Weight(getSize(), getSize() * 3, parameters_[1]));
+  CHECK_EQ(getSize() * getSize() * 3, parameters_[0]->getSize());
+  weight_.reset(new Weight(getSize(), getSize() * 3, parameters_[0]));
 
   if (biasParameter_.get() != NULL) {
     CHECK_EQ(getSize() * 3, biasParameter_->getSize());
diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp
index 813a99d8cdae2e68473280ffc6e2370c02c9ec6c..8c8e876bd64fb97e11bc04c26ec45358f3f808a1 100644
--- a/paddle/gserver/tests/test_LayerGrad.cpp
+++ b/paddle/gserver/tests/test_LayerGrad.cpp
@@ -1404,9 +1404,9 @@ TEST(Layer, GruStepLayer) {
   config.biasSize = 12;
 
   config.inputDefs.push_back(
-      {INPUT_DATA, "layer_0", /* dim= */ 12, /* paraSize= */ 0});
+      {INPUT_DATA, "layer_0", /* dim= */ 12, /* paraSize= */ 48});
   config.inputDefs.push_back(
-      {INPUT_DATA, "layer_1", /* dim= */ 4, /* paraSize= */ 48});
+      {INPUT_DATA, "layer_1", /* dim= */ 4, /* paraSize= */ 0});
   config.layerConfig.add_inputs();
   config.layerConfig.add_inputs();
 
diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py
index 4fbf076ae98c4d982329de1c1af7232c76ef8912..6701eced60d068312a1a866a6312002f9f5207f7 100644
--- a/python/paddle/trainer/config_parser.py
+++ b/python/paddle/trainer/config_parser.py
@@ -2996,7 +2996,7 @@ class GruStepLayer(LayerBase):
         config_assert(input_layer1.size == size,
                       'input_layer1.size != layer.size')
         self.config.active_gate_type = active_gate_type
-        self.create_input_parameter(1, size * size * 3, [size, size * 3])
+        self.create_input_parameter(0, size * size * 3, [size, size * 3])
         self.create_bias_parameter(bias, size * 3)
 
 
diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py
index f0b5d7c3b4f26a723d7431eb1bfaa3a6eaec944f..770d6303c1358796c7d9337447b3e79f5b8ad5ba 100644
--- a/python/paddle/trainer_config_helpers/layers.py
+++ b/python/paddle/trainer_config_helpers/layers.py
@@ -2706,6 +2706,9 @@ def gru_step_layer(input,
     :param name:
     :param gate_act:
     :param bias_attr:
+    :param param_attr: the parameter attribute for transforming the output_mem
+                       from the previous step. For backward model compatibility,
+                       it is grouped with the input instead of with output_mem.
     :param layer_attr:
     :return: LayerOutput object.
     :rtype: LayerOutput
@@ -2716,7 +2719,7 @@
     Layer(
         name=name,
         type=LayerType.GRU_STEP_LAYER,
-        inputs=[input.name, Input(output_mem.name, **param_attr.attr)],
+        inputs=[Input(input.name, **param_attr.attr), output_mem.name],
         bias=ParamAttr.to_bias(bias_attr),
         size=size,
         active_type=act.name,
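
Note on the two Python-side changes above: the recurrent weight now binds to the
first input (the step input, already projected to 3 * size), not to output_mem,
matching the C++ layer's use of parameters_[0]. A minimal usage sketch follows,
assuming the trainer_config_helpers API; the names 'gru_out', 'gru_param', and
'gru_bias' are illustrative, and in practice this step function would run inside
a recurrent_group:

    from paddle.trainer_config_helpers import *

    def gru_step(ipt, mem_size):
        # Hidden state carried over from the previous time step.
        out_mem = memory(name='gru_out', size=mem_size)
        return gru_step_layer(
            name='gru_out',
            input=ipt,                 # dim must equal 3 * mem_size
            output_mem=out_mem,
            size=mem_size,
            # After this patch the weight lands on inputs[0]: the config
            # emits Input(input.name, **param_attr.attr) for the step input.
            param_attr=ParamAttr(name='gru_param'),
            bias_attr=ParamAttr(name='gru_bias'))

This is also why the gradient test above now declares paraSize 48 on layer_0
(dim 12 = 3 * 4, and 4 * 4 * 3 = 48) and paraSize 0 on layer_1.
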
diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/shared_gru.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/shared_gru.protostr
index c0868713ebbbc16c6cd3e557cb2af039cea70e0b..b6905824f0cb090375a38ff67e39fc626df0b2f6 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/protostr/shared_gru.protostr
+++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/shared_gru.protostr
@@ -51,10 +51,10 @@ layers {
   active_type: "tanh"
   inputs {
     input_layer_name: "__simple_gru_0___transform@__simple_gru_0___recurrent_group"
+    input_parameter_name: "gru_param"
   }
   inputs {
     input_layer_name: "__simple_gru_0__+delay1@__simple_gru_0___recurrent_group"
-    input_parameter_name: "gru_param"
   }
   bias_parameter_name: "gru_bias"
   active_gate_type: "sigmoid"
@@ -105,10 +105,10 @@ layers {
   active_type: "tanh"
   inputs {
     input_layer_name: "__simple_gru_1___transform@__simple_gru_1___recurrent_group"
+    input_parameter_name: "gru_param"
   }
   inputs {
     input_layer_name: "__simple_gru_1__+delay1@__simple_gru_1___recurrent_group"
-    input_parameter_name: "gru_param"
   }
   bias_parameter_name: "gru_bias"
   active_gate_type: "sigmoid"
diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_rnn_group.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_rnn_group.protostr
index c1d39f77295892afc5ae12ec3f7c7c4ab6d5c0c2..3e9d28416ed5066461e960f0a9f085e057c28346 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_rnn_group.protostr
+++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_rnn_group.protostr
@@ -307,10 +307,10 @@ layers {
   active_type: "tanh"
   inputs {
     input_layer_name: "__mixed_1__@__gru_group_0___recurrent_group"
+    input_parameter_name: "___gru_group_0__@__gru_group_0___recurrent_group.w0"
   }
   inputs {
     input_layer_name: "__gru_group_0__+delay1@__gru_group_0___recurrent_group"
-    input_parameter_name: "___gru_group_0__@__gru_group_0___recurrent_group.w1"
   }
   bias_parameter_name: "___gru_group_0__@__gru_group_0___recurrent_group.wbias"
   active_gate_type: "sigmoid"
@@ -462,7 +462,7 @@ parameters {
   initial_smart: false
 }
 parameters {
-  name: "___gru_group_0__@__gru_group_0___recurrent_group.w1"
+  name: "___gru_group_0__@__gru_group_0___recurrent_group.w0"
   size: 30000
   initial_mean: 0.0
   initial_std: 0.1
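
The protostr updates above are the regenerated golden files for the config
tests: the parameter name moves from the second input (suffix .w1) to the first
(suffix .w0), because create_input_parameter now uses index 0. A quick sanity
check of a config against the new layout, as a sketch assuming the legacy
parse_config entry point and the shared_gru test config path:

    from paddle.trainer.config_parser import parse_config

    conf = parse_config(
        'python/paddle/trainer_config_helpers/tests/configs/shared_gru.py', '')
    for layer in conf.model_config.layers:
        if layer.type == 'gru_step':
            # The weight must now sit on inputs[0] (the transformed input);
            # inputs[1] (the output_mem recurrence) carries no parameter.
            assert layer.inputs[0].input_parameter_name == 'gru_param'
            assert not layer.inputs[1].input_parameter_name
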