Commit da7c0f13 authored by Yu Yang

Format sequence_nest_rnn_multi_unequalength*.conf

Parent 0c981164
gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.conf
@@ -16,11 +16,11 @@
 from paddle.trainer_config_helpers import *
 ######################## data source ################################
-define_py_data_sources2(train_list='gserver/tests/Sequence/dummy.list',
-                        test_list=None,
-                        module='rnn_data_provider',
-                        obj='process_unequalength_subseq')
+define_py_data_sources2(
+    train_list='gserver/tests/Sequence/dummy.list',
+    test_list=None,
+    module='rnn_data_provider',
+    obj='process_unequalength_subseq')
 settings(batch_size=2, learning_rate=0.01)
 ######################## network configure ################################
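Both config files get the same treatment throughout this commit: calls that used to hang their arguments off the opening parenthesis now break directly after it, with one keyword argument per line at a four-space indent and no spaces around '=' in keyword arguments. This layout matches what an automatic formatter such as yapf produces; below is a sketch of reproducing it programmatically, assuming yapf is installed ('pep8' is an assumed style choice, not a record of how this commit was actually generated):

from yapf.yapflib.yapf_api import FormatCode

old_src = (
    "define_py_data_sources2(train_list='gserver/tests/Sequence/dummy.list',\n"
    "                        test_list=None,\n"
    "                        module='rnn_data_provider',\n"
    "                        obj='process_unequalength_subseq')\n")

# FormatCode returns (formatted_source, changed).
formatted, changed = FormatCode(old_src, style_config='pep8')
print(formatted)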
@@ -38,46 +38,46 @@ emb2 = embedding_layer(input=speaker2, size=word_dim)
 # This hierachical RNN is designed to be equivalent to the simple RNN in
 # sequence_rnn_multi_unequalength_inputs.conf
 def outer_step(x1, x2):
-    outer_mem1 = memory(name = "outer_rnn_state1", size = hidden_dim)
-    outer_mem2 = memory(name = "outer_rnn_state2", size = hidden_dim)
+    outer_mem1 = memory(name="outer_rnn_state1", size=hidden_dim)
+    outer_mem2 = memory(name="outer_rnn_state2", size=hidden_dim)

     def inner_step1(y):
-        inner_mem = memory(name = 'inner_rnn_state_' + y.name,
-                           size = hidden_dim,
-                           boot_layer = outer_mem1)
-        out = fc_layer(input = [y, inner_mem],
-                       size = hidden_dim,
-                       act = TanhActivation(),
-                       bias_attr = True,
-                       name = 'inner_rnn_state_' + y.name)
+        inner_mem = memory(
+            name='inner_rnn_state_' + y.name,
+            size=hidden_dim,
+            boot_layer=outer_mem1)
+        out = fc_layer(
+            input=[y, inner_mem],
+            size=hidden_dim,
+            act=TanhActivation(),
+            bias_attr=True,
+            name='inner_rnn_state_' + y.name)
         return out

     def inner_step2(y):
-        inner_mem = memory(name = 'inner_rnn_state_' + y.name,
-                           size = hidden_dim,
-                           boot_layer = outer_mem2)
-        out = fc_layer(input = [y, inner_mem],
-                       size = hidden_dim,
-                       act = TanhActivation(),
-                       bias_attr = True,
-                       name = 'inner_rnn_state_' + y.name)
+        inner_mem = memory(
+            name='inner_rnn_state_' + y.name,
+            size=hidden_dim,
+            boot_layer=outer_mem2)
+        out = fc_layer(
+            input=[y, inner_mem],
+            size=hidden_dim,
+            act=TanhActivation(),
+            bias_attr=True,
+            name='inner_rnn_state_' + y.name)
         return out

-    encoder1 = recurrent_group(
-        step = inner_step1,
-        name = 'inner1',
-        input = x1)
-    encoder2 = recurrent_group(
-        step = inner_step2,
-        name = 'inner2',
-        input = x2)
+    encoder1 = recurrent_group(step=inner_step1, name='inner1', input=x1)
+    encoder2 = recurrent_group(step=inner_step2, name='inner2', input=x2)

-    sentence_last_state1 = last_seq(input = encoder1, name = 'outer_rnn_state1')
-    sentence_last_state2_ = last_seq(input = encoder2, name = 'outer_rnn_state2')
-    encoder1_expand = expand_layer(input = sentence_last_state1,
-                                   expand_as = encoder2)
+    sentence_last_state1 = last_seq(input=encoder1, name='outer_rnn_state1')
+    sentence_last_state2_ = last_seq(input=encoder2, name='outer_rnn_state2')
+    encoder1_expand = expand_layer(
+        input=sentence_last_state1, expand_as=encoder2)

     return [encoder1_expand, encoder2]
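In this nested design each inner recurrent_group encodes one subsequence; last_seq then picks out the final hidden state, and expand_layer repeats that single vector so it aligns timestep-for-timestep with the other encoded sequence, which may have a different length. A minimal numpy sketch of those two semantics (the function names are illustrative, not PaddlePaddle API; it assumes last_seq takes the final element and expand_as tiles to the target's length):

import numpy as np

def last_seq(seq):           # (T, hidden_dim) -> (hidden_dim,)
    return seq[-1]

def expand_as(vec, target):  # tile one vector to the target's length
    return np.tile(vec, (target.shape[0], 1))

encoder1 = np.random.randn(3, 4)  # 3 steps, hidden_dim = 4
encoder2 = np.random.randn(5, 4)  # 5 steps: unequal lengths
aligned = expand_as(last_seq(encoder1), encoder2)
assert aligned.shape == encoder2.shape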
@@ -88,19 +88,20 @@ encoder1_rep, encoder2_rep = recurrent_group(
     input=[SubsequenceInput(emb1), SubsequenceInput(emb2)],
     targetInlink=emb2)
-encoder1_last = last_seq(input = encoder1_rep)
-encoder1_expandlast = expand_layer(input = encoder1_last,
-                                   expand_as = encoder2_rep)
-context = mixed_layer(input = [identity_projection(encoder1_expandlast),
-                               identity_projection(encoder2_rep)],
-                      size = hidden_dim)
+encoder1_last = last_seq(input=encoder1_rep)
+encoder1_expandlast = expand_layer(input=encoder1_last, expand_as=encoder2_rep)
+context = mixed_layer(
+    input=[
+        identity_projection(encoder1_expandlast),
+        identity_projection(encoder2_rep)
+    ],
+    size=hidden_dim)
 rep = last_seq(input=context)
-prob = fc_layer(size=label_dim,
-                input=rep,
-                act=SoftmaxActivation(),
-                bias_attr=True)
-outputs(classification_cost(input=prob,
-                            label=data_layer(name="label", size=label_dim)))
+prob = fc_layer(
+    size=label_dim, input=rep, act=SoftmaxActivation(), bias_attr=True)
+outputs(
+    classification_cost(
+        input=prob, label=data_layer(
+            name="label", size=label_dim)))
gserver/tests/sequence_rnn_multi_unequalength_inputs.conf
@@ -16,11 +16,11 @@
 from paddle.trainer_config_helpers import *
 ######################## data source ################################
-define_py_data_sources2(train_list='gserver/tests/Sequence/dummy.list',
-                        test_list=None,
-                        module='rnn_data_provider',
-                        obj='process_unequalength_seq')
+define_py_data_sources2(
+    train_list='gserver/tests/Sequence/dummy.list',
+    test_list=None,
+    module='rnn_data_provider',
+    obj='process_unequalength_seq')
 settings(batch_size=2, learning_rate=0.01)
 ######################## network configure ################################
@@ -38,38 +38,40 @@ emb2 = embedding_layer(input=speaker2, size=word_dim)
 # This hierachical RNN is designed to be equivalent to the RNN in
 # sequence_nest_rnn_multi_unequalength_inputs.conf
 def step(x1, x2):
     def calrnn(y):
-        mem = memory(name = 'rnn_state_' + y.name, size = hidden_dim)
-        out = fc_layer(input = [y, mem],
-                       size = hidden_dim,
-                       act = TanhActivation(),
-                       bias_attr = True,
-                       name = 'rnn_state_' + y.name)
+        mem = memory(name='rnn_state_' + y.name, size=hidden_dim)
+        out = fc_layer(
+            input=[y, mem],
+            size=hidden_dim,
+            act=TanhActivation(),
+            bias_attr=True,
+            name='rnn_state_' + y.name)
         return out

     encoder1 = calrnn(x1)
     encoder2 = calrnn(x2)
     return [encoder1, encoder2]

 encoder1_rep, encoder2_rep = recurrent_group(
-    name="stepout",
-    step=step,
-    input=[emb1, emb2])
+    name="stepout", step=step, input=[emb1, emb2])

-encoder1_last = last_seq(input = encoder1_rep)
-encoder1_expandlast = expand_layer(input = encoder1_last,
-                                   expand_as = encoder2_rep)
-context = mixed_layer(input = [identity_projection(encoder1_expandlast),
-                               identity_projection(encoder2_rep)],
-                      size = hidden_dim)
+encoder1_last = last_seq(input=encoder1_rep)
+encoder1_expandlast = expand_layer(input=encoder1_last, expand_as=encoder2_rep)
+context = mixed_layer(
+    input=[
+        identity_projection(encoder1_expandlast),
+        identity_projection(encoder2_rep)
+    ],
+    size=hidden_dim)
 rep = last_seq(input=context)
-prob = fc_layer(size=label_dim,
-                input=rep,
-                act=SoftmaxActivation(),
-                bias_attr=True)
-outputs(classification_cost(input=prob,
-                            label=data_layer(name="label", size=label_dim)))
+prob = fc_layer(
+    size=label_dim, input=rep, act=SoftmaxActivation(), bias_attr=True)
+outputs(
+    classification_cost(
+        input=prob, label=data_layer(
+            name="label", size=label_dim)))
paddle/gserver/tests/test_RecurrentGradientMachine.cpp
@@ -13,12 +13,12 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include <gtest/gtest.h>
-#include <paddle/utils/Util.h>
-#include <paddle/utils/Version.h>
-#include <paddle/utils/PythonUtil.h>
+#include <paddle/gserver/gradientmachines/GradientMachine.h>
 #include <paddle/trainer/Trainer.h>
 #include <paddle/trainer/TrainerInternal.h>
-#include <paddle/gserver/gradientmachines/GradientMachine.h>
+#include <paddle/utils/PythonUtil.h>
+#include <paddle/utils/Util.h>
+#include <paddle/utils/Version.h>
 P_DECLARE_int32(seed);
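The only change here is ordering: the includes below <gtest/gtest.h> are now sorted alphabetically, which is what an include-sorting formatter (e.g. clang-format with SortIncludes enabled) produces; that tooling inference is not stated in the commit itself.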
@@ -45,10 +45,9 @@ public:
     auto p = const_cast<TrainerForTest*>(this);
     auto& params = p->getGradientMachine()->getParameters();
     return std::accumulate(
-        params.begin(),
-        params.end(),
-        0UL,
-        [](size_t a, const ParameterPtr& p) { return a + p->getSize(); });
+        params.begin(), params.end(), 0UL, [](size_t a, const ParameterPtr& p) {
+          return a + p->getSize();
+        });
   }
 };
@@ -148,8 +147,8 @@ TEST(RecurrentGradientMachine, rnn_multi_input) {
 TEST(RecurrentGradientMachine, rnn_multi_unequalength_input) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_rnn_multi_unequalength_inputs.conf",
-         "gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.conf",
+    test("gserver/tests/sequence_rnn_multi_unequalength_inputs.py",
+         "gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py",
          1e-6,
          useGpu);
   }
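This test drives the flat config and the nested config through the same run and requires their results to agree within 1e-6, on both CPU and GPU. Conceptually it is a numerical equivalence check between two implementations of one computation; a minimal sketch of that testing idea in plain numpy (illustrative only, not the Paddle test harness):

import numpy as np
from functools import reduce

def check_equivalent(impl_a, impl_b, inputs, tol=1e-6):
    # Assert two implementations agree numerically on the same inputs.
    for x in inputs:
        assert np.allclose(impl_a(x), impl_b(x), atol=tol)

def rnn_loop(xs, w=0.5):    # explicit step-by-step recurrence
    h = 0.0
    for x in xs:
        h = np.tanh(w * (x + h))
    return h

def rnn_folded(xs, w=0.5):  # the same recurrence, written as a fold
    return reduce(lambda h, x: np.tanh(w * (x + h)), xs, 0.0)

check_equivalent(rnn_loop, rnn_folded, [np.array([0.1, 0.2, 0.3])])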