Commit da7c0f13 authored by Yu Yang

Format sequence_nest_rnn_multi_unequalength*.conf
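Besides reformatting these two equivalence-test configs, the commit reorders the includes in test_RecurrentGradientMachine.cpp and points the rnn_multi_unequalength_input test at the configs' new .py names. The style change itself (presumably applied with the project's Python formatter) replaces paren-aligned keyword arguments with spaced `=` by tight `=` and four-space continuation indents; a minimal before/after excerpt from the diff below:

    # before: spaces around '=', continuation lines aligned under the open paren
    encoder1 = recurrent_group(
        step = inner_step1,
        name = 'inner1',
        input = x1)

    # after: no spaces around '=', short calls collapsed onto a single line
    encoder1 = recurrent_group(step=inner_step1, name='inner1', input=x1)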

Parent 0c981164
gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py
@@ -16,12 +16,12 @@
 from paddle.trainer_config_helpers import *

 ######################## data source ################################
-define_py_data_sources2(train_list='gserver/tests/Sequence/dummy.list',
+define_py_data_sources2(
+    train_list='gserver/tests/Sequence/dummy.list',
     test_list=None,
     module='rnn_data_provider',
     obj='process_unequalength_subseq')

 settings(batch_size=2, learning_rate=0.01)

 ######################## network configure ################################
 dict_dim = 10
@@ -38,46 +38,46 @@ emb2 = embedding_layer(input=speaker2, size=word_dim)

 # This hierarchical RNN is designed to be equivalent to the simple RNN in
 # sequence_rnn_multi_unequalength_inputs.conf
 def outer_step(x1, x2):
-    outer_mem1 = memory(name = "outer_rnn_state1", size = hidden_dim)
-    outer_mem2 = memory(name = "outer_rnn_state2", size = hidden_dim)
+    outer_mem1 = memory(name="outer_rnn_state1", size=hidden_dim)
+    outer_mem2 = memory(name="outer_rnn_state2", size=hidden_dim)

     def inner_step1(y):
-        inner_mem = memory(name = 'inner_rnn_state_' + y.name,
-                           size = hidden_dim,
-                           boot_layer = outer_mem1)
-        out = fc_layer(input = [y, inner_mem],
-                       size = hidden_dim,
-                       act = TanhActivation(),
-                       bias_attr = True,
-                       name = 'inner_rnn_state_' + y.name)
+        inner_mem = memory(
+            name='inner_rnn_state_' + y.name,
+            size=hidden_dim,
+            boot_layer=outer_mem1)
+        out = fc_layer(
+            input=[y, inner_mem],
+            size=hidden_dim,
+            act=TanhActivation(),
+            bias_attr=True,
+            name='inner_rnn_state_' + y.name)
         return out

     def inner_step2(y):
-        inner_mem = memory(name = 'inner_rnn_state_' + y.name,
-                           size = hidden_dim,
-                           boot_layer = outer_mem2)
-        out = fc_layer(input = [y, inner_mem],
-                       size = hidden_dim,
-                       act = TanhActivation(),
-                       bias_attr = True,
-                       name = 'inner_rnn_state_' + y.name)
+        inner_mem = memory(
+            name='inner_rnn_state_' + y.name,
+            size=hidden_dim,
+            boot_layer=outer_mem2)
+        out = fc_layer(
+            input=[y, inner_mem],
+            size=hidden_dim,
+            act=TanhActivation(),
+            bias_attr=True,
+            name='inner_rnn_state_' + y.name)
         return out

-    encoder1 = recurrent_group(
-        step = inner_step1,
-        name = 'inner1',
-        input = x1)
+    encoder1 = recurrent_group(step=inner_step1, name='inner1', input=x1)

-    encoder2 = recurrent_group(
-        step = inner_step2,
-        name = 'inner2',
-        input = x2)
+    encoder2 = recurrent_group(step=inner_step2, name='inner2', input=x2)

-    sentence_last_state1 = last_seq(input = encoder1, name = 'outer_rnn_state1')
-    sentence_last_state2_ = last_seq(input = encoder2, name = 'outer_rnn_state2')
+    sentence_last_state1 = last_seq(input=encoder1, name='outer_rnn_state1')
+    sentence_last_state2_ = last_seq(input=encoder2, name='outer_rnn_state2')

-    encoder1_expand = expand_layer(input = sentence_last_state1,
-                                   expand_as = encoder2)
+    encoder1_expand = expand_layer(
+        input=sentence_last_state1, expand_as=encoder2)

     return [encoder1_expand, encoder2]
@@ -88,19 +88,20 @@ encoder1_rep, encoder2_rep = recurrent_group(
     input=[SubsequenceInput(emb1), SubsequenceInput(emb2)],
     targetInlink=emb2)

-encoder1_last = last_seq(input = encoder1_rep)
-encoder1_expandlast = expand_layer(input = encoder1_last,
-                                   expand_as = encoder2_rep)
-context = mixed_layer(input = [identity_projection(encoder1_expandlast),
-                               identity_projection(encoder2_rep)],
-                      size = hidden_dim)
+encoder1_last = last_seq(input=encoder1_rep)
+encoder1_expandlast = expand_layer(input=encoder1_last, expand_as=encoder2_rep)
+context = mixed_layer(
+    input=[
+        identity_projection(encoder1_expandlast),
+        identity_projection(encoder2_rep)
+    ],
+    size=hidden_dim)

 rep = last_seq(input=context)
-prob = fc_layer(size=label_dim,
-                input=rep,
-                act=SoftmaxActivation(),
-                bias_attr=True)
-outputs(classification_cost(input=prob,
-                            label=data_layer(name="label", size=label_dim)))
+prob = fc_layer(
+    size=label_dim, input=rep, act=SoftmaxActivation(), bias_attr=True)
+outputs(
+    classification_cost(
+        input=prob, label=data_layer(
+            name="label", size=label_dim)))
gserver/tests/sequence_rnn_multi_unequalength_inputs.py
@@ -16,12 +16,12 @@
 from paddle.trainer_config_helpers import *

 ######################## data source ################################
-define_py_data_sources2(train_list='gserver/tests/Sequence/dummy.list',
+define_py_data_sources2(
+    train_list='gserver/tests/Sequence/dummy.list',
     test_list=None,
     module='rnn_data_provider',
     obj='process_unequalength_seq')

 settings(batch_size=2, learning_rate=0.01)

 ######################## network configure ################################
 dict_dim = 10
@@ -38,38 +38,40 @@ emb2 = embedding_layer(input=speaker2, size=word_dim)

 # This simple RNN is designed to be equivalent to the hierarchical RNN in
 # sequence_nest_rnn_multi_unequalength_inputs.conf
 def step(x1, x2):
     def calrnn(y):
-        mem = memory(name = 'rnn_state_' + y.name, size = hidden_dim)
-        out = fc_layer(input = [y, mem],
-                       size = hidden_dim,
-                       act = TanhActivation(),
-                       bias_attr = True,
-                       name = 'rnn_state_' + y.name)
+        mem = memory(name='rnn_state_' + y.name, size=hidden_dim)
+        out = fc_layer(
+            input=[y, mem],
+            size=hidden_dim,
+            act=TanhActivation(),
+            bias_attr=True,
+            name='rnn_state_' + y.name)
         return out

     encoder1 = calrnn(x1)
     encoder2 = calrnn(x2)
     return [encoder1, encoder2]

 encoder1_rep, encoder2_rep = recurrent_group(
-    name="stepout",
-    step=step,
-    input=[emb1, emb2])
+    name="stepout", step=step, input=[emb1, emb2])

-encoder1_last = last_seq(input = encoder1_rep)
-encoder1_expandlast = expand_layer(input = encoder1_last,
-                                   expand_as = encoder2_rep)
-context = mixed_layer(input = [identity_projection(encoder1_expandlast),
-                               identity_projection(encoder2_rep)],
-                      size = hidden_dim)
+encoder1_last = last_seq(input=encoder1_rep)
+encoder1_expandlast = expand_layer(input=encoder1_last, expand_as=encoder2_rep)
+context = mixed_layer(
+    input=[
+        identity_projection(encoder1_expandlast),
+        identity_projection(encoder2_rep)
+    ],
+    size=hidden_dim)

 rep = last_seq(input=context)
-prob = fc_layer(size=label_dim,
-                input=rep,
-                act=SoftmaxActivation(),
-                bias_attr=True)
-outputs(classification_cost(input=prob,
-                            label=data_layer(name="label", size=label_dim)))
+prob = fc_layer(
+    size=label_dim, input=rep, act=SoftmaxActivation(), bias_attr=True)
+outputs(
+    classification_cost(
+        input=prob, label=data_layer(
+            name="label", size=label_dim)))
paddle/gserver/tests/test_RecurrentGradientMachine.cpp
@@ -13,12 +13,12 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include <gtest/gtest.h>
-#include <paddle/utils/Util.h>
-#include <paddle/utils/Version.h>
-#include <paddle/utils/PythonUtil.h>
+#include <paddle/gserver/gradientmachines/GradientMachine.h>
 #include <paddle/trainer/Trainer.h>
 #include <paddle/trainer/TrainerInternal.h>
-#include <paddle/gserver/gradientmachines/GradientMachine.h>
+#include <paddle/utils/PythonUtil.h>
+#include <paddle/utils/Util.h>
+#include <paddle/utils/Version.h>

 P_DECLARE_int32(seed);
@@ -45,10 +45,9 @@ public:
     auto p = const_cast<TrainerForTest*>(this);
     auto& params = p->getGradientMachine()->getParameters();
     return std::accumulate(
-        params.begin(),
-        params.end(),
-        0UL,
-        [](size_t a, const ParameterPtr& p) { return a + p->getSize(); });
+        params.begin(), params.end(), 0UL, [](size_t a, const ParameterPtr& p) {
+          return a + p->getSize();
+        });
   }
 };
@@ -148,8 +147,8 @@ TEST(RecurrentGradientMachine, rnn_multi_input) {
 TEST(RecurrentGradientMachine, rnn_multi_unequalength_input) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_rnn_multi_unequalength_inputs.conf",
-         "gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.conf",
+    test("gserver/tests/sequence_rnn_multi_unequalength_inputs.py",
+         "gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py",
          1e-6,
          useGpu);
   }