Commit b4a8f0dc authored by: caoying03

refine NMT.

Parent 35d9ab1b
import sys import sys
import gzip
import numpy as np
import paddle.v2 as paddle import paddle.v2 as paddle
def save_model(parameters, save_path):
    """Serialize trained model parameters to a gzip-compressed tar archive.

    Args:
        parameters: a parameter collection exposing ``to_tar(file_obj)``
            (e.g. ``paddle.parameters.Parameters``).
        save_path: destination path for the ``.tar.gz`` archive.
    """
    # gzip.open in mode 'w' yields a binary file object, which is what
    # to_tar writes into; the context manager guarantees the file is
    # closed (and the gzip trailer flushed) even if to_tar raises.
    with gzip.open(save_path, 'w') as f:
        parameters.to_tar(f)
def seq_to_seq_net(source_dict_dim,
target_dict_dim,
is_generating,
beam_size=3,
max_length=250):
### Network Architecture ### Network Architecture
word_vector_dim = 512 # dimension of word vector word_vector_dim = 512 # dimension of word vector
decoder_size = 512 # dimension of hidden unit in GRU Decoder network decoder_size = 512 # dimension of hidden unit of GRU decoder
encoder_size = 512 # dimension of hidden unit in GRU Encoder network encoder_size = 512 # dimension of hidden unit of GRU encoder
beam_size = 3
max_length = 250
#### Encoder #### Encoder
src_word_id = paddle.layer.data( src_word_id = paddle.layer.data(
name='source_language_word', name='source_language_word',
type=paddle.data_type.integer_value_sequence(source_dict_dim)) type=paddle.data_type.integer_value_sequence(source_dict_dim))
src_embedding = paddle.layer.embedding( src_embedding = paddle.layer.embedding(
input=src_word_id, input=src_word_id, size=word_vector_dim)
size=word_vector_dim,
param_attr=paddle.attr.ParamAttr(name='_source_language_embedding'))
src_forward = paddle.networks.simple_gru( src_forward = paddle.networks.simple_gru(
input=src_embedding, size=encoder_size) input=src_embedding, size=encoder_size)
src_backward = paddle.networks.simple_gru( src_backward = paddle.networks.simple_gru(
...@@ -27,16 +33,19 @@ def seqToseq_net(source_dict_dim, target_dict_dim, is_generating=False): ...@@ -27,16 +33,19 @@ def seqToseq_net(source_dict_dim, target_dict_dim, is_generating=False):
encoded_vector = paddle.layer.concat(input=[src_forward, src_backward]) encoded_vector = paddle.layer.concat(input=[src_forward, src_backward])
#### Decoder #### Decoder
encoded_proj = paddle.layer.mixed( encoded_proj = paddle.layer.fc(
act=paddle.activation.Linear(),
size=decoder_size, size=decoder_size,
input=paddle.layer.full_matrix_projection(encoded_vector)) bias_attr=False,
input=encoded_vector)
backward_first = paddle.layer.first_seq(input=src_backward) backward_first = paddle.layer.first_seq(input=src_backward)
decoder_boot = paddle.layer.mixed( decoder_boot = paddle.layer.fc(
size=decoder_size, size=decoder_size,
act=paddle.activation.Tanh(), act=paddle.activation.Tanh(),
input=paddle.layer.full_matrix_projection(backward_first)) bias_attr=False,
input=backward_first)
def gru_decoder_with_attention(enc_vec, enc_proj, current_word): def gru_decoder_with_attention(enc_vec, enc_proj, current_word):
...@@ -48,12 +57,13 @@ def seqToseq_net(source_dict_dim, target_dict_dim, is_generating=False): ...@@ -48,12 +57,13 @@ def seqToseq_net(source_dict_dim, target_dict_dim, is_generating=False):
encoded_proj=enc_proj, encoded_proj=enc_proj,
decoder_state=decoder_mem) decoder_state=decoder_mem)
decoder_inputs = paddle.layer.mixed( decoder_inputs = paddle.layer.fc(
act=paddle.activation.Linear(),
size=decoder_size * 3, size=decoder_size * 3,
input=[ bias_attr=False,
paddle.layer.full_matrix_projection(input=context), input=[context, current_word],
paddle.layer.full_matrix_projection(input=current_word) layer_attr=paddle.attr.ExtraLayerAttribute(
]) error_clipping_threshold=100.0))
gru_step = paddle.layer.gru_step( gru_step = paddle.layer.gru_step(
name='gru_decoder', name='gru_decoder',
...@@ -61,16 +71,16 @@ def seqToseq_net(source_dict_dim, target_dict_dim, is_generating=False): ...@@ -61,16 +71,16 @@ def seqToseq_net(source_dict_dim, target_dict_dim, is_generating=False):
output_mem=decoder_mem, output_mem=decoder_mem,
size=decoder_size) size=decoder_size)
out = paddle.layer.mixed( out = paddle.layer.fc(
size=target_dict_dim, size=target_dict_dim,
bias_attr=True, bias_attr=True,
act=paddle.activation.Softmax(), act=paddle.activation.Softmax(),
input=paddle.layer.full_matrix_projection(input=gru_step)) input=gru_step)
return out return out
decoder_group_name = "decoder_group" decoder_group_name = 'decoder_group'
group_input1 = paddle.layer.StaticInput(input=encoded_vector, is_seq=True) group_input1 = paddle.layer.StaticInput(input=encoded_vector)
group_input2 = paddle.layer.StaticInput(input=encoded_proj, is_seq=True) group_input2 = paddle.layer.StaticInput(input=encoded_proj)
group_inputs = [group_input1, group_input2] group_inputs = [group_input1, group_input2]
if not is_generating: if not is_generating:
...@@ -100,13 +110,12 @@ def seqToseq_net(source_dict_dim, target_dict_dim, is_generating=False): ...@@ -100,13 +110,12 @@ def seqToseq_net(source_dict_dim, target_dict_dim, is_generating=False):
return cost return cost
else: else:
# In generation, the decoder predicts a next target word based on # In generation, the decoder predicts a next target word based on
# the encoded source sequence and the last generated target word. # the encoded source sequence and the previous generated target word.
# The encoded source sequence (encoder's output) must be specified by # The encoded source sequence (encoder's output) must be specified by
# StaticInput, which is a read-only memory. # StaticInput, which is a read-only memory.
# Embedding of the last generated word is automatically gotten by # Embedding of the previous generated word is automatically retrieved
# GeneratedInputs, which is initialized by a start mark, such as <s>, # by GeneratedInputs initialized by a start mark <s>.
# and must be included in generation.
trg_embedding = paddle.layer.GeneratedInput( trg_embedding = paddle.layer.GeneratedInput(
size=target_dict_dim, size=target_dict_dim,
...@@ -127,8 +136,8 @@ def seqToseq_net(source_dict_dim, target_dict_dim, is_generating=False): ...@@ -127,8 +136,8 @@ def seqToseq_net(source_dict_dim, target_dict_dim, is_generating=False):
def main(): def main():
paddle.init(use_gpu=False, trainer_count=1) paddle.init(use_gpu=True, trainer_count=1)
is_generating = False is_generating = True
# source and target dict dim. # source and target dict dim.
dict_size = 30000 dict_size = 30000
...@@ -136,32 +145,43 @@ def main(): ...@@ -136,32 +145,43 @@ def main():
# train the network # train the network
if not is_generating: if not is_generating:
cost = seqToseq_net(source_dict_dim, target_dict_dim)
parameters = paddle.parameters.create(cost)
# define optimize method and trainer # define optimize method and trainer
optimizer = paddle.optimizer.Adam( optimizer = paddle.optimizer.Adam(
learning_rate=5e-5, learning_rate=5e-5,
regularization=paddle.optimizer.L2Regularization(rate=8e-4)) regularization=paddle.optimizer.L2Regularization(rate=8e-4))
cost = seq_to_seq_net(source_dict_dim, target_dict_dim, is_generating)
parameters = paddle.parameters.create(cost)
trainer = paddle.trainer.SGD( trainer = paddle.trainer.SGD(
cost=cost, parameters=parameters, update_equation=optimizer) cost=cost, parameters=parameters, update_equation=optimizer)
# define data reader # define data reader
wmt14_reader = paddle.batch( wmt14_reader = paddle.batch(
paddle.reader.shuffle( paddle.reader.shuffle(
paddle.dataset.wmt14.train(dict_size), buf_size=8192), paddle.dataset.wmt14.train(dict_size), buf_size=8192),
batch_size=5) batch_size=4)
# define event_handler callback # define event_handler callback
def event_handler(event): def event_handler(event):
if isinstance(event, paddle.event.EndIteration): if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 10 == 0: if event.batch_id % 10 == 0:
print "\nPass %d, Batch %d, Cost %f, %s" % ( print("\nPass %d, Batch %d, Cost %f, %s" %
event.pass_id, event.batch_id, event.cost, (event.pass_id, event.batch_id, event.cost,
event.metrics) event.metrics))
else: else:
sys.stdout.write('.') sys.stdout.write('.')
sys.stdout.flush() sys.stdout.flush()
if not event.batch_id % 10:
save_path = 'params_pass_%05d_batch_%05d.tar.gz' % (
event.pass_id, event.batch_id)
save_model(parameters, save_path)
if isinstance(event, paddle.event.EndPass):
# save parameters
save_path = 'params_pass_%05d.tar.gz' % (event.pass_id)
save_model(parameters, save_path)
# start to train # start to train
trainer.train( trainer.train(
reader=wmt14_reader, event_handler=event_handler, num_passes=2) reader=wmt14_reader, event_handler=event_handler, num_passes=2)
...@@ -169,17 +189,20 @@ def main(): ...@@ -169,17 +189,20 @@ def main():
# generate a english sequence to french # generate a english sequence to french
else: else:
# use the first 3 samples for generation # use the first 3 samples for generation
gen_creator = paddle.dataset.wmt14.gen(dict_size)
gen_data = [] gen_data = []
gen_num = 3 gen_num = 3
for item in gen_creator(): for item in paddle.dataset.wmt14.gen(dict_size)():
gen_data.append((item[0], )) gen_data.append([item[0]])
if len(gen_data) == gen_num: if len(gen_data) == gen_num:
break break
beam_gen = seqToseq_net(source_dict_dim, target_dict_dim, is_generating) beam_size = 3
# get the pretrained model, whose bleu = 26.92 beam_gen = seq_to_seq_net(source_dict_dim, target_dict_dim,
is_generating, beam_size)
# get the trained model, whose bleu = 26.92
parameters = paddle.dataset.wmt14.model() parameters = paddle.dataset.wmt14.model()
# prob is the prediction probabilities, and id is the prediction word. # prob is the prediction probabilities, and id is the prediction word.
beam_result = paddle.infer( beam_result = paddle.infer(
output_layer=beam_gen, output_layer=beam_gen,
...@@ -187,28 +210,25 @@ def main(): ...@@ -187,28 +210,25 @@ def main():
input=gen_data, input=gen_data,
field=['prob', 'id']) field=['prob', 'id'])
# get the dictionary # load the dictionary
src_dict, trg_dict = paddle.dataset.wmt14.get_dict(dict_size) src_dict, trg_dict = paddle.dataset.wmt14.get_dict(dict_size)
# the delimited element of generated sequences is -1, gen_sen_idx = np.where(beam_result[1] == -1)[0]
# the first element of each generated sequence is the sequence length assert len(gen_sen_idx) == len(gen_data) * beam_size
seq_list = []
seq = [] # -1 is the delimiter of generated sequences.
for w in beam_result[1]: # the first element of each generated sequence its length.
if w != -1: start_pos, end_pos = 1, 0
seq.append(w) for i, sample in enumerate(gen_data):
else: print(
seq_list.append(' '.join([trg_dict.get(w) for w in seq[1:]])) " ".join([src_dict[w] for w in sample[0][1:-1]])
seq = [] ) # skip the start and ending mark when printing the source sentence
prob = beam_result[0]
beam_size = 3
for i in xrange(gen_num):
print "\n*******************************************************\n"
print "src:", ' '.join(
[src_dict.get(w) for w in gen_data[i][0]]), "\n"
for j in xrange(beam_size): for j in xrange(beam_size):
print "prob = %f:" % (prob[i][j]), seq_list[i * beam_size + j] end_pos = gen_sen_idx[i * beam_size + j]
print("%.4f\t%s" % (beam_result[0][i][j], " ".join(
trg_dict[w] for w in beam_result[1][start_pos:end_pos])))
start_pos = end_pos + 2
print("\n")
if __name__ == '__main__': if __name__ == '__main__':
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册