From e60d94b3d89233d8272a98a67cd980f6b837c40b Mon Sep 17 00:00:00 2001
From: qiaolongfei
Date: Fri, 3 Mar 2017 15:13:45 +0800
Subject: [PATCH] correct data_type

---
 demo/seqToseq/api_train_v2.py    | 12 ++++++------
 demo/seqToseq/seqToseq_net_v2.py |  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/demo/seqToseq/api_train_v2.py b/demo/seqToseq/api_train_v2.py
index efbab8d7b9..bdcf3a5af0 100644
--- a/demo/seqToseq/api_train_v2.py
+++ b/demo/seqToseq/api_train_v2.py
@@ -4,7 +4,8 @@ import paddle.v2 as paddle
 
 from seqToseq_net_v2 import seqToseq_net_v2
 
-### Data Definiation
+# Data Definition.
+# TODO: This code should be merged to dataset package.
 data_dir = "./data/pre-wmt14"
 src_lang_dict = os.path.join(data_dir, 'src.dict')
 trg_lang_dict = os.path.join(data_dir, 'trg.dict')
@@ -68,15 +69,14 @@ def train_reader(file_name):
 
 def main():
     paddle.init(use_gpu=False, trainer_count=1)
-    # reader = train_reader("data/pre-wmt14/train/train")
     # define network topology
     cost = seqToseq_net_v2(source_dict_dim, target_dict_dim)
     parameters = paddle.parameters.create(cost)
-    optimizer = paddle.optimizer.Adam(batch_size=50, learning_rate=5e-4)
+    optimizer = paddle.optimizer.Adam(learning_rate=1e-4)
 
     def event_handler(event):
         if isinstance(event, paddle.event.EndIteration):
-            if event.batch_id % 100 == 0:
+            if event.batch_id % 10 == 0:
                 print "Pass %d, Batch %d, Cost %f, %s" % (
                     event.pass_id, event.batch_id, event.cost,
                     event.metrics)
@@ -93,12 +93,12 @@
     trn_reader = paddle.reader.batched(
         paddle.reader.shuffle(
             train_reader("data/pre-wmt14/train/train"), buf_size=8192),
-        batch_size=10)
+        batch_size=10000)
 
     trainer.train(
         reader=trn_reader,
         event_handler=event_handler,
-        num_passes=10000,
+        num_passes=10,
         reader_dict=reader_dict)
 
 
diff --git a/demo/seqToseq/seqToseq_net_v2.py b/demo/seqToseq/seqToseq_net_v2.py
index 7e057e2440..1ac95686b4 100644
--- a/demo/seqToseq/seqToseq_net_v2.py
+++ b/demo/seqToseq/seqToseq_net_v2.py
@@ -14,7 +14,7 @@ def seqToseq_net_v2(source_dict_dim, target_dict_dim):
     #### Encoder
     src_word_id = layer.data(
         name='source_language_word',
-        type=data_type.dense_vector(source_dict_dim))
+        type=data_type.integer_value_sequence(source_dict_dim))
     src_embedding = layer.embedding(
         input=src_word_id,
         size=word_vector_dim,
@@ -67,7 +67,7 @@ def seqToseq_net_v2(source_dict_dim, target_dict_dim):
     trg_embedding = layer.embedding(
         input=layer.data(
             name='target_language_word',
-            type=data_type.dense_vector(target_dict_dim)),
+            type=data_type.integer_value_sequence(target_dict_dim)),
         size=word_vector_dim,
         param_attr=attr.ParamAttr(name='_target_language_embedding'))
     group_inputs.append(trg_embedding)
@@ -84,7 +84,7 @@ def seqToseq_net_v2(source_dict_dim, target_dict_dim):
 
     lbl = layer.data(
         name='target_language_next_word',
-        type=data_type.dense_vector(target_dict_dim))
+        type=data_type.integer_value_sequence(target_dict_dim))
 
     cost = layer.classification_cost(input=decoder, label=lbl)
     return cost
-- 
GitLab