From 054ba61467c7970acab2221e58815f51f819ebf6 Mon Sep 17 00:00:00 2001
From: LiuChiaChi <709153940@qq.com>
Date: Wed, 23 Sep 2020 02:26:28 +0000
Subject: [PATCH] correct some args

---
 dygraph/seq2seq/args.py            |  6 ------
 dygraph/seq2seq/attention_model.py |  4 +---
 dygraph/seq2seq/reader.py          | 17 +++++++++--------
 dygraph/seq2seq/run.sh             |  8 ++++----
 4 files changed, 14 insertions(+), 21 deletions(-)

diff --git a/dygraph/seq2seq/args.py b/dygraph/seq2seq/args.py
index b754702e..df787c8c 100644
--- a/dygraph/seq2seq/args.py
+++ b/dygraph/seq2seq/args.py
@@ -34,12 +34,6 @@ def parse_args():
     parser.add_argument("--src_lang", type=str, help="source language suffix")
     parser.add_argument("--tar_lang", type=str, help="target language suffix")
 
-    parser.add_argument(
-        "--attention",
-        type=eval,
-        default=False,
-        help="Whether use attention model")
-
     parser.add_argument(
         "--optimizer",
         type=str,
diff --git a/dygraph/seq2seq/attention_model.py b/dygraph/seq2seq/attention_model.py
index bae46d47..7fc2e337 100644
--- a/dygraph/seq2seq/attention_model.py
+++ b/dygraph/seq2seq/attention_model.py
@@ -28,13 +28,12 @@ class AttentionModel(Layer):
                  trg_vocab_size,
                  num_layers=1,
                  init_scale=0.1,
-                 padding_idx=2,
+                 padding_idx=0,
                  dropout=None,
                  beam_size=1,
                  beam_start_token=1,
                  beam_end_token=2,
                  beam_max_step_num=100,
-                 mode='train',
                  dtype="float32"):
         super(AttentionModel, self).__init__()
         self.hidden_size = hidden_size
@@ -47,7 +46,6 @@ class AttentionModel(Layer):
         self.beam_start_token = beam_start_token
         self.beam_end_token = beam_end_token
         self.beam_max_step_num = beam_max_step_num
-        self.mode = mode
         self.kinf = 1e9
 
         self.encoder = Encoder(src_vocab_size, hidden_size, num_layers,
diff --git a/dygraph/seq2seq/reader.py b/dygraph/seq2seq/reader.py
index c76fe7cf..907cf262 100644
--- a/dygraph/seq2seq/reader.py
+++ b/dygraph/seq2seq/reader.py
@@ -125,14 +125,14 @@ def raw_data(src_lang,
     src_vocab = _build_vocab(src_vocab_file)
     tar_vocab = _build_vocab(tar_vocab_file)
 
-    train_src, train_tar = _para_file_to_ids( src_train_file, tar_train_file, \
-                                              src_vocab, tar_vocab )
+    train_src, train_tar = _para_file_to_ids(src_train_file, tar_train_file, \
+                                             src_vocab, tar_vocab)
     train_src, train_tar = filter_len(
         train_src, train_tar, max_sequence_len=max_sequence_len)
-    eval_src, eval_tar = _para_file_to_ids( src_eval_file, tar_eval_file, \
-                                            src_vocab, tar_vocab )
+    eval_src, eval_tar = _para_file_to_ids(src_eval_file, tar_eval_file, \
+                                           src_vocab, tar_vocab)
 
-    test_src, test_tar = _para_file_to_ids( src_test_file, tar_test_file, \
+    test_src, test_tar = _para_file_to_ids(src_test_file, tar_test_file, \
                                             src_vocab, tar_vocab )
     return (train_src, train_tar), (eval_src, eval_tar), (test_src, test_tar),\
@@ -143,8 +143,8 @@ def raw_mono_data(vocab_file, file_path):
 
     src_vocab = _build_vocab(vocab_file)
 
-    test_src, test_tar = _para_file_to_ids( file_path, file_path, \
-                                            src_vocab, src_vocab )
+    test_src, test_tar = _para_file_to_ids(file_path, file_path, \
+                                           src_vocab, src_vocab)
 
     return (test_src, test_tar)
 
@@ -160,7 +160,8 @@ class IWSLTDataset(Dataset):
         src_data, trg_data = raw_data
         data_pair = []
         for src, trg in zip(src_data, trg_data):
-            data_pair.append([src, trg])
+            if len(src) > 0:
+                data_pair.append([src, trg])
 
         sorted_data_pair = sorted(data_pair, key=lambda k: len(k[0]))
         src_data = [data_pair[0] for data_pair in sorted_data_pair]
diff --git a/dygraph/seq2seq/run.sh b/dygraph/seq2seq/run.sh
index 8005ec26..d8eb0800 100644
--- a/dygraph/seq2seq/run.sh
+++ b/dygraph/seq2seq/run.sh
@@ -3,13 +3,12 @@ export CUDA_VISIBLE_DEVICES=0
 
 python train.py \
     --src_lang en --tar_lang vi \
-    --attention True \
     --num_layers 2 \
     --hidden_size 512 \
     --src_vocab_size 17191 \
     --tar_vocab_size 7709 \
     --batch_size 128 \
-    --dropout 0.0 \
+    --dropout 0.2 \
     --init_scale 0.2 \
     --max_grad_norm 5.0 \
     --train_data_prefix data/en-vi/train \
@@ -20,6 +19,7 @@ python train.py \
     --model_path attention_models \
     --enable_ce \
     --learning_rate 0.002 \
-    --dtype float64 \
+    --dtype float32 \
     --optimizer adam \
-    --max_epoch 1
+    --max_epoch 12 \
+    --padding_idx 2
--
GitLab
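
Note: the updated run.sh passes --padding_idx 2 and --dtype float32, but the hunks above do not show where a --padding_idx flag is declared or consumed. Below is a minimal sketch of how such a flag could be exposed in parse_args() and handed to AttentionModel, assuming the flag name matches the script; the default value, help text, and the wiring comment are illustrative assumptions, not part of the patch.

import argparse

def parse_args():
    # Sketch only: args.py may already declare these outside the hunks shown above.
    parser = argparse.ArgumentParser(description="seq2seq training args (sketch)")
    parser.add_argument(
        "--padding_idx",
        type=int,
        default=0,  # matches the new AttentionModel default; run.sh passes 2
        help="token id used for padding")
    parser.add_argument(
        "--dtype",
        type=str,
        default="float32",
        help="data type used by the model")
    return parser.parse_args()

# Hypothetical wiring in train.py:
#   args = parse_args()
#   model = AttentionModel(..., padding_idx=args.padding_idx, dtype=args.dtype)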