From 17c3661ef1182490edd9805d48d6110e47c6187b Mon Sep 17 00:00:00 2001
From: chenxuyi
Date: Mon, 28 Oct 2019 19:25:28 +0800
Subject: [PATCH] paddle 1.6 compat

---
 ernie/batching.py                | 2 +-
 ernie/ernie_encoder.py           | 2 +-
 ernie/finetune/sequence_label.py | 2 +-
 ernie/model/ernie.py             | 6 +++---
 ernie/model/ernie_v1.py          | 4 ++--
 5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/ernie/batching.py b/ernie/batching.py
index c3130a3..d4248cf 100644
--- a/ernie/batching.py
+++ b/ernie/batching.py
@@ -208,7 +208,7 @@ def pad_batch_data(insts,
 
     if return_seq_lens:
         seq_lens = np.array([len(inst) for inst in insts])
-        return_list += [seq_lens.astype("int64").reshape([-1, 1])]
+        return_list += [seq_lens.astype("int64").reshape([-1])]
 
     return return_list if len(return_list) > 1 else return_list[0]
diff --git a/ernie/ernie_encoder.py b/ernie/ernie_encoder.py
index 9773edf..1c47aa0 100644
--- a/ernie/ernie_encoder.py
+++ b/ernie/ernie_encoder.py
@@ -56,7 +56,7 @@ def create_model(args, pyreader_name, ernie_config):
         capacity=50,
         shapes=[[-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
                 [-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
-                [-1, args.max_seq_len, 1], [-1, 1]],
+                [-1, args.max_seq_len, 1], [-1]],
         dtypes=['int64', 'int64', 'int64', 'int64', 'float', 'int64'],
         lod_levels=[0, 0, 0, 0, 0, 0],
         name=pyreader_name,
diff --git a/ernie/finetune/sequence_label.py b/ernie/finetune/sequence_label.py
index 550388d..d517634 100644
--- a/ernie/finetune/sequence_label.py
+++ b/ernie/finetune/sequence_label.py
@@ -40,7 +40,7 @@ def create_model(args, pyreader_name, ernie_config, is_prediction=False):
         capacity=50,
         shapes=[[-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
                 [-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
-                [-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1], [-1, 1]],
+                [-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1], [-1]],
         dtypes=[
             'int64', 'int64', 'int64', 'int64', 'float32', 'int64', 'int64'
         ],
diff --git a/ernie/model/ernie.py b/ernie/model/ernie.py
index 9ee254d..84d0cbe 100644
--- a/ernie/model/ernie.py
+++ b/ernie/model/ernie.py
@@ -86,7 +86,7 @@ class ErnieModel(object):
         self._sent_emb_name = "sent_embedding"
         self._task_emb_name = "task_embedding"
         self._dtype = "float16" if use_fp16 else "float32"
-        self._emb_dtype = "float32"
+        self._emb_dtype = 'float32'
 
         # Initialize all weigths by truncated normal initializer, and all biases
         # will be initialized by constant zero by default.
@@ -138,7 +138,7 @@ class ErnieModel(object):
         emb_out = pre_process_layer(
             emb_out, 'nd', self._prepostprocess_dropout, name='pre_encoder')
 
-        if self._dtype == "float16":
+        if self._dtype == 'float16':
             emb_out = fluid.layers.cast(x=emb_out, dtype=self._dtype)
             input_mask = fluid.layers.cast(x=input_mask, dtype=self._dtype)
         self_attn_mask = fluid.layers.matmul(
@@ -167,7 +167,7 @@ class ErnieModel(object):
             postprocess_cmd="dan",
             param_initializer=self._param_initializer,
             name='encoder')
-        if self._dtype == "float16":
+        if self._dtype == 'float16':
             self._enc_out = fluid.layers.cast(
                 x=self._enc_out, dtype=self._emb_dtype)
 
diff --git a/ernie/model/ernie_v1.py b/ernie/model/ernie_v1.py
index e05dd86..eea1787 100644
--- a/ernie/model/ernie_v1.py
+++ b/ernie/model/ernie_v1.py
@@ -76,7 +76,7 @@ class ErnieModel(object):
         self._word_emb_name = "word_embedding"
         self._pos_emb_name = "pos_embedding"
         self._sent_emb_name = "sent_embedding"
-        self._dtype = "float16" if use_fp16 else "float32"
+        self._dtype = 'float16' if use_fp16 else 'float32'
 
         # Initialize all weigths by truncated normal initializer, and all biases
         # will be initialized by constant zero by default.
@@ -114,7 +114,7 @@ class ErnieModel(object):
         emb_out = pre_process_layer(
             emb_out, 'nd', self._prepostprocess_dropout, name='pre_encoder')
 
-        if self._dtype == "float16":
+        if self._dtype == 'float16':
             input_mask = fluid.layers.cast(x=input_mask, dtype=self._dtype)
         self_attn_mask = fluid.layers.matmul(
             x=input_mask, y=input_mask, transpose_y=True)
-- 
GitLab
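
Note on the patch: the substantive change is the shape of the sequence-length feed. `pad_batch_data` now returns `seq_lens` as a 1-D tensor (`reshape([-1])`) instead of a column vector (`reshape([-1, 1])`), and the matching slot in each `py_reader` `shapes` list changes from `[-1, 1]` to `[-1]`, presumably because Paddle 1.6 expects sequence-length inputs to be 1-D `int64` tensors. The remaining hunks only switch double quotes to single quotes and change no behavior. Below is a minimal NumPy sketch (an illustration with made-up data, not part of the patch) of the before/after feed shapes:

    import numpy as np

    # Toy batch of three variable-length token-id sequences.
    insts = [[101, 7, 8, 102], [101, 9, 102], [101, 102]]

    seq_lens = np.array([len(inst) for inst in insts])

    # Pre-1.6 feed: a column vector of shape (batch_size, 1), i.e. [[4], [3], [2]].
    old_feed = seq_lens.astype("int64").reshape([-1, 1])

    # Paddle 1.6 feed: a plain 1-D vector of shape (batch_size,), i.e. [4, 3, 2].
    new_feed = seq_lens.astype("int64").reshape([-1])

    print(old_feed.shape, new_feed.shape)  # -> (3, 1) (3,)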