Commit 17c3661e authored by chenxuyi, committed by Meiyim

paddle 1.6 compat

Sequence-length tensors become 1-D ([-1] instead of [-1, 1]) to match Paddle 1.6's shape conventions, and dtype string literals are normalized to single quotes.

Parent da04e0b4
@@ -208,7 +208,7 @@ def pad_batch_data(insts,
     if return_seq_lens:
         seq_lens = np.array([len(inst) for inst in insts])
-        return_list += [seq_lens.astype("int64").reshape([-1, 1])]
+        return_list += [seq_lens.astype("int64").reshape([-1])]

     return return_list if len(return_list) > 1 else return_list[0]
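As context for the hunk above: Paddle 1.6 expects sequence lengths as a 1-D tensor, so `pad_batch_data` now drops the trailing dimension. A minimal numpy sketch of the difference, with a hypothetical two-sentence batch that is not part of this commit:

```python
import numpy as np

insts = [[1, 2, 3], [4, 5]]  # hypothetical batch of token-id lists
seq_lens = np.array([len(inst) for inst in insts])

before = seq_lens.astype("int64").reshape([-1, 1])  # pre-1.6 layout
after = seq_lens.astype("int64").reshape([-1])      # Paddle 1.6 layout
print(before.shape, after.shape)  # (2, 1) (2,)
```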
@@ -56,7 +56,7 @@ def create_model(args, pyreader_name, ernie_config):
         capacity=50,
         shapes=[[-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
                 [-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
-                [-1, args.max_seq_len, 1], [-1, 1]],
+                [-1, args.max_seq_len, 1], [-1]],
         dtypes=['int64', 'int64', 'int64', 'int64', 'float', 'int64'],
         lod_levels=[0, 0, 0, 0, 0, 0],
         name=pyreader_name,
@@ -40,7 +40,7 @@ def create_model(args, pyreader_name, ernie_config, is_prediction=False):
         capacity=50,
         shapes=[[-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
                 [-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
-                [-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1], [-1, 1]],
+                [-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1], [-1]],
         dtypes=[
             'int64', 'int64', 'int64', 'int64', 'float32', 'int64', 'int64'
         ],
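Both `create_model` hunks make the same change: the final py_reader slot (seq_lens) becomes 1-D. A self-contained sketch of such a reader declaration, assuming the `fluid.layers.py_reader` API used above; `max_seq_len`, the slot order, and the unpacked variable names are illustrative, not taken from this commit:

```python
import paddle.fluid as fluid

max_seq_len = 128  # assumed value for the sketch

pyreader = fluid.layers.py_reader(
    capacity=50,
    # five [-1, max_seq_len, 1] inputs plus a 1-D seq_lens slot: [-1], not [-1, 1]
    shapes=[[-1, max_seq_len, 1]] * 5 + [[-1]],
    dtypes=['int64', 'int64', 'int64', 'int64', 'float32', 'int64'],
    lod_levels=[0, 0, 0, 0, 0, 0],
    name='example_pyreader')

# unpack the six slots declared above (names are illustrative)
src_ids, sent_ids, pos_ids, labels, input_mask, seq_lens = \
    fluid.layers.read_file(pyreader)
```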
@@ -86,7 +86,7 @@ class ErnieModel(object):
         self._sent_emb_name = "sent_embedding"
         self._task_emb_name = "task_embedding"
         self._dtype = "float16" if use_fp16 else "float32"
-        self._emb_dtype = "float32"
+        self._emb_dtype = 'float32'

         # Initialize all weigths by truncated normal initializer, and all biases
         # will be initialized by constant zero by default.
@@ -138,7 +138,7 @@ class ErnieModel(object):
         emb_out = pre_process_layer(
             emb_out, 'nd', self._prepostprocess_dropout, name='pre_encoder')

-        if self._dtype == "float16":
+        if self._dtype == 'float16':
             emb_out = fluid.layers.cast(x=emb_out, dtype=self._dtype)
             input_mask = fluid.layers.cast(x=input_mask, dtype=self._dtype)
         self_attn_mask = fluid.layers.matmul(
@@ -167,7 +167,7 @@ class ErnieModel(object):
             postprocess_cmd="dan",
             param_initializer=self._param_initializer,
             name='encoder')

-        if self._dtype == "float16":
+        if self._dtype == 'float16':
             self._enc_out = fluid.layers.cast(
                 x=self._enc_out, dtype=self._emb_dtype)
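The two `ErnieModel` hunks above only normalize quote style, but they sit inside the model's mixed-precision pattern: inputs are cast to `self._dtype` (float16 when `use_fp16` is set) before the encoder, and the encoder output is cast back to `self._emb_dtype` (float32) for the downstream heads. A minimal sketch of that round trip, using an identity stand-in for the encoder and illustrative shapes:

```python
import paddle.fluid as fluid

use_fp16 = True
dtype = 'float16' if use_fp16 else 'float32'
emb_dtype = 'float32'  # embeddings and task heads stay in fp32

emb_out = fluid.layers.data(name='emb_out', shape=[128, 768], dtype=emb_dtype)
if dtype == 'float16':
    emb_out = fluid.layers.cast(x=emb_out, dtype=dtype)  # encoder runs in fp16

enc_out = emb_out  # stand-in for the transformer encoder stack

if dtype == 'float16':
    enc_out = fluid.layers.cast(x=enc_out, dtype=emb_dtype)  # back to fp32
```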
@@ -76,7 +76,7 @@ class ErnieModel(object):
         self._word_emb_name = "word_embedding"
         self._pos_emb_name = "pos_embedding"
         self._sent_emb_name = "sent_embedding"
-        self._dtype = "float16" if use_fp16 else "float32"
+        self._dtype = 'float16' if use_fp16 else 'float32'

         # Initialize all weigths by truncated normal initializer, and all biases
         # will be initialized by constant zero by default.
@@ -114,7 +114,7 @@ class ErnieModel(object):
         emb_out = pre_process_layer(
             emb_out, 'nd', self._prepostprocess_dropout, name='pre_encoder')

-        if self._dtype == "float16":
+        if self._dtype == 'float16':
             input_mask = fluid.layers.cast(x=input_mask, dtype=self._dtype)
         self_attn_mask = fluid.layers.matmul(
             x=input_mask, y=input_mask, transpose_y=True)