From 4d0c99f72891bcc65ba3675b3e40f7d7ffae4e5d Mon Sep 17 00:00:00 2001
From: chenxuyi
Date: Mon, 4 Nov 2019 16:30:43 +0800
Subject: [PATCH] fix python2 compat

---
 ernie/model/ernie.py    | 9 +++++----
 ernie/model/ernie_v1.py | 5 +++--
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/ernie/model/ernie.py b/ernie/model/ernie.py
index 84d0cbe..92669e7 100644
--- a/ernie/model/ernie.py
+++ b/ernie/model/ernie.py
@@ -24,6 +24,7 @@ import six
 import logging
 import paddle.fluid as fluid
 from io import open
+from paddle.fluid.layers import core
 
 from model.transformer_encoder import encoder, pre_process_layer
 
@@ -85,8 +86,8 @@ class ErnieModel(object):
         self._pos_emb_name = "pos_embedding"
         self._sent_emb_name = "sent_embedding"
         self._task_emb_name = "task_embedding"
-        self._dtype = "float16" if use_fp16 else "float32"
-        self._emb_dtype = 'float32'
+        self._dtype = core.VarDesc.VarType.FP16 if use_fp16 else core.VarDesc.VarType.FP32
+        self._emb_dtype = core.VarDesc.VarType.FP32
 
         # Initialize all weigths by truncated normal initializer, and all biases
         # will be initialized by constant zero by default.
@@ -138,7 +139,7 @@ class ErnieModel(object):
         emb_out = pre_process_layer(
             emb_out, 'nd', self._prepostprocess_dropout, name='pre_encoder')
 
-        if self._dtype == 'float16':
+        if self._dtype == core.VarDesc.VarType.FP16:
             emb_out = fluid.layers.cast(x=emb_out, dtype=self._dtype)
             input_mask = fluid.layers.cast(x=input_mask, dtype=self._dtype)
         self_attn_mask = fluid.layers.matmul(
@@ -167,7 +168,7 @@ class ErnieModel(object):
             postprocess_cmd="dan",
             param_initializer=self._param_initializer,
             name='encoder')
-        if self._dtype == 'float16':
+        if self._dtype == core.VarDesc.VarType.FP16:
             self._enc_out = fluid.layers.cast(
                 x=self._enc_out, dtype=self._emb_dtype)
 
diff --git a/ernie/model/ernie_v1.py b/ernie/model/ernie_v1.py
index eea1787..74d25c9 100644
--- a/ernie/model/ernie_v1.py
+++ b/ernie/model/ernie_v1.py
@@ -24,6 +24,7 @@ import logging
 import six
 import paddle.fluid as fluid
 from io import open
+from paddle.fluid.layers import core
 
 from model.transformer_encoder import encoder, pre_process_layer
 
@@ -76,7 +77,7 @@ class ErnieModel(object):
         self._word_emb_name = "word_embedding"
         self._pos_emb_name = "pos_embedding"
         self._sent_emb_name = "sent_embedding"
-        self._dtype = 'float16' if use_fp16 else 'float32'
+        self._dtype = core.VarDesc.VarType.FP16 if use_fp16 else core.VarDesc.VarType.FP32
 
         # Initialize all weigths by truncated normal initializer, and all biases
         # will be initialized by constant zero by default.
@@ -114,7 +115,7 @@ class ErnieModel(object):
         emb_out = pre_process_layer(
             emb_out, 'nd', self._prepostprocess_dropout, name='pre_encoder')
 
-        if self._dtype == 'float16':
+        if self._dtype == core.VarDesc.VarType.FP16:
             input_mask = fluid.layers.cast(x=input_mask, dtype=self._dtype)
             self_attn_mask = fluid.layers.matmul(
                 x=input_mask, y=input_mask, transpose_y=True)
--
GitLab
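
For context: the patch replaces the string dtype tags "float16"/"float32" with the framework's VarType enum members, so dtype selection and the fp16-cast checks compare enum values rather than str/unicode objects. Below is a minimal sketch of the resulting pattern in isolation, assuming a PaddlePaddle 1.x (fluid) environment where paddle.fluid.core exposes VarDesc.VarType; the use_fp16 flag and the data layer are illustrative and not taken from the ERNIE code.

    # Minimal sketch of the dtype-handling pattern this patch adopts.
    # Assumes PaddlePaddle 1.x (fluid API); use_fp16 is an illustrative flag.
    import paddle.fluid as fluid
    from paddle.fluid import core

    use_fp16 = True

    # Store the compute dtype as a VarType enum member instead of a raw string.
    dtype = core.VarDesc.VarType.FP16 if use_fp16 else core.VarDesc.VarType.FP32
    emb_dtype = core.VarDesc.VarType.FP32

    # Enum comparison behaves the same under Python 2 and Python 3, whereas
    # str/unicode dtype tags can be fragile across the two.
    if dtype == core.VarDesc.VarType.FP16:
        x = fluid.layers.data(name='x', shape=[8], dtype='float32')
        # fluid.layers.cast accepts a VarType enum as well as a dtype string.
        x = fluid.layers.cast(x=x, dtype=dtype)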