diff --git a/ernie/model/ernie.py b/ernie/model/ernie.py
index 84d0cbe718d1a28a6761c8a3d43cf02cd2a10a3a..92669e7e07f4e757455da7ff82d579d0bc6efa29 100644
--- a/ernie/model/ernie.py
+++ b/ernie/model/ernie.py
@@ -24,6 +24,7 @@ import six
 import logging
 import paddle.fluid as fluid
 from io import open
+from paddle.fluid.layers import core
 
 from model.transformer_encoder import encoder, pre_process_layer
 
@@ -85,8 +86,8 @@ class ErnieModel(object):
         self._pos_emb_name = "pos_embedding"
         self._sent_emb_name = "sent_embedding"
         self._task_emb_name = "task_embedding"
-        self._dtype = "float16" if use_fp16 else "float32"
-        self._emb_dtype = 'float32'
+        self._dtype = core.VarDesc.VarType.FP16 if use_fp16 else core.VarDesc.VarType.FP32
+        self._emb_dtype = core.VarDesc.VarType.FP32
 
         # Initialize all weigths by truncated normal initializer, and all biases
         # will be initialized by constant zero by default.
@@ -138,7 +139,7 @@ class ErnieModel(object):
         emb_out = pre_process_layer(
             emb_out, 'nd', self._prepostprocess_dropout, name='pre_encoder')
 
-        if self._dtype == 'float16':
+        if self._dtype == core.VarDesc.VarType.FP16:
             emb_out = fluid.layers.cast(x=emb_out, dtype=self._dtype)
             input_mask = fluid.layers.cast(x=input_mask, dtype=self._dtype)
         self_attn_mask = fluid.layers.matmul(
@@ -167,7 +168,7 @@ class ErnieModel(object):
             postprocess_cmd="dan",
             param_initializer=self._param_initializer,
             name='encoder')
-        if self._dtype == 'float16':
+        if self._dtype == core.VarDesc.VarType.FP16:
             self._enc_out = fluid.layers.cast(
                 x=self._enc_out, dtype=self._emb_dtype)
 
diff --git a/ernie/model/ernie_v1.py b/ernie/model/ernie_v1.py
index eea17877e0feae128a1528be4c4036c9fb95d578..74d25c93d921c943679be06c28de3af2536eb010 100644
--- a/ernie/model/ernie_v1.py
+++ b/ernie/model/ernie_v1.py
@@ -24,6 +24,7 @@ import logging
 import six
 import paddle.fluid as fluid
 from io import open
+from paddle.fluid.layers import core
 
 from model.transformer_encoder import encoder, pre_process_layer
 
@@ -76,7 +77,7 @@ class ErnieModel(object):
         self._word_emb_name = "word_embedding"
         self._pos_emb_name = "pos_embedding"
         self._sent_emb_name = "sent_embedding"
-        self._dtype = 'float16' if use_fp16 else 'float32'
+        self._dtype = core.VarDesc.VarType.FP16 if use_fp16 else core.VarDesc.VarType.FP32
 
         # Initialize all weigths by truncated normal initializer, and all biases
         # will be initialized by constant zero by default.
@@ -114,7 +115,7 @@ class ErnieModel(object):
         emb_out = pre_process_layer(
             emb_out, 'nd', self._prepostprocess_dropout, name='pre_encoder')
 
-        if self._dtype == 'float16':
+        if self._dtype == core.VarDesc.VarType.FP16:
             input_mask = fluid.layers.cast(x=input_mask, dtype=self._dtype)
         self_attn_mask = fluid.layers.matmul(
             x=input_mask, y=input_mask, transpose_y=True)
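
For context, a minimal sketch of the dtype convention this patch adopts: the fp16 flag is stored as a `core.VarDesc.VarType` enum rather than a string, which matches what `Variable.dtype` returns. This is not part of the patch; it assumes Paddle Fluid 1.x, and the variable names (`use_fp16`, `x`) are illustrative only.

```python
import paddle.fluid as fluid
from paddle.fluid.layers import core

use_fp16 = True
# Store the compute dtype as a VarDesc.VarType enum, as the patch does.
dtype = core.VarDesc.VarType.FP16 if use_fp16 else core.VarDesc.VarType.FP32

x = fluid.layers.data(name='x', shape=[128, 768], dtype='float32')
if dtype == core.VarDesc.VarType.FP16:
    # fluid.layers.cast accepts a VarDesc.VarType as well as its string alias.
    x = fluid.layers.cast(x=x, dtype=dtype)

# Variable.dtype is itself a VarDesc.VarType, so enum comparisons stay
# consistent, whereas x.dtype == 'float16' would compare enum to str.
assert x.dtype == core.VarDesc.VarType.FP16
```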