diff --git a/BERT/convert_params.py b/BERT/convert_params.py
index 17cada954d4920dcb6a3c92d0371415655508a23..5cb95dbddfad200e402d94ccd835b5efb9dd2248 100644
--- a/BERT/convert_params.py
+++ b/BERT/convert_params.py
@@ -137,7 +137,7 @@ def parse(init_checkpoint):
         else:
             print("ignored param: %s" % var_name)
 
-        if fluid_param_name is not '':
+        if fluid_param_name != '':
             tf_fluid_param_name_map[var_name] = fluid_param_name
             tf_param_name_shape_map[var_name] = var_shape
             fluid_param_name = ''
diff --git a/BERT/model/bert.py b/BERT/model/bert.py
index ace3e609e64b90170f7144586189f155d3dd2220..e6348d54688057c101ceeb71083d286518798269 100644
--- a/BERT/model/bert.py
+++ b/BERT/model/bert.py
@@ -73,7 +73,7 @@ class BertModel(object):
         self._sent_emb_name = "sent_embedding"
         self._dtype = "float16" if use_fp16 else "float32"
 
-        # Initialize all weigths by truncated normal initializer, and all biases 
+        # Initialize all weights by truncated normal initializer, and all biases
         # will be initialized by constant zero by default.
         self._param_initializer = fluid.initializer.TruncatedNormal(
             scale=config['initializer_range'])
@@ -109,7 +109,7 @@ class BertModel(object):
         emb_out = pre_process_layer(
             emb_out, 'nd', self._prepostprocess_dropout, name='pre_encoder')
 
-        if self._dtype is "float16":
+        if self._dtype == "float16":
             self_attn_mask = fluid.layers.cast(
                 x=self_attn_mask, dtype=self._dtype)
 
@@ -175,7 +175,7 @@ class BertModel(object):
                 name='mask_lm_trans_fc.w_0',
                 initializer=self._param_initializer),
             bias_attr=fluid.ParamAttr(name='mask_lm_trans_fc.b_0'))
-        # transform: layer norm 
+        # transform: layer norm
         mask_trans_feat = pre_process_layer(
             mask_trans_feat, 'n', name='mask_lm_trans')
 
diff --git a/BERT/run_squad.py b/BERT/run_squad.py
index efc0d9c73bc5c995c55b2aa93aabc8fc71155c99..1f7da456c5a25dde99bcb39d46b3ee15a339dd94 100644
--- a/BERT/run_squad.py
+++ b/BERT/run_squad.py
@@ -17,11 +17,13 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
+import argparse
+import collections
+import multiprocessing
 import os
 import time
-import argparse
+
 import numpy as np
-import collections
 
 import paddle
 import paddle.fluid as fluid
diff --git a/ERNIE/batching.py b/ERNIE/batching.py
index beea08241c7e3607ec226895901bef59ca097237..797ab9f5938324df07ee3506870fc4cc21d6e75d 100644
--- a/ERNIE/batching.py
+++ b/ERNIE/batching.py
@@ -19,6 +19,8 @@ from __future__ import print_function
 
 import numpy as np
 
+from six.moves import xrange
+
 
 def mask(batch_tokens,
          seg_labels,
diff --git a/ERNIE/finetune/classifier.py b/ERNIE/finetune/classifier.py
index b5609af32f8399228e5b2450d07760e253ea7c60..37415fb97f9b3e425d60ff414fe548d83c5f13b1 100644
--- a/ERNIE/finetune/classifier.py
+++ b/ERNIE/finetune/classifier.py
@@ -22,6 +22,8 @@ import numpy as np
 
 import paddle.fluid as fluid
 
+from six.moves import xrange
+
 from model.ernie import ErnieModel
 
 
diff --git a/ERNIE/finetune/sequence_label.py b/ERNIE/finetune/sequence_label.py
index 327c9e56f0d2ae6f10718a88bce440b79b63dc18..dab6a58c85587381c27a53a94306d360fc405ac6 100644
--- a/ERNIE/finetune/sequence_label.py
+++ b/ERNIE/finetune/sequence_label.py
@@ -25,6 +25,8 @@ import multiprocessing
 import paddle
 import paddle.fluid as fluid
 
+from six.moves import xrange
+
 from model.ernie import ErnieModel
 
 def create_model(args,
diff --git a/ERNIE/model/ernie.py b/ERNIE/model/ernie.py
index 69d6e112eb97855e933800b57d5eeb560caf2257..e42b2f4558097f90ed1732dc0a7dd29b3a011aee 100644
--- a/ERNIE/model/ernie.py
+++ b/ERNIE/model/ernie.py
@@ -73,7 +73,7 @@ class ErnieModel(object):
         self._sent_emb_name = "sent_embedding"
         self._dtype = "float16" if use_fp16 else "float32"
 
-        # Initialize all weigths by truncated normal initializer, and all biases 
+        # Initialize all weights by truncated normal initializer, and all biases
         # will be initialized by constant zero by default.
         self._param_initializer = fluid.initializer.TruncatedNormal(
             scale=config['initializer_range'])
@@ -109,7 +109,7 @@ class ErnieModel(object):
         emb_out = pre_process_layer(
             emb_out, 'nd', self._prepostprocess_dropout, name='pre_encoder')
 
-        if self._dtype is "float16":
+        if self._dtype == "float16":
             self_attn_mask = fluid.layers.cast(
                 x=self_attn_mask, dtype=self._dtype)
 
@@ -175,7 +175,7 @@ class ErnieModel(object):
                 name='mask_lm_trans_fc.w_0',
                 initializer=self._param_initializer),
             bias_attr=fluid.ParamAttr(name='mask_lm_trans_fc.b_0'))
-        # transform: layer norm 
+        # transform: layer norm
         mask_trans_feat = pre_process_layer(
             mask_trans_feat, 'n', name='mask_lm_trans')
 
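Note on the changes above (editorial remark, not part of the patch): the hunks in BERT/convert_params.py, BERT/model/bert.py and ERNIE/model/ernie.py replace identity tests against string literals (is / is not) with value comparisons (== / !=). Identity only holds when CPython happens to intern both strings, and CPython 3.8+ emits "SyntaxWarning: 'is' with a literal" for such code. A minimal standalone sketch of the pitfall, using a made-up variable rather than code from this repository:

    # Illustrative sketch only, not repository code.
    dtype = "".join(["float", "16"])  # builds "float16" at runtime; usually a distinct object
    print(dtype == "float16")         # True: '==' compares string values
    print(dtype is "float16")         # typically False: 'is' compares object identity

The added "from six.moves import xrange" lines make the name xrange available in ERNIE/batching.py and the finetune scripts under both Python 2 (builtin xrange) and Python 3, where six maps it to the builtin range.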