From 3aeb517a76c219ee5ee5d68c07d07108a8931d57 Mon Sep 17 00:00:00 2001
From: cclauss
Date: Tue, 19 Mar 2019 11:15:59 +0100
Subject: [PATCH] Fixes for Python 3

---
 BERT/convert_params.py           | 2 +-
 BERT/model/bert.py               | 6 +++---
 BERT/run_squad.py                | 6 ++++--
 ERNIE/batching.py                | 2 ++
 ERNIE/finetune/classifier.py     | 2 ++
 ERNIE/finetune/sequence_label.py | 2 ++
 ERNIE/model/ernie.py             | 6 +++---
 7 files changed, 17 insertions(+), 9 deletions(-)

diff --git a/BERT/convert_params.py b/BERT/convert_params.py
index 17cada9..5cb95db 100644
--- a/BERT/convert_params.py
+++ b/BERT/convert_params.py
@@ -137,7 +137,7 @@ def parse(init_checkpoint):
         else:
             print("ignored param: %s" % var_name)
 
-        if fluid_param_name is not '':
+        if fluid_param_name != '':
             tf_fluid_param_name_map[var_name] = fluid_param_name
             tf_param_name_shape_map[var_name] = var_shape
             fluid_param_name = ''
diff --git a/BERT/model/bert.py b/BERT/model/bert.py
index ace3e60..e6348d5 100644
--- a/BERT/model/bert.py
+++ b/BERT/model/bert.py
@@ -73,7 +73,7 @@ class BertModel(object):
         self._sent_emb_name = "sent_embedding"
         self._dtype = "float16" if use_fp16 else "float32"
 
-        # Initialize all weigths by truncated normal initializer, and all biases 
+        # Initialize all weigths by truncated normal initializer, and all biases
         # will be initialized by constant zero by default.
         self._param_initializer = fluid.initializer.TruncatedNormal(
             scale=config['initializer_range'])
@@ -109,7 +109,7 @@ class BertModel(object):
         emb_out = pre_process_layer(
             emb_out, 'nd', self._prepostprocess_dropout, name='pre_encoder')
 
-        if self._dtype is "float16":
+        if self._dtype == "float16":
             self_attn_mask = fluid.layers.cast(
                 x=self_attn_mask, dtype=self._dtype)
 
@@ -175,7 +175,7 @@ class BertModel(object):
                 name='mask_lm_trans_fc.w_0',
                 initializer=self._param_initializer),
             bias_attr=fluid.ParamAttr(name='mask_lm_trans_fc.b_0'))
-        # transform: layer norm 
+        # transform: layer norm
         mask_trans_feat = pre_process_layer(
             mask_trans_feat, 'n', name='mask_lm_trans')
 
diff --git a/BERT/run_squad.py b/BERT/run_squad.py
index efc0d9c..1f7da45 100644
--- a/BERT/run_squad.py
+++ b/BERT/run_squad.py
@@ -17,11 +17,13 @@
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
+import argparse
+import collections
+import multiprocessing
 import os
 import time
-import argparse
+
 import numpy as np
-import collections
 import paddle
 import paddle.fluid as fluid
diff --git a/ERNIE/batching.py b/ERNIE/batching.py
index beea082..797ab9f 100644
--- a/ERNIE/batching.py
+++ b/ERNIE/batching.py
@@ -19,6 +19,8 @@ from __future__ import print_function
 
 import numpy as np
 
+from six.moves import xrange
+
 
 def mask(batch_tokens,
          seg_labels,
diff --git a/ERNIE/finetune/classifier.py b/ERNIE/finetune/classifier.py
index b5609af..37415fb 100644
--- a/ERNIE/finetune/classifier.py
+++ b/ERNIE/finetune/classifier.py
@@ -22,6 +22,8 @@ import numpy as np
 
 import paddle.fluid as fluid
 
+from six.moves import xrange
+
 from model.ernie import ErnieModel
 
 
diff --git a/ERNIE/finetune/sequence_label.py b/ERNIE/finetune/sequence_label.py
index 327c9e5..dab6a58 100644
--- a/ERNIE/finetune/sequence_label.py
+++ b/ERNIE/finetune/sequence_label.py
@@ -25,6 +25,8 @@ import multiprocessing
 import paddle
 import paddle.fluid as fluid
 
+from six.moves import xrange
+
 from model.ernie import ErnieModel
 
 def create_model(args,
diff --git a/ERNIE/model/ernie.py b/ERNIE/model/ernie.py
index 69d6e11..e42b2f4 100644
--- a/ERNIE/model/ernie.py
+++ b/ERNIE/model/ernie.py
@@ -73,7 +73,7 @@ class ErnieModel(object):
         self._sent_emb_name = "sent_embedding"
         self._dtype = "float16" if use_fp16 else "float32"
 
-        # Initialize all weigths by truncated normal initializer, and all biases 
+        # Initialize all weigths by truncated normal initializer, and all biases
         # will be initialized by constant zero by default.
         self._param_initializer = fluid.initializer.TruncatedNormal(
             scale=config['initializer_range'])
@@ -109,7 +109,7 @@ class ErnieModel(object):
         emb_out = pre_process_layer(
             emb_out, 'nd', self._prepostprocess_dropout, name='pre_encoder')
 
-        if self._dtype is "float16":
+        if self._dtype == "float16":
             self_attn_mask = fluid.layers.cast(
                 x=self_attn_mask, dtype=self._dtype)
 
@@ -175,7 +175,7 @@ class ErnieModel(object):
                 name='mask_lm_trans_fc.w_0',
                 initializer=self._param_initializer),
             bias_attr=fluid.ParamAttr(name='mask_lm_trans_fc.b_0'))
-        # transform: layer norm 
+        # transform: layer norm
         mask_trans_feat = pre_process_layer(
             mask_trans_feat, 'n', name='mask_lm_trans')
 
-- 
GitLab
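
Note: the sketch below is not part of the patch. It is a minimal standalone demo of the two Python 3 pitfalls the patch addresses: string comparison with "is"/"is not" instead of "=="/"!=", and the removal of the xrange built-in. The variable name param_name and the loop bound are invented for illustration.

# Illustrative sketch only -- not code from the repository above.
from six.moves import xrange  # resolves to range() on Python 3, xrange() on Python 2

# 1) "is" / "is not" compare object identity, not value. A string built at
#    runtime is not guaranteed to be the same object as an equal literal,
#    and CPython 3.8+ emits a SyntaxWarning for "is" with a literal, which
#    is why the patch switches these checks to "==" / "!=".
param_name = "".join(["encoder_layer_", str(0)])  # "encoder_layer_0", built at runtime
print(param_name == "encoder_layer_0")            # True: value comparison
# "param_name is 'encoder_layer_0'" may be False, since identity is not guaranteed.

# 2) The xrange built-in was removed in Python 3; six.moves.xrange keeps the
#    lazy-range behaviour on both interpreter lines.
total = sum(i for i in xrange(5))
print(total)  # 0 + 1 + 2 + 3 + 4 == 10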