diff --git a/paddlepalm/mtl_controller.py b/paddlepalm/mtl_controller.py
index 9abedb92135055da316b722c9bb879c94dd5e618..fccafd433e707dfd6d6c6f06dd973d035bfe6071 100755
--- a/paddlepalm/mtl_controller.py
+++ b/paddlepalm/mtl_controller.py
@@ -95,7 +95,7 @@ def _try_float(s):
 def _check_conf(conf, checklist=None):
     assert isinstance(conf, dict), "{} is not a dict.".format(conf)
     ret = {}
-    for k,v in list(conf.items()):
+    for k,v in conf.items():
         if isinstance(v, str):
             v = _try_float(v)
         ret[k] = v
diff --git a/paddlepalm/task_instance.py b/paddlepalm/task_instance.py
index 9c33d848fbbc6ab5c3927bc3196bcd02382a1a76..091526912cdaa3db8f475f10cc4bf2409d2bd31e 100644
--- a/paddlepalm/task_instance.py
+++ b/paddlepalm/task_instance.py
@@ -92,7 +92,7 @@ class TaskInstance(object):
         output_vars = self._task_layer[phase].build(net_inputs, scope_name=scope)
         if phase == 'pred':
             if output_vars is not None:
-                self._pred_fetch_name_list, self._pred_fetch_var_list = list(zip(*list(output_vars.items())))
+                self._pred_fetch_name_list, self._pred_fetch_var_list = zip(*output_vars.items())
             else:
                 self._pred_fetch_name_list = []
                 self._pred_fetch_var_list = []
@@ -113,7 +113,7 @@ class TaskInstance(object):
         fluid.io.save_inference_model(dirpath, self._pred_input_varname_list, self._pred_fetch_var_list, self._exe, prog)
 
         conf = {}
-        for k, strv in list(self._save_protocol.items()): # py3
+        for k, strv in self._save_protocol.items():
             d = None
             v = locals()
             exec('d={}'.format(strv), globals(), v)
@@ -125,7 +125,7 @@ class TaskInstance(object):
     def load(self, infer_model_path=None):
         if infer_model_path is None:
             infer_model_path = self._save_infermodel_path
-        for k,v in list(json.load(open(os.path.join(infer_model_path, '__conf__'))).items()): # py3
+        for k,v in json.load(open(os.path.join(infer_model_path, '__conf__'))).items():
             strv = self._save_protocol[k]
             exec('{}=v'.format(strv))
         pred_prog, self._pred_input_varname_list, self._pred_fetch_var_list = \
@@ -169,13 +169,13 @@ class TaskInstance(object):
 
     @property
     def pred_input(self):
-        return list(zip(*[self._pred_input_name_list, self._pred_input_varname_list])) # py3
+        return zip(*[self._pred_input_name_list, self._pred_input_varname_list])
 
     @pred_input.setter
     def pred_input(self, val):
         assert isinstance(val, dict)
         self._pred_input_name_list, self._pred_input_varname_list = \
-            list(zip(*[[k, v.name] for k,v in list(val.items())])) # py3
+            zip(*[[k, v.name] for k,v in val.items()])
 
     @property
     def pred_fetch_list(self):
diff --git a/paddlepalm/tokenizer/bert_tokenizer.py b/paddlepalm/tokenizer/bert_tokenizer.py
index 43cbee4a332f17a72a901aea1e7bde0a5ce4ea4b..f4cefd0fdfe76dbd729301734dbec8a2c7c9ce9b 100644
--- a/paddlepalm/tokenizer/bert_tokenizer.py
+++ b/paddlepalm/tokenizer/bert_tokenizer.py
@@ -111,7 +111,7 @@ class FullTokenizer(object):
 
     def __init__(self, vocab_file, do_lower_case=True):
         self.vocab = load_vocab(vocab_file)
-        self.inv_vocab = {v: k for k, v in list(self.vocab.items())}
+        self.inv_vocab = {v: k for k, v in self.vocab.items()}
         self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
         self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
 
@@ -135,7 +135,7 @@ class CharTokenizer(object):
 
     def __init__(self, vocab_file, do_lower_case=True):
         self.vocab = load_vocab(vocab_file)
-        self.inv_vocab = {v: k for k, v in list(self.vocab.items())}
+        self.inv_vocab = {v: k for k, v in self.vocab.items()}
         self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
 
     def tokenize(self, text):
diff --git a/paddlepalm/tokenizer/ernie_tokenizer.py b/paddlepalm/tokenizer/ernie_tokenizer.py
index 61517de3e9aaf7bd202a571ce3d31c7272be837c..2e6b044fb870699dff60e0772c1fefa2f2c3db3f 100644
--- a/paddlepalm/tokenizer/ernie_tokenizer.py
+++ b/paddlepalm/tokenizer/ernie_tokenizer.py
@@ -115,7 +115,7 @@ class FullTokenizer(object):
 
     def __init__(self, vocab_file, do_lower_case=True):
         self.vocab = load_vocab(vocab_file)
-        self.inv_vocab = {v: k for k, v in list(self.vocab.items())}
+        self.inv_vocab = {v: k for k, v in self.vocab.items()}
         self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
         self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
 
@@ -139,7 +139,7 @@ class CharTokenizer(object):
 
     def __init__(self, vocab_file, do_lower_case=True):
         self.vocab = load_vocab(vocab_file)
-        self.inv_vocab = {v: k for k, v in list(self.vocab.items())}
+        self.inv_vocab = {v: k for k, v in self.vocab.items()}
         self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
 
     def tokenize(self, text):
diff --git a/paddlepalm/utils/reader_helper.py b/paddlepalm/utils/reader_helper.py
index d3a8431dc49eb450698c5bea11391af4092d5934..63362b3ba19d43511e290dbe21cf733b283bbec5 100644
--- a/paddlepalm/utils/reader_helper.py
+++ b/paddlepalm/utils/reader_helper.py
@@ -87,7 +87,7 @@ def create_iterator_fn(iterator, iterator_prefix, shape_and_dtypes, outname_to_p
             outputs = next(iterator) # dict type
 
             prefix = iterator_prefixe
-            for outname, val in list(outputs.items()):
+            for outname, val in outputs.items():
                 task_outname = prefix + '/' + outname
 
                 if outname in outname_to_pos:
@@ -121,7 +121,7 @@ def create_joint_iterator_fn(iterators, iterator_prefixes, joint_shape_and_dtype
             outputs = next(iterators[id]) # dict type
             outbuf[id] = outputs
             prefix = iterator_prefixes[id]
-            for outname, val in list(outputs.items()):
+            for outname, val in outputs.items():
                 task_outname = prefix + '/' + outname
 
                 if outname in outname_to_pos:
@@ -176,7 +176,7 @@ def create_joint_iterator_fn(iterators, iterator_prefixes, joint_shape_and_dtype
                     has_show_warn = True
 
                 prefix = iterator_prefixes[id]
-                for outname, val in list(outputs.items()):
+                for outname, val in outputs.items():
                     if v > 0:
                         print('reader generate: '+outname)
                     task_outname = prefix + '/' + outname
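
Note on the pattern removed throughout this diff: every change drops a redundant list() (or list(zip())) wrapper. In Python 3, dict.items() returns a view object that is directly iterable, so materializing it before a for loop or a comprehension only adds an extra copy. The one behavioral change to watch is zip(), which in Python 3 returns a single-use iterator rather than a list: tuple unpacking still works, but callers of e.g. the pred_input property that index or re-iterate the result must now wrap it in list() themselves. Below is a minimal standalone sketch of these semantics (toy values, not PaddlePALM code):

    # Python 3 semantics that the changes above rely on.
    vocab = {'[PAD]': 0, '[UNK]': 1, 'hello': 2}  # hypothetical toy vocab

    # dict.items() is a view: directly iterable, so list() is unnecessary
    # when the result is only looped over (as in the tokenizers' inv_vocab).
    inv_vocab = {v: k for k, v in vocab.items()}
    assert inv_vocab[2] == 'hello'

    # zip(*...) is a lazy iterator; unpacking consumes it and still works.
    # (CPython 3.7+ dicts preserve insertion order, so this is deterministic.)
    names, ids = zip(*vocab.items())
    assert names == ('[PAD]', '[UNK]', 'hello') and ids == (0, 1, 2)

    # Caveat: a bare zip() result is single-use. Callers that previously got
    # a list (e.g. from pred_input) and iterate twice now see an empty second
    # pass unless they materialize the result themselves.
    pairs = zip(['a', 'b'], [1, 2])
    assert list(pairs) == [('a', 1), ('b', 2)]
    assert list(pairs) == []  # iterator already exhausted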