From 559d36328cb9722b05c4b24410d87685e45fb595 Mon Sep 17 00:00:00 2001
From: minqiyang
Date: Thu, 26 Jul 2018 18:08:09 +0800
Subject: [PATCH] Apply 2to3 to current paddle main python code
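
Most of what follows is the mechanical Python 2 -> 3 rewrite that 2to3
emits, roughly:

    print "msg"                     ->  print(("msg"))
    import cPickle                  ->  import pickle
    d.iteritems() / d.itervalues()  ->  list(d.items()) / list(d.values())
    xrange(n), itertools.izip(...)  ->  range(n), zip(...)
    filter(pred, xs)                ->  [x for x in xs if pred(x)]
    import framework                ->  from . import framework
    except ImportError, e           ->  except ImportError as e

Where variable and operator names cross the C++ boundary, bytes/str
differences are smoothed over with six (six.binary_type, six.b,
encode()/decode()) in backward.py, framework.py and io.py.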
---
 paddle/fluid/pybind/pybind.cc | 6 +-
 python/paddle/dataset/cifar.py | 6 +-
 python/paddle/dataset/common.py | 26 ++-
 python/paddle/dataset/conll05.py | 4 +-
 python/paddle/dataset/flowers.py | 8 +-
 python/paddle/dataset/image.py | 10 +-
 python/paddle/dataset/imdb.py | 8 +-
 python/paddle/dataset/imikolov.py | 4 +-
 python/paddle/dataset/mnist.py | 2 +-
 python/paddle/dataset/movielens.py | 8 +-
 python/paddle/dataset/mq2007.py | 4 +-
 python/paddle/dataset/sentiment.py | 11 +-
 python/paddle/dataset/tests/common_test.py | 6 +-
 python/paddle/dataset/tests/imikolov_test.py | 2 +-
 python/paddle/dataset/tests/test_sentiment.py | 5 +-
 python/paddle/dataset/uci_housing.py | 9 +-
 python/paddle/dataset/wmt14.py | 4 +-
 python/paddle/dataset/wmt16.py | 4 +-
 python/paddle/fluid/__init__.py | 84 +++----
 python/paddle/fluid/annotations.py | 3 +-
 python/paddle/fluid/backward.py | 132 ++++++-----
 python/paddle/fluid/clip.py | 13 +-
 python/paddle/fluid/concurrency.py | 8 +-
 python/paddle/fluid/data_feeder.py | 7 +-
 python/paddle/fluid/debugger.py | 4 +-
 python/paddle/fluid/evaluator.py | 10 +-
 python/paddle/fluid/executor.py | 10 +-
 python/paddle/fluid/framework.py | 111 +++++----
 python/paddle/fluid/graphviz.py | 8 +-
 python/paddle/fluid/inferencer.py | 16 +-
 python/paddle/fluid/initializer.py | 6 +-
 python/paddle/fluid/io.py | 217 ++++++++++--------
 python/paddle/fluid/layer_helper.py | 16 +-
 python/paddle/fluid/layers/__init__.py | 38 +--
 python/paddle/fluid/layers/control_flow.py | 11 +-
 python/paddle/fluid/layers/detection.py | 11 +-
 python/paddle/fluid/layers/device.py | 2 +-
 python/paddle/fluid/layers/io.py | 36 +--
 .../fluid/layers/layer_function_generator.py | 8 +-
 .../fluid/layers/learning_rate_scheduler.py | 8 +-
 python/paddle/fluid/layers/math_op_patch.py | 2 +-
 python/paddle/fluid/layers/metric_op.py | 2 +-
 python/paddle/fluid/layers/nn.py | 9 +-
 python/paddle/fluid/layers/ops.py | 2 +-
 python/paddle/fluid/layers/tensor.py | 2 +-
 python/paddle/fluid/lod_tensor.py | 26 +--
 python/paddle/fluid/metrics.py | 6 +-
 python/paddle/fluid/net_drawer.py | 12 +-
 python/paddle/fluid/nets.py | 13 +-
 python/paddle/fluid/op.py | 12 +-
 python/paddle/fluid/optimizer.py | 20 +-
 python/paddle/fluid/parallel_executor.py | 25 +-
 python/paddle/fluid/param_attr.py | 6 +-
 python/paddle/fluid/profiler.py | 4 +-
 python/paddle/fluid/recordio_writer.py | 2 +-
 python/paddle/fluid/regularizer.py | 2 +-
 .../fit_a_line/test_fit_a_line.py | 4 +-
 .../cifar10_small_test_set.py | 6 +-
 .../test_image_classification_resnet.py | 6 +-
 .../test_image_classification_vgg.py | 6 +-
 .../test_label_semantic_roles_newapi.py | 15 +-
 .../test_machine_translation.py | 6 +-
 .../test_recognize_digits_conv.py | 18 +-
 .../test_recognize_digits_mlp.py | 12 +-
 .../test_recommender_system_newapi.py | 9 +-
 .../test_understand_sentiment_conv.py | 16 +-
 .../test_understand_sentiment_dynamic_rnn.py | 16 +-
 .../test_understand_sentiment_stacked_lstm.py | 16 +-
 .../word2vec/test_word2vec_new_api.py | 4 +-
 .../tests/book/notest_understand_sentiment.py | 12 +-
 .../fluid/tests/book/test_fit_a_line.py | 8 +-
 .../tests/book/test_image_classification.py | 8 +-
 .../tests/book/test_label_semantic_roles.py | 12 +-
 .../tests/book/test_machine_translation.py | 10 +-
 .../fluid/tests/book/test_recognize_digits.py | 7 +-
 .../tests/book/test_recommender_system.py | 2 +-
 .../tests/book/test_rnn_encoder_decoder.py | 12 +-
 .../paddle/fluid/tests/book/test_word2vec.py | 12 +-
 .../test_memopt_fit_a_line.py | 2 +-
 .../test_memopt_image_classification_train.py | 6 +-
 .../test_memopt_machine_translation.py | 6 +-
 python/paddle/fluid/tests/demo/fc_gan.py | 8 +-
 .../file_reader/convert_data_to_recordio.py | 2 +-
 .../paddle/fluid/tests/no_test_concurrency.py | 2 +-
 python/paddle/fluid/tests/test_detection.py | 9 +-
 python/paddle/fluid/tests/test_error_clip.py | 1 -
 python/paddle/fluid/tests/test_if_else_op.py | 12 +-
 .../paddle/fluid/tests/unittests/benchmark.py | 17 +-
 .../fluid/tests/unittests/benchmark_sum_op.py | 4 +-
 .../unittests/parallel_executor_test_base.py | 8 +-
 .../paddle/fluid/tests/unittests/testsuite.py | 2 +-
 .../tests/unittests/transformer_model.py | 5 +-
 python/paddle/fluid/trainer.py | 80 +++----
 python/paddle/fluid/transpiler/__init__.py | 8 +-
 .../fluid/transpiler/details/__init__.py | 4 +-
 .../fluid/transpiler/details/program_utils.py | 4 +-
 .../fluid/transpiler/distribute_transpiler.py | 51 ++--
 .../fluid/transpiler/inference_transpiler.py | 2 +-
 .../memory_optimization_transpiler.py | 34 +--
 python/paddle/fluid/unique_name.py | 2 +-
 python/paddle/reader/creator.py | 4 +-
 python/paddle/reader/decorator.py | 13 +-
 python/paddle/reader/tests/decorator_test.py | 4 +-
 tools/test_runner.py | 12 +-
 104 files changed, 805 insertions(+), 745 deletions(-)

diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index ee1c8d46ddf..2320f3e4dbc 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -394,8 +394,10 @@ All parameter, weight, gradient are variables in Paddle.
         InferenceOptimize(*(origin.Proto()), &pruned_desc);
         return new ProgramDesc(pruned_desc);
       });
-  m.def("empty_var_name", []() { return framework::kEmptyVarName; });
-  m.def("grad_var_suffix", []() { return framework::kGradVarSuffix; });
+  m.def("empty_var_name",
+        []() { return std::string(framework::kEmptyVarName); });
+  m.def("grad_var_suffix",
+        []() { return std::string(framework::kGradVarSuffix); });
   m.def_submodule(
       "var_names",
       "The module will return special predefined variable name in Paddle")
diff --git a/python/paddle/dataset/cifar.py b/python/paddle/dataset/cifar.py
index 79ddd8b7e6f..cc6384e74c4 100644
--- a/python/paddle/dataset/cifar.py
+++ b/python/paddle/dataset/cifar.py
@@ -28,7 +28,7 @@ images per class.
 """
 
-import cPickle
+import pickle
 import itertools
 import numpy
 import paddle.dataset.common
@@ -48,7 +48,7 @@ def reader_creator(filename, sub_name, cycle=False):
         data = batch['data']
         labels = batch.get('labels', batch.get('fine_labels', None))
         assert labels is not None
-        for sample, label in itertools.izip(data, labels):
+        for sample, label in zip(data, labels):
             yield (sample / 255.0).astype(numpy.float32), int(label)
 
     def reader():
@@ -58,7 +58,7 @@ def reader_creator(filename, sub_name, cycle=False):
             while True:
                 for name in names:
-                    batch = cPickle.load(f.extractfile(name))
+                    batch = pickle.load(f.extractfile(name))
                     for item in read_batch(batch):
                         yield item
                 if not cycle:
diff --git a/python/paddle/dataset/common.py b/python/paddle/dataset/common.py
index 68660601c16..771577c89d9 100644
--- a/python/paddle/dataset/common.py
+++ b/python/paddle/dataset/common.py
@@ -20,9 +20,9 @@ import shutil
 import sys
 import importlib
 import paddle.dataset
-import cPickle
+import pickle
 import glob
-import cPickle as pickle
+import pickle as pickle
 
 __all__ = [
     'DATA_HOME',
@@ -75,13 +75,13 @@ def download(url, module_name, md5sum, save_name=None):
     retry_limit = 3
     while not (os.path.exists(filename) and md5file(filename) == md5sum):
         if os.path.exists(filename):
-            print "file md5", md5file(filename), md5sum
+            print(("file md5", md5file(filename), md5sum))
         if retry < retry_limit:
             retry += 1
         else:
             raise RuntimeError("Cannot download {0} within retry limit {1}".
                                format(url, retry_limit))
-        print "Cache file %s not found, downloading %s" % (filename, url)
+        print(("Cache file %s not found, downloading %s" % (filename, url)))
         r = requests.get(url, stream=True)
         total_length = r.headers.get('content-length')
@@ -104,8 +104,9 @@ def download(url, module_name, md5sum, save_name=None):
 
 def fetch_all():
-    for module_name in filter(lambda x: not x.startswith("__"),
-                              dir(paddle.dataset)):
+    for module_name in [
+            x for x in dir(paddle.dataset) if not x.startswith("__")
+    ]:
         if "fetch" in dir(
                 importlib.import_module("paddle.dataset.%s" % module_name)):
             getattr(
@@ -114,8 +115,9 @@ def fetch_all():
 
 def fetch_all_recordio(path):
-    for module_name in filter(lambda x: not x.startswith("__"),
-                              dir(paddle.dataset)):
+    for module_name in [
+            x for x in dir(paddle.dataset) if not x.startswith("__")
+    ]:
         if "convert" in dir(
                 importlib.import_module("paddle.dataset.%s" % module_name)) and \
                 not module_name == "common":
@@ -126,7 +128,7 @@ def fetch_all_recordio(path):
                 "convert")(ds_path)
 
 
-def split(reader, line_count, suffix="%05d.pickle", dumper=cPickle.dump):
+def split(reader, line_count, suffix="%05d.pickle", dumper=pickle.dump):
     """
     you can call the function as:
@@ -167,7 +169,7 @@ def split(reader, line_count, suffix="%05d.pickle", dumper=pickle.dump):
 def cluster_files_reader(files_pattern,
                          trainer_count,
                          trainer_id,
-                         loader=cPickle.load):
+                         loader=pickle.load):
     """
     Create a reader that yield element from the given files, select
     a file set according trainer count and trainer_id
@@ -188,7 +190,7 @@ def cluster_files_reader(files_pattern,
         my_file_list = []
         for idx, fn in enumerate(file_list):
             if idx % trainer_count == trainer_id:
-                print "append file: %s" % fn
+                print(("append file: %s" % fn))
                 my_file_list.append(fn)
         for fn in my_file_list:
             with open(fn, "r") as f:
@@ -221,7 +223,7 @@ def convert(output_path, reader, line_count, name_prefix):
         for l in lines:
             # FIXME(Yancey1989):
             # dumps with protocol: pickle.HIGHEST_PROTOCOL
-            writer.write(cPickle.dumps(l))
+            writer.write(pickle.dumps(l))
         writer.close()
         lines = []
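Note on the cPickle -> pickle rewrites above: under Python 3 a pickle
stream is bytes, so file objects handed to pickle.dump()/pickle.load()
have to be opened in binary mode. A minimal sketch of the intended
round trip (the path below is illustrative, not taken from the patch):

    import pickle

    with open("batch_00000.pickle", "wb") as f:   # "wb": pickle writes bytes
        pickle.dump([1, 2, 3], f, protocol=pickle.HIGHEST_PROTOCOL)

    with open("batch_00000.pickle", "rb") as f:   # "rb": pickle reads bytes
        assert pickle.load(f) == [1, 2, 3]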
diff --git a/python/paddle/dataset/conll05.py b/python/paddle/dataset/conll05.py
index 4e94ce89892..466344cc9db 100644
--- a/python/paddle/dataset/conll05.py
+++ b/python/paddle/dataset/conll05.py
@@ -87,12 +87,12 @@ def corpus_reader(data_path, words_name, props_name):
         sentences = []
         labels = []
         one_seg = []
-        for word, label in itertools.izip(words_file, props_file):
+        for word, label in zip(words_file, props_file):
             word = word.strip()
             label = label.strip().split()
 
             if len(label) == 0:  # end of sentence
-                for i in xrange(len(one_seg[0])):
+                for i in range(len(one_seg[0])):
                     a_kind_lable = [x[i] for x in one_seg]
                     labels.append(a_kind_lable)
diff --git a/python/paddle/dataset/flowers.py b/python/paddle/dataset/flowers.py
index 2354987d20b..db9be0c04bb 100644
--- a/python/paddle/dataset/flowers.py
+++ b/python/paddle/dataset/flowers.py
@@ -28,10 +28,10 @@ Graphics and Image Processing (2008)
 http://www.robots.ox.ac.uk/~vgg/publications/papers/nilsback08.{pdf,ps.gz}.
 
 """
-import cPickle
+import pickle
 import itertools
 import functools
-from common import download
+from .common import download
 import tarfile
 import scipy.io as scio
 from paddle.dataset.image import *
@@ -116,10 +116,10 @@ def reader_creator(data_file,
             file = file.strip()
             batch = None
             with open(file, 'r') as f:
-                batch = cPickle.load(f)
+                batch = pickle.load(f)
             data = batch['data']
             labels = batch['label']
-            for sample, label in itertools.izip(data, batch['label']):
+            for sample, label in zip(data, batch['label']):
                 yield sample, int(label) - 1
         if not cycle:
             break
diff --git a/python/paddle/dataset/image.py b/python/paddle/dataset/image.py
index 9235c41e9eb..3048dfd5182 100644
--- a/python/paddle/dataset/image.py
+++ b/python/paddle/dataset/image.py
@@ -36,7 +36,7 @@ except ImportError:
     cv2 = None
 import os
 import tarfile
-import cPickle
+import pickle
 
 __all__ = [
     "load_image_bytes", "load_image", "resize_short", "to_chw", "center_crop",
@@ -86,10 +86,10 @@ def batch_images_from_tar(data_file,
             output = {}
             output['label'] = labels
             output['data'] = data
-            cPickle.dump(
+            pickle.dump(
                 output,
                 open('%s/batch_%d' % (out_path, file_id), 'w'),
-                protocol=cPickle.HIGHEST_PROTOCOL)
+                protocol=pickle.HIGHEST_PROTOCOL)
             file_id += 1
             data = []
             labels = []
@@ -97,10 +97,10 @@ def batch_images_from_tar(data_file,
         output = {}
         output['label'] = labels
         output['data'] = data
-        cPickle.dump(
+        pickle.dump(
             output,
             open('%s/batch_%d' % (out_path, file_id), 'w'),
-            protocol=cPickle.HIGHEST_PROTOCOL)
+            protocol=pickle.HIGHEST_PROTOCOL)
 
     with open(meta_file, 'a') as meta:
         for file in os.listdir(out_path):
diff --git a/python/paddle/dataset/imdb.py b/python/paddle/dataset/imdb.py
index 5ff05b1e9b7..e7fe4e0b7e5 100644
--- a/python/paddle/dataset/imdb.py
+++ b/python/paddle/dataset/imdb.py
@@ -42,13 +42,13 @@ def tokenize(pattern):
         # sequential access of member files, other than
         # tarfile.extractfile, which does random access and might
         # destroy hard disks.
-        tf = tarf.next()
+        tf = next(tarf)
         while tf != None:
             if bool(pattern.match(tf.name)):
                 # newline and punctuations removal and ad-hoc tokenization.
                 yield tarf.extractfile(tf).read().rstrip("\n\r").translate(
                     None, string.punctuation).lower().split()
-            tf = tarf.next()
+            tf = next(tarf)
 
 
 def build_dict(pattern, cutoff):
@@ -62,11 +62,11 @@ def build_dict(pattern, cutoff):
             word_freq[word] += 1
 
     # Not sure if we should prune less-frequent words here.
-    word_freq = filter(lambda x: x[1] > cutoff, word_freq.items())
+    word_freq = [x for x in list(word_freq.items()) if x[1] > cutoff]
 
     dictionary = sorted(word_freq, key=lambda x: (-x[1], x[0]))
     words, _ = list(zip(*dictionary))
-    word_idx = dict(zip(words, xrange(len(words))))
+    word_idx = dict(list(zip(words, list(range(len(words))))))
     word_idx['<unk>'] = len(words)
     return word_idx
diff --git a/python/paddle/dataset/imikolov.py b/python/paddle/dataset/imikolov.py
index c6c0a0f5437..bc007c9d3c8 100644
--- a/python/paddle/dataset/imikolov.py
+++ b/python/paddle/dataset/imikolov.py
@@ -64,11 +64,11 @@ def build_dict(min_word_freq=50):
     # remove <unk> for now, since we will set it as last index
     del word_freq['<unk>']
 
-    word_freq = filter(lambda x: x[1] > min_word_freq, word_freq.items())
+    word_freq = [x for x in list(word_freq.items()) if x[1] > min_word_freq]
 
     word_freq_sorted = sorted(word_freq, key=lambda x: (-x[1], x[0]))
     words, _ = list(zip(*word_freq_sorted))
-    word_idx = dict(zip(words, xrange(len(words))))
+    word_idx = dict(list(zip(words, list(range(len(words))))))
     word_idx['<unk>'] = len(words)
 
     return word_idx
diff --git a/python/paddle/dataset/mnist.py b/python/paddle/dataset/mnist.py
index 6259cc35b4f..ffa9008c801 100644
--- a/python/paddle/dataset/mnist.py
+++ b/python/paddle/dataset/mnist.py
@@ -65,7 +65,7 @@ def reader_creator(image_filename, label_filename, buffer_size):
 
                 images = images / 255.0 * 2.0 - 1.0
 
-                for i in xrange(buffer_size):
+                for i in range(buffer_size):
                     yield images[i, :], int(labels[i])
         finally:
             try:
diff --git a/python/paddle/dataset/movielens.py b/python/paddle/dataset/movielens.py
index ab11716202a..f60f5eefc9c 100644
--- a/python/paddle/dataset/movielens.py
+++ b/python/paddle/dataset/movielens.py
@@ -187,7 +187,7 @@ def max_movie_id():
     Get the maximum value of movie id.
     """
     __initialize_meta_info__()
-    return reduce(__max_index_info__, MOVIE_INFO.viewvalues()).index
+    return reduce(__max_index_info__, list(MOVIE_INFO.values())).index
 
 
 def max_user_id():
@@ -195,7 +195,7 @@ def max_user_id():
     Get the maximum value of user id.
     """
     __initialize_meta_info__()
-    return reduce(__max_index_info__, USER_INFO.viewvalues()).index
+    return reduce(__max_index_info__, list(USER_INFO.values())).index
 
 
 def __max_job_id_impl__(a, b):
@@ -210,7 +210,7 @@ def max_job_id():
     Get the maximum value of job id.
     """
     __initialize_meta_info__()
-    return reduce(__max_job_id_impl__, USER_INFO.viewvalues()).job_id
+    return reduce(__max_job_id_impl__, list(USER_INFO.values())).job_id
 
 
 def movie_categories():
@@ -243,7 +243,7 @@ def unittest():
     for test_count, _ in enumerate(test()()):
         pass
-    print train_count, test_count
+    print((train_count, test_count))
 
 
 def fetch():
diff --git a/python/paddle/dataset/mq2007.py b/python/paddle/dataset/mq2007.py
index d3b3dd524c3..20766a289b9 100644
--- a/python/paddle/dataset/mq2007.py
+++ b/python/paddle/dataset/mq2007.py
@@ -26,7 +26,7 @@ http://research.microsoft.com/en-us/um/beijing/projects/letor/LETOR4.0/Data/MQ20
 import os
 import functools
 import rarfile
-from common import download
+from .common import download
 import numpy as np
 
 # URL = "http://research.microsoft.com/en-us/um/beijing/projects/letor/LETOR4.0/Data/MQ2007.rar"
@@ -330,4 +330,4 @@ if __name__ == "__main__":
     mytest = functools.partial(
         __reader__, filepath="MQ2007/MQ2007/Fold1/sample", format="listwise")
     for label, query in mytest():
-        print label, query
+        print((label, query))
diff --git a/python/paddle/dataset/sentiment.py b/python/paddle/dataset/sentiment.py
index f5461164fe6..ceddfda94e4 100644
--- a/python/paddle/dataset/sentiment.py
+++ b/python/paddle/dataset/sentiment.py
@@ -43,11 +43,11 @@ def download_data_if_not_yet():
         nltk.data.path.append(paddle.dataset.common.DATA_HOME)
         movie_reviews.categories()
     except LookupError:
-        print "Downloading movie_reviews data set, please wait....."
+        print("Downloading movie_reviews data set, please wait.....")
         nltk.download(
             'movie_reviews', download_dir=paddle.dataset.common.DATA_HOME)
-        print "Download data set success....."
-        print "Path is " + nltk.data.find('corpora/movie_reviews').path
+        print("Download data set success.....")
+        print(("Path is " + nltk.data.find('corpora/movie_reviews').path))
 
 
 def get_word_dict():
@@ -64,7 +64,7 @@ def get_word_dict():
         for field in movie_reviews.fileids(category):
             for words in movie_reviews.words(field):
                 word_freq_dict[words] += 1
-    words_sort_list = word_freq_dict.items()
+    words_sort_list = list(word_freq_dict.items())
     words_sort_list.sort(cmp=lambda a, b: b[1] - a[1])
     for index, word in enumerate(words_sort_list):
         words_freq_sorted.append((word[0], index))
@@ -80,7 +80,8 @@ def sort_files():
     files_list = list()
     neg_file_list = movie_reviews.fileids('neg')
     pos_file_list = movie_reviews.fileids('pos')
-    files_list = list(chain.from_iterable(zip(neg_file_list, pos_file_list)))
+    files_list = list(
+        chain.from_iterable(list(zip(neg_file_list, pos_file_list))))
     return files_list
diff --git a/python/paddle/dataset/tests/common_test.py b/python/paddle/dataset/tests/common_test.py
index e7cc02aa830..777cd06a197 100644
--- a/python/paddle/dataset/tests/common_test.py
+++ b/python/paddle/dataset/tests/common_test.py
@@ -36,7 +36,7 @@ class TestCommon(unittest.TestCase):
     def test_split(self):
         def test_reader():
             def reader():
-                for x in xrange(10):
+                for x in range(10):
                     yield x
 
             return reader
@@ -49,7 +49,7 @@ class TestCommon(unittest.TestCase):
     def test_cluster_file_reader(self):
         _, temp_path = tempfile.mkstemp()
-        for x in xrange(5):
+        for x in range(5):
             with open(temp_path + '/%05d.test' % x) as f:
                 f.write('%d\n' % x)
         reader = paddle.dataset.common.cluster_files_reader(
@@ -63,7 +63,7 @@ class TestCommon(unittest.TestCase):
 
         def test_reader():
             def reader():
-                for x in xrange(record_num):
+                for x in range(record_num):
                     yield x
 
             return reader
diff --git a/python/paddle/dataset/tests/imikolov_test.py b/python/paddle/dataset/tests/imikolov_test.py
index 233fd9fc8ce..50f50d947d2 100644
--- a/python/paddle/dataset/tests/imikolov_test.py
+++ b/python/paddle/dataset/tests/imikolov_test.py
@@ -59,7 +59,7 @@ class TestMikolov(unittest.TestCase):
         self.assertEqual(first_line, read_line)
 
     def test_total(self):
-        _, idx = zip(*WORD_DICT.items())
+        _, idx = list(zip(*list(WORD_DICT.items())))
         self.assertEqual(sorted(idx)[-1], len(WORD_DICT) - 1)
diff --git a/python/paddle/dataset/tests/test_sentiment.py b/python/paddle/dataset/tests/test_sentiment.py
index 543f4b7378b..37326517f7b 100644
--- a/python/paddle/dataset/tests/test_sentiment.py
+++ b/python/paddle/dataset/tests/test_sentiment.py
@@ -24,9 +24,8 @@ from nltk.corpus import movie_reviews
 class TestSentimentMethods(unittest.TestCase):
     def test_get_word_dict(self):
         word_dict = st.get_word_dict()[0:10]
-        test_word_list = [(u',', 0), (u'the', 1), (u'.', 2), (u'a', 3),
-                          (u'and', 4), (u'of', 5), (u'to', 6), (u"'", 7),
-                          (u'is', 8), (u'in', 9)]
+        test_word_list = [(',', 0), ('the', 1), ('.', 2), ('a', 3), ('and', 4),
+                          ('of', 5), ('to', 6), ("'", 7), ('is', 8), ('in', 9)]
         for idx, each in enumerate(word_dict):
             self.assertEqual(each, test_word_list[idx])
         self.assertTrue("/root/.cache/paddle/dataset" in nltk.data.path)
diff --git a/python/paddle/dataset/uci_housing.py b/python/paddle/dataset/uci_housing.py
index fbfa477d055..410ca7af0d6 100644
--- a/python/paddle/dataset/uci_housing.py
+++ b/python/paddle/dataset/uci_housing.py
@@ -49,9 +49,12 @@ def feature_range(maximums, minimums):
     import matplotlib.pyplot as plt
     fig, ax = plt.subplots()
     feature_num = len(maximums)
-    ax.bar(range(feature_num), maximums - minimums, color='r', align='center')
+    ax.bar(list(range(feature_num)),
+           maximums - minimums,
+           color='r',
+           align='center')
     ax.set_title('feature scale')
-    plt.xticks(range(feature_num), feature_names)
+    plt.xticks(list(range(feature_num)), feature_names)
     plt.xlim([-1, feature_num])
     fig.set_figheight(6)
     fig.set_figwidth(10)
@@ -71,7 +74,7 @@ def load_data(filename, feature_num=14, ratio=0.8):
     maximums, minimums, avgs = data.max(axis=0), data.min(axis=0), data.sum(
         axis=0) / data.shape[0]
     feature_range(maximums[:-1], minimums[:-1])
-    for i in xrange(feature_num - 1):
+    for i in range(feature_num - 1):
         data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])
     offset = int(data.shape[0] * ratio)
     UCI_TRAIN_DATA = data[:offset]
diff --git a/python/paddle/dataset/wmt14.py b/python/paddle/dataset/wmt14.py
index f0908c73787..250fd03ffb9 100644
--- a/python/paddle/dataset/wmt14.py
+++ b/python/paddle/dataset/wmt14.py
@@ -154,8 +154,8 @@ def get_dict(dict_size, reverse=True):
     tar_file = paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN)
     src_dict, trg_dict = __read_to_dict(tar_file, dict_size)
     if reverse:
-        src_dict = {v: k for k, v in src_dict.items()}
-        trg_dict = {v: k for k, v in trg_dict.items()}
+        src_dict = {v: k for k, v in list(src_dict.items())}
+        trg_dict = {v: k for k, v in list(trg_dict.items())}
     return src_dict, trg_dict
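Note on the zip/range/items rewrites in the dataset modules above:
Python 3 turns these into lazy iterators, which is why 2to3 wraps them
in list(...) wherever the result is indexed, sorted, or consumed more
than once. A small self-contained sketch of the pattern:

    words = ["a", "b", "c"]

    # lazy is fine when the result is consumed once, e.g. by dict()
    word_idx = dict(zip(words, range(len(words))))
    assert word_idx["c"] == 2

    # materialize when the result must be indexed or reused
    pairs = list(zip(words, range(len(words))))
    assert pairs[0] == ("a", 0)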
diff --git a/python/paddle/dataset/wmt16.py b/python/paddle/dataset/wmt16.py
index 540d43b692e..4e3c466c38e 100644
--- a/python/paddle/dataset/wmt16.py
+++ b/python/paddle/dataset/wmt16.py
@@ -70,7 +70,9 @@ def __build_dict(tar_file, dict_size, save_path, lang):
         fout.write("%s\n%s\n%s\n" % (START_MARK, END_MARK, UNK_MARK))
         for idx, word in enumerate(
                 sorted(
-                    word_dict.iteritems(), key=lambda x: x[1], reverse=True)):
+                    iter(list(word_dict.items())),
+                    key=lambda x: x[1],
+                    reverse=True)):
             if idx + 3 == dict_size: break
             fout.write("%s\n" % (word[0]))
diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py
index c2d641600cd..5ad3c5e7f70 100644
--- a/python/paddle/fluid/__init__.py
+++ b/python/paddle/fluid/__init__.py
@@ -14,49 +14,49 @@
 from __future__ import print_function
 # import all class inside framework into fluid module
-import framework
-from framework import *
+from . import framework
+from .framework import *
 # import all class inside executor into fluid module
-import executor
-from executor import *
-
-import trainer
-from trainer import Trainer
-from trainer import BeginEpochEvent
-from trainer import EndEpochEvent
-from trainer import BeginStepEvent
-from trainer import EndStepEvent
-from trainer import CheckpointConfig
-
-import inferencer
-from inferencer import Inferencer
-
-import io
-import evaluator
-import initializer
-import layers
-import contrib
-import nets
-import optimizer
-import backward
-import regularizer
-import average
-import metrics
-import transpiler
-from param_attr import ParamAttr, WeightNormParamAttr
-from data_feeder import DataFeeder
-from core import LoDTensor, LoDTensorArray, CPUPlace, CUDAPlace, CUDAPinnedPlace, Scope
-from transpiler import DistributeTranspiler, InferenceTranspiler, \
+from . import executor
+from .executor import *
+
+from . import trainer
+from .trainer import Trainer
+from .trainer import BeginEpochEvent
+from .trainer import EndEpochEvent
+from .trainer import BeginStepEvent
+from .trainer import EndStepEvent
+from .trainer import CheckpointConfig
+
+from . import inferencer
+from .inferencer import Inferencer
+
+from . import io
+from . import evaluator
+from . import initializer
+from . import layers
+from . import contrib
+from . import nets
+from . import optimizer
+from . import backward
+from . import regularizer
+from . import average
+from . import metrics
+from . import transpiler
+from .param_attr import ParamAttr, WeightNormParamAttr
+from .data_feeder import DataFeeder
+from .core import LoDTensor, LoDTensorArray, CPUPlace, CUDAPlace, CUDAPinnedPlace, Scope
+from .transpiler import DistributeTranspiler, InferenceTranspiler, \
     memory_optimize, release_memory, DistributeTranspilerConfig
-from concurrency import (Go, make_channel, channel_send, channel_recv,
-                         channel_close, Select)
-from lod_tensor import create_lod_tensor, create_random_int_lodtensor
-import clip
-import profiler
-import unique_name
-import recordio_writer
-import parallel_executor
-from parallel_executor import *
+from .concurrency import (Go, make_channel, channel_send, channel_recv,
+                          channel_close, Select)
+from .lod_tensor import create_lod_tensor, create_random_int_lodtensor
+from . import clip
+from . import profiler
+from . import unique_name
+from . import recordio_writer
+from . import parallel_executor
+from .parallel_executor import *
 from paddle.fluid.layers.math_op_patch import monkey_patch_variable
 
 Tensor = LoDTensor
@@ -99,8 +99,8 @@ def __bootstrap__():
         None
     """
     import sys
-    import core
     import os
+    from . import core
 
     in_test = 'unittest' in sys.modules
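Note on the import rewrites in fluid/__init__.py above: Python 3 removed
implicit relative imports, so a bare "import framework" no longer finds
the sibling module inside the paddle.fluid package and has to become
"from . import framework". A minimal sketch with a made-up package (not
Paddle's real layout):

    # mypkg/__init__.py
    from . import util              # explicit relative import, Py2/Py3 safe
    from .util import helper        # pull one name from the sibling module

    # mypkg/util.py
    def helper():
        return 42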
diff --git a/python/paddle/fluid/annotations.py b/python/paddle/fluid/annotations.py
index bb8756a4664..15e7976354f 100644
--- a/python/paddle/fluid/annotations.py
+++ b/python/paddle/fluid/annotations.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import print_function
 import functools
 import sys
 
@@ -28,7 +29,7 @@ def deprecated(since, instead, extra_message=""):
 
         @functools.wraps(func)
         def wrapper(*args, **kwargs):
-            print >> sys.stderr, err_msg
+            print(err_msg, file=sys.stderr)
             return func(*args, **kwargs)
 
         wrapper.__doc__ += "\n    "
diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py
index 812f68bdd84..f33fa7218b4 100644
--- a/python/paddle/fluid/backward.py
+++ b/python/paddle/fluid/backward.py
@@ -16,7 +16,8 @@ from paddle.fluid import framework as framework
 from . import core
 import collections
 import copy
-import unique_name
+import six
+from . import unique_name
 
 __all__ = ['append_backward']
 
@@ -44,17 +45,25 @@ def _create_op_desc_(op_type, inputs, outputs, attrs):
     """
     op_desc = core.OpDesc()
     op_desc.set_type(op_type)
-    for para, args in inputs.iteritems():
-        op_desc.set_input(para, args)
-    for para, args in outputs.iteritems():
-        op_desc.set_output(para, args)
+    for para, args in list(inputs.items()):
+        op_desc.set_input(
+            para,
+            list(
+                map(lambda arg: arg.decode() if isinstance(arg, six.binary_type) else arg,
+                    args)))
+    for para, args in list(outputs.items()):
+        op_desc.set_output(
+            para,
+            list(
+                map(lambda arg: arg.decode() if isinstance(arg, six.binary_type) else arg,
+                    args)))
 
     op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
 
     if op_role_attr_name not in attrs:
         attrs[
             op_role_attr_name] = core.op_proto_and_checker_maker.OpRole.Backward
-    for name, val in attrs.iteritems():
+    for name, val in list(attrs.items()):
         if isinstance(val, framework.Block):
             op_desc.set_block_attr(name, val.desc)
         else:
@@ -105,7 +114,9 @@ def _strip_grad_suffix_(name):
     e.g. x@GRAD ==> x
          y@GRAD@RENAME@1 ==> y
     """
-    pos = name.find(core.grad_var_suffix())
+    if isinstance(name, six.text_type):
+        name = name.encode()
+    pos = name.find(six.b(core.grad_var_suffix()))
     return name[:pos] if pos != -1 else name
 
 
@@ -114,7 +125,9 @@ def _append_grad_suffix_(name):
     Append grad suffix to the given variable name
     e.g. x ==> x@GRAD
     """
-    return name + core.grad_var_suffix()
+    if isinstance(name, six.text_type):
+        name = name.encode()
+    return name + six.b(core.grad_var_suffix())
 
 
 def _addup_repetitive_outputs_(op_descs):
@@ -174,7 +187,7 @@ def _addup_repetitive_outputs_(op_descs):
                         op_desc.set_output(param_name, arg_names)
                     renamed_vars[var_name].append(new_name)
 
-    for var_name, inputs in renamed_vars.iteritems():
+    for var_name, inputs in list(renamed_vars.items()):
         if len(inputs) > 1:
             pending_sum_ops.append(
                 (_create_op_desc_("sum", {"X": inputs}, {"Out": [var_name]},
@@ -198,16 +211,19 @@ def _remove_no_grad_branch_(op_descs, no_grad_set):
         out_arg_names = op_desc.output_arg_names()
         if len(out_arg_names) == 0 or _all_in_set_(out_arg_names, no_grad_set):
             return True
-        if _all_in_set_(
-                filter(lambda name: name.find(core.grad_var_suffix()) != -1,
-                       op_desc.input_arg_names()), no_grad_set):
+        if _all_in_set_([
+                name for name in op_desc.input_arg_names()
+                if name.find(core.grad_var_suffix()) != -1
+        ], no_grad_set):
             no_grad_set.update(out_arg_names)
             return True
         return False
 
     # Remove ops whose outputs are all in no_grad_dict
-    op_descs = filter(
-        lambda op_desc: not _op_can_be_removed_(op_desc, no_grad_set), op_descs)
+    op_descs = [
+        op_desc for op_desc in op_descs
+        if not _op_can_be_removed_(op_desc, no_grad_set)
+    ]
     # Insert fill_zeros_like_op
     to_insert = []
     for idx, op_desc in enumerate(op_descs):
@@ -217,12 +233,12 @@ def _remove_no_grad_branch_(op_descs, no_grad_set):
                     "X": [_strip_grad_suffix_(arg)]
                 }, {"Out": [arg]}, {}), idx))
 
-    map(lambda p: op_descs.insert(p[1], p[0]), reversed(to_insert))
+    list([op_descs.insert(p[1], p[0]) for p in reversed(to_insert)])
 
     return op_descs
 
 
-import proto.framework_pb2 as framework_pb2
+from .proto import framework_pb2
 
 
 def serialize_op_decs(op_desc):
@@ -244,8 +260,10 @@ def _callback_lookup_(op):
     if op.type == 'parallel_do' and op.attr('use_nccl'):
         all_vars = op.block.vars
         param_names = set(op.input('parameters'))
-        param_names = filter(lambda name: all_vars[name].stop_gradient is False,
-                             param_names)
+        param_names = [
+            name for name in param_names
+            if all_vars[name].stop_gradient is False
+        ]
 
         param_grad_names = [n + "@GRAD" for n in param_names]
 
         class ParallelDoCallBack(object):
@@ -399,7 +417,7 @@ def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map):
                 continue
             block.desc.var(grad_var_name)
             new_vars.add(grad_var_name)
-            if not grad_to_var.has_key(grad_var_name):
+            if grad_var_name not in grad_to_var:
                 continue
             grad_info_map[grad_to_var[grad_var_name]] = (grad_var_name, block)
         # infer_shape and infer_type
@@ -427,7 +445,7 @@ def _rename_grad_(block, start_op_idx, grad_to_var, target_grad_map):
                 op_desc.rename_output(name, new_name)
                 var_map[name] = new_name
 
-    for g, ng in var_map.iteritems():
+    for g, ng in list(var_map.items()):
         if g in grad_to_var:
             grad_to_var[ng] = grad_to_var[g]
             grad_to_var.pop(g)
@@ -439,7 +457,7 @@ def _get_stop_gradients_(program):
     for block in program.blocks:
         assert isinstance(block, framework.Block)
         block_no_grad_set = set()
-        for var in block.vars.itervalues():
+        for var in list(block.vars.values()):
             assert isinstance(var, framework.Variable)
             if var.stop_gradient:
                 block_no_grad_set.add(_append_grad_suffix_(var.name))
@@ -452,51 +470,51 @@ def append_backward(loss, parameter_list=None, no_grad_set=None,
     """
     Append backward part to main_program.
 
-    A complete neural network training is made up of forward and backward
-    propagation. However, when we configure a network, we only need to
-    specify its forwrd part. The backward part is generated automatically
+    A complete neural network training is made up of forward and backward
+    propagation. However, when we configure a network, we only need to
+    specify its forwrd part. The backward part is generated automatically
     according to the forward part by this function.
 
-    In most cases, users do not need to invoke this function manually. It
+    In most cases, users do not need to invoke this function manually. It
     will be automatically invoked by the optimizer's `minimize` function.
 
     Args:
         loss(Variable): The loss variable of the network.
-        parameter_list(list[string]|None): Names of parameters that need
-                                           to be updated by optimizers.
-                                           If it is None, all parameters
+        parameter_list(list[string]|None): Names of parameters that need
+                                           to be updated by optimizers.
+                                           If it is None, all parameters
                                            will be updated.
                                            Default: None
-        no_grad_set(set|None): Variables in the Block 0 whose gradients
-                               should be ignored. All variables with
-                               `step_gradient=True` from all blocks will
+        no_grad_set(set|None): Variables in the Block 0 whose gradients
+                               should be ignored. All variables with
+                               `step_gradient=True` from all blocks will
                                be automatically added into this set.
                                Default: None
-        callbacks(list[callable object]|None): The callbacks are used for
-                                               doing some custom jobs during
-                                               backward part building. All
-                                               callable objects in it will
-                                               be invoked once each time a
-                                               new gradient operator is added
-                                               into the program. The callable
-                                               object must has two input
-                                               parameters: 'block' and 'context'.
-                                               The 'block' is the block which
-                                               the new gradient operator will
-                                               be added to. The 'context' is a
-                                               map, whose keys are gradient
-                                               variable names and values are
+        callbacks(list[callable object]|None): The callbacks are used for
+                                               doing some custom jobs during
+                                               backward part building. All
+                                               callable objects in it will
+                                               be invoked once each time a
+                                               new gradient operator is added
+                                               into the program. The callable
+                                               object must has two input
+                                               parameters: 'block' and 'context'.
+                                               The 'block' is the block which
+                                               the new gradient operator will
+                                               be added to. The 'context' is a
+                                               map, whose keys are gradient
+                                               variable names and values are
                                                corresponding original variables.
-                                               In addition to this, the 'context'
-                                               has another special key-value pair:
-                                               the key is string '__current_op_desc__'
-                                               and the value is the op_desc of the
-                                               gradient operator who has just
-                                               triggered the callable object.
+                                               In addition to this, the 'context'
+                                               has another special key-value pair:
+                                               the key is string '__current_op_desc__'
+                                               and the value is the op_desc of the
+                                               gradient operator who has just
+                                               triggered the callable object.
 
     Returns:
-        list[(Variable,Variable)]: Pairs of parameter and its
-        corresponding gradients. The key is the parameter and the
+        list[(Variable,Variable)]: Pairs of parameter and its
+        corresponding gradients. The key is the parameter and the
         value is gradient variable.
 
     Raises:
@@ -535,7 +553,7 @@ def append_backward(loss, parameter_list=None, no_grad_set=None,
         no_grad_set = set()
     no_grad_set = copy.copy(no_grad_set)
     no_grad_dict = _get_stop_gradients_(program)
-    no_grad_dict[0].update(map(_append_grad_suffix_, no_grad_set))
+    no_grad_dict[0].update(list(map(_append_grad_suffix_, no_grad_set)))
 
     grad_info_map = dict()
     root_block = program.block(0)
@@ -558,7 +576,7 @@ def append_backward(loss, parameter_list=None, no_grad_set=None,
     block_no_grad_set = set(map(_strip_grad_suffix_, no_grad_dict[0]))
     op_path = _find_op_path_(root_block, [loss], [], block_no_grad_set)
-    no_grad_dict[0].update(map(_append_grad_suffix_, block_no_grad_set))
+    no_grad_dict[0].update(list(map(_append_grad_suffix_, block_no_grad_set)))
 
     _append_backward_ops_(root_block, op_path, root_block, no_grad_dict,
                           grad_to_var, callbacks)
@@ -699,7 +717,7 @@ def calc_gradient(targets, inputs, target_gradients=None, no_grad_set=None):
         no_grad_set = set()
     no_grad_set = copy.copy(no_grad_set)
     no_grad_dict = _get_stop_gradients_(prog)
-    no_grad_dict[0].update(map(_append_grad_suffix_, no_grad_set))
+    no_grad_dict[0].update(list(map(_append_grad_suffix_, no_grad_set)))
 
     fwd_op_num = block.desc.op_size()
@@ -733,7 +751,7 @@ def calc_gradient(targets, inputs, target_gradients=None, no_grad_set=None):
 
     block_no_grad_set = set(map(_strip_grad_suffix_, no_grad_dict[0]))
     op_path = _find_op_path_(block, targets, inputs, block_no_grad_set)
-    no_grad_dict[0].update(map(_append_grad_suffix_, block_no_grad_set))
+    no_grad_dict[0].update(list(map(_append_grad_suffix_, block_no_grad_set)))
 
     grad_to_var = dict()
     grad_info_map = dict()
     _append_backward_ops_(block, op_path, block, no_grad_dict, grad_to_var)
diff --git a/python/paddle/fluid/clip.py b/python/paddle/fluid/clip.py
index c029662ebc1..dce4b53c132 100644
--- a/python/paddle/fluid/clip.py
+++ b/python/paddle/fluid/clip.py
@@ -15,8 +15,8 @@
 import copy
 import functools
 
-import layers
-import framework
+from . import layers
+from . import framework
 from . import core
 
 __all__ = [
@@ -80,8 +80,7 @@ def error_clip_callback(block, context):
     # the context is a grad_to_var map
     grad_to_var = context
     op_desc = block.desc.op(block.desc.op_size() - 1)
-    for grad_n in filter(lambda n: grad_to_var.has_key(n),
-                         op_desc.output_arg_names()):
+    for grad_n in [n for n in op_desc.output_arg_names() if n in grad_to_var]:
         fwd_var = block._var_recursive(grad_to_var[grad_n])
         error_clip = getattr(fwd_var, "error_clip", None)
         if not (error_clip is None or isinstance(error_clip,
@@ -247,7 +246,7 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr):
     """
 
     def __init__(self, clip_norm, group_name="default_group"):
-        if not isinstance(group_name, basestring):
+        if not isinstance(group_name, str):
             raise TypeError("'group_name' must be a basestring.")
 
         self.clip_norm = clip_norm
@@ -284,7 +283,7 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr):
                 x=clip_var,
                 y=layers.elementwise_max(
                     x=clip_var, y=group_norm_var))
-        assert group_scale_var.shape == (1L, )
+        assert group_scale_var.shape == (1, )
         self.context[group_scale_name] = group_scale_var
 
         new_grad = layers.elementwise_mul(
@@ -313,7 +312,7 @@ def set_gradient_clip(clip, param_list=None, program=None):
         program = framework.default_main_program()
     if param_list is None:
         param_list = program.block(0).all_parameters()
-    if all(isinstance(elem, basestring) for elem in param_list):
+    if all(isinstance(elem, str) for elem in param_list):
         param_list = [program.block(0).var(elem) for elem in param_list]
     if not all(isinstance(elem, framework.Parameter) for elem in param_list):
         raise TypeError(
diff --git a/python/paddle/fluid/concurrency.py b/python/paddle/fluid/concurrency.py
index b8fe9bd4c19..a8c4d66720d 100644
--- a/python/paddle/fluid/concurrency.py
+++ b/python/paddle/fluid/concurrency.py
@@ -12,11 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from layers.control_flow import BlockGuard, equal
+from .layers.control_flow import BlockGuard, equal
 from .framework import Operator
-from layer_helper import LayerHelper, unique_name
-from layers import fill_constant
-import core
+from .layer_helper import LayerHelper, unique_name
+from .layers import fill_constant
+from . import core
 
 __all__ = [
     'Go', 'make_channel', 'channel_send', 'channel_recv', 'channel_close',
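Note on the six-based helpers in backward.py above: names coming back
from the C++ core can be text or bytes depending on the interpreter, so
the suffix helpers normalize both operands to bytes before comparing.
A rough standalone sketch of the same pattern (names are illustrative,
not Paddle's real API):

    import six

    GRAD_SUFFIX = "@GRAD"

    def strip_grad_suffix(name):
        # accept text or bytes; work on bytes internally like backward.py
        if isinstance(name, six.text_type):
            name = name.encode()
        pos = name.find(six.b(GRAD_SUFFIX))
        return name[:pos] if pos != -1 else name

    assert strip_grad_suffix(b"x@GRAD@RENAME@1") == b"x"
    assert strip_grad_suffix("y") == b"y"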
diff --git a/python/paddle/fluid/data_feeder.py b/python/paddle/fluid/data_feeder.py
index c859778b375..023a3c9c274 100644
--- a/python/paddle/fluid/data_feeder.py
+++ b/python/paddle/fluid/data_feeder.py
@@ -12,14 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import print_function
-import core
+from . import core
 import numpy
 import os
 import six.moves as six
 import multiprocessing
-from framework import Variable, default_main_program
+from .framework import Variable, default_main_program
 
 __all__ = ['DataFeeder']
@@ -142,7 +141,7 @@ class DataFeeder(object):
         if program is None:
             program = default_main_program()
         for each_var in feed_list:
-            if isinstance(each_var, basestring):
+            if isinstance(each_var, str):
                 each_var = program.block(0).var(each_var)
             if not isinstance(each_var, Variable):
                 raise TypeError("Feed list should contain a list of variable")
diff --git a/python/paddle/fluid/debugger.py b/python/paddle/fluid/debugger.py
index 1c56064a1e8..b7a92cf0449 100644
--- a/python/paddle/fluid/debugger.py
+++ b/python/paddle/fluid/debugger.py
@@ -14,8 +14,8 @@
 
 import sys
 import re
-from graphviz import GraphPreviewGenerator
-import proto.framework_pb2 as framework_pb2
+from .graphviz import GraphPreviewGenerator
+from .proto import framework_pb2
 from google.protobuf import text_format
 
 _vartype2str_ = [
diff --git a/python/paddle/fluid/evaluator.py b/python/paddle/fluid/evaluator.py
index 00ba1a04575..c0671cce9a1 100644
--- a/python/paddle/fluid/evaluator.py
+++ b/python/paddle/fluid/evaluator.py
@@ -15,11 +15,11 @@
 import warnings
 import numpy as np
 
-import layers
-from framework import Program, Variable, program_guard
-import unique_name
-from layer_helper import LayerHelper
-from initializer import Constant
+from . import layers
+from .framework import Program, Variable, program_guard
+from . import unique_name
+from .layer_helper import LayerHelper
+from .initializer import Constant
 
 __all__ = [
     'ChunkEvaluator',
diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py
index 4178971398c..d2f130b86da 100644
--- a/python/paddle/fluid/executor.py
+++ b/python/paddle/fluid/executor.py
@@ -14,7 +14,7 @@
 
 import numpy as np
 import contextlib
-from framework import Program, default_main_program, Variable
+from .framework import Program, default_main_program, Variable
 from . import core
 
 __all__ = [
@@ -204,19 +204,19 @@ def fetch_var(name, scope=None, return_numpy=True):
 
 def _get_program_cache_key(feed, fetch_list):
-    feed_var_names = feed.keys()
+    feed_var_names = list(feed.keys())
 
     def to_name_str(var):
         if isinstance(var, Variable):
             return var.desc.name()
         elif isinstance(var, str):
             return var
-        elif isinstance(var, basestring):
+        elif isinstance(var, str):
             return str(var)
         else:
             raise TypeError(str(var) + " should be Variable or str")
 
-    fetch_var_names = map(to_name_str, fetch_list)
+    fetch_var_names = list(map(to_name_str, fetch_list))
 
     return str(feed_var_names + fetch_var_names)
@@ -345,7 +345,7 @@ class Executor(object):
     def _fetch_data(self, fetch_list, fetch_var_name, scope):
         outs = [
             core.get_fetch_variable(scope, fetch_var_name, i)
-            for i in xrange(len(fetch_list))
+            for i in range(len(fetch_list))
         ]
         return outs
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index db550eccf98..53658610e55 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -15,21 +15,22 @@
 import collections
 import contextlib
 import re
+import six
 
 import numpy as np
 
-import proto.framework_pb2 as framework_pb2
+from .proto import framework_pb2
 try:
     from . import core
-except ImportError, e:
+except ImportError as e:
     raise ImportError(
         """NOTE: You may need to run \"export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH\"
     if you encounters \"libmkldnn.so not found\" errors. If you have python
     installed in other directory, replace \"/usr/local/lib\" with your own
     directory. The original error is: \n""" + e.message)
-except Exception, e:
+except Exception as e:
     raise e
-import unique_name
+from . import unique_name
 
 __all__ = [
     'Program',
@@ -86,7 +87,7 @@ def convert_np_dtype_to_dtype_(np_dtype):
     elif dtype == np.uint8:
         return core.VarDesc.VarType.UINT8
     else:
-        raise ValueError("Not supported numpy dtype " + str(dtype))
+        raise ValueError("Not supported numpy dtype " + six.binary_type(dtype))
 
 
 def dtype_is_floating(dtype):
@@ -129,15 +130,15 @@ def _debug_string_(proto, throw_on_error=True):
 
 class Variable(object):
     """
-    In Fluid, every input and output of an operator is a variable. In most
-    cases, variables are used for holding different kinds of data or training
-    labels. A variable belongs to a block. All variable has its own name and
+    In Fluid, every input and output of an operator is a variable. In most
+    cases, variables are used for holding different kinds of data or training
+    labels. A variable belongs to a block. All variable has its own name and
     two variables in different blocks could have the same name.
 
-    There are many kinds of variables. Each kind of them has its own attributes
-    and usages. Please reference the framework.proto for details.
+    There are many kinds of variables. Each kind of them has its own attributes
+    and usages. Please reference the framework.proto for details.
 
-    Most of a Variable's member variables can be setted to be None. It mean
+    Most of a Variable's member variables can be setted to be None. It mean
     it is not available or will be specified later.
 
     Args:
@@ -197,6 +198,7 @@ class Variable(object):
             if name is None:
                 name = unique_name.generate('_generated_var')
             is_new_var = False
+        name = name if isinstance(name, six.binary_type) else name.encode()
         self.desc = self.block.desc.find_var(name)
 
         if self.desc is None:
@@ -290,13 +292,13 @@ class Variable(object):
         assert isinstance(throw_on_error, bool) and isinstance(with_details,
                                                                bool)
         protostr = self.desc.serialize_to_string()
-        proto = framework_pb2.VarDesc.FromString(str(protostr))
+        proto = framework_pb2.VarDesc.FromString(six.binary_type(protostr))
         res_str = _debug_string_(proto, throw_on_error)
         if with_details:
             additional_attr = ("error_clip", "stop_gradient")
             for attr_name in additional_attr:
-                res_str += "%s: %s\n" % (attr_name,
-                                         str(getattr(self, attr_name)))
+                res_str += "%s: %s\n" % (
+                    attr_name, six.binary_type(getattr(self, attr_name)))
         return res_str
 
     __repr__ = __str__
@@ -369,7 +371,7 @@ def get_all_op_protos():
     protostrs = core.get_all_op_protos()
     ret_values = []
     for pbstr in protostrs:
-        op_proto = framework_pb2.OpProto.FromString(str(pbstr))
+        op_proto = framework_pb2.OpProto.FromString(six.binary_type(pbstr))
         ret_values.append(op_proto)
     return ret_values
@@ -472,7 +474,6 @@ class Operator(object):
                  inputs=None,
                  outputs=None,
                  attrs=None):
-
         self.block = block
         self.desc = desc
         self.attrs = attrs
@@ -523,10 +524,15 @@ class Operator(object):
                             % (in_proto.name, len(in_args)))
                     in_arg_names = []
                     for arg in in_args:
-                        if isinstance(arg, basestring):
+                        if issubclass(arg.__class__, six.string_types):
                             in_arg_names.append(arg)
+                        elif isinstance(arg, six.binary_type):
+                            in_arg_names.append(arg.decode())
                         else:
-                            in_arg_names.append(arg.name)
+                            if issubclass(arg.name.__class__,
+                                          six.string_types):
+                                in_arg_names.append(arg.name)
+                            elif isinstance(arg.name, six.binary_type):
+                                in_arg_names.append(arg.name.decode())
                     self.desc.set_input(in_proto.name, in_arg_names)
                 else:
                     self.desc.set_input(in_proto.name, [])
@@ -541,8 +547,9 @@ class Operator(object):
                 if not given == need:
                     raise ValueError(("Incorrect setting for output(s) of "
                                       "operator \"%s\". Need: [%s] Given: [%s]") %
-                                     (type, ", ".join(str(e) for e in need),
-                                      ", ".join(str(e) for e in given)))
+                                     (type,
+                                      ", ".join(six.binary_type(e) for e in need),
+                                      ", ".join(six.binary_type(e) for e in given)))
 
                 for out_proto in proto.outputs:
                     out_args = outputs[out_proto.name]
@@ -554,7 +561,12 @@ class Operator(object):
                             (out_proto.name, len(out_args)))
                     out_arg_names = []
                     for arg in out_args:
-                        out_arg_names.append(arg.name)
+                        if issubclass(arg.name.__class__, six.string_types):
+                            out_arg_names.append(arg.name)
+                        elif isinstance(arg.name, six.binary_type):
+                            out_arg_names.append(arg.name.decode())
+                        else:
+                            out_arg_names.append(six.u(arg.name))
                         arg.op = self
                     self.desc.set_output(out_proto.name, out_arg_names)
@@ -590,7 +602,7 @@ class Operator(object):
         """
         protostr = self.desc.serialize_to_string()
-        proto = framework_pb2.OpDesc.FromString(str(protostr))
+        proto = framework_pb2.OpDesc.FromString(six.binary_type(protostr))
         return _debug_string_(proto, throw_on_error)
 
     def __str__(self):
@@ -845,7 +857,7 @@ class Block(object):
         re_add_indent = re.compile(r"\n(.)")
         res_str = "blocks {\n  idx: %d\n  parent_idx: %d" % (
             self.idx, self.parent_idx)
-        for var in self.vars.itervalues():
+        for var in list(self.vars.values()):
             res_str += "\n  vars {\n    %s  }" % re_add_indent.sub(
                 r"\n    \1", var.to_string(throw_on_error, with_details))
         for op in self.ops:
@@ -854,7 +866,8 @@ class Block(object):
             res_str += "\n}"
         else:
             protostr = self.desc.serialize_to_string()
-            proto = framework_pb2.BlockDesc.FromString(str(protostr))
+            proto = framework_pb2.BlockDesc.FromString(
+                six.binary_type(protostr))
             res_str = _debug_string_(proto, throw_on_error)
         return res_str
@@ -898,10 +911,11 @@ class Block(object):
         Returns:
             Variable: the Variable with the giving name.
         """
-        if not isinstance(name, basestring):
-            raise TypeError(
-                "var require string as parameter, but get %s instead." %
-                (type(name)))
+        if not issubclass(name.__class__, six.string_types):
+            if not isinstance(name, six.binary_type):
+                raise TypeError(
+                    "var require string as parameter, but get %s instead." %
+                    (type(name)))
         v = self.vars.get(name, None)
         if v is None:
             raise ValueError("var %s not in this block" % name)
@@ -949,10 +963,10 @@ class Block(object):
             raise ValueError("Var {0} is not found recursively".format(name))
 
     def all_parameters(self):
-        return list(self._iter_parameters())
+        return list(self.iter_parameters())
 
-    def _iter_parameters(self):
-        return (item[1] for item in self.vars.iteritems()
+    def iter_parameters(self):
+        return (item[1] for item in list(self.vars.items())
                 if isinstance(item[1], Parameter))
 
     def create_var(self, *args, **kwargs):
@@ -1113,7 +1127,7 @@ class Block(object):
                 self.create_var(name=var.name(), desc=var, type=var.type())
 
         # sync variables removed from c++ end
-        for var in self.vars.keys():
+        for var in list(self.vars.keys()):
             if not self.desc.find_var(var):
                 self.vars.pop(var)
@@ -1185,7 +1199,7 @@ class Block(object):
         if not isinstance(other, Block):
             raise TypeError(
                 "_copy_param_info_from should be invoked with Block")
-        for p in other._iter_parameters():
+        for p in other.iter_parameters():
             assert isinstance(p, Parameter)
             v = self.vars.get(p.name, None)
             if v is None:
@@ -1384,7 +1398,8 @@ class Program(object):
                 res_str += block.to_string(throw_on_error, with_details)
         else:
             protostr = self.desc.serialize_to_string()
-            proto = framework_pb2.ProgramDesc.FromString(str(protostr))
+            proto = framework_pb2.ProgramDesc.FromString(
+                six.binary_type(protostr))
             res_str = _debug_string_(proto, throw_on_error)
         return res_str
@@ -1482,7 +1497,7 @@ class Program(object):
         else:
             p = Program()
             p.desc = core.ProgramDesc(self.desc)
-            p.blocks = [Block(p, i) for i in xrange(self.desc.num_blocks())]
+            p.blocks = [Block(p, i) for i in range(self.desc.num_blocks())]
             p._sync_with_cpp()
 
         p._copy_param_info_from(self)
@@ -1534,7 +1549,7 @@ class Program(object):
             targets_idx.append([t.block.idx, t.idx])
         res = Program()
         res.desc = core.prune(self.desc, targets_idx)
-        res.blocks = [Block(res, i) for i in xrange(res.desc.num_blocks())]
+        res.blocks = [Block(res, i) for i in range(res.desc.num_blocks())]
         res._sync_with_cpp()
         return res
@@ -1554,13 +1569,13 @@ class Program(object):
         # core.inference_optimize being fixed.
         res = Program()
         res.desc = core.ProgramDesc(self.desc)
-        for i in xrange(res.desc.num_blocks()):
+        for i in range(res.desc.num_blocks()):
             block = res.desc.block(i)
-            for j in xrange(block.op_size()):
+            for j in range(block.op_size()):
                 op = block.op(j)
                 if op.has_attr('is_test'):
                     op.set_attr('is_test', True)
-        res.blocks = [Block(res, i) for i in xrange(res.desc.num_blocks())]
+        res.blocks = [Block(res, i) for i in range(res.desc.num_blocks())]
         res._sync_with_cpp()
         return res
@@ -1573,14 +1588,14 @@ class Program(object):
         and deserialization.
 
         Args:
-            binary_str(str): The binary prootbuf string.
+            binary_str_type(str): The binary prootbuf string.
 
         Returns:
             Program: A deserialized program desc.
         """
         p = Program()
         p.desc = core.ProgramDesc(binary_str)
-        p.blocks = [Block(p, i) for i in xrange(p.desc.num_blocks())]
+        p.blocks = [Block(p, i) for i in range(p.desc.num_blocks())]
         p._sync_with_cpp()
         return p
@@ -1608,7 +1623,7 @@ class Program(object):
         self._seed = seed
 
     def __repr__(self):
-        return str(self)
+        return self.__str__()
 
     def global_block(self):
         """
@@ -1719,7 +1734,7 @@ class Program(object):
         if len(self.blocks) != len(other.blocks):
             raise ValueError("_copy_param_info_from should be invoked with two "
                              "program, with represent the same topology")
-        for var in other.global_block().vars.itervalues():
+        for var in list(other.global_block().vars.values()):
             if var.is_data:
                 self.global_block().var(var.name).is_data = True
@@ -1731,15 +1746,15 @@ class Program(object):
             iterable: The generator will yield every variable in this program.
         """
         for each_block in self.blocks:
-            for each_var in each_block.vars.itervalues():
+            for each_var in list(each_block.vars.values()):
                 yield each_var
 
 
 class Parameter(Variable):
     """
-    Parameter is derived from Variable. A parameter is a persistable
+    Parameter is derived from Variable. A parameter is a persistable
     Variable, and will be updated by optimizers after each iteration.
-    The training of a neural network is essentially the updating of
+    The training of a neural network is essentially the updating of
     its parameters.
 
     Relative to a general Variable, a Parameter has several its own
@@ -1805,8 +1820,8 @@ class Parameter(Variable):
             additional_attr = ("trainable", "optimize_attr", "regularizer",
                                "gradient_clip_attr", "do_model_average")
             for attr_name in additional_attr:
-                res_str += "%s: %s\n" % (attr_name,
-                                         str(getattr(self, attr_name)))
+                res_str += "%s: %s\n" % (
+                    attr_name, six.binary_type(getattr(self, attr_name)))
         else:
             res_str = Variable.to_string(self, throw_on_error, False)
         return res_str
diff --git a/python/paddle/fluid/graphviz.py b/python/paddle/fluid/graphviz.py
index 125b4efa9d4..b72dd7bb01c 100644
--- a/python/paddle/fluid/graphviz.py
+++ b/python/paddle/fluid/graphviz.py
@@ -19,7 +19,7 @@ import logging
 
 
 def crepr(v):
-    if type(v) is str or type(v) is unicode:
+    if type(v) is str or type(v) is str:
         return '"%s"' % v
     return str(v)
@@ -104,7 +104,7 @@ class Graph(object):
 
     def _rank_repr(self):
         ranks = sorted(
-            self.rank_groups.items(),
+            list(self.rank_groups.items()),
             cmp=lambda a, b: a[1].priority > b[1].priority)
         repr = []
         for x in ranks:
@@ -148,7 +148,7 @@ class Node(object):
             name=self.name,
             label=self.label,
             extra=',' + ','.join("%s=%s" % (key, crepr(value))
-                                 for key, value in self.attrs.items())
+                                 for key, value in list(self.attrs.items()))
             if self.attrs else "")
         return reprs
@@ -172,7 +172,7 @@ class Edge(object):
             target=self.target.name,
             extra="" if not self.attrs else
             "[" + ','.join("{}={}".format(attr[0], crepr(attr[1]))
-                           for attr in self.attrs.items()) + "]")
+                           for attr in list(self.attrs.items())) + "]")
         return repr
diff --git a/python/paddle/fluid/inferencer.py b/python/paddle/fluid/inferencer.py
index a81e39695b7..ff382d8b832 100644
--- a/python/paddle/fluid/inferencer.py
+++ b/python/paddle/fluid/inferencer.py
@@ -14,14 +14,14 @@
 
 import contextlib
 
-import core
-
-import executor
-import framework
-import io
-import parallel_executor
-import unique_name
-from trainer import check_and_get_place
+from . import core
+
+from . import executor
+from . import framework
+from . import io
+from . import parallel_executor
+from . import unique_name
+from .trainer import check_and_get_place
 
 __all__ = ['Inferencer', ]
diff --git a/python/paddle/fluid/initializer.py b/python/paddle/fluid/initializer.py
index 0e640bf280d..83290ac6083 100644
--- a/python/paddle/fluid/initializer.py
+++ b/python/paddle/fluid/initializer.py
@@ -12,11 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import framework
+from . import framework
 import numpy as np
 import contextlib
-from framework import convert_np_dtype_to_dtype_
-from core import VarDesc
+from .framework import convert_np_dtype_to_dtype_
+from .core import VarDesc
 
 __all__ = [
     'Constant', 'Uniform', 'Normal', 'Xavier', 'Bilinear', 'MSRA',
diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py
index 1ec670de070..599a7782eec 100644
--- a/python/paddle/fluid/io.py
+++ b/python/paddle/fluid/io.py
@@ -16,6 +16,7 @@ import os
 import errno
 import time
 import shutil
+import six
 
 from paddle.fluid.evaluator import Evaluator
 from paddle.fluid.framework import Program, Parameter, default_main_program, default_startup_program, Variable
@@ -92,34 +93,34 @@ def save_vars(executor,
     """
     Save variables to the given directory by executor.
 
-    There are two ways to specify variables to be saved: The first way, list
-    variables in a list and assign it to the `vars`. The second way, assign the
-    `main_program` with an existing program, then all variables in the program
-    will be saved. The first way has a higher priority. In other words, if `vars`
+    There are two ways to specify variables to be saved: The first way, list
+    variables in a list and assign it to the `vars`. The second way, assign the
+    `main_program` with an existing program, then all variables in the program
+    will be saved. The first way has a higher priority. In other words, if `vars`
     are assigned, the `main_program` and the `predicate` will be ignored.
 
-    The `dirname` are used to specify the folder where to save variables.
-    If you prefer to save variables in separate files in the folder `dirname`,
-    set `filename` None; if you prefer to save all variables in a single file,
+    The `dirname` are used to specify the folder where to save variables.
+    If you prefer to save variables in separate files in the folder `dirname`,
+    set `filename` None; if you prefer to save all variables in a single file,
     use `filename` to specify it.
 
     Args:
         executor(Executor): The executor to run for saving variables.
         dirname(str): The directory path.
-        main_program(Program|None): The program whose variables will be saved.
-                                    If it is None, the default main program will
+        main_program(Program|None): The program whose variables will be saved.
+                                    If it is None, the default main program will
                                     be used automatically.
                                     Default: None
-        vars(list[Variable]|None): The list that contains all variables to save.
+        vars(list[Variable]|None): The list that contains all variables to save.
                                    It has a higher priority than the `main_program`.
                                    Default: None
-        predicate(function|None): If it is not None, only variables in the
-                                  `main_program` that makes predicate(variable)==True
-                                  will be saved. It only works when we are using the
-                                  `main_program` to specify variables (In other words
+        predicate(function|None): If it is not None, only variables in the
+                                  `main_program` that makes predicate(variable)==True
+                                  will be saved. It only works when we are using the
+                                  `main_program` to specify variables (In other words
                                   `vars` is None).
                                   Default: None
-        filename(str|None): The file which to save all variables. If you prefer to save
+        filename(str|None): The file which to save all variables. If you prefer to save
                             variables separately, set it to None.
                             Default: None
 
@@ -149,7 +150,7 @@ def save_vars(executor,
 
             # The second usage: using `vars` to specify variables
             var_list = [var_a, var_b, var_c]
-            fluid.io.save_vars(executor=exe, dirname=path, vars=var_list,
+            fluid.io.save_vars(executor=exe, dirname=path, vars=var_list,
                                filename="vars_file")
             # var_a, var_b and var_c will be saved. And they are going to be
             # saved in the same file named 'var_file' in the path "./my_paddle_model".
@@ -163,7 +164,7 @@ def save_vars(executor,
         save_vars(
             executor,
             dirname=dirname,
-            vars=filter(predicate, main_program.list_vars()),
+            vars=list(filter(predicate, main_program.list_vars())),
             filename=filename)
     else:
         save_program = Program()
@@ -203,14 +204,14 @@ def save_params(executor, dirname, main_program=None, filename=None):
     This function filters out all parameters from the give `main_program`
     and then save them to the folder `dirname` or the file `filename`.
 
-    Use the `dirname` to specify the saving folder. If you would like to
-    save parameters in separate files, set `filename` None; if you would
-    like to save all parameters in a single file, use `filename` to specify
+    Use the `dirname` to specify the saving folder. If you would like to
+    save parameters in separate files, set `filename` None; if you would
+    like to save all parameters in a single file, use `filename` to specify
     the file name.
 
-    NOTICE: Some variables are not Parameter while they are necessary for
-    training. So you can NOT save and continue your training just by
-    `save_params()` and `load_params()`. Please use `save_persistables()`
+    NOTICE: Some variables are not Parameter while they are necessary for
+    training. So you can NOT save and continue your training just by
+    `save_params()` and `load_params()`. Please use `save_persistables()`
     and `load_persistables()` instead.
 
     Args:
@@ -220,8 +221,8 @@ def save_params(executor, dirname, main_program=None, filename=None):
                                     saved. If it is None, the default
                                     main program will be used automatically.
                                     Default: None
-        filename(str|None): The file to save all parameters. If you prefer
-                            to save parameters in differnet files, set it
+        filename(str|None): The file to save all parameters. If you prefer
+                            to save parameters in differnet files, set it
                             to None.
                             Default: None
 
@@ -234,7 +235,7 @@ def save_params(executor, dirname, main_program=None, filename=None):
             exe = fluid.Executor(fluid.CPUPlace())
             param_path = "./my_paddle_model"
             prog = fluid.default_main_program()
-            fluid.io.save_params(executor=exe, dirname=param_path,
+            fluid.io.save_params(executor=exe, dirname=param_path,
                                  main_program=None)
     """
     save_vars(
@@ -248,23 +249,23 @@ def save_params(executor, dirname, main_program=None, filename=None):
 
 def save_persistables(executor, dirname, main_program=None, filename=None):
     """
-    This function filters out all variables with `persistable==True` from the
-    give `main_program` and then saves these variables to the folder `dirname`
+    This function filters out all variables with `persistable==True` from the
+    give `main_program` and then saves these variables to the folder `dirname`
     or file `filename`.
 
-    The `dirname` is used to specify the folder where persistable variables
-    are going to be saved. If you would like to save variables in separate
-    files, set `filename` None; if you would like to save all variables in a
+    The `dirname` is used to specify the folder where persistable variables
+    are going to be saved. If you would like to save variables in separate
+    files, set `filename` None; if you would like to save all variables in a
     single file, use `filename` to specify the file name.
 
     Args:
         executor(Executor): The executor to run for saving persistable variables.
         dirname(str): The directory path.
-        main_program(Program|None): The program whose persistbale variables will
-                                    be saved. If it is None, the default main
+        main_program(Program|None): The program whose persistbale variables will
+                                    be saved. If it is None, the default main
                                     program will be used automatically.
                                     Default: None
-        filename(str|None): The file to saved all variables. If you prefer to
+        filename(str|None): The file to saved all variables. If you prefer to
                             save variables in differnet files, set it to None.
                             Default: None
 
@@ -277,7 +278,7 @@ def save_persistables(executor, dirname, main_program=None, filename=None):
             exe = fluid.Executor(fluid.CPUPlace())
             param_path = "./my_paddle_model"
             prog = fluid.default_main_program()
-            fluid.io.save_persistables(executor=exe, dirname=param_path,
+            fluid.io.save_persistables(executor=exe, dirname=param_path,
                                        main_program=None)
     """
     save_vars(
@@ -298,34 +299,34 @@ def load_vars(executor,
     """
     Load variables from the given directory by executor.
 
-    There are two ways to specify variables to be loaded: The first way, list
-    variables in a list and assign it to the `vars`. The second way, assign the
-    `main_program` with an existing program, then all variables in the program
-    will be loaded. The first way has a higher priority. In other words if `vars`
+    There are two ways to specify variables to be loaded: The first way, list
+    variables in a list and assign it to the `vars`. The second way, assign the
+    `main_program` with an existing program, then all variables in the program
+    will be loaded. The first way has a higher priority. In other words if `vars`
     are assigned, the `main_program` and the `predicate` will be ignored.
 
-    The `dirname` are used to specify the folder where to load variables.
-    If variables were saved in separate files in the folder `dirname`,
-    set `filename` None; if all variables were saved in a single file,
+    The `dirname` are used to specify the folder where to load variables.
+    If variables were saved in separate files in the folder `dirname`,
+    set `filename` None; if all variables were saved in a single file,
     use `filename` to specify it.
 
     Args:
         executor(Executor): The executor to run for loading variables.
         dirname(str): The directory path.
-        main_program(Program|None): The program whose variables will be loaded.
-                                    If it is None, the default main program will
+        main_program(Program|None): The program whose variables will be loaded.
+                                    If it is None, the default main program will
                                     be used automatically.
                                     Default: None
-        vars(list[Variable]|None): The list that contains all variables to load.
+        vars(list[Variable]|None): The list that contains all variables to load.
                                    It has a higher priority than the `main_program`.
                                    Default: None
-        predicate(function|None): If it is not None, only variables in the
-                                  `main_program` that makes predicate(variable)==True
-                                  will be loaded.
It only works when we are using the - `main_program` to specify variables (In other words + predicate(function|None): If it is not None, only variables in the + `main_program` that makes predicate(variable)==True + will be loaded. It only works when we are using the + `main_program` to specify variables (In other words `vars` is None). Default: None - filename(str|None): The file which saved all required variables. If variables + filename(str|None): The file which saved all required variables. If variables were saved in differnet files, set it to None. Default: None @@ -355,9 +356,9 @@ def load_vars(executor, # The second usage: using `vars` to specify variables var_list = [var_a, var_b, var_c] - fluid.io.load_vars(executor=exe, dirname=path, vars=var_list, + fluid.io.load_vars(executor=exe, dirname=path, vars=var_list, filename="vars_file") - # var_a, var_b and var_c will be loaded. And they are supposed to haven + # var_a, var_b and var_c will be loaded. And they are supposed to haven # been saved in the same file named 'var_file' in the path "./my_paddle_model". """ if vars is None: @@ -369,7 +370,7 @@ def load_vars(executor, load_vars( executor, dirname=dirname, - vars=filter(predicate, main_program.list_vars()), + vars=list(filter(predicate, main_program.list_vars())), filename=filename) else: load_prog = Program() @@ -410,15 +411,15 @@ def load_params(executor, dirname, main_program=None, filename=None): and then trys to load these parameters from the folder `dirname` or the file `filename`. - Use the `dirname` to specify the folder where parameters were saved. If - parameters were saved in separate files in the folder `dirname`, set - `filename` None; if all parameters were saved in a single file, use + Use the `dirname` to specify the folder where parameters were saved. If + parameters were saved in separate files in the folder `dirname`, set + `filename` None; if all parameters were saved in a single file, use `filename` to specify the file name. - NOTICE: Some variables are not Parameter while they are necessary for - training. So you can NOT save and continue your training just by - `save_params()` and `load_params()`. Please use `save_persistables()` - and `load_persistables()` instead. + NOTICE: Some variables are not Parameter while they are necessary for + training. So you can NOT save and continue your training just by + `save_params()` and `load_params()`. Please use `save_persistables()` + and `load_persistables()` instead. Args: executor(Executor): The executor to run for loading parameters. @@ -427,7 +428,7 @@ def load_params(executor, dirname, main_program=None, filename=None): loaded. If it is None, the default main program will be used automatically. Default: None - filename(str|None): The file which saved all parameters. If parameters + filename(str|None): The file which saved all parameters. If parameters were saved in differnet files, set it to None. 
Default: None @@ -440,7 +441,7 @@ def load_params(executor, dirname, main_program=None, filename=None): exe = fluid.Executor(fluid.CPUPlace()) param_path = "./my_paddle_model" prog = fluid.default_main_program() - fluid.io.load_params(executor=exe, dirname=param_path, + fluid.io.load_params(executor=exe, dirname=param_path, main_program=None) """ load_vars( @@ -453,23 +454,23 @@ def load_params(executor, dirname, main_program=None, filename=None): def load_persistables(executor, dirname, main_program=None, filename=None): """ - This function filters out all variables with `persistable==True` from the - give `main_program` and then trys to load these variables from the folder + This function filters out all variables with `persistable==True` from the + give `main_program` and then trys to load these variables from the folder `dirname` or the file `filename`. - Use the `dirname` to specify the folder where persistable variables were - saved. If variables were saved in separate files, set `filename` None; - if all variables were saved in a single file, use `filename` to specify + Use the `dirname` to specify the folder where persistable variables were + saved. If variables were saved in separate files, set `filename` None; + if all variables were saved in a single file, use `filename` to specify the file name. Args: executor(Executor): The executor to run for loading persistable variables. dirname(str): The directory path. - main_program(Program|None): The program whose persistbale variables will - be loaded. If it is None, the default main + main_program(Program|None): The program whose persistbale variables will + be loaded. If it is None, the default main program will be used automatically. Default: None - filename(str|None): The file which saved all variables. If variables were + filename(str|None): The file which saved all variables. If variables were saved in differnet files, set it to None. Default: None @@ -482,7 +483,7 @@ def load_persistables(executor, dirname, main_program=None, filename=None): exe = fluid.Executor(fluid.CPUPlace()) param_path = "./my_paddle_model" prog = fluid.default_main_program() - fluid.io.load_persistables(executor=exe, dirname=param_path, + fluid.io.load_persistables(executor=exe, dirname=param_path, main_program=None) """ load_vars( @@ -561,20 +562,20 @@ def save_inference_model(dirname, Args: dirname(str): The directory path to save the inference model. - feeded_var_names(list[str]): Names of variables that need to be feeded data + feeded_var_names(list[str]): Names of variables that need to be feeded data during inference. - target_vars(list[Variable]): Variables from which we can get inference + target_vars(list[Variable]): Variables from which we can get inference results. executor(Executor): The executor that saves the inference model. - main_program(Program|None): The original program, which will be pruned to - build the inference model. If is setted None, + main_program(Program|None): The original program, which will be pruned to + build the inference model. If is setted None, the default main program will be used. Default: None. - model_filename(str|None): The name of file to save the inference program - itself. If is setted None, a default filename + model_filename(str|None): The name of file to save the inference program + itself. If is setted None, a default filename `__model__` will be used. - params_filename(str|None): The name of file to save all related parameters. 
-                               If it is setted None, parameters will be saved
+    params_filename(str|None): The name of file to save all related parameters.
+                               If it is set to None, parameters will be saved
                                in separate files .
 
     Returns:
@@ -592,20 +593,30 @@ def save_inference_model(dirname,
         fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'],
                                       target_vars=[predict_var], executor=exe)
 
-        # In this exsample, the function will prune the default main program
-        # to make it suitable for infering the `predict_var`. The pruned
-        # inference program is going to be saved in the "./infer_model/__model__"
+        # In this example, the function will prune the default main program
+        # to make it suitable for inferring the `predict_var`. The pruned
+        # inference program is going to be saved in the "./infer_model/__model__"
         # and parameters are going to be saved in separate files under folder
-        # "./infer_model".
+        # "./infer_model".
 
     """
-    if isinstance(feeded_var_names, basestring):
+    if isinstance(feeded_var_names, six.binary_type):
         feeded_var_names = [feeded_var_names]
+    elif isinstance(feeded_var_names, six.text_type):
+        feeded_var_names = [feeded_var_names.encode()]
     else:
         if len(feeded_var_names) > 0:
             if not (bool(feeded_var_names) and all(
-                    isinstance(name, basestring) for name in feeded_var_names)):
-                raise ValueError("'feed_var_names' should be a list of str.")
+                    isinstance(name, six.binary_type)
+                    for name in feeded_var_names)):
+                if not (all(
+                        isinstance(name, six.text_type)
+                        for name in feeded_var_names)):
+                    raise ValueError(
+                        "'feed_var_names' should be a list of str.")
+                feeded_var_names = [
+                    name.encode() for name in feeded_var_names
+                ]
 
     if isinstance(target_vars, Variable):
         target_vars = [target_vars]
@@ -662,22 +677,22 @@ def load_inference_model(dirname,
         dirname(str): The directory path
         executor(Executor): The executor to run for loading inference model.
         model_filename(str|None): The name of file to load inference program.
-                                  If it is None, the default filename 
+                                  If it is None, the default filename
                                   '__model__' will be used.
                                   Default: None
         params_filename(str|None): The name of file to load all parameters.
-                                    It is only used for the case that all 
-                                    parameters were saved in a single binary 
-                                    file. If parameters were saved in separate 
+                                    It is only used for the case that all
+                                    parameters were saved in a single binary
+                                    file. If parameters were saved in separate
                                     files, set it as 'None'.
 
     Returns:
         tuple: The return of this function is a tuple with three elements:
-        (program, feed_target_names, fetch_targets). The `program` is a 
-        Program, it's the program for inference. The `feed_target_names` is 
-        a list of str, it contains Names of variables that need to feed 
-        data in the inference program. The `fetch_targets` is a list of 
-        Variable. It contains variables from which we can get inference 
+        (program, feed_target_names, fetch_targets). The `program` is a
+        Program, it's the program for inference. The `feed_target_names` is
+        a list of str, it contains names of variables that need to be fed
+        data in the inference program. The `fetch_targets` is a list of
+        Variable. It contains variables from which we can get inference
         results.
Raises: @@ -688,17 +703,17 @@ def load_inference_model(dirname, exe = fluid.Executor(fluid.CPUPlace()) path = "./infer_model" - [inference_program, feed_target_names, fetch_targets] = + [inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(dirname=path, executor=exe) results = exe.run(inference_program, feed={feed_target_names[0]: tensor_img}, fetch_list=fetch_targets) - # In this exsample, the inference program was saved in the - # "./infer_model/__model__" and parameters were saved in - # separate files in ""./infer_model". - # After getting inference program, feed target names and - # fetch targets, we can use an Executor to run the inference + # In this exsample, the inference program was saved in the + # "./infer_model/__model__" and parameters were saved in + # separate files in ""./infer_model". + # After getting inference program, feed target names and + # fetch targets, we can use an Executor to run the inference # program to get the inference result. """ diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py index de752d1daeb..5f2ff3c9c87 100644 --- a/python/paddle/fluid/layer_helper.py +++ b/python/paddle/fluid/layer_helper.py @@ -15,11 +15,11 @@ import copy import itertools -from framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating -import unique_name +from .framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating +from . import unique_name from paddle.fluid.initializer import Constant, Xavier -from param_attr import ParamAttr, WeightNormParamAttr -import core +from .param_attr import ParamAttr, WeightNormParamAttr +from . import core class LayerHelper(object): @@ -83,7 +83,7 @@ class LayerHelper(object): raise ValueError("parameter number mismatch") elif len(param_attr) == 1 and length != 1: tmp = [None] * length - for i in xrange(length): + for i in range(length): tmp[i] = copy.deepcopy(param_attr[0]) param_attr = tmp return param_attr @@ -91,7 +91,7 @@ class LayerHelper(object): def iter_inputs_and_params(self, input_param_name='input'): inputs = self.multiple_input(input_param_name) param_attrs = self.multiple_param_attr(len(inputs)) - for ipt, param_attr in itertools.izip(inputs, param_attrs): + for ipt, param_attr in zip(inputs, param_attrs): yield ipt, param_attr def input_dtype(self, input_param_name='input'): @@ -218,7 +218,7 @@ class LayerHelper(object): norm = __norm_op(reshape, dim=0, block=block) __reshape_op(norm, out=out, shape=out_shape, block=block) else: - perm = range(len(x.shape)) + perm = list(range(len(x.shape))) perm[0], perm[dim] = dim, 0 transpose = __transpose_op(x, perm, block=block) norm = __norm_op(transpose, dim=0, block=block) @@ -397,7 +397,7 @@ class LayerHelper(object): act = self.kwargs.get('act', None) if act is None: return input_var - if isinstance(act, basestring): + if isinstance(act, str): act = {'type': act} if 'use_cudnn' in self.kwargs and self.kwargs.get('use_cudnn'): diff --git a/python/paddle/fluid/layers/__init__.py b/python/paddle/fluid/layers/__init__.py index 4917e67de0d..a48e3604634 100644 --- a/python/paddle/fluid/layers/__init__.py +++ b/python/paddle/fluid/layers/__init__.py @@ -12,25 +12,25 @@ # See the License for the specific language governing permissions and # limitations under the License. 
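Note: the bulk of this patch is the import rewrite visible in the next hunks. Python 3 removes implicit relative imports (PEP 328), so a module inside a package can no longer pick up a sibling with a bare `import ops`. A minimal sketch of the rule, for a hypothetical package `pkg` with sibling modules `a.py` and `b.py` (all names here are illustrative only):

    # pkg/a.py
    # Python 2 resolved a bare "import b" to the sibling pkg/b.py.
    # Python 3 would instead import a top-level module named "b", or fail.
    from . import b        # explicit relative import of the sibling module
    from .b import helper  # or import a name from it directly

    def call_helper():
        return helper()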
-import ops -from ops import * -import nn -from nn import * -import io -from io import * -import tensor -from tensor import * -import control_flow -from control_flow import * -import device -from device import * -import math_op_patch -from math_op_patch import * -import detection -from detection import * -import metric_op -from metric_op import * -from learning_rate_scheduler import * +from . import ops +from .ops import * +from . import nn +from .nn import * +from . import io +from .io import * +from . import tensor +from .tensor import * +from . import control_flow +from .control_flow import * +from . import device +from .device import * +from . import math_op_patch +from .math_op_patch import * +from . import detection +from .detection import * +from . import metric_op +from .metric_op import * +from .learning_rate_scheduler import * __all__ = [] __all__ += nn.__all__ diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index f05ae6d5d19..3d230e5515e 100644 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -13,14 +13,15 @@ # limitations under the License. import contextlib -from layer_function_generator import autodoc, templatedoc -from tensor import assign, fill_constant +from .layer_function_generator import autodoc, templatedoc +from .tensor import assign, fill_constant from .. import core from ..framework import Program, Variable, Operator from ..layer_helper import LayerHelper, unique_name from ..initializer import force_init_on_cpu -from ops import logical_and, logical_not, logical_or +from .ops import logical_and, logical_not, logical_or import numpy +from functools import reduce __all__ = [ 'While', @@ -597,7 +598,7 @@ class StaticRNN(object): boot_memories = [] pre_memories = [] memories = [] - for _, mem in self.memories.iteritems(): + for _, mem in list(self.memories.items()): boot_memories.append(mem.init) pre_memories.append(mem.pre_mem.name) mem_var = rnn_block.var(mem.mem.name) @@ -1508,7 +1509,7 @@ class IfElse(object): def __call__(self): if self.status != self.OUT_IF_ELSE_BLOCKS: raise ValueError("IfElse::__call__ must be out of sub-block") - false_len, true_len = map(len, self.output_table) + false_len, true_len = list(map(len, self.output_table)) if false_len == 0 and true_len == 0: raise ValueError("Must invoke true_block/false_block before " "__call__") diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py index 3ef4afa691b..1917624fd70 100644 --- a/python/paddle/fluid/layers/detection.py +++ b/python/paddle/fluid/layers/detection.py @@ -15,12 +15,13 @@ All layers just related to the detection neural network. """ -from layer_function_generator import generate_layer_fn -from layer_function_generator import autodoc, templatedoc +from .layer_function_generator import generate_layer_fn +from .layer_function_generator import autodoc, templatedoc from ..layer_helper import LayerHelper -import tensor -import nn +from . import tensor +from . import nn import math +from functools import reduce __all__ = [ 'prior_box', @@ -1031,7 +1032,7 @@ def multi_box_head(inputs, min_sizes = [] max_sizes = [] step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2))) - for ratio in xrange(min_ratio, max_ratio + 1, step): + for ratio in range(min_ratio, max_ratio + 1, step): min_sizes.append(base_size * ratio / 100.) max_sizes.append(base_size * (ratio + step) / 100.) 
min_sizes = [base_size * .10] + min_sizes diff --git a/python/paddle/fluid/layers/device.py b/python/paddle/fluid/layers/device.py index 384d302a709..bb1fb7fd571 100644 --- a/python/paddle/fluid/layers/device.py +++ b/python/paddle/fluid/layers/device.py @@ -15,7 +15,7 @@ All util layers. """ -from layer_function_generator import autodoc +from .layer_function_generator import autodoc from ..framework import unique_name from ..layer_helper import LayerHelper from ..annotations import deprecated diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index df6becabd16..77c61e08627 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -16,8 +16,8 @@ import multiprocessing import threading from ..data_feeder import DataFeeder -from control_flow import BlockGuard -from layer_function_generator import templatedoc +from .control_flow import BlockGuard +from .layer_function_generator import templatedoc from .. import core from ..executor import global_scope from ..framework import convert_np_dtype_to_dtype_, default_main_program, \ @@ -69,7 +69,7 @@ def data(name, """ helper = LayerHelper('data', **locals()) shape = list(shape) - for i in xrange(len(shape)): + for i in range(len(shape)): if shape[i] is None: shape[i] = -1 append_batch_size = False @@ -387,9 +387,9 @@ def random_data_generator(low, high, shapes, lod_levels, for_parallel=True): Create a uniform random data generator This layer returns a Reader Variable. - Instead of opening a file and reading data from it, this - Reader Variable generates float uniform random data by itself. - It can be used as a dummy reader to test a network without + Instead of opening a file and reading data from it, this + Reader Variable generates float uniform random data by itself. + It can be used as a dummy reader to test a network without opening a real file. Args: @@ -710,9 +710,9 @@ def open_files(filenames, """ Open files - This layer takes a list of files to read from and returns a Reader Variable. - Via the Reader Variable, we can get data from given files. All files must - have name suffixs to indicate their formats, e.g., '*.recordio'. + This layer takes a list of files to read from and returns a Reader Variable. + Via the Reader Variable, we can get data from given files. All files must + have name suffixs to indicate their formats, e.g., '*.recordio'. Args: filenames(list): The list of file names. @@ -828,9 +828,9 @@ def shuffle(reader, buffer_size): def batch(reader, batch_size): """ - This layer is a reader decorator. It takes a reader and adds - 'batching' decoration on it. When reading with the result - decorated reader, output data will be automatically organized + This layer is a reader decorator. It takes a reader and adds + 'batching' decoration on it. When reading with the result + decorated reader, output data will be automatically organized to the form of batches. Args: @@ -855,11 +855,11 @@ def batch(reader, batch_size): # If we read data with the raw_reader: # data = fluid.layers.read_file(raw_reader) # We can only get data instance by instance. - # + # # However, if we read data with the batch_reader: # data = fluid.layers.read_file(batch_reader) - # Each 5 adjacent instances will be automatically combined together - # to become a batch. So what we get('data') is a batch data instead + # Each 5 adjacent instances will be automatically combined together + # to become a batch. So what we get('data') is a batch data instead # of an instance. 
""" return __create_unshared_decorated_reader__( @@ -906,8 +906,8 @@ def read_file(reader): """ Execute the given reader and get data via it. - A reader is also a Variable. It can be a raw reader generated by - `fluid.layers.open_files()` or a decorated one generated by + A reader is also a Variable. It can be a raw reader generated by + `fluid.layers.open_files()` or a decorated one generated by `fluid.layers.double_buffer()` and so on. Args: @@ -1008,7 +1008,7 @@ class Preprocessor(object): source_lod_levels = self.underlying_reader.desc.lod_levels() self.source_var_names = [ unique_name("preprocessor_source") - for _ in xrange(len(source_shapes)) + for _ in range(len(source_shapes)) ] source_vars = [] for var_name, shape, dtype, lod_level in zip( diff --git a/python/paddle/fluid/layers/layer_function_generator.py b/python/paddle/fluid/layers/layer_function_generator.py index 3096389101a..c0d72620b1d 100644 --- a/python/paddle/fluid/layers/layer_function_generator.py +++ b/python/paddle/fluid/layers/layer_function_generator.py @@ -12,11 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. import re -import cStringIO import functools import warnings import string +from six.moves import cStringIO from ..proto import framework_pb2 from ..framework import OpProtoHolder, Variable from ..layer_helper import LayerHelper @@ -70,7 +70,7 @@ def _generate_doc_string_(op_proto): if not isinstance(op_proto, framework_pb2.OpProto): raise TypeError("OpProto should be `framework_pb2.OpProto`") - buf = cStringIO.StringIO() + buf = cStringIO() buf.write(escape_math(op_proto.comment)) buf.write('\nArgs:\n') for each_input in op_proto.inputs: @@ -119,9 +119,9 @@ def generate_layer_fn(op_type): """ op_proto = OpProtoHolder.instance().get_op_proto(op_type) not_intermediate_outputs = \ - filter(lambda output: not output.intermediate, op_proto.outputs) + [output for output in op_proto.outputs if not output.intermediate] intermediate_outputs = \ - filter(lambda output: output.intermediate, op_proto.outputs) + [output for output in op_proto.outputs if output.intermediate] if len(not_intermediate_outputs) != 1: raise ValueError("Only one non intermediate output operator can be", diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py index c7966e36f15..daf91a40f7a 100644 --- a/python/paddle/fluid/layers/learning_rate_scheduler.py +++ b/python/paddle/fluid/layers/learning_rate_scheduler.py @@ -20,10 +20,10 @@ User can also implement their own learning_rate_decay strategy according to this module. """ -import control_flow -import nn -import ops -import tensor +from . import control_flow +from . import nn +from . import ops +from . import tensor from ..initializer import init_on_cpu from ..framework import default_main_program, Parameter diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/fluid/layers/math_op_patch.py index f814c41633f..0e10a91d258 100644 --- a/python/paddle/fluid/layers/math_op_patch.py +++ b/python/paddle/fluid/layers/math_op_patch.py @@ -13,7 +13,7 @@ # limitations under the License. 
from ..framework import Variable, unique_name -from layer_function_generator import OpProtoHolder +from .layer_function_generator import OpProtoHolder from ..initializer import force_init_on_cpu diff --git a/python/paddle/fluid/layers/metric_op.py b/python/paddle/fluid/layers/metric_op.py index e7d7a9e826d..49bae1e8af7 100644 --- a/python/paddle/fluid/layers/metric_op.py +++ b/python/paddle/fluid/layers/metric_op.py @@ -20,7 +20,7 @@ from ..layer_helper import LayerHelper from ..initializer import Normal, Constant from ..framework import Variable from ..param_attr import ParamAttr -import nn +from . import nn __all__ = ['accuracy', 'auc'] diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 5d7f1eadd93..b3c73a749ea 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -33,11 +33,12 @@ from ..layer_helper import LayerHelper from ..initializer import Normal, Constant from ..framework import Variable from ..param_attr import ParamAttr -from layer_function_generator import autodoc, templatedoc -from tensor import concat -import utils +from .layer_function_generator import autodoc, templatedoc +from .tensor import concat +from . import utils import random from .. import unique_name +from functools import reduce __all__ = [ 'fc', @@ -4843,7 +4844,7 @@ def dice_loss(input, label, epsilon=0.00001): loss = fluid.layers.dice_loss(input=predictions, label=label, 2) """ label = one_hot(label, depth=input.shape[-1]) - reduce_dim = range(1, len(input.shape)) + reduce_dim = list(range(1, len(input.shape))) inse = reduce_sum(input * label, dim=reduce_dim) dice_denominator = reduce_sum( input, dim=reduce_dim) + reduce_sum( diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py index 9e97ec9a6f5..60c1413d7c1 100644 --- a/python/paddle/fluid/layers/ops.py +++ b/python/paddle/fluid/layers/ops.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from layer_function_generator import generate_layer_fn +from .layer_function_generator import generate_layer_fn __activations__ = [ 'sigmoid', diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index b6614ecf3bc..b93d721c12c 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -18,7 +18,7 @@ from ..framework import convert_np_dtype_to_dtype_ from ..framework import Variable from ..initializer import Constant, force_init_on_cpu from ..core import VarDesc -from layer_function_generator import templatedoc +from .layer_function_generator import templatedoc import numpy __all__ = [ diff --git a/python/paddle/fluid/lod_tensor.py b/python/paddle/fluid/lod_tensor.py index b2b3186c1e8..53c33616f55 100644 --- a/python/paddle/fluid/lod_tensor.py +++ b/python/paddle/fluid/lod_tensor.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import core +from . import core import numpy as np __all__ = ['create_lod_tensor', 'create_random_int_lodtensor'] @@ -24,7 +24,7 @@ def create_lod_tensor(data, recursive_seq_lens, place): Create a lod tensor by doing the following: - 1. Check that the length-based level of detail (LoD) also known as + 1. Check that the length-based level of detail (LoD) also known as recursive_sequence_lengths of the input is valid. 2. 
Convert recursive_sequence_lengths to a offset-based LoD. @@ -33,7 +33,7 @@ def create_lod_tensor(data, recursive_seq_lens, place): CPU or GPU device (based on input place). 4. Set the level of detail (LoD) using the offset-based LoD. - + Examples: Suppose we want LoDTensor to hold data for sequences of word, where each @@ -51,7 +51,7 @@ def create_lod_tensor(data, recursive_seq_lens, place): Args: data(numpy.ndarray|list|LoDTensor): a numpy array or a LoDTensor or a list holding the data to be copied. - recursive_seq_lens(list): a list of lists indicating the length-based level of detail + recursive_seq_lens(list): a list of lists indicating the length-based level of detail info specified by the user. place(Place): CPU or GPU place indicating where the data in the new LoDTensor will be stored. @@ -62,10 +62,10 @@ def create_lod_tensor(data, recursive_seq_lens, place): if isinstance(data, core.LoDTensor): return create_lod_tensor(np.array(data), recursive_seq_lens, place) elif isinstance(data, list): - # When input data is a list, it only deal with the case where the base element - # is an index of shape [1] and dtype int64 (e.g., word id). Hence, the generated - # LoDTensor will be of shape [n, 1] and dtype int64, where `n` is the total number - # of words or other indexes in the sequence. + # When input data is a list, it only deal with the case where the base element + # is an index of shape [1] and dtype int64 (e.g., word id). Hence, the generated + # LoDTensor will be of shape [n, 1] and dtype int64, where `n` is the total number + # of words or other indexes in the sequence. new_recursive_seq_lens = [] for seq in data: new_recursive_seq_lens.append(len(seq)) @@ -109,12 +109,12 @@ def create_random_int_lodtensor(recursive_seq_lens, base_shape, place, low, Suppose we want LoDTensor to hold data for sequences of word, where each word is represented by an integer. If we want to create a LoDTensor to represent two sentences, one of 2 words, and one of 3 words. Then - 'base_shape' is [1], input length-based 'recursive_seq_lens' is [[2, 3]]. - Then the overall shape of the LoDTensor would be [5, 1], holding 5 words + 'base_shape' is [1], input length-based 'recursive_seq_lens' is [[2, 3]]. + Then the overall shape of the LoDTensor would be [5, 1], holding 5 words for two sentences. Args: - recursive_seq_lens(list): a list of lists indicating the length-based + recursive_seq_lens(list): a list of lists indicating the length-based level of detail info specified by the user. base_shape(list): the shape of the basic element to be held by the LoDTensor. @@ -124,11 +124,11 @@ def create_random_int_lodtensor(recursive_seq_lens, base_shape, place, low, high(int): the upper bound of the random integers. Returns: - A fluid LoDTensor object with tensor data and recursive_seq_lens info. + A fluid LoDTensor object with tensor data and recursive_seq_lens info. 
""" assert isinstance(base_shape, list), "base_shape should be a list" # append the total number of basic elements to the front of its shape overall_shape = [sum(recursive_seq_lens[-1])] + base_shape - # the range of integer data elements is [low, high] + # the range of integer data elements is [low, high] data = np.random.random_integers(low, high, overall_shape).astype("int64") return create_lod_tensor(data, recursive_seq_lens, place) diff --git a/python/paddle/fluid/metrics.py b/python/paddle/fluid/metrics.py index b37b09ac816..cd893452275 100644 --- a/python/paddle/fluid/metrics.py +++ b/python/paddle/fluid/metrics.py @@ -79,10 +79,10 @@ class MetricBase(object): """ states = { attr: value - for attr, value in self.__dict__.iteritems() + for attr, value in list(self.__dict__.items()) if not attr.startswith("_") } - for attr, value in states.iteritems(): + for attr, value in list(states.items()): if isinstance(value, int): setattr(self, attr, 0) elif isinstance(value, float): @@ -105,7 +105,7 @@ class MetricBase(object): """ states = { attr: value - for attr, value in self.__dict__.iteritems() + for attr, value in list(self.__dict__.items()) if not attr.startswith("_") } config = {} diff --git a/python/paddle/fluid/net_drawer.py b/python/paddle/fluid/net_drawer.py index 73946a0721d..623a7d3fd05 100644 --- a/python/paddle/fluid/net_drawer.py +++ b/python/paddle/fluid/net_drawer.py @@ -24,7 +24,7 @@ logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) try: - from graphviz import Digraph + from .graphviz import Digraph except ImportError: logger.info( 'Cannot import graphviz, which is required for drawing a network. This ' @@ -77,7 +77,7 @@ def parse_graph(program, graph, var_dict, **kwargs): # fill the known variables for block in program.blocks: for var in block.vars: - if not var_dict.has_key(var): + if var not in var_dict: var_dict[var] = "Feed" temp_id = 0 @@ -93,17 +93,17 @@ def parse_graph(program, graph, var_dict, **kwargs): var_dict[arg] = op.type for e in op.inputs: for arg in e.arguments: - if var_dict.has_key(arg): + if arg in var_dict: graph.edge(**draw_edge(var_dict, op, e, arg)) break # only plot the first block def draw_graph(startup_program, main_program, **kwargs): - if kwargs.has_key("graph_attr"): + if "graph_attr" in kwargs: GRAPH_STYLE.update(kwargs[graph_attr]) - if kwargs.has_key("node_attr"): + if "node_attr" in kwargs: OP_STYLE.update(kwargs[node_attr]) - if kwargs.has_key("edge_attr"): + if "edge_attr" in kwargs: VAR_STYLE.update(kwargs[edge_attr]) graph_id = unique_id() diff --git a/python/paddle/fluid/nets.py b/python/paddle/fluid/nets.py index 9b3f2aebee7..08480671d8a 100644 --- a/python/paddle/fluid/nets.py +++ b/python/paddle/fluid/nets.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import layers +from . 
import layers
 
 __all__ = [
     "simple_img_conv_pool",
@@ -210,7 +210,7 @@ def img_conv_group(input,
     conv_with_batchnorm = __extend_list__(conv_with_batchnorm)
     conv_batchnorm_drop_rate = __extend_list__(conv_batchnorm_drop_rate)
 
-    for i in xrange(len(conv_num_filter)):
+    for i in range(len(conv_num_filter)):
         local_conv_act = conv_act
         if conv_with_batchnorm[i]:
             local_conv_act = None
@@ -488,10 +488,11 @@ def scaled_dot_product_attention(queries,
         trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
         return layers.reshape(
             x=trans_x,
-            shape=map(int, [
-                trans_x.shape[0], trans_x.shape[1],
-                trans_x.shape[2] * trans_x.shape[3]
-            ]))
+            shape=list(
+                map(int, [
+                    trans_x.shape[0], trans_x.shape[1], trans_x.shape[2] *
+                    trans_x.shape[3]
+                ])))
 
     q, k, v = __compute_qkv(queries, keys, values, num_heads)
 
diff --git a/python/paddle/fluid/op.py b/python/paddle/fluid/op.py
index 0b76e94157e..37ba8d9f09d 100644
--- a/python/paddle/fluid/op.py
+++ b/python/paddle/fluid/op.py
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import six
+
 import paddle.fluid.core as core
 import paddle.fluid.proto.framework_pb2 as framework_pb2
 
@@ -24,13 +26,13 @@ def get_all_op_protos():
     protostrs = core.get_all_op_protos()
     ret_values = []
     for pbstr in protostrs:
-        op_proto = framework_pb2.OpProto.FromString(str(pbstr))
+        op_proto = framework_pb2.OpProto.FromString(six.binary_type(pbstr))
         ret_values.append(op_proto)
     return ret_values
 
 
 def is_str(s):
-    return isinstance(s, str) or isinstance(s, unicode)
+    return isinstance(s, six.string_types)
 
 
 class OpDescCreationMethod(object):
@@ -189,7 +191,7 @@ class OperatorFactory(object):
         return self.get_op_info(t).method(**kwargs)
 
     def types(self):
-        return self.op_methods.keys()
+        return list(self.op_methods.keys())
 
     def get_op_info(self, t):
         if t not in self.op_methods:
@@ -197,13 +199,13 @@ class OperatorFactory(object):
         return self.op_methods.get(t)
 
     def get_op_input_names(self, type):
-        return map(lambda x: x[0], self.get_op_info(type).inputs)
+        return [x[0] for x in self.get_op_info(type).inputs]
 
     def get_op_inputs(self, type):
         return self.get_op_info(type).inputs
 
     def get_op_output_names(self, type):
-        return map(lambda x: x[0], self.get_op_info(type).outputs)
+        return [x[0] for x in self.get_op_info(type).outputs]
 
     def get_op_outputs(self, type):
         return self.get_op_info(type).outputs
diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index 3fe99f55011..a3c32cfea67 100644
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -14,15 +14,15 @@
 import re
 from collections import defaultdict
 from paddle.fluid.framework import Program, Variable
-import framework
-import layers
-from backward import append_backward
-from framework import program_guard
-import unique_name
-from initializer import Constant
-from layer_helper import LayerHelper
-from regularizer import append_regularization_ops
-from clip import append_gradient_clip_ops, error_clip_callback
+from . import framework
+from . import layers
+from .backward import append_backward
+from .framework import program_guard
+from . import unique_name
+from .initializer import Constant
+from .layer_helper import LayerHelper
+from .regularizer import append_regularization_ops
+from .clip import append_gradient_clip_ops, error_clip_callback
 from contextlib import contextmanager
 
 __all__ = [
@@ -106,7 +106,7 @@ class Optimizer(object):
         param_lr = param.optimize_attr['learning_rate']
         if type(param_lr) == Variable:
             # param learning rate has been updated (LARS)
-            print("returns updated param lr ", param_lr)
+            print("returns updated param lr %s" % param_lr)
             return param_lr
         else:
             if param_lr == 1.0:
diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py
index 10028a8c6e3..a9bd8930d03 100644
--- a/python/paddle/fluid/parallel_executor.py
+++ b/python/paddle/fluid/parallel_executor.py
@@ -12,10 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import core
+from __future__ import print_function
 import multiprocessing
-import framework
-import executor
+from . import core
+from . import framework
+from . import executor
 import warnings
 import sys
 import os
@@ -94,7 +95,7 @@ class ParallelExecutor(object):
         self._places = []
         self._act_places = []
         if use_cuda:
-            for i in xrange(core.get_cuda_device_count()):
+            for i in range(core.get_cuda_device_count()):
                 p = core.Place()
                 self._act_places.append(core.CUDAPlace(i))
                 p.set_place(self._act_places[-1])
@@ -102,7 +103,7 @@ class ParallelExecutor(object):
         else:
             cpu_num = int(
                 os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
-            for i in xrange(cpu_num):
+            for i in range(cpu_num):
                 p = core.Place()
                 self._act_places.append(core.CPUPlace())
                 p.set_place(self._act_places[-1])
@@ -143,16 +144,16 @@ class ParallelExecutor(object):
         ) if share_vars_from else []
 
         self.persistable_vars = [
-            v.name
-            for v in filter(
-                lambda var: var.persistable and var.type != core.VarDesc.VarType.RAW,
-                main.list_vars())
+            v.name for v in [
+                var for var in main.list_vars()
+                if var.persistable and var.type != core.VarDesc.VarType.RAW
+            ]
         ]
 
         self.executor = core.ParallelExecutor(
             self._places,
             set([
-                p.name for p in main.global_block()._iter_parameters()
+                p.name for p in main.global_block().iter_parameters()
                 if not p.stop_gradient
             ]), set(self.persistable_vars), main.desc, loss_name
@@ -227,7 +228,9 @@ class ParallelExecutor(object):
         """
         if feed is None and feed_dict is not None:
             feed = feed_dict
-            print >> sys.stderr, "`feed_dict` is deprecated. Please use `feed=`"
+            print(
+                "`feed_dict` is deprecated. Please use `feed=`",
+                file=sys.stderr)
 
         if isinstance(feed, dict):
             feed_tensor_dict = dict()
diff --git a/python/paddle/fluid/param_attr.py b/python/paddle/fluid/param_attr.py
index 4a61f85ec4b..04e0c9e6317 100644
--- a/python/paddle/fluid/param_attr.py
+++ b/python/paddle/fluid/param_attr.py
@@ -12,8 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from initializer import Initializer, Xavier, Constant
-from regularizer import WeightDecayRegularizer
+from .initializer import Initializer, Xavier, Constant
+from .regularizer import WeightDecayRegularizer
 
 __all__ = [
     'ParamAttr',
@@ -134,7 +134,7 @@ class ParamAttr(object):
             return [ParamAttr._to_attr(a) for a in arg]
         elif isinstance(arg, ParamAttr):
             return arg
-        elif isinstance(arg, str) or isinstance(arg, unicode):
+        elif isinstance(arg, str):
             return ParamAttr(name=arg)
         elif isinstance(arg, Initializer):
             return ParamAttr(initializer=arg)
diff --git a/python/paddle/fluid/profiler.py b/python/paddle/fluid/profiler.py
index 6a321ae024d..60e9215457e 100644
--- a/python/paddle/fluid/profiler.py
+++ b/python/paddle/fluid/profiler.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import core
+from . import core
 from contextlib import contextmanager
 import os
 
@@ -224,7 +224,7 @@ def profiler(state, sorted_key=None, profile_path='/tmp/profile'):
     If the state == 'All', a profile proto file will be written to
     `profile_path`. This file records timeline information during the execution.
-    Then users can visualize this file to see the timeline, please refer 
+    Then users can visualize this file to see the timeline, please refer to
     https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/howto/optimization/timeline.md
 
     Args:
diff --git a/python/paddle/fluid/recordio_writer.py b/python/paddle/fluid/recordio_writer.py
index bd577727130..93b38ad3fa3 100644
--- a/python/paddle/fluid/recordio_writer.py
+++ b/python/paddle/fluid/recordio_writer.py
@@ -13,8 +13,8 @@
 # limitations under the License.
 
 import os
-import core
 import contextlib
+from . import core
 __all__ = [
     'convert_reader_to_recordio_file', 'convert_reader_to_recordio_files'
 ]
diff --git a/python/paddle/fluid/regularizer.py b/python/paddle/fluid/regularizer.py
index 080c185420b..0d0288a176a 100644
--- a/python/paddle/fluid/regularizer.py
+++ b/python/paddle/fluid/regularizer.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import framework
+from . import framework
 from . import core
 
 __all__ = ['L1Decay', 'L2Decay', 'L1DecayRegularizer', 'L2DecayRegularizer']
diff --git a/python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py b/python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py
index ad28c9eff56..a27e6c45f68 100644
--- a/python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py
+++ b/python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py
@@ -63,7 +63,7 @@ def train(use_cuda, train_program, params_dirname):
         if event.step == 10:
             test_metrics = trainer.test(
                 reader=test_reader, feed_order=['x', 'y'])
-            print test_metrics
+            print(test_metrics)
             '''
             ...
['25.768919467926025'] @@ -94,7 +94,7 @@ def infer(use_cuda, inference_program, params_dirname=None): tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32") results = inferencer.infer({'x': tensor_x}) - print("infer results: ", results[0]) + print(("infer results: ", results[0])) def main(use_cuda): diff --git a/python/paddle/fluid/tests/book/high-level-api/image_classification/cifar10_small_test_set.py b/python/paddle/fluid/tests/book/high-level-api/image_classification/cifar10_small_test_set.py index 7fed6d914f7..ebbe09d0988 100644 --- a/python/paddle/fluid/tests/book/high-level-api/image_classification/cifar10_small_test_set.py +++ b/python/paddle/fluid/tests/book/high-level-api/image_classification/cifar10_small_test_set.py @@ -28,7 +28,7 @@ images per class. """ -import cPickle +import pickle import itertools import numpy import paddle.v2.dataset.common @@ -46,7 +46,7 @@ def reader_creator(filename, sub_name, batch_size=None): data = batch['data'] labels = batch.get('labels', batch.get('fine_labels', None)) assert labels is not None - for sample, label in itertools.izip(data, labels): + for sample, label in zip(data, labels): yield (sample / 255.0).astype(numpy.float32), int(label) def reader(): @@ -56,7 +56,7 @@ def reader_creator(filename, sub_name, batch_size=None): batch_count = 0 for name in names: - batch = cPickle.load(f.extractfile(name)) + batch = pickle.load(f.extractfile(name)) for item in read_batch(batch): if isinstance(batch_size, int) and batch_count > batch_size: break diff --git a/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py index 8e222d26907..8f38d53ea1b 100644 --- a/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py +++ b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import paddle import paddle.fluid as fluid import numpy @@ -107,7 +105,7 @@ def train(use_cuda, train_program, params_dirname): avg_cost, accuracy = trainer.test( reader=test_reader, feed_order=['pixel', 'label']) - print('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy)) + print(('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy))) if accuracy > 0.01: # Low threshold for speeding up CI if params_dirname is not None: @@ -136,7 +134,7 @@ def infer(use_cuda, inference_program, params_dirname=None): tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32") results = inferencer.infer({'pixel': tensor_img}) - print("infer results: ", results) + print(("infer results: ", results)) def main(use_cuda): diff --git a/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py index dbc7bc06c93..f37d3e6d6d1 100644 --- a/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py +++ b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
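A caveat visible in the test updates above and below: mechanically parenthesizing a multi-argument Python 2 print statement yields `print((a, b))`, which calls the print function with a single tuple argument, so the output becomes a tuple repr instead of space-separated values. A minimal illustration, with placeholder values:

    results = [0.25]
    print(("infer results: ", results[0]))  # ('infer results: ', 0.25)
    print("infer results: ", results[0])    # infer results:  0.25
    # The second form also needs "from __future__ import print_function"
    # to behave the same way when the file still runs under Python 2.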
-from __future__ import print_function - import paddle import paddle.fluid as fluid import numpy @@ -84,7 +82,7 @@ def train(use_cuda, train_program, params_dirname): avg_cost, accuracy = trainer.test( reader=test_reader, feed_order=['pixel', 'label']) - print('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy)) + print(('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy))) if accuracy > 0.01: # Low threshold for speeding up CI if params_dirname is not None: @@ -113,7 +111,7 @@ def infer(use_cuda, inference_program, params_dirname=None): tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32") results = inferencer.infer({'pixel': tensor_img}) - print("infer results: ", results) + print(("infer results: ", results)) def main(use_cuda): diff --git a/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py b/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py index 67aa21e8c56..6e177478e81 100755 --- a/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py +++ b/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import paddle import paddle.fluid as fluid import numpy as np @@ -173,19 +171,20 @@ def train(use_cuda, train_program, params_dirname): # get avg cost avg_cost = np.array(avg_cost_set).mean() - print("avg_cost: %s" % avg_cost) + print(("avg_cost: %s" % avg_cost)) if float(avg_cost) < 100.0: # Large value to increase CI speed trainer.save_params(params_dirname) else: - print('BatchID {0}, Test Loss {1:0.2}'.format(event.epoch + 1, - float(avg_cost))) + print( + ('BatchID {0}, Test Loss {1:0.2}'.format(event.epoch + 1, + float(avg_cost)))) if math.isnan(float(avg_cost)): sys.exit("got NaN loss, training failed.") elif isinstance(event, fluid.EndStepEvent): - print("Step {0}, Epoch {1} Metrics {2}".format( - event.step, event.epoch, map(np.array, event.metrics))) + print(("Step {0}, Epoch {1} Metrics {2}".format( + event.step, event.epoch, list(map(np.array, event.metrics))))) if event.step == 1: # Run 2 iterations to speed CI trainer.save_params(params_dirname) trainer.stop() @@ -249,7 +248,7 @@ def infer(use_cuda, inference_program, params_dirname): }, return_numpy=False) - print("infer results: ", np.array(results[0]).shape) + print(("infer results: ", np.array(results[0]).shape)) def main(use_cuda): diff --git a/python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py b/python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py index 8becd2404b0..c8dbea4807d 100644 --- a/python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py +++ b/python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py @@ -197,7 +197,7 @@ def train(use_cuda, is_sparse, is_local=True): def event_handler(event): if isinstance(event, fluid.EndStepEvent): - print('pass_id=' + str(event.epoch) + ' batch=' + str(event.step)) + print(('pass_id=' + str(event.epoch) + ' batch=' + str(event.step))) if event.step == 10: trainer.stop() @@ -250,7 +250,7 @@ def decode_main(use_cuda, is_sparse): feeder = fluid.DataFeeder(feed_list, place) for data in train_data(): - feed_dict = feeder.feed(map(lambda x: [x[0]], data)) + 
feed_dict = feeder.feed([[x[0]] for x in data]) feed_dict['init_ids'] = init_ids feed_dict['init_scores'] = init_scores @@ -259,7 +259,7 @@ def decode_main(use_cuda, is_sparse): feed=feed_dict, fetch_list=[translation_ids, translation_scores], return_numpy=False) - print result_ids.recursive_sequence_lengths() + print((result_ids.recursive_sequence_lengths())) break diff --git a/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py index fd278f45f1c..2ade9a4bc61 100644 --- a/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py +++ b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function + import argparse import paddle.fluid as fluid import paddle.fluid.core as core @@ -78,19 +78,21 @@ def train(use_cuda, train_program, params_dirname): avg_cost, acc = trainer.test( reader=test_reader, feed_order=['img', 'label']) - print("avg_cost: %s" % avg_cost) - print("acc : %s" % acc) + print(("avg_cost: %s" % avg_cost)) + print(("acc : %s" % acc)) if acc > 0.2: # Smaller value to increase CI speed trainer.save_params(params_dirname) else: - print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( - event.epoch + 1, avg_cost, acc)) + print(('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( + event.epoch + 1, avg_cost, acc))) if math.isnan(avg_cost): sys.exit("got NaN loss, training failed.") elif isinstance(event, fluid.EndStepEvent): - print("Step {0}, Epoch {1} Metrics {2}".format( - event.step, event.epoch, map(numpy.array, event.metrics))) + print( + ("Step {0}, Epoch {1} Metrics {2}".format( + event.step, event.epoch, + list(map(numpy.array, event.metrics))))) train_reader = paddle.batch( paddle.reader.shuffle( @@ -116,7 +118,7 @@ def infer(use_cuda, inference_program, params_dirname=None): results = inferencer.infer({'img': tensor_img}) - print("infer results: ", results[0]) + print(("infer results: ", results[0])) def main(use_cuda): diff --git a/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py index b2b544e791d..ddf7d05d43c 100644 --- a/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py +++ b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
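`map` is lazy on Python 3 as well, which is why the event handlers above wrap it in `list(...)` before string formatting; formatting a bare map object would print something like `<map object at 0x...>`. A sketch of the equivalent comprehension form, with placeholder values:

    import numpy

    event_metrics = [[0.1], [0.9]]  # illustrative metric values
    metrics = [numpy.array(m) for m in event_metrics]
    print("Step {0}, Epoch {1} Metrics {2}".format(0, 1, metrics))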
-from __future__ import print_function + import argparse import paddle.fluid as fluid import paddle @@ -61,14 +61,14 @@ def train(use_cuda, train_program, params_dirname): avg_cost, acc = trainer.test( reader=test_reader, feed_order=['img', 'label']) - print("avg_cost: %s" % avg_cost) - print("acc : %s" % acc) + print(("avg_cost: %s" % avg_cost)) + print(("acc : %s" % acc)) if acc > 0.2: # Smaller value to increase CI speed trainer.save_params(params_dirname) else: - print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( - event.epoch + 1, avg_cost, acc)) + print(('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( + event.epoch + 1, avg_cost, acc))) if math.isnan(avg_cost): sys.exit("got NaN loss, training failed.") @@ -96,7 +96,7 @@ def infer(use_cuda, inference_program, params_dirname=None): results = inferencer.infer({'img': tensor_img}) - print("infer results: ", results[0]) + print(("infer results: ", results[0])) def main(use_cuda): diff --git a/python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py b/python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py index c860f164170..2605c1f56fd 100644 --- a/python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py +++ b/python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py @@ -180,14 +180,15 @@ def train(use_cuda, train_program, params_dirname): # get avg cost avg_cost = np.array(avg_cost_set).mean() - print("avg_cost: %s" % avg_cost) + print(("avg_cost: %s" % avg_cost)) if float(avg_cost) < 4: # Smaller value to increase CI speed trainer.save_params(params_dirname) trainer.stop() else: - print('BatchID {0}, Test Loss {1:0.2}'.format(event.epoch + 1, - float(avg_cost))) + print( + ('BatchID {0}, Test Loss {1:0.2}'.format(event.epoch + 1, + float(avg_cost)))) if math.isnan(float(avg_cost)): sys.exit("got NaN loss, training failed.") @@ -239,7 +240,7 @@ def infer(use_cuda, inference_program, params_dirname): }, return_numpy=False) - print("infer results: ", np.array(results[0])) + print(("infer results: ", np.array(results[0]))) def main(use_cuda): diff --git a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py index 1668ae83d35..4a739252e19 100644 --- a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py +++ b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from __future__ import print_function - import paddle import paddle.fluid as fluid from functools import partial @@ -84,21 +82,21 @@ def train(use_cuda, train_program, params_dirname): avg_cost, acc = trainer.test( reader=test_reader, feed_order=['words', 'label']) - print("avg_cost: %s" % avg_cost) - print("acc : %s" % acc) + print(("avg_cost: %s" % avg_cost)) + print(("acc : %s" % acc)) if acc > 0.2: # Smaller value to increase CI speed trainer.save_params(params_dirname) trainer.stop() else: - print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( - event.epoch + 1, avg_cost, acc)) + print(('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( + event.epoch + 1, avg_cost, acc))) if math.isnan(avg_cost): sys.exit("got NaN loss, training failed.") elif isinstance(event, fluid.EndStepEvent): - print("Step {0}, Epoch {1} Metrics {2}".format( - event.step, event.epoch, map(np.array, event.metrics))) + print(("Step {0}, Epoch {1} Metrics {2}".format( + event.step, event.epoch, list(map(np.array, event.metrics))))) if event.step == 1: # Run 2 iterations to speed CI trainer.save_params(params_dirname) trainer.stop() @@ -140,7 +138,7 @@ def infer(use_cuda, inference_program, params_dirname=None): tensor_words = fluid.create_random_int_lodtensor( recursive_seq_lens, base_shape, place, low=0, high=len(word_dict) - 1) results = inferencer.infer({'words': tensor_words}) - print("infer results: ", results) + print(("infer results: ", results)) def main(use_cuda): diff --git a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py index 8da89d82cb8..690d6e47c10 100644 --- a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py +++ b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
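The `list(map(...))` wrappers in these event handlers are not cosmetic. In Python 3 `map()` returns a lazy iterator, so interpolating it straight into `str.format` would log `<map object ...>` instead of the metric values; materializing it with `list()` restores the Python 2 output. A minimal sketch with made-up metric values:

    import numpy as np

    metrics = [0.25, 0.5]  # hypothetical stand-in for event.metrics
    # Python 3 without list(): prints something like "Metrics <map object at 0x...>"
    print("Metrics {0}".format(map(np.array, metrics)))
    # With list(): prints "Metrics [array(0.25), array(0.5)]" as on Python 2
    print("Metrics {0}".format(list(map(np.array, metrics))))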
-from __future__ import print_function - import paddle import paddle.fluid as fluid from functools import partial @@ -99,21 +97,21 @@ def train(use_cuda, train_program, params_dirname): avg_cost, acc = trainer.test( reader=test_reader, feed_order=['words', 'label']) - print("avg_cost: %s" % avg_cost) - print("acc : %s" % acc) + print(("avg_cost: %s" % avg_cost)) + print(("acc : %s" % acc)) if acc > 0.2: # Smaller value to increase CI speed trainer.save_params(params_dirname) trainer.stop() else: - print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( - event.epoch + 1, avg_cost, acc)) + print(('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( + event.epoch + 1, avg_cost, acc))) if math.isnan(avg_cost): sys.exit("got NaN loss, training failed.") elif isinstance(event, fluid.EndStepEvent): - print("Step {0}, Epoch {1} Metrics {2}".format( - event.step, event.epoch, map(np.array, event.metrics))) + print(("Step {0}, Epoch {1} Metrics {2}".format( + event.step, event.epoch, list(map(np.array, event.metrics))))) if event.step == 1: # Run 2 iterations to speed CI trainer.save_params(params_dirname) trainer.stop() @@ -155,7 +153,7 @@ def infer(use_cuda, inference_program, params_dirname=None): tensor_words = fluid.create_random_int_lodtensor( recursive_seq_lens, base_shape, place, low=0, high=len(word_dict) - 1) results = inferencer.infer({'words': tensor_words}) - print("infer results: ", results) + print(("infer results: ", results)) def main(use_cuda): diff --git a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py index 74faa2e8aa7..af41abaf230 100644 --- a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py +++ b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from __future__ import print_function - import paddle import paddle.fluid as fluid from functools import partial @@ -93,21 +91,21 @@ def train(use_cuda, train_program, params_dirname): avg_cost, acc = trainer.test( reader=test_reader, feed_order=['words', 'label']) - print("avg_cost: %s" % avg_cost) - print("acc : %s" % acc) + print(("avg_cost: %s" % avg_cost)) + print(("acc : %s" % acc)) if acc > 0.2: # Smaller value to increase CI speed trainer.save_params(params_dirname) trainer.stop() else: - print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( - event.epoch + 1, avg_cost, acc)) + print(('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( + event.epoch + 1, avg_cost, acc))) if math.isnan(avg_cost): sys.exit("got NaN loss, training failed.") elif isinstance(event, fluid.EndStepEvent): - print("Step {0}, Epoch {1} Metrics {2}".format( - event.step, event.epoch, map(np.array, event.metrics))) + print(("Step {0}, Epoch {1} Metrics {2}".format( + event.step, event.epoch, list(map(np.array, event.metrics))))) if event.step == 1: # Run 2 iterations to speed CI trainer.save_params(params_dirname) trainer.stop() @@ -150,7 +148,7 @@ def infer(use_cuda, inference_program, params_dirname=None): tensor_words = fluid.create_random_int_lodtensor( recursive_seq_lens, base_shape, place, low=0, high=len(word_dict) - 1) results = inferencer.infer({'words': tensor_words}) - print("infer results: ", results) + print(("infer results: ", results)) def main(use_cuda): diff --git a/python/paddle/fluid/tests/book/high-level-api/word2vec/test_word2vec_new_api.py b/python/paddle/fluid/tests/book/high-level-api/word2vec/test_word2vec_new_api.py index 02e65cf56c4..8e32f90d652 100644 --- a/python/paddle/fluid/tests/book/high-level-api/word2vec/test_word2vec_new_api.py +++ b/python/paddle/fluid/tests/book/high-level-api/word2vec/test_word2vec_new_api.py @@ -98,7 +98,7 @@ def train(use_cuda, train_program, params_dirname): reader=test_reader, feed_order=['firstw', 'secondw', 'thirdw', 'forthw', 'nextw']) avg_cost = outs[0] - print("loss= ", avg_cost) + print(("loss= ", avg_cost)) if avg_cost < 10.0: trainer.save_params(params_dirname) @@ -149,7 +149,7 @@ def infer(use_cuda, inference_program, params_dirname=None): 'forthw': fourth_word }, return_numpy=False) - print(np.array(result[0])) + print((np.array(result[0]))) def main(use_cuda, is_sparse): diff --git a/python/paddle/fluid/tests/book/notest_understand_sentiment.py b/python/paddle/fluid/tests/book/notest_understand_sentiment.py index 95002aa7f9b..2b5585149d6 100644 --- a/python/paddle/fluid/tests/book/notest_understand_sentiment.py +++ b/python/paddle/fluid/tests/book/notest_understand_sentiment.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
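Two smaller conversions recur in the hunks that follow and are worth spelling out (a sketch with assumed values): `xrange` is gone in Python 3, but `range` is itself lazy there, so the plain rename keeps the constant-memory looping; and the iterator method `.next()` became `__next__`, for which the version-neutral spelling is the builtin `next()`.

    PASS_NUM = 2                        # assumed, mirroring the tests below
    for pass_id in range(PASS_NUM):     # was: xrange(PASS_NUM); same lazy behavior
        pass

    reader = iter([("features", 0)])    # hypothetical batched reader
    first_batch = next(reader)          # was: reader.next(); works on 2 and 3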
-from __future__ import print_function + from paddle.fluid.layers.device import get_places import unittest import paddle.fluid as fluid @@ -175,12 +175,12 @@ def train(word_dict, def train_loop(main_program): exe.run(fluid.default_startup_program()) - for pass_id in xrange(PASS_NUM): + for pass_id in range(PASS_NUM): for data in train_data(): cost_val, acc_val = exe.run(main_program, feed=feeder.feed(data), fetch_list=[cost, acc_out]) - print("cost=" + str(cost_val) + " acc=" + str(acc_val)) + print(("cost=" + str(cost_val) + " acc=" + str(acc_val))) if cost_val < 0.4 and acc_val > 0.8: if save_dirname is not None: fluid.io.save_inference_model(save_dirname, ["words"], @@ -261,10 +261,10 @@ def infer(word_dict, use_cuda, save_dirname=None): feed={feed_target_names[0]: tensor_words}, fetch_list=fetch_targets, return_numpy=False) - print(results[0].recursive_sequence_lengths()) + print((results[0].recursive_sequence_lengths())) np_data = np.array(results[0]) - print("Inference Shape: ", np_data.shape) - print("Inference results: ", np_data) + print(("Inference Shape: ", np_data.shape)) + print(("Inference results: ", np_data)) def main(word_dict, net_method, use_cuda, parallel=False, save_dirname=None): diff --git a/python/paddle/fluid/tests/book/test_fit_a_line.py b/python/paddle/fluid/tests/book/test_fit_a_line.py index 71bf5f8b3a9..58bcbdfb039 100644 --- a/python/paddle/fluid/tests/book/test_fit_a_line.py +++ b/python/paddle/fluid/tests/book/test_fit_a_line.py @@ -114,7 +114,7 @@ def infer(use_cuda, save_dirname=None): test_reader = paddle.batch( paddle.dataset.uci_housing.test(), batch_size=batch_size) - test_data = test_reader().next() + test_data = next(test_reader()) test_feat = numpy.array( [data[0] for data in test_data]).astype("float32") test_label = numpy.array( @@ -124,9 +124,9 @@ def infer(use_cuda, save_dirname=None): results = exe.run(inference_program, feed={feed_target_names[0]: numpy.array(test_feat)}, fetch_list=fetch_targets) - print("infer shape: ", results[0].shape) - print("infer results: ", results[0]) - print("ground truth: ", test_label) + print(("infer shape: ", results[0].shape)) + print(("infer results: ", results[0])) + print(("ground truth: ", test_label)) def main(use_cuda, is_local=True): diff --git a/python/paddle/fluid/tests/book/test_image_classification.py b/python/paddle/fluid/tests/book/test_image_classification.py index a2fb186b86c..1c74481fa76 100644 --- a/python/paddle/fluid/tests/book/test_image_classification.py +++ b/python/paddle/fluid/tests/book/test_image_classification.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import paddle import paddle.fluid as fluid import contextlib @@ -165,10 +163,10 @@ def train(net_type, use_cuda, save_dirname, is_local): acc_value = numpy.array(acc_list).mean() avg_loss_value = numpy.array(avg_loss_list).mean() - print( + print(( 'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'. 
format(pass_id, batch_id + 1, - float(avg_loss_value), float(acc_value))) + float(avg_loss_value), float(acc_value)))) if acc_value > 0.01: # Low threshold for speeding up CI fluid.io.save_inference_model(save_dirname, ["pixel"], @@ -241,7 +239,7 @@ def infer(use_cuda, save_dirname=None): np.testing.assert_almost_equal( results[0][i], transpiler_results[0][i], decimal=5) - print("infer results: ", results[0]) + print(("infer results: ", results[0])) fluid.io.save_inference_model(save_dirname, feed_target_names, fetch_targets, exe, diff --git a/python/paddle/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/fluid/tests/book/test_label_semantic_roles.py index d489feae9c5..9dbd462fb42 100644 --- a/python/paddle/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/fluid/tests/book/test_label_semantic_roles.py @@ -181,7 +181,7 @@ def train(use_cuda, save_dirname=None, is_local=True): start_time = time.time() batch_id = 0 - for pass_id in xrange(PASS_NUM): + for pass_id in range(PASS_NUM): for data in train_data(): cost = exe.run(main_program, feed=feeder.feed(data), @@ -189,10 +189,10 @@ def train(use_cuda, save_dirname=None, is_local=True): cost = cost[0] if batch_id % 10 == 0: - print("avg_cost:" + str(cost)) + print(("avg_cost:" + str(cost))) if batch_id != 0: - print("second per batch: " + str((time.time( - ) - start_time) / batch_id)) + print(("second per batch: " + str( + (time.time() - start_time) / batch_id))) # Set the threshold low to speed up the CI test if float(cost) < 60.0: if save_dirname is not None: @@ -333,9 +333,9 @@ def infer(use_cuda, save_dirname=None): }, fetch_list=fetch_targets, return_numpy=False) - print(results[0].recursive_sequence_lengths()) + print((results[0].recursive_sequence_lengths())) np_data = np.array(results[0]) - print("Inference Shape: ", np_data.shape) + print(("Inference Shape: ", np_data.shape)) def main(use_cuda, is_local=True): diff --git a/python/paddle/fluid/tests/book/test_machine_translation.py b/python/paddle/fluid/tests/book/test_machine_translation.py index 90c301a6610..6578328643c 100644 --- a/python/paddle/fluid/tests/book/test_machine_translation.py +++ b/python/paddle/fluid/tests/book/test_machine_translation.py @@ -199,14 +199,14 @@ def train_main(use_cuda, is_sparse, is_local=True): feeder = fluid.DataFeeder(feed_list, place) batch_id = 0 - for pass_id in xrange(1): + for pass_id in range(1): for data in train_data(): outs = exe.run(main_program, feed=feeder.feed(data), fetch_list=[avg_cost]) avg_cost_val = np.array(outs[0]) - print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + - " avg_cost=" + str(avg_cost_val)) + print(('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + + " avg_cost=" + str(avg_cost_val))) if batch_id > 3: break batch_id += 1 @@ -273,7 +273,7 @@ def decode_main(use_cuda, is_sparse): feeder = fluid.DataFeeder(feed_list, place) for data in train_data(): - feed_dict = feeder.feed(map(lambda x: [x[0]], data)) + feed_dict = feeder.feed([[x[0]] for x in data]) feed_dict['init_ids'] = init_ids feed_dict['init_scores'] = init_scores @@ -282,7 +282,7 @@ def decode_main(use_cuda, is_sparse): feed=feed_dict, fetch_list=[translation_ids, translation_scores], return_numpy=False) - print result_ids.recursive_sequence_lengths() + print((result_ids.recursive_sequence_lengths())) break diff --git a/python/paddle/fluid/tests/book/test_recognize_digits.py b/python/paddle/fluid/tests/book/test_recognize_digits.py index c4718639209..49e58a148a6 100644 --- 
a/python/paddle/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/fluid/tests/book/test_recognize_digits.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function import paddle.fluid.core as core import math @@ -143,10 +142,10 @@ def train(nn_type, params_filename=params_filename) return else: - print( + print(( 'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'. format(pass_id, batch_id + 1, - float(avg_loss_val), float(acc_val))) + float(avg_loss_val), float(acc_val)))) if math.isnan(float(avg_loss_val)): sys.exit("got NaN loss, training failed.") raise AssertionError("Loss of recognize digits is too large") @@ -207,7 +206,7 @@ def infer(use_cuda, results = exe.run(inference_program, feed={feed_target_names[0]: tensor_img}, fetch_list=fetch_targets) - print("infer results: ", results[0]) + print(("infer results: ", results[0])) def main(use_cuda, parallel, nn_type, combine): diff --git a/python/paddle/fluid/tests/book/test_recommender_system.py b/python/paddle/fluid/tests/book/test_recommender_system.py index 6548766ef5d..ca396e5cdf7 100644 --- a/python/paddle/fluid/tests/book/test_recommender_system.py +++ b/python/paddle/fluid/tests/book/test_recommender_system.py @@ -304,7 +304,7 @@ def infer(use_cuda, save_dirname=None): }, fetch_list=fetch_targets, return_numpy=False) - print("inferred score: ", np.array(results[0])) + print(("inferred score: ", np.array(results[0]))) def main(use_cuda): diff --git a/python/paddle/fluid/tests/book/test_rnn_encoder_decoder.py b/python/paddle/fluid/tests/book/test_rnn_encoder_decoder.py index 46728262415..25181d83095 100644 --- a/python/paddle/fluid/tests/book/test_rnn_encoder_decoder.py +++ b/python/paddle/fluid/tests/book/test_rnn_encoder_decoder.py @@ -175,15 +175,15 @@ def train(use_cuda, save_dirname=None): feeder = fluid.DataFeeder(feed_list, place) batch_id = 0 - for pass_id in xrange(2): + for pass_id in range(2): for data in train_data(): outs = exe.run(framework.default_main_program(), feed=feeder.feed(data), fetch_list=[avg_cost]) avg_cost_val = np.array(outs[0]) - print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + - " avg_cost=" + str(avg_cost_val)) + print(('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + + " avg_cost=" + str(avg_cost_val))) if math.isnan(float(avg_cost_val[0])): sys.exit("got NaN loss, training failed.") if batch_id > 3: @@ -241,10 +241,10 @@ def infer(use_cuda, save_dirname=None): }, fetch_list=fetch_targets, return_numpy=False) - print(results[0].recursive_sequence_lengths()) + print((results[0].recursive_sequence_lengths())) np_data = np.array(results[0]) - print("Inference shape: ", np_data.shape) - print("Inference results: ", np_data) + print(("Inference shape: ", np_data.shape)) + print(("Inference results: ", np_data)) def main(use_cuda): diff --git a/python/paddle/fluid/tests/book/test_word2vec.py b/python/paddle/fluid/tests/book/test_word2vec.py index 3b957508ca1..61838440b0e 100644 --- a/python/paddle/fluid/tests/book/test_word2vec.py +++ b/python/paddle/fluid/tests/book/test_word2vec.py @@ -85,9 +85,11 @@ def train(use_cuda, is_sparse, is_parallel, save_dirname, is_local=True): pd = fluid.layers.ParallelDo(places) with pd.do(): avg_cost, predict_word = __network__( - map(pd.read_input, [ - first_word, second_word, third_word, forth_word, next_word - ])) + list( + map(pd.read_input, [ + 
first_word, second_word, third_word, forth_word, + next_word + ]))) pd.write_output(avg_cost) avg_cost = fluid.layers.mean(pd()) @@ -202,9 +204,9 @@ def infer(use_cuda, save_dirname=None): }, fetch_list=fetch_targets, return_numpy=False) - print(results[0].recursive_sequence_lengths()) + print((results[0].recursive_sequence_lengths())) np_data = np.array(results[0]) - print("Inference Shape: ", np_data.shape) + print(("Inference Shape: ", np_data.shape)) def main(use_cuda, is_sparse, is_parallel): diff --git a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py index bec9f8594ff..e425da1f9d7 100644 --- a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py +++ b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py @@ -78,7 +78,7 @@ for pass_id in range(PASS_NUM): if avg_loss_value[0] < 10.0: exit(0) # if avg cost less than 10.0, we think our code is good. - print avg_loss_value[0] + print((avg_loss_value[0])) if math.isnan(float(avg_loss_value)): sys.exit("got NaN loss, training failed.") exit(1) diff --git a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py index dfebb9a06ea..16e5c5f322a 100644 --- a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py +++ b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import sys import paddle @@ -157,8 +155,8 @@ for pass_id in range(PASS_NUM): fetch_list=[avg_cost, batch_acc, batch_size]) accuracy.add(value=acc, weight=weight) pass_acc = accuracy.eval() - print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str( - pass_acc)) + print(("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + + str(pass_acc))) # this model is slow, so if we can train two mini batch, we think it works properly. 
if i > 0: exit(0) diff --git a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py index fa696acdfa9..f290dd3e6de 100644 --- a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py +++ b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py @@ -118,14 +118,14 @@ def main(): feeder = fluid.DataFeeder(feed_list, place) batch_id = 0 - for pass_id in xrange(10): + for pass_id in range(10): for data in train_data(): outs = exe.run(fluid.default_main_program(), feed=feeder.feed(data), fetch_list=[avg_cost]) avg_cost_val = np.array(outs[0]) - print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + - " avg_cost=" + str(avg_cost_val)) + print(('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + + " avg_cost=" + str(avg_cost_val))) if batch_id > 2: exit(0) if math.isnan(float(avg_cost_val)): diff --git a/python/paddle/fluid/tests/demo/fc_gan.py b/python/paddle/fluid/tests/demo/fc_gan.py index 8ea1b2b15cc..0bc5a470296 100644 --- a/python/paddle/fluid/tests/demo/fc_gan.py +++ b/python/paddle/fluid/tests/demo/fc_gan.py @@ -137,7 +137,7 @@ def main(): generated_img = exe.run(g_program, feed={'noise': n}, fetch_list={g_img})[0] - real_data = numpy.array(map(lambda x: x[0], data)).astype('float32') + real_data = numpy.array([x[0] for x in data]).astype('float32') real_data = real_data.reshape(num_true, 784) total_data = numpy.concatenate([real_data, generated_img]) total_label = numpy.concatenate([ @@ -150,7 +150,7 @@ def main(): feed={'img': total_data, 'label': total_label}, fetch_list={d_loss})[0] - for _ in xrange(NUM_TRAIN_TIMES_OF_DG): + for _ in range(NUM_TRAIN_TIMES_OF_DG): n = numpy.random.uniform( low=-1.0, high=1.0, size=[2 * num_true * NOISE_SIZE]).astype('float32').reshape( @@ -158,8 +158,8 @@ def main(): dg_loss_np = exe.run(dg_program, feed={'noise': n}, fetch_list={dg_loss})[0] - print("Pass ID={0}, Batch ID={1}, D-Loss={2}, DG-Loss={3}".format( - pass_id, batch_id, d_loss_np, dg_loss_np)) + print(("Pass ID={0}, Batch ID={1}, D-Loss={2}, DG-Loss={3}".format( + pass_id, batch_id, d_loss_np, dg_loss_np))) # generate image each batch fig = plot(generated_img) plt.savefig( diff --git a/python/paddle/fluid/tests/demo/file_reader/convert_data_to_recordio.py b/python/paddle/fluid/tests/demo/file_reader/convert_data_to_recordio.py index b839e148898..a00325d79be 100644 --- a/python/paddle/fluid/tests/demo/file_reader/convert_data_to_recordio.py +++ b/python/paddle/fluid/tests/demo/file_reader/convert_data_to_recordio.py @@ -36,7 +36,7 @@ if len(sys.argv) == 1: else: word_dict = load_vocab(sys.argv[1]) word_dict[""] = len(word_dict) -print "Dict dim = ", len(word_dict) +print("Dict dim = ", len(word_dict)) # input text data data = fluid.layers.data(name="words", shape=[1], dtype="int64", lod_level=1) diff --git a/python/paddle/fluid/tests/no_test_concurrency.py b/python/paddle/fluid/tests/no_test_concurrency.py index e8f6cfb4a90..3bc0c9808e2 100644 --- a/python/paddle/fluid/tests/no_test_concurrency.py +++ b/python/paddle/fluid/tests/no_test_concurrency.py @@ -194,7 +194,7 @@ class TestRoutineOp(unittest.TestCase): quit_ch = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) with fluid.Go(): - for i in xrange(10): + for i in range(10): fluid.channel_recv(ch1, result) Print(result) diff --git a/python/paddle/fluid/tests/test_detection.py b/python/paddle/fluid/tests/test_detection.py index 
2d70c986b1b..3e0dffc1e97 100644 --- a/python/paddle/fluid/tests/test_detection.py +++ b/python/paddle/fluid/tests/test_detection.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function import paddle.fluid as fluid import paddle.fluid.layers as layers from paddle.fluid.framework import Program, program_guard @@ -47,7 +46,7 @@ class TestDetection(unittest.TestCase): scores=scores, loc=loc, prior_box=pb, prior_box_var=pbv) self.assertIsNotNone(out) self.assertEqual(out.shape[-1], 6) - print(str(program)) + print((str(program))) def test_detection_api(self): program = Program() @@ -82,7 +81,7 @@ class TestDetection(unittest.TestCase): self.assertIsNotNone(trg) self.assertIsNotNone(trg_weight) - print(str(program)) + print((str(program))) def test_ssd_loss(self): program = Program() @@ -106,7 +105,7 @@ class TestDetection(unittest.TestCase): loss = layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv) self.assertIsNotNone(loss) self.assertEqual(loss.shape[-1], 1) - print(str(program)) + print((str(program))) class TestPriorBox(unittest.TestCase): @@ -197,7 +196,7 @@ class TestDetectionMAP(unittest.TestCase): map_out = layers.detection_map(detect_res, label, 21) self.assertIsNotNone(map_out) self.assertEqual(map_out.shape, (1, )) - print(str(program)) + print((str(program))) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/test_error_clip.py b/python/paddle/fluid/tests/test_error_clip.py index 3dc858971c5..e8edd7fbbb3 100644 --- a/python/paddle/fluid/tests/test_error_clip.py +++ b/python/paddle/fluid/tests/test_error_clip.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
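The `map(lambda x: x[0], ...)` rewrites in test_if_else_op.py just below follow the same laziness rule: `numpy.array` wants a concrete sequence, and 2to3 turns the lambda-map into an eager list comprehension, which is also the more idiomatic spelling. A self-contained sketch with a made-up minibatch:

    import numpy as np

    data = [([0.1, 0.2], 3), ([0.3, 0.4], 7)]   # hypothetical (features, label) pairs
    x_data = np.array([x[0] for x in data]).astype("float32")  # was: map(lambda x: x[0], data)
    y_data = np.array([x[1] for x in data]).astype("int64")    # was: map(lambda x: x[1], data)
    assert x_data.shape == (2, 2) and y_data.shape == (2,)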
-from __future__ import print_function import numpy as np import paddle import paddle.fluid as fluid diff --git a/python/paddle/fluid/tests/test_if_else_op.py b/python/paddle/fluid/tests/test_if_else_op.py index 799c31dfe51..a3495cc6d47 100644 --- a/python/paddle/fluid/tests/test_if_else_op.py +++ b/python/paddle/fluid/tests/test_if_else_op.py @@ -76,15 +76,15 @@ class TestMNISTIfElseOp(unittest.TestCase): PASS_NUM = 100 for pass_id in range(PASS_NUM): for data in train_reader(): - x_data = np.array(map(lambda x: x[0], data)).astype("float32") - y_data = np.array(map(lambda x: x[1], data)).astype("int64") + x_data = np.array([x[0] for x in data]).astype("float32") + y_data = np.array([x[1] for x in data]).astype("int64") y_data = np.expand_dims(y_data, axis=1) outs = exe.run(prog, feed={'x': x_data, 'y': y_data}, fetch_list=[avg_loss]) - print outs[0] + print((outs[0])) if outs[0] < 1.0: return self.assertFalse(True) @@ -131,15 +131,15 @@ class TestMNISTIfElseOp(unittest.TestCase): PASS_NUM = 100 for pass_id in range(PASS_NUM): for data in train_reader(): - x_data = np.array(map(lambda x: x[0], data)).astype("float32") - y_data = np.array(map(lambda x: x[1], data)).astype("int64") + x_data = np.array([x[0] for x in data]).astype("float32") + y_data = np.array([x[1] for x in data]).astype("int64") y_data = y_data.reshape((y_data.shape[0], 1)) outs = exe.run(prog, feed={'x': x_data, 'y': y_data}, fetch_list=[avg_loss]) - print outs[0] + print((outs[0])) if outs[0] < 1.0: return self.assertFalse(True) diff --git a/python/paddle/fluid/tests/unittests/benchmark.py b/python/paddle/fluid/tests/unittests/benchmark.py index e891ee932f1..aeaa1751d78 100644 --- a/python/paddle/fluid/tests/unittests/benchmark.py +++ b/python/paddle/fluid/tests/unittests/benchmark.py @@ -20,7 +20,7 @@ import itertools import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.op import Operator -from op_test import OpTest +from .op_test import OpTest class BenchmarkSuite(OpTest): @@ -40,8 +40,7 @@ class BenchmarkSuite(OpTest): expect_t = np.array(item_cpu_out) actual = item_gpu_out actual_t = np.array(item_gpu_out) - var_name = variable if isinstance(variable, - basestring) else variable.name + var_name = variable if isinstance(variable, str) else variable.name self.assertTrue( np.allclose( actual_t, expect_t, atol=atol), @@ -53,7 +52,7 @@ class BenchmarkSuite(OpTest): def _get_input_names(self): inputs = [] - for name, value in self.inputs.iteritems(): + for name, value in list(self.inputs.items()): if isinstance(value, list): inputs.extend([sub_name for sub_name, _ in value]) inputs.append(name) @@ -61,7 +60,7 @@ class BenchmarkSuite(OpTest): def _get_output_names(self): outputs = [] - for var_name, var in self.outputs.iteritems(): + for var_name, var in list(self.outputs.items()): if isinstance(var, list): for sub_var_name, sub_var in var: outputs.append(sub_var_name) @@ -89,8 +88,8 @@ class BenchmarkSuite(OpTest): for place in places: elapses.append(self.timeit_output_with_place(place, iters)) for place, elapse in zip(places, elapses): - print("One pass of ({2}_op) at {0} cost {1}".format( - str(place), elapse, self.op_type)) + print(("One pass of ({2}_op) at {0} cost {1}".format( + str(place), elapse, self.op_type))) def timeit_grad_with_place(self, place, iters=100): inputs_to_check = self._get_input_names() @@ -109,5 +108,5 @@ class BenchmarkSuite(OpTest): for place in places: elapses.append(self.timeit_grad_with_place(place, iters)) for place, elapse in zip(places, elapses): - print("One 
pass of ({2}_grad_op) at {0} cost {1}".format( - str(place), elapse, self.op_type)) + print(("One pass of ({2}_grad_op) at {0} cost {1}".format( + str(place), elapse, self.op_type))) diff --git a/python/paddle/fluid/tests/unittests/benchmark_sum_op.py b/python/paddle/fluid/tests/unittests/benchmark_sum_op.py index 91a5f1bca44..b222320d7c3 100644 --- a/python/paddle/fluid/tests/unittests/benchmark_sum_op.py +++ b/python/paddle/fluid/tests/unittests/benchmark_sum_op.py @@ -16,8 +16,8 @@ import unittest import numpy as np import paddle.fluid as fluid -from benchmark import BenchmarkSuite -from op_test import OpTest +from .benchmark import BenchmarkSuite +from .op_test import OpTest # This is a demo op test case for operator benchmarking and high resolution number stability alignment. diff --git a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py index fcf86cc5839..831331589b3 100644 --- a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py +++ b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py @@ -91,7 +91,7 @@ class TestParallelExecutorBase(unittest.TestCase): first_loss, = run_executor( exe=exe, feed=feed_dict, fetch_list=[loss.name]) - for i in xrange(iter): + for i in range(iter): run_executor(exe=exe, feed=feed_dict, fetch_list=[]) last_loss, = run_executor( @@ -99,8 +99,8 @@ class TestParallelExecutorBase(unittest.TestCase): end = time.time() if batch_size is not None: - print "%.4f Instance per second" % ( - (batch_size * iter + 2) / (end - begin)) + print(("%.4f Instance per second" % ( + (batch_size * iter + 2) / (end - begin)))) avg_last_loss_val = np.array(last_loss).mean() avg_first_loss_val = np.array(first_loss).mean() @@ -108,6 +108,6 @@ class TestParallelExecutorBase(unittest.TestCase): float(avg_first_loss_val)): sys.exit("got NaN loss, training failed.") - print first_loss, last_loss + print((first_loss, last_loss)) # self.assertGreater(first_loss[0], last_loss[0]) return first_loss, last_loss diff --git a/python/paddle/fluid/tests/unittests/testsuite.py b/python/paddle/fluid/tests/unittests/testsuite.py index 55c6e54906e..bfe186d1879 100644 --- a/python/paddle/fluid/tests/unittests/testsuite.py +++ b/python/paddle/fluid/tests/unittests/testsuite.py @@ -142,7 +142,7 @@ def append_input_output(block, op_proto, np_list, is_input, dtype): def append_loss_ops(block, output_names): - mean_inputs = map(block.var, output_names) + mean_inputs = list(map(block.var, output_names)) # for item in mean_inputs: # print(item) # print("Item", item.dtype) diff --git a/python/paddle/fluid/tests/unittests/transformer_model.py b/python/paddle/fluid/tests/unittests/transformer_model.py index c62792face3..53f8cdfd003 100644 --- a/python/paddle/fluid/tests/unittests/transformer_model.py +++ b/python/paddle/fluid/tests/unittests/transformer_model.py @@ -118,8 +118,9 @@ def multi_head_attention(queries, # FIXME(guosheng): Decouple the program desc with batch_size. 
return layers.reshape( x=trans_x, - shape=map(int, - [batch_size, -1, trans_x.shape[2] * trans_x.shape[3]])) + shape=list( + map(int, [batch_size, -1, trans_x.shape[2] * trans_x.shape[3] + ]))) def scaled_dot_product_attention(q, k, v, attn_bias, d_model, dropout_rate): """ diff --git a/python/paddle/fluid/trainer.py b/python/paddle/fluid/trainer.py index 64049a93cb0..eed9b49ef40 100644 --- a/python/paddle/fluid/trainer.py +++ b/python/paddle/fluid/trainer.py @@ -18,16 +18,15 @@ import errno import shutil import time -import core - -import data_feeder -import executor -import framework -import io +from . import core +from . import data_feeder +from . import executor +from . import framework +from . import io # optimizer is same as the parameter of Trainer.__init__. Rename it to opt_module -import optimizer as opt_module -import parallel_executor -from transpiler import distribute_transpiler +from . import optimizer as opt_module +from . import parallel_executor +from .transpiler import distribute_transpiler __all__ = [ 'Trainer', 'BeginEpochEvent', 'EndEpochEvent', 'BeginStepEvent', @@ -73,7 +72,7 @@ class BeginStepEvent(object): self.step = step_id self.fetch_metrics = True """ - If fetch_metrics is true, the metrics will be fetched at the + If fetch_metrics is true, the metrics will be fetched at the EndStepEvent. Default is True. """ @@ -614,11 +613,12 @@ def build_feed_var_list(program, feed_order): if not isinstance(feed_order, dict): raise TypeError( "The 'feed_order' should be either None, list or dict.") - if not sorted(feed_order.values()) == range(len(feed_order)): + if not sorted(feed_order.values()) == list(range(len(feed_order))): raise ValueError( "The values of 'feed_order' should be a permutation of [0, len(feed_order))" ) - sorted_pair_list = sorted(feed_order.items(), key=lambda item: item[1]) + sorted_pair_list = sorted( + list(feed_order.items()), key=lambda item: item[1]) feed_var_list = [ program.global_block().var(pair[0]) for pair in sorted_pair_list ] @@ -644,14 +644,14 @@ def save_checkpoint(executor, pserver_endpoints=None): """ This function filters out all checkpoint variables from the give - main_program and then saves these variables to the `checkpoint_dir` + main_program and then saves these variables to the `checkpoint_dir` directory. In the training precess, we generally save a checkpoint in each - iteration. So there might be a lot of checkpoints in the - `checkpoint_dir`. To avoid them taking too much disk space, the - `max_num_checkpoints` are introduced to limit the total number of - checkpoints. If the number of existing checkpints is greater than + iteration. So there might be a lot of checkpoints in the + `checkpoint_dir`. To avoid them taking too much disk space, the + `max_num_checkpoints` are introduced to limit the total number of + checkpoints. If the number of existing checkpints is greater than the `max_num_checkpoints`, oldest ones will be scroll deleted. A variable is a checkpoint variable and will be saved if it meets @@ -663,21 +663,21 @@ def save_checkpoint(executor, Args: executor(Executor): The executor to run for save checkpoint. checkpoint_dir(str): The folder where to save checkpoints. - trainer_id(int): currect trainer id, if id is equal to 0, the trainer + trainer_id(int): currect trainer id, if id is equal to 0, the trainer is chief. - trainer_args(dict|None): Current training arguments. Such as 'epoch_id' + trainer_args(dict|None): Current training arguments. Such as 'epoch_id' and 'step_id'. 
Defaut: None main_program(Program): The program whose checkpoint variables will be saved. - max_num_checkpoints(int): The max number of total number of existing + max_num_checkpoints(int): The max number of total number of existing checkpoints. Default: 3 lookup_table(string|None): the lookup table name, when use distribute lookup table, we can get lookup table name by DistributeTranspiler. - table_name - pserver_endpoints(list|None): the parameter server ip:port list. - when use distribute lookup table, we can get pserver_endpoints by + table_name + pserver_endpoints(list|None): the parameter server ip:port list. + when use distribute lookup table, we can get pserver_endpoints by distribute arguments. Returns: @@ -747,8 +747,8 @@ def load_checkpoint(executor, `checkpoint_dir` directory. In the training precess, we generally save a checkpoint in each - iteration. So there are more than one checkpoint in the - `checkpoint_dir` (each checkpoint has its own sub folder), use + iteration. So there are more than one checkpoint in the + `checkpoint_dir` (each checkpoint has its own sub folder), use `serial` to specify which serial of checkpoint you would like to load. @@ -819,9 +819,9 @@ def load_checkpoint(executor, def clean_checkpoint(checkpoint_dir, delete_dir=False): """ - clean the checkpoint dir, when the train exits normally, + clean the checkpoint dir, when the train exits normally, the trainer will call clean_checkpoint to delete checkpoint directory saved before. - delete_dir only works when the directory is empty, otherwise, OSError is raised. + delete_dir only works when the directory is empty, otherwise, OSError is raised. : param checkpoint_dir : param delete_dir @@ -889,7 +889,7 @@ def _load_persist_vars_without_grad(executor, def _load_lookup_table_vars(executor, dirname, program, pserver_id, table_name): """ - The parameter server will load lookup table's local file in + The parameter server will load lookup table's local file in selectedrows variable. Args: @@ -940,7 +940,7 @@ def _load_lookup_table_vars(executor, dirname, program, pserver_id, table_name): def _save_persist_vars_without_grad(executor, dirname, program): """ This function filters out all checkpoint variables from the give - program and then save these variables to a sub-folder '__model__' of + program and then save these variables to a sub-folder '__model__' of the given directory. A variable is a checkpoint variable if it meets all following @@ -969,7 +969,7 @@ def _save_persist_vars_without_grad(executor, dirname, program): # In this example, `_save_persist_vars_without_grad` function # will first filters out all checkpoint variables in the default - # main program, and then saves these variables to the folder + # main program, and then saves these variables to the folder # "./my_paddle_model/__model__". """ cur_dir = _get_model_dir(dirname) @@ -988,7 +988,7 @@ def _save_pserver_vars_by_notify(executor, dirname, lookup_table, """ This function will send checkpoint notify message from Trainer 0 to all the pservers. - The checkpoint notify message contains lookup table name, + The checkpoint notify message contains lookup table name, the absolute path on pserver to save lookup_table. Args: @@ -996,13 +996,13 @@ def _save_pserver_vars_by_notify(executor, dirname, lookup_table, dirname(str): The folder where to save checkpoints. lookup_table(string): the lookup table name, when use distribute lookup table, we can get lookup table name by DistributeTranspiler. 
- table_name - ps_endpoint_list(list): the parameter server ip:port list. - when use distribute lookup table, we can get ps_endpoint_list by + table_name + ps_endpoint_list(list): the parameter server ip:port list. + when use distribute lookup table, we can get ps_endpoint_list by distribute arguments. Return: None - + Examples: .. code-block:: python @@ -1013,7 +1013,7 @@ def _save_pserver_vars_by_notify(executor, dirname, lookup_table, ps_endpoints = ["127.0.0.1:6000","127.0.0.1:6001"] _save_pserver_vars_by_notify(executor=exe, - dirname=param_path, lookup_table=table_name, + dirname=param_path, lookup_table=table_name, ps_endpoint_list=ps_endpoints) """ cur_dir = _get_lookuptable_dir(dirname) @@ -1036,7 +1036,7 @@ def _save_trainer_args(dirname, trainer_id, trainer_args): cur_dir = _get_trainer_dir(dirname, trainer_id) - for name, value in trainer_args.iteritems(): + for name, value in list(trainer_args.items()): args_file = os.path.join(cur_dir, name) with open(args_file, 'w') as f: f.write(str(value)) @@ -1045,7 +1045,7 @@ def _save_trainer_args(dirname, trainer_id, trainer_args): def _load_trainer_args(checkpoint_dir, serial, trainer_id, trainer_args): """ - trainer will load some args from it's independent directory, + trainer will load some args from it's independent directory, such as epoch_id and step_id. Args: @@ -1168,10 +1168,10 @@ def _scroll_delete(dirname, max_num_checkpoints=3): serial_num = _get_dir_serial(serial) serial_map[serial_num] = serial - if len(serial_map.keys()) <= max_num_checkpoints: + if len(list(serial_map.keys())) <= max_num_checkpoints: return - serials = serial_map.keys() + serials = list(serial_map.keys()) serials.sort(reverse=True) serials = serials[max_num_checkpoints:] for serial in serials: diff --git a/python/paddle/fluid/transpiler/__init__.py b/python/paddle/fluid/transpiler/__init__.py index eae13b50398..a8622ad5443 100644 --- a/python/paddle/fluid/transpiler/__init__.py +++ b/python/paddle/fluid/transpiler/__init__.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from distribute_transpiler import DistributeTranspiler, DistributeTranspilerConfig -from inference_transpiler import InferenceTranspiler -from memory_optimization_transpiler import memory_optimize, release_memory -from ps_dispatcher import HashName, RoundRobin +from .distribute_transpiler import DistributeTranspiler, DistributeTranspilerConfig +from .inference_transpiler import InferenceTranspiler +from .memory_optimization_transpiler import memory_optimize, release_memory +from .ps_dispatcher import HashName, RoundRobin __all__ = [ "DistributeTranspiler", "InferenceTranspiler", "memory_optimize", diff --git a/python/paddle/fluid/transpiler/details/__init__.py b/python/paddle/fluid/transpiler/details/__init__.py index dc597c33849..1bfab1f219f 100644 --- a/python/paddle/fluid/transpiler/details/__init__.py +++ b/python/paddle/fluid/transpiler/details/__init__.py @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. 
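Two syntax-level changes cluster in the transpiler hunks around here. Implicit relative imports were removed in Python 3 (PEP 328), so intra-package imports gain an explicit leading dot, and the old comma form of `except` is a SyntaxError and becomes `as`. A minimal sketch of both, assuming a package with a sibling module named `details`:

    # Intra-package import, e.g. inside the package's __init__.py:
    #   Python 2 only:     from details import program_utils
    #   Python 2 and 3:    from .details import program_utils

    # Exception syntax, as rewritten in program_utils.py below:
    try:
        raise ValueError("boom")
    except Exception as e:    # was: except Exception, e
        raise e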
-from program_utils import * -from ufind import * +from .program_utils import * +from .ufind import * diff --git a/python/paddle/fluid/transpiler/details/program_utils.py b/python/paddle/fluid/transpiler/details/program_utils.py index 2ca1d4716b1..76d10777f5f 100644 --- a/python/paddle/fluid/transpiler/details/program_utils.py +++ b/python/paddle/fluid/transpiler/details/program_utils.py @@ -17,8 +17,8 @@ def delete_ops(block, ops): try: start = list(block.ops).index(ops[0]) end = list(block.ops).index(ops[-1]) - [block._remove_op(start) for _ in xrange(end - start + 1)] - except Exception, e: + [block._remove_op(start) for _ in range(end - start + 1)] + except Exception as e: raise e block.program._sync_with_cpp() diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py index 4a9ea6af747..75c403163d6 100644 --- a/python/paddle/fluid/transpiler/distribute_transpiler.py +++ b/python/paddle/fluid/transpiler/distribute_transpiler.py @@ -28,18 +28,17 @@ Steps to transpile pserver: 5. add listen_and_serv op """ -from __future__ import print_function - import math import random import numpy as np -from ps_dispatcher import RoundRobin, HashName, PSDispatcher +from .ps_dispatcher import RoundRobin, HashName, PSDispatcher from .. import core, framework from ..framework import Program, default_main_program, \ default_startup_program, Block, \ Parameter, grad_var_name -from details import * +from .details import * +from functools import reduce LOOKUP_TABLE_TYPE = "lookup_table" LOOKUP_TABLE_GRAD_TYPE = "lookup_table_grad" @@ -102,7 +101,7 @@ def slice_variable(var_list, slice_count, min_block_size): block_size += dim1 - remains # update split_count after aligning split_count = int(math.ceil(var_numel / float(block_size))) - for block_id in xrange(split_count): + for block_id in range(split_count): curr_block_size = min(block_size, var_numel - ( (block_id) * block_size)) block = VarBlock(var.name, block_id, curr_block_size) @@ -117,7 +116,7 @@ class DistributeTranspilerConfig(object): try to choose the best method to balance loads for pservers. min_block_size (int): Minimum splitted element number in block. According:https://github.com/PaddlePaddle/Paddle/issues/8638#issuecomment-369912156 - We can use bandwidth effiently when data size is larger than 2MB.If you + We can use bandwidth effiently when data size is larger than 2MB.If you want to change it, please be sure you see the slice_variable function. """ @@ -218,7 +217,7 @@ class DistributeTranspiler(object): # fc_w@GRAD_trainer_0, fc_w@GRAD_trainer_1 --> pserver1 # fc_b@GRAD_trainer_0, fc_b@GRAD_trainer_1 --> pserver2 # shuffle the map will avoid the uneven distribution above - grad_var_mapping_items = self.grad_var_mapping.items() + grad_var_mapping_items = list(self.grad_var_mapping.items()) if not self.config.slice_var_up: random.seed(self.trainer_num) random.shuffle(grad_var_mapping_items) @@ -278,7 +277,7 @@ class DistributeTranspiler(object): self.param_grad_ep_mapping[ep]["grads"].append(send_vars[i]) # step4: Concat the parameters splits together after recv. 
- for varname, splited_var in self.param_var_mapping.iteritems(): + for varname, splited_var in list(self.param_var_mapping.items()): eps = [] for var in splited_var: index = [v.name for v in recv_vars].index(var.name) @@ -302,7 +301,7 @@ class DistributeTranspiler(object): RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE }) - for varname, splited_var in self.param_var_mapping.iteritems(): + for varname, splited_var in list(self.param_var_mapping.items()): if len(splited_var) <= 1: continue orig_param = program.global_block().vars[varname] @@ -371,7 +370,7 @@ class DistributeTranspiler(object): dtype=v.dtype, shape=v.shape) if self.sync_mode and self.trainer_num > 1: - for trainer_id in xrange(self.trainer_num): + for trainer_id in range(self.trainer_num): var = pserver_program.global_block().create_var( name="%s.trainer_%d" % (orig_var_name, trainer_id), persistable=False, @@ -461,7 +460,7 @@ class DistributeTranspiler(object): per_opt_block = pserver_program.create_block(pre_block_idx) optimize_blocks.append(per_opt_block) # append grad merging ops before clip and weight decay - # cases may like: + # cases may like: # L2Decay op -> clip op -> optimize for _, op in enumerate(self.optimize_ops): # find the origin @GRAD var before clipping @@ -556,7 +555,7 @@ class DistributeTranspiler(object): # 1. create vars in pserver program to startup program pserver_vars = pserver_program.global_block().vars created_var_map = dict() - for _, var in pserver_vars.iteritems(): + for _, var in list(pserver_vars.items()): tmpvar = s_prog.global_block()._clone_variable(var) created_var_map[var.name] = tmpvar @@ -989,11 +988,11 @@ class DistributeTranspiler(object): var_mapping = dict() for block_str in block_list: varname, offset, size = block_str.split(":") - if not block_map.has_key(varname): + if varname not in block_map: block_map[varname] = [] - block_map[varname].append((long(offset), long(size))) + block_map[varname].append((int(offset), int(size))) - for varname, splited in block_map.iteritems(): + for varname, splited in list(block_map.items()): orig_var = program.global_block().var(varname) if len(splited) == 1: if self.sync_mode and add_trainer_suffix: @@ -1156,7 +1155,7 @@ class DistributeTranspiler(object): grad_to_block_id.append(merged_var.name + ":" + str(optimize_block.idx)) if self.sync_mode and self.trainer_num > 1: vars2merge = [] - for i in xrange(self.trainer_num): + for i in range(self.trainer_num): per_trainer_name = "%s.trainer_%d" % \ (merged_var_name, i) vars2merge.append(pserver_block.vars[per_trainer_name]) @@ -1204,7 +1203,7 @@ class DistributeTranspiler(object): # learning rate variable has already be created by non-optimize op, # don't create it once again. 
lr_varname = opt_op.input(key)[0] - if pserver_block.vars.has_key(lr_varname): + if lr_varname in pserver_block.vars: new_inputs[key] = pserver_block.vars[opt_op.input(key)[0]] else: origin_var = origin_program.global_block().vars[lr_varname] @@ -1244,7 +1243,7 @@ class DistributeTranspiler(object): def _is_splited_grad_var(self, var, var_dict): grad_block = None - for _, g in var_dict.iteritems(): + for _, g in list(var_dict.items()): if self._orig_varname(g.name) == self._orig_varname(var.name): if g.name.find(".trainer_") == -1: grad_block = g @@ -1254,7 +1253,7 @@ class DistributeTranspiler(object): def _clone_lr_op(self, program, block, op): inputs = self._get_input_map_from_op( self.origin_program.global_block().vars, op) - for key, varlist in inputs.iteritems(): + for key, varlist in list(inputs.items()): if not isinstance(varlist, list): varlist = [varlist] for var in varlist: @@ -1263,7 +1262,7 @@ class DistributeTranspiler(object): outputs = self._get_output_map_from_op( self.origin_program.global_block().vars, op) - for key, varlist in outputs.iteritems(): + for key, varlist in list(outputs.items()): if not isinstance(varlist, list): varlist = [varlist] for var in varlist: @@ -1278,7 +1277,7 @@ class DistributeTranspiler(object): # Append the ops for parameters that do not need to be optimized/updated inputs = self._get_input_map_from_op( self.origin_program.global_block().vars, opt_op) - for key, varlist in inputs.iteritems(): + for key, varlist in list(inputs.items()): if not isinstance(varlist, list): varlist = [varlist] for var in varlist: @@ -1288,7 +1287,7 @@ class DistributeTranspiler(object): var, program.global_block().vars) if grad_block: inputs[key] = grad_block - elif not program.global_block().vars.has_key(var.name): + elif var.name not in program.global_block().vars: program.global_block().create_var( name=var.name, persistable=var.persistable, @@ -1297,7 +1296,7 @@ class DistributeTranspiler(object): outputs = self._get_output_map_from_op( self.origin_program.global_block().vars, opt_op) - for key, varlist in outputs.iteritems(): + for key, varlist in list(outputs.items()): if not isinstance(varlist, list): varlist = [varlist] for var in varlist: @@ -1305,7 +1304,7 @@ class DistributeTranspiler(object): var, program.global_block().vars) if grad_block: outputs[key] = grad_block - elif not program.global_block().vars.has_key(var.name): + elif var.name not in program.global_block().vars: program.global_block()._clone_variable(var) return optimize_block.append_op( @@ -1326,8 +1325,8 @@ class DistributeTranspiler(object): def _create_ufind(self, optimize_ops): # Create a unit find data struct by optimize ops ufind = UnionFind(optimize_ops) - for i in xrange(len(optimize_ops)): - for j in xrange(i, len(optimize_ops)): + for i in range(len(optimize_ops)): + for j in range(i, len(optimize_ops)): op1 = optimize_ops[i] op2 = optimize_ops[j] if self._is_op_connected(op1, op2): diff --git a/python/paddle/fluid/transpiler/inference_transpiler.py b/python/paddle/fluid/transpiler/inference_transpiler.py index f1905f08787..142fa5c31d2 100644 --- a/python/paddle/fluid/transpiler/inference_transpiler.py +++ b/python/paddle/fluid/transpiler/inference_transpiler.py @@ -305,6 +305,6 @@ class InferenceTranspiler(object): args += current_op.output_arg_names args = list(set(args)) # unique the input and output arguments - for var in self.block.vars.keys(): + for var in list(self.block.vars.keys()): if var not in args: self.block._remove_var(var) diff --git 
a/python/paddle/fluid/transpiler/memory_optimization_transpiler.py b/python/paddle/fluid/transpiler/memory_optimization_transpiler.py index 0ca5cf813b5..1c7bab8e1ed 100644 --- a/python/paddle/fluid/transpiler/memory_optimization_transpiler.py +++ b/python/paddle/fluid/transpiler/memory_optimization_transpiler.py @@ -16,6 +16,7 @@ from collections import defaultdict from .. import core from ..framework import Program, default_main_program, Parameter from ..backward import _rename_arg_ +from functools import reduce dtype_to_size = { core.VarDesc.VarType.FP16: 2, @@ -107,7 +108,7 @@ class ControlFlowGraph(object): # Repeatedly apply liveness updates until the algorithm stablize # on a complete set live input vars and live output vars. while True: - for i in reversed(range(self.op_size)): + for i in reversed(list(range(self.op_size))): live_in[i] = set(self._live_in[i]) live_out[i] = set(self._live_out[i]) for s in self._successors[i]: @@ -172,9 +173,10 @@ class ControlFlowGraph(object): is_forward = i < self._forward_num in_diff, out_diff = self._get_diff(self._live_in[i], self._live_out[i]) - can_optimize = filter( - lambda x: self._check_var_validity(block_desc, x, is_forward), - in_diff) + can_optimize = [ + x for x in in_diff + if self._check_var_validity(block_desc, x, is_forward) + ] if can_optimize: index = i + fwd_id + 1 if is_forward else i - self._forward_num + bwd_id + 1 delete_op = block_desc._insert_op(index) @@ -213,9 +215,10 @@ class ControlFlowGraph(object): block_desc = op.block() is_forward = i < self._forward_num if self.pool: - defs_can_optimize = filter( - lambda x: self._check_var_validity(block_desc, x, is_forward), - self._defs[i]) + defs_can_optimize = [ + x for x in self._defs[i] + if self._check_var_validity(block_desc, x, is_forward) + ] out_pair = [ (x, self._find_var(block_desc, x, is_forward).shape()) for x in defs_can_optimize @@ -243,11 +246,11 @@ class ControlFlowGraph(object): continue if PRINT_LOG: - print(("Hit Cache !!!! cache pool index " - "is %d, var name is %s, " - "cached var name is %s, " - "var shape is %s ") % (index, x, cache_var, - str(cache_shape))) + print((("Hit Cache !!!! 
cache pool index " + "is %d, var name is %s, " + "cached var name is %s, " + "var shape is %s ") % (index, x, cache_var, + str(cache_shape)))) self.pool.pop(index) if x == cache_var: break @@ -261,9 +264,10 @@ class ControlFlowGraph(object): break in_diff, _ = self._get_diff(self._live_in[i], self._live_out[i]) - can_optimize = filter( - lambda x: self._check_var_validity(block_desc, x, is_forward), - in_diff) + can_optimize = [ + x for x in in_diff + if self._check_var_validity(block_desc, x, is_forward) + ] if can_optimize: for var_name in can_optimize: self.pool.append((var_name, self._find_var( diff --git a/python/paddle/fluid/unique_name.py b/python/paddle/fluid/unique_name.py index 776619cd367..9b661746e9d 100644 --- a/python/paddle/fluid/unique_name.py +++ b/python/paddle/fluid/unique_name.py @@ -67,7 +67,7 @@ def switch(new_generator=None): @contextlib.contextmanager def guard(new_generator=None): - if isinstance(new_generator, basestring): + if isinstance(new_generator, str): new_generator = UniqueNameGenerator(new_generator) old = switch(new_generator) yield diff --git a/python/paddle/reader/creator.py b/python/paddle/reader/creator.py index 4c905d959fa..12c3afbcb05 100644 --- a/python/paddle/reader/creator.py +++ b/python/paddle/reader/creator.py @@ -67,10 +67,10 @@ def recordio(paths, buf_size=100): import recordio as rec import paddle.reader.decorator as dec - import cPickle as pickle + import pickle as pickle def reader(): - if isinstance(paths, basestring): + if isinstance(paths, str): path = paths else: path = ",".join(paths) diff --git a/python/paddle/reader/decorator.py b/python/paddle/reader/decorator.py index 4b1fe94222d..7faca28e400 100644 --- a/python/paddle/reader/decorator.py +++ b/python/paddle/reader/decorator.py @@ -21,6 +21,7 @@ from threading import Thread import subprocess from six.moves.queue import Queue +from six.moves import zip_longest import itertools import random import zlib @@ -42,7 +43,7 @@ def map_readers(func, *readers): rs = [] for r in readers: rs.append(r()) - for e in itertools.imap(func, *rs): + for e in map(func, *rs): yield e return reader @@ -148,16 +149,16 @@ def compose(*readers, **kwargs): for r in readers: rs.append(r()) if not check_alignment: - for outputs in itertools.izip(*rs): - yield sum(map(make_tuple, outputs), ()) + for outputs in zip(*rs): + yield sum(list(map(make_tuple, outputs)), ()) else: - for outputs in itertools.izip_longest(*rs): + for outputs in zip_longest(*rs): for o in outputs: if o is None: # None will be not be present if compose is aligned raise ComposeNotAligned( "outputs of readers are not aligned.") - yield sum(map(make_tuple, outputs), ()) + yield sum(list(map(make_tuple, outputs)), ()) return reader @@ -306,7 +307,7 @@ def xmap_readers(mapper, reader, process_num, buffer_size, order=False): args = (in_queue, out_queue, mapper, out_order) if order else ( in_queue, out_queue, mapper) workers = [] - for i in xrange(process_num): + for i in range(process_num): worker = Thread(target=target, args=args) worker.daemon = True workers.append(worker) diff --git a/python/paddle/reader/tests/decorator_test.py b/python/paddle/reader/tests/decorator_test.py index bee24d3b657..537df489b97 100644 --- a/python/paddle/reader/tests/decorator_test.py +++ b/python/paddle/reader/tests/decorator_test.py @@ -136,7 +136,7 @@ class TestXmap(unittest.TestCase): reader = paddle.reader.xmap_readers(mapper, reader_creator_10(0), tNum, size, order) - for n in xrange(3): + for n in range(3): result = [] for i in reader(): result.append(i) 
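The distribute_transpiler hunks above bundle several dictionary- and type-protocol changes: `iteritems()`/`keys()` calls become `list(...)` over Python 3's live view objects (required when the loop mutates the dict or the caller expects a list), `has_key()` becomes the `in` operator, the `long` type folds into `int`, and `basestring` checks (as in unique_name.py here) collapse to `str`. A sketch with invented names:

    block_map = {}                                    # hypothetical varname -> blocks
    if "fc_w" not in block_map:                       # was: not block_map.has_key("fc_w")
        block_map["fc_w"] = []
    block_map["fc_w"].append((int("0"), int("4096")))  # was: (long(offset), long(size))

    for varname, splited in list(block_map.items()):  # was: block_map.iteritems()
        block_map.pop(varname)                        # safe: we iterate over a snapshot

    name = "fc_w"
    assert isinstance(name, str)                      # was: isinstance(name, basestring)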
@@ -156,7 +156,7 @@ class TestPipeReader(unittest.TestCase): import tempfile - records = [str(i) for i in xrange(5)] + records = [str(i) for i in range(5)] temp = tempfile.NamedTemporaryFile() try: with open(temp.name, 'w') as f: diff --git a/tools/test_runner.py b/tools/test_runner.py index 9dc750b8905..2d6a3cf8a97 100644 --- a/tools/test_runner.py +++ b/tools/test_runner.py @@ -12,19 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import print_function import unittest import os import sys import paddle.fluid as fluid import importlib -import cStringIO +from six.moves import cStringIO def main(): sys.path.append(os.getcwd()) some_test_failed = False for module_name in sys.argv[1:]: - buffer = cStringIO.StringIO() + buffer = cStringIO() main = fluid.Program() startup = fluid.Program() scope = fluid.core.Scope() @@ -37,8 +38,11 @@ def main(): res = unittest.TextTestRunner(stream=buffer).run(tests) if not res.wasSuccessful(): some_test_failed = True - print >> sys.stderr, module_name, 'failed\n', buffer.getvalue( - ) + print( + module_name, + 'failed\n', + buffer.getvalue(), + file=sys.stderr) if some_test_failed: exit(1) -- GitLab
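Finally, the reader and test-runner hunks lean on six.moves as the compatibility shim: `itertools.izip`/`izip_longest` map to the lazy builtin `zip` and `six.moves.zip_longest`, the removed `cStringIO` module maps to `six.moves.cStringIO`, and the Python 2 chevron `print >> sys.stderr` becomes a `file=` keyword argument. A short sketch of all three (values are invented):

    from __future__ import print_function
    import sys
    from six.moves import cStringIO, zip_longest

    a, b = [1, 2, 3], ["x", "y"]
    print(list(zip(a, b)))           # [(1, 'x'), (2, 'y')]            (was: itertools.izip)
    print(list(zip_longest(a, b)))   # [(1, 'x'), (2, 'y'), (3, None)] (was: izip_longest)

    buf = cStringIO()                # io.StringIO on Py3, cStringIO.StringIO on Py2
    buf.write("captured test output\n")
    print("some_module", "failed\n", buf.getvalue(), file=sys.stderr)  # was: print >> sys.stderr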