From 35e6abd7bb7ae6518dddf27b81515a4754bfc10b Mon Sep 17 00:00:00 2001 From: minqiyang Date: Thu, 26 Jul 2018 22:34:59 +0800 Subject: [PATCH] Change iter_parameters back and port unittests code to Python3 --- python/paddle/fluid/framework.py | 6 +- .../fluid/tests/test_beam_search_decoder.py | 8 +-- .../fluid/tests/unittests/dist_se_resnext.py | 4 +- .../paddle/fluid/tests/unittests/op_test.py | 24 ++++---- .../fluid/tests/unittests/test_accuracy_op.py | 4 +- .../unittests/test_activation_mkldnn_op.py | 4 +- .../tests/unittests/test_activation_op.py | 2 +- .../fluid/tests/unittests/test_adadelta_op.py | 2 +- .../fluid/tests/unittests/test_adagrad_op.py | 2 +- .../fluid/tests/unittests/test_adam_op.py | 6 +- .../fluid/tests/unittests/test_adamax_op.py | 2 +- .../unittests/test_anchor_generator_op.py | 2 +- .../tests/unittests/test_arg_min_max_op.py | 2 +- .../fluid/tests/unittests/test_argsort_op.py | 2 +- .../unittests/test_array_read_write_op.py | 5 +- .../fluid/tests/unittests/test_assign_op.py | 2 +- .../tests/unittests/test_assign_value_op.py | 2 +- .../fluid/tests/unittests/test_auc_op.py | 2 +- .../unittests/test_batch_norm_mkldnn_op.py | 4 +- .../tests/unittests/test_batch_norm_op.py | 4 +- .../tests/unittests/test_beam_search_op.py | 3 +- .../unittests/test_bilinear_interp_op.py | 2 +- .../test_bilinear_tensor_product_op.py | 2 +- .../unittests/test_bipartite_match_op.py | 4 +- .../tests/unittests/test_box_coder_op.py | 2 +- .../fluid/tests/unittests/test_cast_op.py | 2 +- .../tests/unittests/test_chunk_eval_op.py | 9 +-- .../tests/unittests/test_clip_by_norm_op.py | 2 +- .../fluid/tests/unittests/test_clip_op.py | 2 +- .../fluid/tests/unittests/test_compare_op.py | 2 +- .../fluid/tests/unittests/test_concat_op.py | 2 +- .../tests/unittests/test_conditional_block.py | 4 +- .../tests/unittests/test_conv2d_mkldnn_op.py | 2 +- .../fluid/tests/unittests/test_conv2d_op.py | 2 +- .../unittests/test_conv2d_transpose_op.py | 2 +- .../fluid/tests/unittests/test_conv3d_op.py | 2 +- .../unittests/test_conv3d_transpose_op.py | 2 +- .../tests/unittests/test_conv_shift_op.py | 6 +- .../fluid/tests/unittests/test_cos_sim_op.py | 2 +- .../unittests/test_create_op_doc_string.py | 2 +- .../tests/unittests/test_crf_decoding_op.py | 2 +- .../fluid/tests/unittests/test_crop_op.py | 2 +- .../tests/unittests/test_cross_entropy_op.py | 2 +- .../fluid/tests/unittests/test_ctc_align.py | 4 +- .../fluid/tests/unittests/test_cumsum_op.py | 2 +- .../tests/unittests/test_data_balance.py | 8 +-- .../unittests/test_decayed_adagrad_op.py | 2 +- .../unittests/test_default_scope_funcs.py | 2 +- .../tests/unittests/test_detection_map_op.py | 4 +- .../fluid/tests/unittests/test_dist_mnist.py | 5 +- .../tests/unittests/test_dist_transpiler.py | 6 +- .../tests/unittests/test_dist_word2vec.py | 4 +- .../fluid/tests/unittests/test_dropout_op.py | 2 +- .../fluid/tests/unittests/test_dyn_rnn.py | 2 +- .../unittests/test_dynrnn_gradient_check.py | 60 ++++++++++--------- .../unittests/test_dynrnn_static_input.py | 18 +++--- .../tests/unittests/test_edit_distance_op.py | 2 +- .../test_elementwise_add_mkldnn_op.py | 4 +- .../unittests/test_elementwise_add_op.py | 2 +- .../unittests/test_elementwise_div_op.py | 2 +- .../unittests/test_elementwise_gradient_op.py | 2 +- .../unittests/test_elementwise_max_op.py | 2 +- .../unittests/test_elementwise_min_op.py | 2 +- .../unittests/test_elementwise_mul_op.py | 2 +- .../unittests/test_elementwise_pow_op.py | 2 +- .../unittests/test_elementwise_sub_op.py | 2 +- 
.../fluid/tests/unittests/test_expand_op.py | 2 +- .../unittests/test_fake_dequantize_op.py | 2 +- .../tests/unittests/test_fake_quantize_op.py | 2 +- .../tests/unittests/test_fc_mkldnn_op.py | 2 +- .../fluid/tests/unittests/test_fetch_var.py | 2 +- .../test_fill_constant_batch_size_like_op.py | 2 +- .../tests/unittests/test_fill_constant_op.py | 2 +- .../fluid/tests/unittests/test_fill_op.py | 2 +- .../unittests/test_fill_zeros_like_op.py | 2 +- .../fluid/tests/unittests/test_ftrl_op.py | 2 +- .../fluid/tests/unittests/test_gather_op.py | 2 +- ...test_gaussian_random_batch_size_like_op.py | 2 +- .../test_gaussian_random_mkldnn_op.py | 2 +- .../tests/unittests/test_get_places_op.py | 2 +- .../fluid/tests/unittests/test_gru_op.py | 13 ++-- .../fluid/tests/unittests/test_gru_unit_op.py | 4 +- .../tests/unittests/test_hinge_loss_op.py | 2 +- .../fluid/tests/unittests/test_hsigmoid_op.py | 2 +- .../tests/unittests/test_huber_loss_op.py | 2 +- .../tests/unittests/test_im2sequence_op.py | 2 +- .../test_image_classification_layer.py | 6 +- .../unittests/test_inference_model_io.py | 2 +- .../tests/unittests/test_iou_similarity_op.py | 2 +- .../fluid/tests/unittests/test_is_empty_op.py | 2 +- .../fluid/tests/unittests/test_l1_norm_op.py | 2 +- .../tests/unittests/test_label_smooth_op.py | 2 +- .../tests/unittests/test_layer_norm_op.py | 1 + .../fluid/tests/unittests/test_layers.py | 6 +- .../unittests/test_linear_chain_crf_op.py | 2 +- .../unittests/test_listen_and_serv_op.py | 2 +- .../tests/unittests/test_lod_rank_table.py | 2 +- .../tests/unittests/test_lod_reset_op.py | 2 +- .../tests/unittests/test_lod_tensor_array.py | 4 +- .../unittests/test_lod_tensor_array_ops.py | 16 +++-- .../fluid/tests/unittests/test_log_loss_op.py | 2 +- .../fluid/tests/unittests/test_logical_op.py | 2 +- .../unittests/test_lookup_sparse_table_op.py | 2 +- .../tests/unittests/test_lookup_table_op.py | 4 +- .../tests/unittests/test_lrn_mkldnn_op.py | 2 +- .../fluid/tests/unittests/test_lrn_op.py | 2 +- .../fluid/tests/unittests/test_lstm_op.py | 2 +- .../tests/unittests/test_lstm_unit_op.py | 2 +- .../fluid/tests/unittests/test_lstmp_op.py | 2 +- .../unittests/test_margin_rank_loss_op.py | 2 +- .../tests/unittests/test_math_op_patch.py | 2 +- .../fluid/tests/unittests/test_matmul_op.py | 2 +- .../fluid/tests/unittests/test_maxout_op.py | 2 +- .../fluid/tests/unittests/test_mean_iou.py | 4 +- .../fluid/tests/unittests/test_mean_op.py | 2 +- .../tests/unittests/test_merge_ids_op.py | 2 +- .../unittests/test_mine_hard_examples_op.py | 2 +- .../fluid/tests/unittests/test_minus_op.py | 2 +- .../unittests/test_modified_huber_loss_op.py | 2 +- .../fluid/tests/unittests/test_momentum_op.py | 2 +- .../fluid/tests/unittests/test_mul_op.py | 2 +- .../tests/unittests/test_multiclass_nms_op.py | 6 +- .../tests/unittests/test_multiplex_op.py | 2 +- .../paddle/fluid/tests/unittests/test_nce.py | 4 +- .../fluid/tests/unittests/test_norm_op.py | 2 +- .../fluid/tests/unittests/test_one_hot_op.py | 12 ++-- .../fluid/tests/unittests/test_pad_op.py | 2 +- .../unittests/test_parallel_executor_crf.py | 6 +- .../test_parallel_executor_fetch_feed.py | 6 +- .../unittests/test_parallel_executor_mnist.py | 14 ++--- .../test_parallel_executor_seresnext.py | 6 +- ...test_parallel_executor_test_while_train.py | 4 +- .../test_parallel_executor_transformer.py | 4 +- .../fluid/tests/unittests/test_parallel_op.py | 4 +- .../unittests/test_polygon_box_transform.py | 6 +- .../tests/unittests/test_pool2d_mkldnn_op.py | 2 +- 
.../fluid/tests/unittests/test_pool2d_op.py | 10 ++-- .../fluid/tests/unittests/test_pool3d_op.py | 14 ++--- .../fluid/tests/unittests/test_pool_max_op.py | 20 +++---- .../test_positive_negative_pair_op.py | 4 +- .../unittests/test_precision_recall_op.py | 22 +++---- .../fluid/tests/unittests/test_prelu_op.py | 2 +- .../tests/unittests/test_prior_box_op.py | 2 +- .../tests/unittests/test_protobuf_descs.py | 4 +- .../unittests/test_proximal_adagrad_op.py | 2 +- .../tests/unittests/test_proximal_gd_op.py | 2 +- .../tests/unittests/test_random_crop_op.py | 2 +- .../tests/unittests/test_rank_loss_op.py | 2 +- .../tests/unittests/test_reader_reset.py | 2 +- .../tests/unittests/test_recurrent_op.py | 8 +-- .../fluid/tests/unittests/test_reduce_op.py | 2 +- .../fluid/tests/unittests/test_registry.py | 2 +- .../fluid/tests/unittests/test_reshape_op.py | 2 +- .../fluid/tests/unittests/test_reverse_op.py | 2 +- .../fluid/tests/unittests/test_rmsprop_op.py | 2 +- .../fluid/tests/unittests/test_roi_pool_op.py | 2 +- .../fluid/tests/unittests/test_row_conv_op.py | 2 +- .../unittests/test_rpn_target_assign_op.py | 2 +- .../fluid/tests/unittests/test_scale_op.py | 2 +- .../fluid/tests/unittests/test_scatter_op.py | 2 +- .../tests/unittests/test_seq_concat_op.py | 2 +- .../fluid/tests/unittests/test_seq_conv.py | 8 +-- .../fluid/tests/unittests/test_seq_pool.py | 2 +- .../tests/unittests/test_sequence_erase_op.py | 2 +- .../tests/unittests/test_sequence_expand.py | 6 +- .../tests/unittests/test_sequence_reshape.py | 4 +- .../tests/unittests/test_sequence_slice_op.py | 2 +- .../unittests/test_sequence_softmax_op.py | 4 +- .../fluid/tests/unittests/test_sgd_op.py | 2 +- .../fluid/tests/unittests/test_shape_op.py | 2 +- .../tests/unittests/test_shrink_rnn_memory.py | 2 +- ...st_sigmoid_cross_entropy_with_logits_op.py | 2 +- .../fluid/tests/unittests/test_sign_op.py | 2 +- .../fluid/tests/unittests/test_slice_op.py | 2 +- .../tests/unittests/test_smooth_l1_loss_op.py | 2 +- .../fluid/tests/unittests/test_softmax_op.py | 2 +- .../test_softmax_with_cross_entropy_op.py | 4 +- .../tests/unittests/test_split_ids_op.py | 2 +- .../fluid/tests/unittests/test_split_op.py | 4 +- .../unittests/test_split_selected_rows_op.py | 2 +- .../fluid/tests/unittests/test_spp_op.py | 8 +-- .../unittests/test_squared_l2_distance_op.py | 2 +- .../unittests/test_squared_l2_norm_op.py | 2 +- .../fluid/tests/unittests/test_squeeze_op.py | 2 +- .../tests/unittests/test_sum_mkldnn_op.py | 2 +- .../fluid/tests/unittests/test_sum_op.py | 2 +- .../tests/unittests/test_target_assign_op.py | 2 +- .../fluid/tests/unittests/test_top_k_op.py | 6 +- .../tests/unittests/test_transpose_op.py | 2 +- .../test_uniform_random_batch_size_like_op.py | 2 +- .../tests/unittests/test_uniform_random_op.py | 2 +- .../fluid/tests/unittests/test_unpool_op.py | 18 +++--- .../tests/unittests/test_unsqueeze_op.py | 2 +- .../fluid/tests/unittests/test_warpctc_op.py | 4 +- .../fluid/tests/unittests/test_while_op.py | 2 +- 195 files changed, 383 insertions(+), 371 deletions(-) diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 3cfbe8cebf8..53658610e55 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -963,9 +963,9 @@ class Block(object): raise ValueError("Var {0} is not found recursively".format(name)) def all_parameters(self): - return list(self._iter_parameters()) + return list(self.iter_parameters()) - def _iter_parameters(self): + def iter_parameters(self): return (item[1] for item in 
list(self.vars.items()) if isinstance(item[1], Parameter)) @@ -1199,7 +1199,7 @@ class Block(object): if not isinstance(other, Block): raise TypeError( "_copy_param_info_from should be invoked with Block") - for p in other._iter_parameters(): + for p in other.iter_parameters(): assert isinstance(p, Parameter) v = self.vars.get(p.name, None) if v is None: diff --git a/python/paddle/fluid/tests/test_beam_search_decoder.py b/python/paddle/fluid/tests/test_beam_search_decoder.py index 7a2502fa2f9..8bf750940d5 100644 --- a/python/paddle/fluid/tests/test_beam_search_decoder.py +++ b/python/paddle/fluid/tests/test_beam_search_decoder.py @@ -155,7 +155,7 @@ def train_main(use_cuda): ] feeder = fluid.DataFeeder(feed_list, place) - for pass_id in xrange(1): + for pass_id in range(1): for batch_id, data in enumerate(train_reader()): outs = exe.run(main_program, feed=feeder.feed(data), @@ -204,8 +204,8 @@ def decode_main(use_cuda): ] feeder = fluid.DataFeeder(feed_list, place) - data = train_reader().next() - feed_dict = feeder.feed(map(lambda x: [x[0]], data)) + data = next(train_reader()) + feed_dict = feeder.feed([[x[0]] for x in data]) feed_dict['init_ids'] = init_ids feed_dict['init_scores'] = init_scores @@ -214,7 +214,7 @@ def decode_main(use_cuda): feed=feed_dict, fetch_list=[translation_ids, translation_scores], return_numpy=False) - print result_ids.lod() + print(result_ids.lod()) class TestBeamSearchDecoder(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/dist_se_resnext.py b/python/paddle/fluid/tests/unittests/dist_se_resnext.py index 72bc1729b0f..3cffdebf069 100644 --- a/python/paddle/fluid/tests/unittests/dist_se_resnext.py +++ b/python/paddle/fluid/tests/unittests/dist_se_resnext.py @@ -301,7 +301,7 @@ class DistSeResneXt2x2: trainer_id=trainer_id) feed_var_list = [ - var for var in trainer_prog.global_block().vars.itervalues() + var for var in trainer_prog.global_block().vars.values() if var.is_data ] @@ -309,7 +309,7 @@ class DistSeResneXt2x2: reader_generator = train_reader() first_loss, = exe.run(fetch_list=[avg_cost.name]) print(first_loss) - for i in xrange(5): + for i in range(5): loss, = exe.run(fetch_list=[avg_cost.name]) last_loss, = exe.run(fetch_list=[avg_cost.name]) print(last_loss) diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index 82b5e7cf0b3..d6b730add7d 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -25,14 +25,16 @@ from paddle.fluid.backward import append_backward from paddle.fluid.op import Operator from paddle.fluid.executor import Executor from paddle.fluid.framework import Program, OpProtoHolder, Variable -from testsuite import create_op, set_input, append_input_output, append_loss_ops +from .testsuite import create_op, set_input, append_input_output, append_loss_ops +from functools import reduce +from six.moves import zip def randomize_probability(batch_size, class_num, dtype='float32'): prob = np.random.uniform( 0.1, 1.0, size=(batch_size, class_num)).astype(dtype) prob_sum = prob.sum(axis=1) - for i in xrange(len(prob)): + for i in range(len(prob)): prob[i] /= prob_sum[i] return prob @@ -86,7 +88,7 @@ def get_numeric_gradient(place, # we only compute gradient of one element each time. # we use a for loop to compute the gradient of every element. 
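# The two comments above describe the numeric-gradient strategy in
# get_numeric_gradient: perturb one input element at a time inside a for
# loop. Below is a minimal, self-contained sketch of that idea (using the
# mean of the output as a scalar objective); it is not Paddle's
# implementation, and `func`, `delta`, and `numeric_gradient` are
# illustrative names only.
import numpy as np

def numeric_gradient(func, x, delta=1e-3):
    """Estimate d(mean(func(x)))/dx one element at a time (central difference)."""
    x = x.astype(np.float64)                   # work on a contiguous copy
    grad = np.zeros_like(x)
    flat_x, flat_g = x.ravel(), grad.ravel()   # views into x and grad
    for i in range(x.size):                    # range() works on Python 2 and 3
        orig = flat_x[i]
        flat_x[i] = orig + delta
        pos = func(x).mean()
        flat_x[i] = orig - delta
        neg = func(x).mean()
        flat_x[i] = orig                       # restore before the next element
        flat_g[i] = (pos - neg) / (2.0 * delta)
    return grad

# numeric_gradient(lambda a: a * a, np.ones((2, 3))) -> ~0.333 everywhere,
# since d(mean(a**2))/da_i = 2 * a_i / 6.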
- for i in xrange(tensor_size): + for i in range(tensor_size): if in_place: set_input(scope, op, inputs, place) @@ -139,7 +141,7 @@ class OpTest(unittest.TestCase): assert isinstance( numpy_dict, dict), "self.inputs, self.outputs must be numpy_dict" - for var_name, var_value in numpy_dict.iteritems(): + for var_name, var_value in numpy_dict.items(): if isinstance(var_value, (np.ndarray, np.generic)): self.try_call_once(var_value.dtype) elif isinstance(var_value, (list, tuple)): @@ -197,7 +199,7 @@ class OpTest(unittest.TestCase): def _get_io_vars(self, block, numpy_inputs): inputs = {} - for name, value in numpy_inputs.iteritems(): + for name, value in numpy_inputs.items(): if isinstance(value, list): var_list = [ block.var(sub_name) for sub_name, sub_value in value @@ -240,7 +242,7 @@ class OpTest(unittest.TestCase): # if the fetch_list is customized by user, we use it directly. # if not, fill the fetch_list by the user configured outputs in test. if len(fetch_list) == 0: - for var_name, var in outputs.iteritems(): + for var_name, var in outputs.items(): if isinstance(var, list): for v in var: fetch_list.append(v) @@ -252,7 +254,7 @@ class OpTest(unittest.TestCase): fetch_list.append(str(out_name)) # fetch_list = map(block.var, fetch_list) if not isinstance(fetch_list[0], fluid.framework.Variable): - fetch_list = map(block.var, fetch_list) + fetch_list = list(map(block.var, fetch_list)) outs = executor.run(program, feed=feed_map, fetch_list=fetch_list, @@ -334,7 +336,7 @@ class OpTest(unittest.TestCase): def __assert_is_close(self, numeric_grads, analytic_grads, names, max_relative_error, msg_prefix): - for a, b, name in itertools.izip(numeric_grads, analytic_grads, names): + for a, b, name in zip(numeric_grads, analytic_grads, names): abs_a = np.abs(a) abs_a[abs_a < 1e-3] = 1 @@ -460,6 +462,6 @@ class OpTest(unittest.TestCase): use_cuda=use_cuda, loss_name=loss.name, main_program=program) else: executor = Executor(place) - return map(np.array, - executor.run(prog, feed_dict, fetch_list, - return_numpy=False)) + return list( + map(np.array, + executor.run(prog, feed_dict, fetch_list, return_numpy=False))) diff --git a/python/paddle/fluid/tests/unittests/test_accuracy_op.py b/python/paddle/fluid/tests/unittests/test_accuracy_op.py index 212a87e529d..f6958cc8932 100644 --- a/python/paddle/fluid/tests/unittests/test_accuracy_op.py +++ b/python/paddle/fluid/tests/unittests/test_accuracy_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestAccuracyOp(OpTest): @@ -26,7 +26,7 @@ class TestAccuracyOp(OpTest): label = np.random.randint(0, 2, (n, 1)) self.inputs = {'Out': infer, 'Indices': indices, "Label": label} num_correct = 0 - for rowid in xrange(n): + for rowid in range(n): for ele in indices[rowid]: if ele == label[rowid]: num_correct += 1 diff --git a/python/paddle/fluid/tests/unittests/test_activation_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_activation_mkldnn_op.py index 7d554c2276c..99913a94f86 100644 --- a/python/paddle/fluid/tests/unittests/test_activation_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_activation_mkldnn_op.py @@ -15,9 +15,9 @@ import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest +from .op_test import OpTest from scipy.special import expit -from test_activation_op import TestRelu, TestTanh, TestSqrt, TestAbs +from .test_activation_op import TestRelu, TestTanh, TestSqrt, TestAbs class TestMKLDNNReluDim2(TestRelu): diff --git 
a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py index 5ed387fb124..d12aec885b3 100644 --- a/python/paddle/fluid/tests/unittests/test_activation_op.py +++ b/python/paddle/fluid/tests/unittests/test_activation_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest +from .op_test import OpTest from scipy.special import expit diff --git a/python/paddle/fluid/tests/unittests/test_adadelta_op.py b/python/paddle/fluid/tests/unittests/test_adadelta_op.py index 1b892e64c76..c8fa469270f 100644 --- a/python/paddle/fluid/tests/unittests/test_adadelta_op.py +++ b/python/paddle/fluid/tests/unittests/test_adadelta_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestAdadeltaOp1(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_adagrad_op.py b/python/paddle/fluid/tests/unittests/test_adagrad_op.py index 2f0ea79f4d6..1f4432782ba 100644 --- a/python/paddle/fluid/tests/unittests/test_adagrad_op.py +++ b/python/paddle/fluid/tests/unittests/test_adagrad_op.py @@ -16,7 +16,7 @@ import unittest import numpy as np import paddle.fluid.core as core from paddle.fluid.op import Operator -from op_test import OpTest +from .op_test import OpTest import math diff --git a/python/paddle/fluid/tests/unittests/test_adam_op.py b/python/paddle/fluid/tests/unittests/test_adam_op.py index 3c65f3d44ad..1ff9bf9be19 100644 --- a/python/paddle/fluid/tests/unittests/test_adam_op.py +++ b/python/paddle/fluid/tests/unittests/test_adam_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest from paddle.fluid import core from paddle.fluid.op import Operator @@ -273,7 +273,7 @@ class TestSparseAdamOp(unittest.TestCase): self.setup(scope, place) op_args = dict() - for key, np_array in self.dense_inputs.iteritems(): + for key, np_array in self.dense_inputs.items(): var = scope.var(key).get_tensor() var.set(np_array, place) op_args[key] = key @@ -290,7 +290,7 @@ class TestSparseAdamOp(unittest.TestCase): adam_op = Operator("adam", **op_args) adam_op.run(scope, place) - for key, np_array in self.outputs.iteritems(): + for key, np_array in self.outputs.items(): out_var = scope.var(key).get_tensor() actual = np.array(out_var) actual = actual.reshape([actual.size]) diff --git a/python/paddle/fluid/tests/unittests/test_adamax_op.py b/python/paddle/fluid/tests/unittests/test_adamax_op.py index 8099beefa58..668303ea2c9 100644 --- a/python/paddle/fluid/tests/unittests/test_adamax_op.py +++ b/python/paddle/fluid/tests/unittests/test_adamax_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestAdamaxOp1(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_anchor_generator_op.py b/python/paddle/fluid/tests/unittests/test_anchor_generator_op.py index 9c7d5d41f0c..2c22e3c5e2e 100644 --- a/python/paddle/fluid/tests/unittests/test_anchor_generator_op.py +++ b/python/paddle/fluid/tests/unittests/test_anchor_generator_op.py @@ -16,7 +16,7 @@ import unittest import numpy as np import sys import math -from op_test import OpTest +from .op_test import OpTest def anchor_generator_in_python(input_feat, anchor_sizes, aspect_ratios, diff --git a/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py b/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py index e04412f809c..22f03ede179 100644 --- 
a/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py +++ b/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class BaseTestCase(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_argsort_op.py b/python/paddle/fluid/tests/unittests/test_argsort_op.py index b29a102a388..7f6c0db448b 100644 --- a/python/paddle/fluid/tests/unittests/test_argsort_op.py +++ b/python/paddle/fluid/tests/unittests/test_argsort_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestArgsortOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_array_read_write_op.py b/python/paddle/fluid/tests/unittests/test_array_read_write_op.py index a49e9035a43..0000fb0958a 100644 --- a/python/paddle/fluid/tests/unittests/test_array_read_write_op.py +++ b/python/paddle/fluid/tests/unittests/test_array_read_write_op.py @@ -80,8 +80,9 @@ class TestArrayReadWrite(unittest.TestCase): append_backward(total_sum_scaled) - g_vars = map(default_main_program().global_block().var, - [each_x.name + "@GRAD" for each_x in x]) + g_vars = list( + map(default_main_program().global_block().var, + [each_x.name + "@GRAD" for each_x in x])) g_out = [ item.sum() for item in exe.run( diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py index e93c02bd3ee..872f4df2339 100644 --- a/python/paddle/fluid/tests/unittests/test_assign_op.py +++ b/python/paddle/fluid/tests/unittests/test_assign_op.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import op_test +from . import op_test import numpy import unittest diff --git a/python/paddle/fluid/tests/unittests/test_assign_value_op.py b/python/paddle/fluid/tests/unittests/test_assign_value_op.py index 02f2e6eddc8..1c13446faa0 100644 --- a/python/paddle/fluid/tests/unittests/test_assign_value_op.py +++ b/python/paddle/fluid/tests/unittests/test_assign_value_op.py @@ -14,7 +14,7 @@ import paddle.fluid as fluid import paddle.fluid.layers as layers -import op_test +from . 
import op_test import numpy import unittest import paddle.fluid.framework as framework diff --git a/python/paddle/fluid/tests/unittests/test_auc_op.py b/python/paddle/fluid/tests/unittests/test_auc_op.py index 6580c70ca68..48b63a1b237 100644 --- a/python/paddle/fluid/tests/unittests/test_auc_op.py +++ b/python/paddle/fluid/tests/unittests/test_auc_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest from paddle.fluid import metrics diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_mkldnn_op.py index 18fa5461590..1317906e5f3 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_mkldnn_op.py @@ -17,9 +17,9 @@ import numpy as np import paddle.fluid.core as core from paddle.fluid.op import Operator import paddle.fluid as fluid -from op_test import OpTest +from .op_test import OpTest from paddle.fluid.framework import grad_var_name -from test_batch_norm_op import TestBatchNormOpInference, TestBatchNormOpTraining, _reference_training, _reference_grad +from .test_batch_norm_op import TestBatchNormOpInference, TestBatchNormOpTraining, _reference_training, _reference_grad class TestMKLDNNBatchNormOpTraining(TestBatchNormOpTraining): diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py index fcb2612326e..d83f82665f2 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py @@ -17,7 +17,7 @@ import numpy as np import paddle.fluid.core as core from paddle.fluid.op import Operator import paddle.fluid as fluid -from op_test import OpTest +from .op_test import OpTest from paddle.fluid.framework import grad_var_name @@ -415,7 +415,7 @@ class TestBatchNormOpTraining(unittest.TestCase): self.__assert_close(scale_grad, out[6], "scale_grad") self.__assert_close(bias_grad, out[7], "bias_grad") - print "op test forward passed: ", str(place), data_layout + print("op test forward passed: ", str(place), data_layout) places = [core.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_beam_search_op.py b/python/paddle/fluid/tests/unittests/test_beam_search_op.py index 167451edd8c..e8283fc9422 100644 --- a/python/paddle/fluid/tests/unittests/test_beam_search_op.py +++ b/python/paddle/fluid/tests/unittests/test_beam_search_op.py @@ -59,8 +59,7 @@ class BeamSearchOpTester(unittest.TestCase): np.allclose( np.array(selected_scores), np.array([0.5, 0.6, 0.9, 0.7])[:, np.newaxis])) - self.assertEqual(selected_ids.lod(), - [[0L, 2L, 4L], [0L, 1L, 2L, 3L, 4L]]) + self.assertEqual(selected_ids.lod(), [[0, 2, 4], [0, 1, 2, 3, 4]]) def _create_pre_ids(self): np_data = np.array([[1, 2, 3, 4]], dtype='int64') diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py index b04f25ef874..7a0945060ae 100644 --- a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py +++ b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest import paddle.fluid.core as core diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_tensor_product_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_tensor_product_op.py index 
d20a11e27ea..83440ddda26 100644 --- a/python/paddle/fluid/tests/unittests/test_bilinear_tensor_product_op.py +++ b/python/paddle/fluid/tests/unittests/test_bilinear_tensor_product_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestBilinearTensorProductOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py b/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py index d5bd726c4a8..dba93a8fd07 100644 --- a/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py +++ b/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py @@ -13,7 +13,7 @@ #limitations under the License. import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def bipartite_match(distance, match_indices, match_dist): @@ -48,7 +48,7 @@ def bipartite_match(distance, match_indices, match_dist): def argmax_match(distance, match_indices, match_dist, threshold): r, c = distance.shape - for j in xrange(c): + for j in range(c): if match_indices[j] != -1: continue col_dist = distance[:, j] diff --git a/python/paddle/fluid/tests/unittests/test_box_coder_op.py b/python/paddle/fluid/tests/unittests/test_box_coder_op.py index 4ce9a4783e2..99449177db6 100644 --- a/python/paddle/fluid/tests/unittests/test_box_coder_op.py +++ b/python/paddle/fluid/tests/unittests/test_box_coder_op.py @@ -16,7 +16,7 @@ import unittest import numpy as np import sys import math -from op_test import OpTest +from .op_test import OpTest def box_coder(target_box, prior_box, prior_box_var, output_box, code_type, diff --git a/python/paddle/fluid/tests/unittests/test_cast_op.py b/python/paddle/fluid/tests/unittests/test_cast_op.py index b8d3ed3aa3e..164c28917f4 100644 --- a/python/paddle/fluid/tests/unittests/test_cast_op.py +++ b/python/paddle/fluid/tests/unittests/test_cast_op.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import op_test +from . 
import op_test import unittest import numpy as np import paddle.fluid.core as core diff --git a/python/paddle/fluid/tests/unittests/test_chunk_eval_op.py b/python/paddle/fluid/tests/unittests/test_chunk_eval_op.py index 23932194f0c..be95b6092ef 100644 --- a/python/paddle/fluid/tests/unittests/test_chunk_eval_op.py +++ b/python/paddle/fluid/tests/unittests/test_chunk_eval_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class Segment(object): @@ -63,7 +63,7 @@ class TestChunkEvalOp(OpTest): # generate chunk beginnings chunk_begins = sorted( np.random.choice( - range(starts[-1]), num_chunks, replace=False)) + list(range(starts[-1])), num_chunks, replace=False)) seq_chunk_begins = [] begin_idx = 0 # divide chunks into sequences @@ -93,7 +93,7 @@ class TestChunkEvalOp(OpTest): self.num_infer_chunks + self.num_label_chunks - self.num_correct_chunks) correct_chunks = np.random.choice( - range(len(chunks)), self.num_correct_chunks, replace=False) + list(range(len(chunks))), self.num_correct_chunks, replace=False) infer_chunks = np.random.choice( [x for x in range(len(chunks)) if x not in correct_chunks], self.num_infer_chunks - self.num_correct_chunks, @@ -138,7 +138,8 @@ class TestChunkEvalOp(OpTest): infer.fill(self.num_chunk_types * self.num_tag_types) label = np.copy(infer) starts = np.random.choice( - range(1, self.batch_size), self.num_sequences - 1, + list(range(1, self.batch_size)), + self.num_sequences - 1, replace=False).tolist() starts.extend([0, self.batch_size]) starts = sorted(starts) diff --git a/python/paddle/fluid/tests/unittests/test_clip_by_norm_op.py b/python/paddle/fluid/tests/unittests/test_clip_by_norm_op.py index 129958fa281..a4b8122f2f2 100644 --- a/python/paddle/fluid/tests/unittests/test_clip_by_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_clip_by_norm_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestClipByNormOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_clip_op.py b/python/paddle/fluid/tests/unittests/test_clip_op.py index 3df80c8ec8f..c542bdd76f0 100644 --- a/python/paddle/fluid/tests/unittests/test_clip_op.py +++ b/python/paddle/fluid/tests/unittests/test_clip_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestClipOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_compare_op.py b/python/paddle/fluid/tests/unittests/test_compare_op.py index 405afebae85..3d8ed33f3b9 100644 --- a/python/paddle/fluid/tests/unittests/test_compare_op.py +++ b/python/paddle/fluid/tests/unittests/test_compare_op.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import op_test +from . 
import op_test import unittest import numpy diff --git a/python/paddle/fluid/tests/unittests/test_concat_op.py b/python/paddle/fluid/tests/unittests/test_concat_op.py index e9f3c45dc40..e71805b9325 100644 --- a/python/paddle/fluid/tests/unittests/test_concat_op.py +++ b/python/paddle/fluid/tests/unittests/test_concat_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestConcatOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_conditional_block.py b/python/paddle/fluid/tests/unittests/test_conditional_block.py index d9f83905e61..77869a1242e 100644 --- a/python/paddle/fluid/tests/unittests/test_conditional_block.py +++ b/python/paddle/fluid/tests/unittests/test_conditional_block.py @@ -39,7 +39,7 @@ class ConditionalBlockTest(unittest.TestCase): x = numpy.random.random(size=(10, 1)).astype('float32') outs = exe.run(feed={'X': x}, fetch_list=[out])[0] - print outs + print(outs) loss = layers.mean(out) append_backward(loss=loss) outs = exe.run( @@ -47,7 +47,7 @@ class ConditionalBlockTest(unittest.TestCase): fetch_list=[ default_main_program().block(0).var(data.name + "@GRAD") ])[0] - print outs + print(outs) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_mkldnn_op.py index db6be21baaa..3e4739805ae 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_mkldnn_op.py @@ -14,7 +14,7 @@ import unittest -from test_conv2d_op import TestConv2dOp, TestWithPad, TestWithStride +from .test_conv2d_op import TestConv2dOp, TestWithPad, TestWithStride class TestMKLDNN(TestConv2dOp): diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py index a478649541b..71f421ee484 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py @@ -16,7 +16,7 @@ import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest +from .op_test import OpTest def conv2d_forward_naive(input, filter, group, conv_param): diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py index af6cd99b0d7..eb3bdbdde52 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py @@ -16,7 +16,7 @@ import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest +from .op_test import OpTest def conv2dtranspose_forward_naive(input_, filter_, attrs): diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_op.py index dd4ef7cc94e..f0eec4de0a0 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_op.py @@ -16,7 +16,7 @@ import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest +from .op_test import OpTest def conv3d_forward_naive(input, filter, group, conv_param): diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py index 300fa5e8bde..6500044b736 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py @@ -16,7 +16,7 @@ import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest +from .op_test import OpTest def conv3dtranspose_forward_naive(input_, filter_, attrs): diff --git a/python/paddle/fluid/tests/unittests/test_conv_shift_op.py b/python/paddle/fluid/tests/unittests/test_conv_shift_op.py index 5d4d244f439..4b8d76114c2 100644 --- a/python/paddle/fluid/tests/unittests/test_conv_shift_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv_shift_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def conv_shift_forward(x, y): @@ -22,8 +22,8 @@ def conv_shift_forward(x, y): M = x.shape[1] N = y.shape[1] y_half_width = (N - 1) / 2 - for i in xrange(M): - for j in xrange(N): + for i in range(M): + for j in range(N): out[:, i] += x[:, (i + j + M - y_half_width) % M] * y[:, j] return out diff --git a/python/paddle/fluid/tests/unittests/test_cos_sim_op.py b/python/paddle/fluid/tests/unittests/test_cos_sim_op.py index 1b27cd57670..6da63d786e0 100644 --- a/python/paddle/fluid/tests/unittests/test_cos_sim_op.py +++ b/python/paddle/fluid/tests/unittests/test_cos_sim_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestCosSimOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_create_op_doc_string.py b/python/paddle/fluid/tests/unittests/test_create_op_doc_string.py index 5e6f9a20a93..07c89eefc32 100644 --- a/python/paddle/fluid/tests/unittests/test_create_op_doc_string.py +++ b/python/paddle/fluid/tests/unittests/test_create_op_doc_string.py @@ -18,7 +18,7 @@ import paddle.fluid.layers as layers class TestDocString(unittest.TestCase): def test_layer_doc_string(self): - print layers.dropout.__doc__ + print(layers.dropout.__doc__) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_crf_decoding_op.py b/python/paddle/fluid/tests/unittests/test_crf_decoding_op.py index 122b076c2d3..b77b7a5dff3 100644 --- a/python/paddle/fluid/tests/unittests/test_crf_decoding_op.py +++ b/python/paddle/fluid/tests/unittests/test_crf_decoding_op.py @@ -16,7 +16,7 @@ import unittest import random import numpy as np -from op_test import OpTest +from .op_test import OpTest class CRFDecoding(object): diff --git a/python/paddle/fluid/tests/unittests/test_crop_op.py b/python/paddle/fluid/tests/unittests/test_crop_op.py index 4016089c016..94fc17a4b05 100644 --- a/python/paddle/fluid/tests/unittests/test_crop_op.py +++ b/python/paddle/fluid/tests/unittests/test_crop_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def crop(data, offsets, crop_shape): diff --git a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py index c5b9e92d691..1b19a9c0f8d 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py +++ b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest, randomize_probability +from .op_test import OpTest, randomize_probability class TestCrossEntropyOp1(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_ctc_align.py b/python/paddle/fluid/tests/unittests/test_ctc_align.py index 131b4076f45..ba8e9ba53d0 100644 --- a/python/paddle/fluid/tests/unittests/test_ctc_align.py +++ 
b/python/paddle/fluid/tests/unittests/test_ctc_align.py @@ -15,8 +15,8 @@ import sys import unittest import numpy as np -from op_test import OpTest -from test_softmax_op import stable_softmax +from .op_test import OpTest +from .test_softmax_op import stable_softmax def CTCAlign(input, lod, blank, merge_repeated): diff --git a/python/paddle/fluid/tests/unittests/test_cumsum_op.py b/python/paddle/fluid/tests/unittests/test_cumsum_op.py index 04e7f0b9451..a7a509ff859 100644 --- a/python/paddle/fluid/tests/unittests/test_cumsum_op.py +++ b/python/paddle/fluid/tests/unittests/test_cumsum_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestSumOp1(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_data_balance.py b/python/paddle/fluid/tests/unittests/test_data_balance.py index aa09b0ea445..951282e8bab 100644 --- a/python/paddle/fluid/tests/unittests/test_data_balance.py +++ b/python/paddle/fluid/tests/unittests/test_data_balance.py @@ -21,7 +21,7 @@ import numpy as np class TestDataBalance(unittest.TestCase): def prepare_data(self): def fake_data_generator(): - for n in xrange(self.total_ins_num): + for n in range(self.total_ins_num): yield np.ones((3, 4)) * n, n # Prepare data @@ -41,7 +41,7 @@ class TestDataBalance(unittest.TestCase): def prepare_lod_data(self): def fake_data_generator(): - for n in xrange(1, self.total_ins_num + 1): + for n in range(1, self.total_ins_num + 1): d1 = (np.ones((n, 3)) * n).astype('float32') d2 = (np.array(n).reshape((1, 1))).astype('int32') yield d1, d2 @@ -58,9 +58,9 @@ class TestDataBalance(unittest.TestCase): (0, 1)) ] lod = [0] - for _ in xrange(self.batch_size): + for _ in range(self.batch_size): try: - ins = generator.next() + ins = next(generator) except StopIteration: eof = True break diff --git a/python/paddle/fluid/tests/unittests/test_decayed_adagrad_op.py b/python/paddle/fluid/tests/unittests/test_decayed_adagrad_op.py index 84c44d48173..484193b8543 100644 --- a/python/paddle/fluid/tests/unittests/test_decayed_adagrad_op.py +++ b/python/paddle/fluid/tests/unittests/test_decayed_adagrad_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestDecayedAdagradOp1(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_default_scope_funcs.py b/python/paddle/fluid/tests/unittests/test_default_scope_funcs.py index a3bf7b544b9..868bcca881a 100644 --- a/python/paddle/fluid/tests/unittests/test_default_scope_funcs.py +++ b/python/paddle/fluid/tests/unittests/test_default_scope_funcs.py @@ -39,7 +39,7 @@ class TestDefaultScopeFuncs(unittest.TestCase): self.assertTrue(i.is_int()) self.assertEqual(10, i.get_int()) - for _ in xrange(10): + for _ in range(10): scoped_function(__new_scope__) diff --git a/python/paddle/fluid/tests/unittests/test_detection_map_op.py b/python/paddle/fluid/tests/unittests/test_detection_map_op.py index 05d3367ad8e..a71447ebe30 100644 --- a/python/paddle/fluid/tests/unittests/test_detection_map_op.py +++ b/python/paddle/fluid/tests/unittests/test_detection_map_op.py @@ -17,7 +17,7 @@ import numpy as np import sys import collections import math -from op_test import OpTest +from .op_test import OpTest class TestDetectionMAPOp(OpTest): @@ -176,7 +176,7 @@ class TestDetectionMAPOp(OpTest): true_pos[label].append([score, tp]) false_pos[label].append([score, fp]) - for (label, label_pos_num) in label_count.items(): + for (label, label_pos_num) in list(label_count.items()): if 
label_pos_num == 0 or label not in true_pos: continue label_true_pos = true_pos[label] label_false_pos = false_pos[label] diff --git a/python/paddle/fluid/tests/unittests/test_dist_mnist.py b/python/paddle/fluid/tests/unittests/test_dist_mnist.py index ad2d57f7c5f..a6fcbd977f1 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_dist_mnist.py @@ -25,6 +25,7 @@ import unittest from multiprocessing import Process import os import signal +from functools import reduce SEED = 1 DTYPE = "float32" @@ -172,12 +173,12 @@ class TestDistMnist(unittest.TestCase): exe.run(fluid.default_startup_program()) feed_var_list = [ - var for var in trainer_prog.global_block().vars.itervalues() + var for var in trainer_prog.global_block().vars.values() if var.is_data ] feeder = fluid.DataFeeder(feed_var_list, place) - for pass_id in xrange(10): + for pass_id in range(10): for batch_id, data in enumerate(train_reader()): exe.run(trainer_prog, feed=feeder.feed(data)) diff --git a/python/paddle/fluid/tests/unittests/test_dist_transpiler.py b/python/paddle/fluid/tests/unittests/test_dist_transpiler.py index 9dbef0693bb..4d1a89da3dd 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_transpiler.py +++ b/python/paddle/fluid/tests/unittests/test_dist_transpiler.py @@ -151,7 +151,7 @@ class TestBasicModelWithLargeBlockSize(TranspilerTest): ["fill_constant", "fill_constant", "fill_constant"]) # the variable #fc_w will be split into two blocks fc_w_var = startup2.global_block().var("fc_w") - self.assertEqual(fc_w_var.shape, (1000L, 1000L)) + self.assertEqual(fc_w_var.shape, (1000, 1000)) # all parameters should be optimized on pserver pserver_params = [] @@ -184,9 +184,9 @@ class TestNoSliceVar(TranspilerTest): _, startup = self.get_pserver(self.pserver1_ep, config) _, startup2 = self.get_pserver(self.pserver2_ep, config) - if startup.global_block().vars.has_key("fc_w"): + if "fc_w" in startup.global_block().vars: fc_w_var = startup.global_block().vars["fc_w"] - elif startup2.global_block().vars.has_key("fc_w"): + elif "fc_w" in startup2.global_block().vars: fc_w_var = startup2.global_block().vars["fc_w"] self.assertEqual(fc_w_var.shape, (1000, 1000)) diff --git a/python/paddle/fluid/tests/unittests/test_dist_word2vec.py b/python/paddle/fluid/tests/unittests/test_dist_word2vec.py index 712fd5849d8..4bb3998f891 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_word2vec.py +++ b/python/paddle/fluid/tests/unittests/test_dist_word2vec.py @@ -183,12 +183,12 @@ class TestDistMnist(unittest.TestCase): exec_strategy=exec_strategy) feed_var_list = [ - var for var in trainer_prog.global_block().vars.itervalues() + var for var in trainer_prog.global_block().vars.values() if var.is_data ] feeder = fluid.DataFeeder(feed_var_list, place) - for pass_id in xrange(10): + for pass_id in range(10): for batch_id, data in enumerate(train_reader()): avg_loss_np = train_exe.run(feed=feeder.feed(data), fetch_list=[avg_cost.name]) diff --git a/python/paddle/fluid/tests/unittests/test_dropout_op.py b/python/paddle/fluid/tests/unittests/test_dropout_op.py index eaa3435a864..0b1e1e9388a 100644 --- a/python/paddle/fluid/tests/unittests/test_dropout_op.py +++ b/python/paddle/fluid/tests/unittests/test_dropout_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest +from .op_test import OpTest class TestDropoutOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_dyn_rnn.py 
b/python/paddle/fluid/tests/unittests/test_dyn_rnn.py index 4448de8839d..fdc6adc93bc 100644 --- a/python/paddle/fluid/tests/unittests/test_dyn_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_dyn_rnn.py @@ -135,7 +135,7 @@ class TestDynRNN(unittest.TestCase): loss_0 = exe.run(main_program, feed=feeder.feed(data), fetch_list=[loss])[0] - for _ in xrange(100): + for _ in range(100): val = exe.run(main_program, feed=feeder.feed(data), fetch_list=[loss])[0] diff --git a/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py b/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py index 0f289af2847..0cb57f45b00 100644 --- a/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py +++ b/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py @@ -17,7 +17,7 @@ import random import collections import paddle.fluid as fluid import unittest -from decorators import * +from .decorators import * class Memory(object): @@ -30,12 +30,12 @@ class Memory(object): assert val.dtype == self.ex.dtype self.cur = val - def next(self): + def __next__(self): self.ex = self.cur self.cur = None def __next__(self): - self.next() + next(self) def reset(self): self.ex = numpy.zeros(shape=self.ex.shape, dtype=self.ex.dtype) @@ -61,13 +61,13 @@ class BaseRNN(object): self.num_seq = num_seq self.inputs = collections.defaultdict(list) - for _ in xrange(num_seq): + for _ in range(num_seq): seq_len = random.randint(1, max_seq_len - 1) for iname in ins: ishape = ins[iname].get('shape', None) idtype = ins[iname].get('dtype', 'float32') lst = [] - for _ in xrange(seq_len): + for _ in range(seq_len): lst.append(numpy.random.random(size=ishape).astype(idtype)) self.inputs[iname].append(lst) @@ -96,16 +96,16 @@ class BaseRNN(object): for out in self.outputs: retv[out] = [] - for seq_id in xrange(self.num_seq): + for seq_id in range(self.num_seq): for mname in self.mems: self.mems[mname].reset() for out in self.outputs: self.outputs[out].next_sequence() - iname0 = self.inputs.keys()[0] + iname0 = list(self.inputs.keys())[0] seq_len = len(self.inputs[iname0][seq_id]) - for step_id in xrange(seq_len): + for step_id in range(seq_len): xargs = dict() for iname in self.inputs: @@ -138,7 +138,7 @@ class BaseRNN(object): for iname in self.inputs: lod = [] np_flatten = [] - for seq_id in xrange(len(self.inputs[iname])): + for seq_id in range(len(self.inputs[iname])): seq_len = len(self.inputs[iname][seq_id]) lod.append(seq_len) np_flatten.extend(self.inputs[iname][seq_id]) @@ -159,8 +159,8 @@ class BaseRNN(object): " which is not matrix") g = numpy.zeros(shape=p.shape, dtype=p.dtype) - for i in xrange(p.shape[0]): - for j in xrange(p.shape[1]): + for i in range(p.shape[0]): + for j in range(p.shape[1]): o = p[i][j] p[i][j] += delta pos = self._exe_mean_out_() @@ -184,7 +184,7 @@ class BaseRNN(object): if len(item.shape) != 1: raise ValueError("Not support") - for i in xrange(len(item)): + for i in range(len(item)): o = item[i] item[i] += delta pos = self._exe_mean_out_() @@ -198,14 +198,14 @@ class BaseRNN(object): if not return_one_tensor: return grad - for i in xrange(len(grad)): + for i in range(len(grad)): grad[i] = numpy.concatenate(grad[i]) grad = numpy.concatenate(grad) return grad def _exe_mean_out_(self): outs = self.exe() - return numpy.array([o.mean() for o in outs.itervalues()]).mean() + return numpy.array([o.mean() for o in outs.values()]).mean() class SeedFixedTestCase(unittest.TestCase): @@ -274,13 +274,14 @@ class TestSimpleMul(SeedFixedTestCase): cpu = fluid.CPUPlace() exe = 
fluid.Executor(cpu) - out, w_g, i_g = map(numpy.array, - exe.run(feed=py_rnn.to_feed(cpu), - fetch_list=[ - out, self.PARAM_NAME + "@GRAD", - self.DATA_NAME + "@GRAD" - ], - return_numpy=False)) + out, w_g, i_g = list( + map(numpy.array, + exe.run(feed=py_rnn.to_feed(cpu), + fetch_list=[ + out, self.PARAM_NAME + "@GRAD", self.DATA_NAME + + "@GRAD" + ], + return_numpy=False))) out_by_python = py_rnn.exe()[self.OUT_NAME] self.assertTrue(numpy.allclose(out, out_by_python)) w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME) @@ -351,14 +352,15 @@ class TestSimpleMulWithMemory(SeedFixedTestCase): cpu = fluid.CPUPlace() exe = fluid.Executor(cpu) feed = py_rnn.to_feed(cpu) - last_np, w_g, i_g = map(numpy.array, - exe.run(feed=feed, - fetch_list=[ - last, self.PARAM_NAME + "@GRAD", - self.DATA_NAME + "@GRAD" - ], - return_numpy=False)) - last_by_py, = py_rnn.exe().values() + last_np, w_g, i_g = list( + map(numpy.array, + exe.run(feed=feed, + fetch_list=[ + last, self.PARAM_NAME + "@GRAD", self.DATA_NAME + + "@GRAD" + ], + return_numpy=False))) + last_by_py, = list(py_rnn.exe().values()) w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME) self.assertTrue(numpy.allclose(last_np, last_by_py)) diff --git a/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py b/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py index 31af1245720..d182889a970 100644 --- a/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py +++ b/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py @@ -67,7 +67,7 @@ class TestDyRnnStaticInput(unittest.TestCase): def _lodtensor_to_ndarray(self, lod_tensor): dims = lod_tensor.shape() ndarray = np.zeros(shape=dims).astype('float32') - for i in xrange(np.product(dims)): + for i in range(np.product(dims)): ndarray.ravel()[i] = lod_tensor._get_float_element(i) return ndarray, lod_tensor.recursive_sequence_lengths() @@ -114,7 +114,7 @@ class TestDyRnnStaticInput(unittest.TestCase): shape=[1], dtype='int64', value=0) step_idx.stop_gradient = True - for i in xrange(self._max_sequence_len): + for i in range(self._max_sequence_len): step_out = fluid.layers.array_read(static_input_out_array, step_idx) step_out.stop_gradient = True @@ -140,27 +140,27 @@ class TestDyRnnStaticInput(unittest.TestCase): static_lod = self.static_input_tensor.recursive_sequence_lengths() static_sliced = [] cur_offset = 0 - for i in xrange(len(static_lod[0])): + for i in range(len(static_lod[0])): static_sliced.append(self.static_input_data[cur_offset:( cur_offset + static_lod[0][i])]) cur_offset += static_lod[0][i] static_seq_len = static_lod[0] static_reordered = [] - for i in xrange(len(x_sorted_indices)): + for i in range(len(x_sorted_indices)): static_reordered.extend(static_sliced[x_sorted_indices[i]].tolist()) static_seq_len_reordered = [ static_seq_len[x_sorted_indices[i]] - for i in xrange(len(x_sorted_indices)) + for i in range(len(x_sorted_indices)) ] static_step_outs = [] static_step_lods = [] - for i in xrange(self._max_sequence_len): + for i in range(self._max_sequence_len): end = len(x_seq_len) - bisect.bisect_left(x_seq_len_sorted, i + 1) lod = [] total_len = 0 - for i in xrange(end): + for i in range(end): lod.append(static_seq_len_reordered[i]) total_len += lod[-1] static_step_lods.append([lod]) @@ -174,7 +174,7 @@ class TestDyRnnStaticInput(unittest.TestCase): static_step_outs = self.build_graph(only_forward=True) self.exe.run(framework.default_startup_program()) expected_outs, expected_lods = self.get_expected_static_step_outs() - for 
i in xrange(self._max_sequence_len): + for i in range(self._max_sequence_len): step_out, lod = self.fetch_value(static_step_outs[i]) self.assertTrue(np.allclose(step_out, expected_outs[i])) self.assertTrue(np.allclose(lod, expected_lods[i])) @@ -189,7 +189,7 @@ class TestDyRnnStaticInput(unittest.TestCase): numeric_gradients = np.zeros(shape=static_input_shape).astype('float32') # calculate numeric gradients tensor_size = np.product(static_input_shape) - for i in xrange(tensor_size): + for i in range(tensor_size): origin = self.static_input_tensor._get_float_element(i) x_pos = origin + self._delta self.static_input_tensor._set_float_element(i, x_pos) diff --git a/python/paddle/fluid/tests/unittests/test_edit_distance_op.py b/python/paddle/fluid/tests/unittests/test_edit_distance_op.py index 816562621b4..00abd19cd90 100644 --- a/python/paddle/fluid/tests/unittests/test_edit_distance_op.py +++ b/python/paddle/fluid/tests/unittests/test_edit_distance_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def Levenshtein(hyp, ref): diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_add_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_add_mkldnn_op.py index bcdbfc8e527..afdbabd6a2a 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_add_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_add_mkldnn_op.py @@ -14,8 +14,8 @@ import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest -from test_elementwise_add_op import * +from .op_test import OpTest +from .test_elementwise_add_op import * ''' Some tests differ from the tests defined in test_elementwise_add_op.py because MKLDNN does not support tensors of number of dimensions 3. diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py index fb9a496126f..432901fbb07 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest +from .op_test import OpTest class TestElementwiseAddOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py index bfe022af6da..4244e1132a2 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py @@ -13,7 +13,7 @@ # limitations under the License. 
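# Most hunks in this patch rewrite "from op_test import OpTest" as
# "from .op_test import OpTest": Python 3 dropped the implicit relative
# imports of Python 2 (PEP 328), so the bare form stops resolving once these
# files are imported as modules of the unittests package. A hedged,
# illustrative compatibility idiom (not used verbatim in this patch):
try:
    from .op_test import OpTest          # explicit relative import, Py2 and Py3
except (ImportError, ValueError):
    from op_test import OpTest           # fallback when run as a plain script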
import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class ElementwiseDivOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_gradient_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_gradient_op.py index c6f45381af8..6f350044892 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_gradient_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_gradient_op.py @@ -26,7 +26,7 @@ class TestElementWiseAddOp(unittest.TestCase): def test_with_place(place): out_grad = np.random.random_sample(self.x.shape).astype(np.float32) x_grad = out_grad - sum_axis = range(0, len(self.x.shape)) + sum_axis = list(range(0, len(self.x.shape))) del sum_axis[self.axis] y_grad = np.sum(out_grad, axis=tuple(sum_axis)) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py index b6cd18a5795..3796d0af92d 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestElementwiseOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py index 92099724fe6..826e8a9619a 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestElementwiseOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py index 2742bb21d95..5195f7b5f21 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py @@ -13,7 +13,7 @@ # limitations under the License. import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class ElementwiseMulOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py index a3fd18669c5..be9f82943f0 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py @@ -13,7 +13,7 @@ # limitations under the License. import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestElementwisePowOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py index acf652d3fb9..bbf113aed2d 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py @@ -13,7 +13,7 @@ # limitations under the License. 
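# The list(range(...)) wrapper added to sum_axis above, and the list(map(...))
# wrappers added elsewhere in this patch, are needed because Python 3 turned
# range, map, zip, and dict.items()/.values() into lazy objects. A small
# illustrative sketch (shape and axis are stand-in values adapted from the
# hunk above, not taken from the tests):
shape, axis = (2, 3, 4), 1

sum_axis = list(range(len(shape)))            # list() makes the result mutable
del sum_axis[axis]                            # del on a bare range() raises TypeError

squared = list(map(lambda v: v * v, shape))   # a map object can be consumed only once

params = {"fc_w": 1.0, "fc_b": 0.5}
for name, value in params.items():            # .items() replaces Python 2 iteritems()
    print(name, value)                        # print() is a function in Python 3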
import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestElementwiseOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_expand_op.py b/python/paddle/fluid/tests/unittests/test_expand_op.py index a91e3aef5a1..8239ea7ae7c 100644 --- a/python/paddle/fluid/tests/unittests/test_expand_op.py +++ b/python/paddle/fluid/tests/unittests/test_expand_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestExpandOpRank1(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py b/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py index 026ac2112b2..b0a773bf208 100644 --- a/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py +++ b/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np import math -from op_test import OpTest +from .op_test import OpTest def quantize_max_abs(x, num_bits): diff --git a/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py b/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py index 6c6aa9d3bb6..72fba806091 100644 --- a/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py +++ b/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestFakeQuantizeOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_fc_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_fc_mkldnn_op.py index 3f547f3c484..1476061a12e 100644 --- a/python/paddle/fluid/tests/unittests/test_fc_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_fc_mkldnn_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def fully_connected_naive(input, weights, bias_data=None): diff --git a/python/paddle/fluid/tests/unittests/test_fetch_var.py b/python/paddle/fluid/tests/unittests/test_fetch_var.py index 46c3bbb6712..80994ef4ab4 100644 --- a/python/paddle/fluid/tests/unittests/test_fetch_var.py +++ b/python/paddle/fluid/tests/unittests/test_fetch_var.py @@ -14,7 +14,7 @@ import paddle.fluid as fluid import paddle.fluid.layers as layers -import op_test +from . 
import op_test import numpy import unittest diff --git a/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py b/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py index 0c75cf33f5f..7533db80cb2 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestFillConstantBatchSizeLikeWhenFirstDimIsBatchSize(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py index 5e2ddb218af..d94a25b49c0 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestFillConstantOp1(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_fill_op.py b/python/paddle/fluid/tests/unittests/test_fill_op.py index 762d29199e2..09b949871bf 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest import paddle.fluid.core as core diff --git a/python/paddle/fluid/tests/unittests/test_fill_zeros_like_op.py b/python/paddle/fluid/tests/unittests/test_fill_zeros_like_op.py index c9b3e4ba138..23ab9dd18bd 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_zeros_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_zeros_like_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestFillZerosLikeOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_ftrl_op.py b/python/paddle/fluid/tests/unittests/test_ftrl_op.py index 5f7581391af..1b593be37de 100644 --- a/python/paddle/fluid/tests/unittests/test_ftrl_op.py +++ b/python/paddle/fluid/tests/unittests/test_ftrl_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestFTRLOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_gather_op.py b/python/paddle/fluid/tests/unittests/test_gather_op.py index 4ae90864806..df782c99093 100644 --- a/python/paddle/fluid/tests/unittests/test_gather_op.py +++ b/python/paddle/fluid/tests/unittests/test_gather_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestGatherOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_gaussian_random_batch_size_like_op.py b/python/paddle/fluid/tests/unittests/test_gaussian_random_batch_size_like_op.py index 1398166a74e..fed7989495d 100644 --- a/python/paddle/fluid/tests/unittests/test_gaussian_random_batch_size_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_gaussian_random_batch_size_like_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestGaussianRandomBatchSizeLike(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_gaussian_random_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_gaussian_random_mkldnn_op.py index 3ae877a6081..e14c5d668a1 100644 --- 
a/python/paddle/fluid/tests/unittests/test_gaussian_random_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_gaussian_random_mkldnn_op.py @@ -14,7 +14,7 @@ import unittest -from test_gaussian_random_op import TestGaussianRandomOp +from .test_gaussian_random_op import TestGaussianRandomOp class TestMKLDNN(TestGaussianRandomOp): diff --git a/python/paddle/fluid/tests/unittests/test_get_places_op.py b/python/paddle/fluid/tests/unittests/test_get_places_op.py index 964423e2d26..06b088bac0d 100644 --- a/python/paddle/fluid/tests/unittests/test_get_places_op.py +++ b/python/paddle/fluid/tests/unittests/test_get_places_op.py @@ -14,7 +14,7 @@ import paddle.fluid as fluid from paddle.fluid.layers.device import get_places -import decorators +from . import decorators import unittest diff --git a/python/paddle/fluid/tests/unittests/test_gru_op.py b/python/paddle/fluid/tests/unittests/test_gru_op.py index 8fbf1560859..2f93b0dc2cf 100644 --- a/python/paddle/fluid/tests/unittests/test_gru_op.py +++ b/python/paddle/fluid/tests/unittests/test_gru_op.py @@ -15,8 +15,8 @@ import unittest import numpy as np import math -from op_test import OpTest -from test_lstm_op import identity, sigmoid, tanh, relu +from .op_test import OpTest +from .test_lstm_op import identity, sigmoid, tanh, relu class TestGRUOp(OpTest): @@ -38,7 +38,7 @@ class TestGRUOp(OpTest): for i in range(len(seq_lens)): seq_starts.append(seq_starts[-1] + seq_lens[i]) sorted_seqs = sorted( - range(len(seq_lens)), lambda x, y: seq_lens[y] - seq_lens[x]) + list(range(len(seq_lens))), lambda x, y: seq_lens[y] - seq_lens[x]) num_batch = seq_lens[sorted_seqs[0]] for batch_idx in range(num_batch): idx_in_seq = [] @@ -74,15 +74,16 @@ class TestGRUOp(OpTest): def gru(self): input, lod = self.inputs['Input'] w = self.inputs['Weight'] - b = self.inputs['Bias'] if self.inputs.has_key('Bias') else np.zeros( + b = self.inputs['Bias'] if 'Bias' in self.inputs else np.zeros( (1, self.frame_size * 3)) batch_gate = self.outputs['BatchGate'] batch_reset_hidden_prev = self.outputs['BatchResetHiddenPrev'] batch_hidden = self.outputs['BatchHidden'] hidden = self.outputs['Hidden'] idx_in_seq_list = self.idx_in_seq_list - h_p = self.inputs['H0'][self.sorted_seqs] if self.inputs.has_key( - 'H0') else np.zeros((len(idx_in_seq_list[0]), self.frame_size)) + h_p = self.inputs['H0'][ + self.sorted_seqs] if 'H0' in self.inputs else np.zeros( + (len(idx_in_seq_list[0]), self.frame_size)) num_batch = len(idx_in_seq_list) end_idx = 0 for batch_idx in range(num_batch): diff --git a/python/paddle/fluid/tests/unittests/test_gru_unit_op.py b/python/paddle/fluid/tests/unittests/test_gru_unit_op.py index c56b1eefd3a..68359f6f514 100644 --- a/python/paddle/fluid/tests/unittests/test_gru_unit_op.py +++ b/python/paddle/fluid/tests/unittests/test_gru_unit_op.py @@ -15,7 +15,7 @@ import math import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class GRUActivationType(OpTest): @@ -76,7 +76,7 @@ class TestGRUUnitOp(OpTest): x = self.inputs['Input'] h_p = self.inputs['HiddenPrev'] w = self.inputs['Weight'] - b = self.inputs['Bias'] if self.inputs.has_key('Bias') else np.zeros( + b = self.inputs['Bias'] if 'Bias' in self.inputs else np.zeros( (1, frame_size * 3)) g = x + np.tile(b, (batch_size, 1)) w_u_r = w.flatten()[:frame_size * frame_size * 2].reshape( diff --git a/python/paddle/fluid/tests/unittests/test_hinge_loss_op.py b/python/paddle/fluid/tests/unittests/test_hinge_loss_op.py index 70586c6be3d..6d8aba98ea1 100644 --- 
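The GRU hunks above replace dict.has_key(), which was removed in Python 3, with the in operator. Note that the sorted() call in the same hunk still passes a comparison function positionally, which Python 3 rejects; a faithful port goes through functools.cmp_to_key (or a key= function). A minimal sketch using hypothetical sequence lengths:

from functools import cmp_to_key

inputs = {'Input': [1, 2, 3], 'Bias': [0.1, 0.2, 0.3]}
b = inputs['Bias'] if 'Bias' in inputs else [0.0] * 3          # replaces inputs.has_key('Bias')

seq_lens = [3, 6, 1]
# Direct port of the Python 2 cmp function: sorts indices by descending sequence length.
sorted_seqs = sorted(range(len(seq_lens)),
                     key=cmp_to_key(lambda x, y: seq_lens[y] - seq_lens[x]))
# Equivalent and simpler: sorted(range(len(seq_lens)), key=lambda i: seq_lens[i], reverse=True)
print(b, sorted_seqs)                                          # [0.1, 0.2, 0.3] [1, 0, 2]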
a/python/paddle/fluid/tests/unittests/test_hinge_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_hinge_loss_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestHingeLossOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py b/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py index d090960c84e..9abe44db7de 100644 --- a/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py +++ b/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np import math -from op_test import OpTest +from .op_test import OpTest def find_latest_set(num): diff --git a/python/paddle/fluid/tests/unittests/test_huber_loss_op.py b/python/paddle/fluid/tests/unittests/test_huber_loss_op.py index a8d0a776255..ed19c39e04d 100644 --- a/python/paddle/fluid/tests/unittests/test_huber_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_huber_loss_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def huber_loss_forward(val, delta): diff --git a/python/paddle/fluid/tests/unittests/test_im2sequence_op.py b/python/paddle/fluid/tests/unittests/test_im2sequence_op.py index 13bc5768740..4f5954f59c6 100644 --- a/python/paddle/fluid/tests/unittests/test_im2sequence_op.py +++ b/python/paddle/fluid/tests/unittests/test_im2sequence_op.py @@ -13,7 +13,7 @@ #limitations under the License. import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def get_output_shape(attrs, in_shape, img_real_size): diff --git a/python/paddle/fluid/tests/unittests/test_image_classification_layer.py b/python/paddle/fluid/tests/unittests/test_image_classification_layer.py index 6ecfa9ea213..23b1ed957ad 100644 --- a/python/paddle/fluid/tests/unittests/test_image_classification_layer.py +++ b/python/paddle/fluid/tests/unittests/test_image_classification_layer.py @@ -43,7 +43,7 @@ class TestLayer(unittest.TestCase): hidden2 = fluid.layers.fc(input=hidden1, size=128, act='relu') fluid.layers.batch_norm(input=hidden2) - print str(main_program) + print(str(main_program)) def test_dropout_layer(self): main_program = Program() @@ -53,7 +53,7 @@ class TestLayer(unittest.TestCase): name='pixel', shape=[3, 48, 48], dtype='float32') fluid.layers.dropout(x=images, dropout_prob=0.5) - print str(main_program) + print(str(main_program)) def test_img_conv_group(self): main_program = Program() @@ -65,7 +65,7 @@ class TestLayer(unittest.TestCase): conv1 = conv_block(images, 64, 2, [0.3, 0]) conv_block(conv1, 256, 3, [0.4, 0.4, 0]) - print str(main_program) + print(str(main_program)) def test_elementwise_add_with_act(self): main_program = Program() diff --git a/python/paddle/fluid/tests/unittests/test_inference_model_io.py b/python/paddle/fluid/tests/unittests/test_inference_model_io.py index 51460cbb137..4cd203155f4 100644 --- a/python/paddle/fluid/tests/unittests/test_inference_model_io.py +++ b/python/paddle/fluid/tests/unittests/test_inference_model_io.py @@ -48,7 +48,7 @@ class TestBook(unittest.TestCase): exe.run(init_program, feed={}, fetch_list=[]) - for i in xrange(100): + for i in range(100): tensor_x = np.array( [[1, 1], [1, 2], [3, 4], [5, 2]]).astype("float32") tensor_y = np.array([[-2], [-3], [-7], [-7]]).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py b/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py index eff4212d91e..53d31d20d09 100644 
--- a/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py +++ b/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py @@ -17,7 +17,7 @@ import numpy as np import numpy.random as random import sys import math -from op_test import OpTest +from .op_test import OpTest class TestIOUSimilarityOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_is_empty_op.py b/python/paddle/fluid/tests/unittests/test_is_empty_op.py index 11121d9b653..33777dfecbc 100644 --- a/python/paddle/fluid/tests/unittests/test_is_empty_op.py +++ b/python/paddle/fluid/tests/unittests/test_is_empty_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestEmpty(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_l1_norm_op.py b/python/paddle/fluid/tests/unittests/test_l1_norm_op.py index fa5b18a16f7..f5e139fcfd8 100644 --- a/python/paddle/fluid/tests/unittests/test_l1_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_l1_norm_op.py @@ -14,7 +14,7 @@ import numpy as np import unittest -from op_test import OpTest +from .op_test import OpTest class TestL1NormOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_label_smooth_op.py b/python/paddle/fluid/tests/unittests/test_label_smooth_op.py index ca21289a0d4..081852b6f1a 100644 --- a/python/paddle/fluid/tests/unittests/test_label_smooth_op.py +++ b/python/paddle/fluid/tests/unittests/test_label_smooth_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestLabelSmoothOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py index 69365db4d10..295887ccd17 100644 --- a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py @@ -17,6 +17,7 @@ import numpy as np from operator import mul import paddle.fluid.core as core import paddle.fluid as fluid +from functools import reduce np.random.random(123) diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index ab2ab24f354..2d33c8cfaa7 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -20,7 +20,7 @@ from paddle.fluid.layers.device import get_places import paddle.fluid.nets as nets from paddle.fluid.framework import Program, program_guard, default_main_program from paddle.fluid.param_attr import ParamAttr -import decorators +from . 
import decorators class TestBook(unittest.TestCase): @@ -279,7 +279,7 @@ class TestBook(unittest.TestCase): def test_nce(self): window_size = 5 words = [] - for i in xrange(window_size): + for i in range(window_size): words.append( layers.data( name='word_{0}'.format(i), shape=[1], dtype='int64')) @@ -288,7 +288,7 @@ class TestBook(unittest.TestCase): label_word = int(window_size / 2) + 1 embs = [] - for i in xrange(window_size): + for i in range(window_size): if i == label_word: continue diff --git a/python/paddle/fluid/tests/unittests/test_linear_chain_crf_op.py b/python/paddle/fluid/tests/unittests/test_linear_chain_crf_op.py index 696d0ab4fa8..0d0286e104c 100644 --- a/python/paddle/fluid/tests/unittests/test_linear_chain_crf_op.py +++ b/python/paddle/fluid/tests/unittests/test_linear_chain_crf_op.py @@ -16,7 +16,7 @@ import unittest import random import numpy as np -from op_test import OpTest +from .op_test import OpTest class LinearChainCrfForward(object): diff --git a/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py b/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py index 1cdc6950104..9f24acb569b 100644 --- a/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py +++ b/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py @@ -20,7 +20,7 @@ import subprocess import time import unittest from multiprocessing import Process -from op_test import OpTest +from .op_test import OpTest def run_pserver(use_cuda, sync_mode, ip, port, trainers, trainer_id): diff --git a/python/paddle/fluid/tests/unittests/test_lod_rank_table.py b/python/paddle/fluid/tests/unittests/test_lod_rank_table.py index 16e85830ffa..d53ead381d3 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_rank_table.py +++ b/python/paddle/fluid/tests/unittests/test_lod_rank_table.py @@ -36,7 +36,7 @@ class TestLoDRankTable(unittest.TestCase): exe.run(scope=scope, feed={'x': tensor}) var = scope.find_var(rank_table.name) table = var.get_lod_rank_table() - self.assertEqual([(0, 5), (1, 1), (2, 1)], table.items()) + self.assertEqual([(0, 5), (1, 1), (2, 1)], list(table.items())) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_lod_reset_op.py b/python/paddle/fluid/tests/unittests/test_lod_reset_op.py index 77905c4b964..3ca440e1c4d 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_reset_op.py +++ b/python/paddle/fluid/tests/unittests/test_lod_reset_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestLodResetOpByAttr(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py b/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py index 118c22fbb1f..0ac6d9b81df 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py +++ b/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py @@ -24,7 +24,7 @@ class TestLoDTensorArray(unittest.TestCase): tensor_array = arr.get_lod_tensor_array() self.assertEqual(0, len(tensor_array)) cpu = core.CPUPlace() - for i in xrange(10): + for i in range(10): t = core.LoDTensor() t.set(numpy.array([i], dtype='float32'), cpu) t.set_recursive_sequence_lengths([[1]]) @@ -32,7 +32,7 @@ class TestLoDTensorArray(unittest.TestCase): self.assertEqual(10, len(tensor_array)) - for i in xrange(10): + for i in range(10): t = tensor_array[i] self.assertEqual(numpy.array(t), numpy.array([i], dtype='float32')) self.assertEqual([[1]], t.recursive_sequence_lengths()) diff --git 
a/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py b/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py index 5a4580116bc..9789ff4af64 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py +++ b/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py @@ -35,8 +35,10 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): tensor.set( numpy.arange(10).reshape(10, 1).astype('int32'), self.place()) tensor.set_recursive_sequence_lengths([[3, 6, 1]]) - expect = map(lambda x: numpy.array(x).astype('int32'), - [[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]]) + expect = [ + numpy.array(x).astype('int32') + for x in [[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]] + ] self.main( tensor=tensor, expect_array=expect, @@ -48,8 +50,10 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): tensor.set( numpy.arange(10).reshape(10, 1).astype('int32'), self.place()) tensor.set_recursive_sequence_lengths([[3, 6, 0, 1]]) - expect = map(lambda x: numpy.array(x).astype('int32'), - [[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]]) + expect = [ + numpy.array(x).astype('int32') + for x in [[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]] + ] self.main( tensor=tensor, expect_array=expect, @@ -111,8 +115,8 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): expect = [ numpy.array( item, dtype='int32') - for item in [[21, 0, 1, 2, 3, 4, 5, 6, 46, 47, 48, 49], range( - 22, 39) + range(7, 21), range(39, 46)] + for item in [[21, 0, 1, 2, 3, 4, 5, 6, 46, 47, 48, 49], list( + range(22, 39)) + list(range(7, 21)), list(range(39, 46))] ] lod = [[[1, 2, 1], [1, 3, 4, 4]], [[4, 3], [1, 4, 4, 8, 4, 6, 4]], [[2], [6, 1]]] diff --git a/python/paddle/fluid/tests/unittests/test_log_loss_op.py b/python/paddle/fluid/tests/unittests/test_log_loss_op.py index d3980b8db93..033442bcfb0 100644 --- a/python/paddle/fluid/tests/unittests/test_log_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_log_loss_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestLogLossOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_logical_op.py b/python/paddle/fluid/tests/unittests/test_logical_op.py index 1d7dfe60f20..6bf93b1a38b 100644 --- a/python/paddle/fluid/tests/unittests/test_logical_op.py +++ b/python/paddle/fluid/tests/unittests/test_logical_op.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import op_test +from . 
import op_test import unittest import numpy as np diff --git a/python/paddle/fluid/tests/unittests/test_lookup_sparse_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_sparse_table_op.py index aa9eae1e882..bf3e7420aec 100644 --- a/python/paddle/fluid/tests/unittests/test_lookup_sparse_table_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_sparse_table_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest import paddle.fluid.core as core from paddle.fluid.op import Operator diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py index f8d5785fbfe..bb69e3a9afc 100644 --- a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest import paddle.fluid.core as core from paddle.fluid.op import Operator @@ -40,7 +40,7 @@ class TestLookupTableOpWithPadding(TestLookupTableOp): ids = np.squeeze(self.inputs['Ids']) padding_idx = np.random.choice(ids, 1)[0] self.outputs['Out'][ids == padding_idx] = np.zeros(31) - self.attrs = {'padding_idx': long(padding_idx)} + self.attrs = {'padding_idx': int(padding_idx)} self.check_output() def test_check_grad(self): diff --git a/python/paddle/fluid/tests/unittests/test_lrn_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_lrn_mkldnn_op.py index 966a16dc870..b82e438f121 100644 --- a/python/paddle/fluid/tests/unittests/test_lrn_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_lrn_mkldnn_op.py @@ -13,7 +13,7 @@ # limitations under the License. import unittest -from test_lrn_op import TestLRNOp +from .test_lrn_op import TestLRNOp class TestLRNMKLDNNOp(TestLRNOp): diff --git a/python/paddle/fluid/tests/unittests/test_lrn_op.py b/python/paddle/fluid/tests/unittests/test_lrn_op.py index eaff45cbb2a..a7539e43ecc 100644 --- a/python/paddle/fluid/tests/unittests/test_lrn_op.py +++ b/python/paddle/fluid/tests/unittests/test_lrn_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestLRNOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_lstm_op.py b/python/paddle/fluid/tests/unittests/test_lstm_op.py index 705a24bd8f3..421e1cb8aa8 100644 --- a/python/paddle/fluid/tests/unittests/test_lstm_op.py +++ b/python/paddle/fluid/tests/unittests/test_lstm_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest SIGMOID_THRESHOLD_MIN = -40.0 SIGMOID_THRESHOLD_MAX = 13.0 diff --git a/python/paddle/fluid/tests/unittests/test_lstm_unit_op.py b/python/paddle/fluid/tests/unittests/test_lstm_unit_op.py index e343265874f..fd3e2f24ae8 100644 --- a/python/paddle/fluid/tests/unittests/test_lstm_unit_op.py +++ b/python/paddle/fluid/tests/unittests/test_lstm_unit_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def sigmoid_np(x): diff --git a/python/paddle/fluid/tests/unittests/test_lstmp_op.py b/python/paddle/fluid/tests/unittests/test_lstmp_op.py index ed2262da4bc..0527b9ac6a3 100644 --- a/python/paddle/fluid/tests/unittests/test_lstmp_op.py +++ b/python/paddle/fluid/tests/unittests/test_lstmp_op.py @@ -13,7 +13,7 @@ #limitations under the License. import unittest import numpy as np -import test_lstm_op as LstmTest +from . 
import test_lstm_op as LstmTest ACTIVATION = { 'identity': LstmTest.identity, diff --git a/python/paddle/fluid/tests/unittests/test_margin_rank_loss_op.py b/python/paddle/fluid/tests/unittests/test_margin_rank_loss_op.py index 97c112487fd..d4e2cee0888 100644 --- a/python/paddle/fluid/tests/unittests/test_margin_rank_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_margin_rank_loss_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestMarginRankLossOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_math_op_patch.py b/python/paddle/fluid/tests/unittests/test_math_op_patch.py index 852a80261e0..73419cedc4b 100644 --- a/python/paddle/fluid/tests/unittests/test_math_op_patch.py +++ b/python/paddle/fluid/tests/unittests/test_math_op_patch.py @@ -13,7 +13,7 @@ # limitations under the License. import unittest -import decorators +from . import decorators import paddle.fluid as fluid import numpy diff --git a/python/paddle/fluid/tests/unittests/test_matmul_op.py b/python/paddle/fluid/tests/unittests/test_matmul_op.py index cae2c8fa87d..6d7c015ce4e 100644 --- a/python/paddle/fluid/tests/unittests/test_matmul_op.py +++ b/python/paddle/fluid/tests/unittests/test_matmul_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def generate_compatible_shapes(dim_X, dim_Y, transpose_X, transpose_Y): diff --git a/python/paddle/fluid/tests/unittests/test_maxout_op.py b/python/paddle/fluid/tests/unittests/test_maxout_op.py index f5ddf72516b..1bea7e58d3d 100644 --- a/python/paddle/fluid/tests/unittests/test_maxout_op.py +++ b/python/paddle/fluid/tests/unittests/test_maxout_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def maxout_forward_naive(input, groups): diff --git a/python/paddle/fluid/tests/unittests/test_mean_iou.py b/python/paddle/fluid/tests/unittests/test_mean_iou.py index 64d42b693bf..986bfa1d35b 100644 --- a/python/paddle/fluid/tests/unittests/test_mean_iou.py +++ b/python/paddle/fluid/tests/unittests/test_mean_iou.py @@ -15,7 +15,7 @@ from __future__ import division import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def compute_mean_iou(predictions, labels, num_classes, in_wrongs, in_corrects, @@ -80,7 +80,7 @@ class TestMeanIOUOp(OpTest): 'InCorrects': in_corrects, 'InMeanIou': in_mean_ious } - self.attrs = {'num_classes': long(self.num_classes)} + self.attrs = {'num_classes': int(self.num_classes)} mean_iou, out_wrong, out_correct = compute_mean_iou( predictions, labels, self.num_classes, in_wrongs, in_corrects, in_mean_ious) diff --git a/python/paddle/fluid/tests/unittests/test_mean_op.py b/python/paddle/fluid/tests/unittests/test_mean_op.py index 15472a8fc47..9fa1b993a32 100644 --- a/python/paddle/fluid/tests/unittests/test_mean_op.py +++ b/python/paddle/fluid/tests/unittests/test_mean_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestMeanOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_merge_ids_op.py b/python/paddle/fluid/tests/unittests/test_merge_ids_op.py index f209bdf30fa..56c4872ad42 100644 --- a/python/paddle/fluid/tests/unittests/test_merge_ids_op.py +++ b/python/paddle/fluid/tests/unittests/test_merge_ids_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class 
TestMergeIdsOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_mine_hard_examples_op.py b/python/paddle/fluid/tests/unittests/test_mine_hard_examples_op.py index 54ee85c1a7a..ba495959841 100644 --- a/python/paddle/fluid/tests/unittests/test_mine_hard_examples_op.py +++ b/python/paddle/fluid/tests/unittests/test_mine_hard_examples_op.py @@ -16,7 +16,7 @@ import unittest import numpy as np import sys import math -from op_test import OpTest +from .op_test import OpTest class TestMineHardExamplesOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_minus_op.py b/python/paddle/fluid/tests/unittests/test_minus_op.py index ee32bd49925..d238b8102b4 100644 --- a/python/paddle/fluid/tests/unittests/test_minus_op.py +++ b/python/paddle/fluid/tests/unittests/test_minus_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestMinusOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_modified_huber_loss_op.py b/python/paddle/fluid/tests/unittests/test_modified_huber_loss_op.py index 62035efe8ec..df4fc8ce92e 100644 --- a/python/paddle/fluid/tests/unittests/test_modified_huber_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_modified_huber_loss_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def modified_huber_loss_forward(val): diff --git a/python/paddle/fluid/tests/unittests/test_momentum_op.py b/python/paddle/fluid/tests/unittests/test_momentum_op.py index c75d3bd276a..1a3d655ab9b 100644 --- a/python/paddle/fluid/tests/unittests/test_momentum_op.py +++ b/python/paddle/fluid/tests/unittests/test_momentum_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestMomentumOp1(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_mul_op.py b/python/paddle/fluid/tests/unittests/test_mul_op.py index bbc782c1bce..ccec6060a8f 100644 --- a/python/paddle/fluid/tests/unittests/test_mul_op.py +++ b/python/paddle/fluid/tests/unittests/test_mul_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest +from .op_test import OpTest class TestMulOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py index aacd8ae45af..2a645e42b0e 100644 --- a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py +++ b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np import copy -from op_test import OpTest +from .op_test import OpTest def iou(box_a, box_b): @@ -112,7 +112,7 @@ def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold, if keep_top_k > -1 and num_det > keep_top_k: score_index = [] - for c, indices in selected_indices.iteritems(): + for c, indices in selected_indices.items(): for idx in indices: score_index.append((scores[c][idx], c, idx)) @@ -143,7 +143,7 @@ def batched_multiclass_nms(boxes, scores, background, score_threshold, lod.append(nmsed_num) if nmsed_num == 0: continue - for c, indices in nmsed_outs.iteritems(): + for c, indices in nmsed_outs.items(): for idx in indices: xmin, ymin, xmax, ymax = boxes[n][idx][:] det_outs.append([c, scores[n][c][idx], xmin, ymin, xmax, ymax]) diff --git a/python/paddle/fluid/tests/unittests/test_multiplex_op.py b/python/paddle/fluid/tests/unittests/test_multiplex_op.py index 
03cad8b43ba..7e8717ebd51 100644 --- a/python/paddle/fluid/tests/unittests/test_multiplex_op.py +++ b/python/paddle/fluid/tests/unittests/test_multiplex_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestMultiplexOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_nce.py b/python/paddle/fluid/tests/unittests/test_nce.py index 76ecc8ba08b..5aa08fafcdf 100644 --- a/python/paddle/fluid/tests/unittests/test_nce.py +++ b/python/paddle/fluid/tests/unittests/test_nce.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def nce(input, weight, bias, sample_weight, labels, num_classes, @@ -66,7 +66,7 @@ class TestNCE(OpTest): self.attrs = { 'num_total_classes': num_classes, 'num_neg_samples': num_neg_samples, - 'custom_neg_classes': range(num_neg_samples) + 'custom_neg_classes': list(range(num_neg_samples)) } self.inputs = { 'Input': input, diff --git a/python/paddle/fluid/tests/unittests/test_norm_op.py b/python/paddle/fluid/tests/unittests/test_norm_op.py index 108a665f37f..84ef777b760 100644 --- a/python/paddle/fluid/tests/unittests/test_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_norm_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def l2_norm(x, axis, epsilon): diff --git a/python/paddle/fluid/tests/unittests/test_one_hot_op.py b/python/paddle/fluid/tests/unittests/test_one_hot_op.py index d13f2b3afde..ab63ba226ab 100644 --- a/python/paddle/fluid/tests/unittests/test_one_hot_op.py +++ b/python/paddle/fluid/tests/unittests/test_one_hot_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np import math -from op_test import OpTest +from .op_test import OpTest import paddle.fluid as fluid import paddle.fluid.core as core import paddle.fluid.framework as framework @@ -28,13 +28,13 @@ class TestOneHotOp(OpTest): depth = 10 dimension = 12 x_lod = [[4, 1, 3, 3]] - x = [np.random.randint(0, depth - 1) for i in xrange(sum(x_lod[0]))] + x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int').reshape([sum(x_lod[0]), 1]) out = np.zeros(shape=(np.product(x.shape[:-1]), depth)).astype('float32') - for i in xrange(np.product(x.shape)): + for i in range(np.product(x.shape)): out[i, x[i]] = 1.0 self.inputs = {'X': (x, x_lod)} @@ -51,13 +51,13 @@ class TestOneHotOp_default_dtype(OpTest): depth = 10 dimension = 12 x_lod = [[4, 1, 3, 3]] - x = [np.random.randint(0, depth - 1) for i in xrange(sum(x_lod[0]))] + x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int').reshape([sum(x_lod[0]), 1]) out = np.zeros(shape=(np.product(x.shape[:-1]), depth)).astype('float32') - for i in xrange(np.product(x.shape)): + for i in range(np.product(x.shape)): out[i, x[i]] = 1.0 self.inputs = {'X': (x, x_lod)} @@ -76,7 +76,7 @@ class TestOneHotOp_exception(OpTest): self.dimension = 12 self.x = core.LoDTensor() x_lod = [[4, 1, 3, 3]] - data = [np.random.randint(11, 20) for i in xrange(sum(x_lod[0]))] + data = [np.random.randint(11, 20) for i in range(sum(x_lod[0]))] data = np.array(data).astype('int').reshape([sum(x_lod[0]), 1]) self.x.set(data, self.place) self.x.set_recursive_sequence_lengths(x_lod) diff --git a/python/paddle/fluid/tests/unittests/test_pad_op.py b/python/paddle/fluid/tests/unittests/test_pad_op.py index 300f3ffcb8d..deb7637f2b8 100644 --- a/python/paddle/fluid/tests/unittests/test_pad_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_pad_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestPadOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py index 63fb58c6927..d17e493c36a 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py @@ -167,10 +167,10 @@ class TestCRFModel(unittest.TestCase): place=fluid.CPUPlace()) data = train_data() - for i in xrange(10): + for i in range(10): cur_batch = next(data) - print pe.run(feed=feeder.feed(cur_batch), - fetch_list=[avg_cost.name])[0] + print(pe.run(feed=feeder.feed(cur_batch), + fetch_list=[avg_cost.name])[0]) @unittest.skip(reason="CI hangs") def test_update_sparse_parameter_all_reduce(self): diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py index 60d63364d5f..a43f2e7c49c 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py @@ -71,7 +71,7 @@ class TestFetchOp(unittest.TestCase): fetch_list = [] all_vars = main.global_block().vars - for k, v in all_vars.iteritems(): + for k, v in all_vars.items(): if 'tmp' not in k and k[0] is not '_' or v.persistable: fetch_list.append(k) @@ -90,7 +90,7 @@ class TestFetchOp(unittest.TestCase): iters = 3 train_inputs = [] for i in range(iters): - train_inputs.append(tst_reader_iter.next()) + train_inputs.append(next(tst_reader_iter)) os.environ['CPU_NUM'] = str(4) if core.is_compiled_with_cuda(): @@ -133,7 +133,7 @@ class TestFeedParallel(unittest.TestCase): for batch_id, data in enumerate(reader()): loss_np = pe.run(feed=data, fetch_list=[loss.name])[0] - print batch_id, loss_np + print(batch_id, loss_np) if batch_id == 2: break diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py index 76389d916fc..dfbf9eea510 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
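The parallel-executor hunks above bundle several Python 3 iteration changes: print becomes a function, dict.iteritems() is dropped in favour of items(), and iterator.next() becomes the builtin next(). A minimal sketch with stand-in data rather than the real fetch variables:

all_vars = {'fc_0.w_0': object(), '_tmp_1': object()}
fetch_list = [k for k, v in all_vars.items() if 'tmp' not in k and k[0] != '_']

reader_iter = iter([[1.0], [2.0], [3.0]])   # stand-in for a paddle reader iterator
first_batch = next(reader_iter)             # was reader_iter.next() on Python 2
print(fetch_list, first_batch)              # ['fc_0.w_0'] [1.0]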
-from parallel_executor_test_base import TestParallelExecutorBase +from .parallel_executor_test_base import TestParallelExecutorBase import paddle.fluid as fluid import paddle.fluid.core as core import numpy as np @@ -37,7 +37,7 @@ def simple_fc_net(use_feed): reader = fluid.layers.io.double_buffer(reader) img, label = fluid.layers.read_file(reader) hidden = img - for _ in xrange(4): + for _ in range(4): hidden = fluid.layers.fc( hidden, size=200, @@ -64,7 +64,7 @@ def fc_with_batchnorm(use_feed): img, label = fluid.layers.read_file(reader) hidden = img - for _ in xrange(1): + for _ in range(1): hidden = fluid.layers.fc( hidden, size=200, @@ -131,9 +131,9 @@ class TestMNIST(TestParallelExecutorBase): use_reduce=True) for loss in zip(all_reduce_first_loss, reduce_first_loss): - self.assertAlmostEquals(loss[0], loss[1], delta=1e-6) + self.assertAlmostEqual(loss[0], loss[1], delta=1e-6) for loss in zip(all_reduce_last_loss, reduce_last_loss): - self.assertAlmostEquals(loss[0], loss[1], delta=1e-4) + self.assertAlmostEqual(loss[0], loss[1], delta=1e-4) # simple_fc def check_simple_fc_convergence(self, use_cuda, use_reduce=False): @@ -184,9 +184,9 @@ class TestMNIST(TestParallelExecutorBase): use_parallel_executor=True) for p_f in parallel_first_loss: - self.assertAlmostEquals(p_f, single_first_loss[0], delta=1e-6) + self.assertAlmostEqual(p_f, single_first_loss[0], delta=1e-6) for p_l in parallel_last_loss: - self.assertAlmostEquals(p_l, single_last_loss[0], delta=1e-6) + self.assertAlmostEqual(p_l, single_last_loss[0], delta=1e-6) def test_simple_fc_parallel_accuracy(self): self.check_simple_fc_parallel_accuracy(True) diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py index 834e920845f..cabfecf62e8 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py @@ -17,7 +17,7 @@ import paddle.fluid.layers.ops as ops from paddle.fluid.initializer import init_on_cpu from paddle.fluid.layers.learning_rate_scheduler import _decay_step_counter import paddle.fluid.core as core -from parallel_executor_test_base import TestParallelExecutorBase +from .parallel_executor_test_base import TestParallelExecutorBase import unittest import math import os @@ -191,9 +191,9 @@ class TestResnet(TestParallelExecutorBase): optimizer=_optimizer) for p_f in parallel_first_loss: - self.assertAlmostEquals(p_f, single_first_loss[0], delta=1e-6) + self.assertAlmostEqual(p_f, single_first_loss[0], delta=1e-6) for p_l in parallel_last_loss: - self.assertAlmostEquals(p_l, single_last_loss[0], delta=1e-6) + self.assertAlmostEqual(p_l, single_last_loss[0], delta=1e-6) def test_seresnext_with_learning_rate_decay(self): self.check_resnet_convergence_with_learning_rate_decay(True, False) diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py index 7688b8495d7..fcb5947ff05 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py @@ -25,7 +25,7 @@ def simple_fc_net(): img = fluid.layers.data(name='image', shape=[784], dtype='float32') label = fluid.layers.data(name='label', shape=[1], dtype='int64') hidden = img - for _ in xrange(4): + for _ in range(4): hidden = fluid.layers.fc( hidden, 
size=200, @@ -71,7 +71,7 @@ class ParallelExecutorTestingDuringTraining(unittest.TestCase): share_vars_from=train_exe, build_strategy=build_strategy) - for i in xrange(5): + for i in range(5): test_loss, = test_exe.run([loss.name], feed=feed_dict) train_loss, = train_exe.run([loss.name], feed=feed_dict) diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_transformer.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_transformer.py index b6215fddb11..7717de0218d 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_transformer.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_transformer.py @@ -13,9 +13,9 @@ # limitations under the License. import paddle.fluid as fluid -import transformer_model +from . import transformer_model import numpy as np -from parallel_executor_test_base import TestParallelExecutorBase +from .parallel_executor_test_base import TestParallelExecutorBase import unittest import paddle import paddle.dataset.wmt16 as wmt16 diff --git a/python/paddle/fluid/tests/unittests/test_parallel_op.py b/python/paddle/fluid/tests/unittests/test_parallel_op.py index 18309f45770..fb2c9d41e41 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_op.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_op.py @@ -102,7 +102,7 @@ class BaseParallelForTest(unittest.TestCase): Fetched numpy arrays. """ - if isinstance(fetch, basestring): + if isinstance(fetch, str): fetch = [fetch] main = fluid.Program() startup = fluid.Program() @@ -124,7 +124,7 @@ class BaseParallelForTest(unittest.TestCase): data = [data] with pd.do(): - ins = map(pd.read_input, data) + ins = list(map(pd.read_input, data)) if len(ins) == 1: ins = ins[0] loss = generator.send(ins) # patch input diff --git a/python/paddle/fluid/tests/unittests/test_polygon_box_transform.py b/python/paddle/fluid/tests/unittests/test_polygon_box_transform.py index 2105d320665..d6e75e4a834 100644 --- a/python/paddle/fluid/tests/unittests/test_polygon_box_transform.py +++ b/python/paddle/fluid/tests/unittests/test_polygon_box_transform.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def PolygonBoxRestore(input): @@ -23,9 +23,9 @@ def PolygonBoxRestore(input): geo_channels = shape[1] h = shape[2] w = shape[3] - h_indexes = np.array(range(h) * w).reshape( + h_indexes = np.array(list(range(h)) * w).reshape( [w, h]).transpose()[np.newaxis, :] # [1, h, w] - w_indexes = np.array(range(w) * h).reshape( + w_indexes = np.array(list(range(w)) * h).reshape( [h, w])[np.newaxis, :] # [1, h, w] indexes = np.concatenate( (w_indexes, h_indexes))[np.newaxis, :] # [1, 2, h, w] diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_mkldnn_op.py index 003ebba18b2..eba4bc310a4 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_mkldnn_op.py @@ -13,7 +13,7 @@ # limitations under the License. 
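Two more Python 3 changes appear in the hunks above and below: map() now returns a lazy iterator, so results that get indexed or measured with len() must be wrapped in list(), and basestring is gone, leaving str as the string type to test against. A minimal sketch with hypothetical values:

data = [[1, 2], [3, 4]]
ins = list(map(lambda d: [x * 2 for x in d], data))   # a bare map() has no len() on Python 3
if len(ins) == 1:
    ins = ins[0]

fetch = "loss"
if isinstance(fetch, str):                            # basestring no longer exists
    fetch = [fetch]

h_indexes = list(range(3)) * 2                        # range * int is a TypeError; list() first
print(ins, fetch, h_indexes)                          # [[2, 4], [6, 8]] ['loss'] [0, 1, 2, 0, 1, 2]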
import unittest -from test_pool2d_op import TestPool2d_Op, TestCase1, TestCase2, TestCase3, TestCase4, TestCase5 +from .test_pool2d_op import TestPool2d_Op, TestCase1, TestCase2, TestCase3, TestCase4, TestCase5 class TestMKLDNNCase1(TestPool2d_Op): diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_op.py index f7e1e857329..b66fb7201ba 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_op.py @@ -16,7 +16,7 @@ import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest +from .op_test import OpTest def max_pool2D_forward_naive(x, @@ -35,8 +35,8 @@ def max_pool2D_forward_naive(x, ) / strides[1] + 1 if ceil_mode else (W - ksize[1] + 2 * paddings[1]) / strides[1] + 1 out = np.zeros((N, C, H_out, W_out)) - for i in xrange(H_out): - for j in xrange(W_out): + for i in range(H_out): + for j in range(W_out): r_start = np.max((i * strides[0] - paddings[0], 0)) r_end = np.min((i * strides[0] + ksize[0] - paddings[0], H)) c_start = np.max((j * strides[1] - paddings[1], 0)) @@ -63,8 +63,8 @@ def avg_pool2D_forward_naive(x, ) / strides[1] + 1 if ceil_mode else (W - ksize[1] + 2 * paddings[1]) / strides[1] + 1 out = np.zeros((N, C, H_out, W_out)) - for i in xrange(H_out): - for j in xrange(W_out): + for i in range(H_out): + for j in range(W_out): r_start = np.max((i * strides[0] - paddings[0], 0)) r_end = np.min((i * strides[0] + ksize[0] - paddings[0], H)) c_start = np.max((j * strides[1] - paddings[1], 0)) diff --git a/python/paddle/fluid/tests/unittests/test_pool3d_op.py b/python/paddle/fluid/tests/unittests/test_pool3d_op.py index 142165f29be..326da90eeed 100644 --- a/python/paddle/fluid/tests/unittests/test_pool3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool3d_op.py @@ -16,7 +16,7 @@ import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest +from .op_test import OpTest def max_pool3D_forward_naive(x, @@ -38,13 +38,13 @@ def max_pool3D_forward_naive(x, ) / strides[2] + 1 if ceil_mode else (W - ksize[2] + 2 * paddings[2]) / strides[2] + 1 out = np.zeros((N, C, D_out, H_out, W_out)) - for k in xrange(D_out): + for k in range(D_out): d_start = np.max((k * strides[0] - paddings[0], 0)) d_end = np.min((k * strides[0] + ksize[0] - paddings[0], D)) - for i in xrange(H_out): + for i in range(H_out): h_start = np.max((i * strides[0] - paddings[0], 0)) h_end = np.min((i * strides[0] + ksize[0] - paddings[0], H)) - for j in xrange(W_out): + for j in range(W_out): w_start = np.max((j * strides[1] - paddings[1], 0)) w_end = np.min((j * strides[1] + ksize[1] - paddings[1], W)) x_masked = x[:, :, d_start:d_end, h_start:h_end, w_start:w_end] @@ -72,13 +72,13 @@ def avg_pool3D_forward_naive(x, ) / strides[2] + 1 if ceil_mode else (W - ksize[2] + 2 * paddings[2]) / strides[2] + 1 out = np.zeros((N, C, D_out, H_out, W_out)) - for k in xrange(D_out): + for k in range(D_out): d_start = np.max((k * strides[0] - paddings[0], 0)) d_end = np.min((k * strides[0] + ksize[0] - paddings[0], D)) - for i in xrange(H_out): + for i in range(H_out): h_start = np.max((i * strides[0] - paddings[0], 0)) h_end = np.min((i * strides[0] + ksize[0] - paddings[0], H)) - for j in xrange(W_out): + for j in range(W_out): w_start = np.max((j * strides[1] - paddings[1], 0)) w_end = np.min((j * strides[1] + ksize[1] - paddings[1], W)) x_masked = x[:, :, d_start:d_end, h_start:h_end, w_start:w_end] diff --git 
a/python/paddle/fluid/tests/unittests/test_pool_max_op.py b/python/paddle/fluid/tests/unittests/test_pool_max_op.py index cf9b7639224..486c6eb24ad 100644 --- a/python/paddle/fluid/tests/unittests/test_pool_max_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool_max_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def max_pool3D_forward_naive(x, ksize, strides, paddings, global_pool=False): @@ -29,21 +29,21 @@ def max_pool3D_forward_naive(x, ksize, strides, paddings, global_pool=False): W_out = (W - ksize[2] + 2 * paddings[2]) / strides[2] + 1 out = np.zeros((N, C, D_out, H_out, W_out)) mask = np.zeros((N, C, D_out, H_out, W_out)) - for k in xrange(D_out): + for k in range(D_out): d_start = np.max((k * strides[0] - paddings[0], 0)) d_end = np.min((k * strides[0] + ksize[0] - paddings[0], D)) - for i in xrange(H_out): + for i in range(H_out): h_start = np.max((i * strides[0] - paddings[0], 0)) h_end = np.min((i * strides[0] + ksize[0] - paddings[0], H)) - for j in xrange(W_out): + for j in range(W_out): w_start = np.max((j * strides[1] - paddings[1], 0)) w_end = np.min((j * strides[1] + ksize[1] - paddings[1], W)) x_masked = x[:, :, d_start:d_end, h_start:h_end, w_start:w_end] out[:, :, k, i, j] = np.max(x_masked, axis=(2, 3, 4)) - for n in xrange(N): - for c in xrange(C): + for n in range(N): + for c in range(C): arr = x_masked[n, c, :, :, :] index = np.where(arr == np.max(arr)) sub_deep = index[0][0] @@ -67,8 +67,8 @@ def max_pool2D_forward_naive(x, ksize, strides, paddings, global_pool=False): W_out = (W - ksize[1] + 2 * paddings[1]) / strides[1] + 1 out = np.zeros((N, C, H_out, W_out)) mask = np.zeros((N, C, H_out, W_out)) - for i in xrange(H_out): - for j in xrange(W_out): + for i in range(H_out): + for j in range(W_out): r_start = np.max((i * strides[0] - paddings[0], 0)) r_end = np.min((i * strides[0] + ksize[0] - paddings[0], H)) c_start = np.max((j * strides[1] - paddings[1], 0)) @@ -77,8 +77,8 @@ def max_pool2D_forward_naive(x, ksize, strides, paddings, global_pool=False): out[:, :, i, j] = np.max(x_masked, axis=(2, 3)) - for n in xrange(N): - for c in xrange(C): + for n in range(N): + for c in range(C): arr = x_masked[n, c, :, :] index = np.where(arr == np.max(arr)) sub_row = index[0][0] diff --git a/python/paddle/fluid/tests/unittests/test_positive_negative_pair_op.py b/python/paddle/fluid/tests/unittests/test_positive_negative_pair_op.py index 091cfc9c727..0ce32390de1 100644 --- a/python/paddle/fluid/tests/unittests/test_positive_negative_pair_op.py +++ b/python/paddle/fluid/tests/unittests/test_positive_negative_pair_op.py @@ -15,7 +15,7 @@ import unittest import itertools import numpy as np -from op_test import OpTest +from .op_test import OpTest def py_pnpair_op(score, label, query, column=-1, weight=None): @@ -32,7 +32,7 @@ def py_pnpair_op(score, label, query, column=-1, weight=None): # accumulate statistics pos, neg, neu = 0, 0, 0 - for _, ranks in predictions.items(): + for _, ranks in list(predictions.items()): for e1, e2 in itertools.combinations(ranks, 2): s1, s2, l1, l2, w1, w2 = e1[0], e2[0], e1[1], e2[1], e1[2], e2[2] w = (w1 + w2) * 0.5 diff --git a/python/paddle/fluid/tests/unittests/test_precision_recall_op.py b/python/paddle/fluid/tests/unittests/test_precision_recall_op.py index 7830ba29583..3fb231f1077 100644 --- a/python/paddle/fluid/tests/unittests/test_precision_recall_op.py +++ b/python/paddle/fluid/tests/unittests/test_precision_recall_op.py @@ -14,7 +14,7 @@ import unittest import 
numpy as np -from op_test import OpTest +from .op_test import OpTest def calc_precision(tp_count, fp_count): @@ -39,19 +39,19 @@ def get_states(idxs, labels, cls_num, weights=None): ins_num = idxs.shape[0] # TP FP TN FN states = np.zeros((cls_num, 4)).astype('float32') - for i in xrange(ins_num): + for i in range(ins_num): w = weights[i] if weights is not None else 1.0 idx = idxs[i][0] label = labels[i][0] if idx == label: states[idx][0] += w - for j in xrange(cls_num): + for j in range(cls_num): states[j][2] += w states[idx][2] -= w else: states[label][3] += w states[idx][1] += w - for j in xrange(cls_num): + for j in range(cls_num): states[j][2] += w states[label][2] -= w states[idx][2] -= w @@ -64,7 +64,7 @@ def compute_metrics(states, cls_num): total_fn_count = 0.0 macro_avg_precision = 0.0 macro_avg_recall = 0.0 - for i in xrange(cls_num): + for i in range(cls_num): total_tp_count += states[i][0] total_fp_count += states[i][1] total_fn_count += states[i][3] @@ -90,9 +90,9 @@ class TestPrecisionRecallOp_0(OpTest): ins_num = 64 cls_num = 10 max_probs = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32') - idxs = np.random.choice(xrange(cls_num), ins_num).reshape( + idxs = np.random.choice(range(cls_num), ins_num).reshape( (ins_num, 1)).astype('int32') - labels = np.random.choice(xrange(cls_num), ins_num).reshape( + labels = np.random.choice(range(cls_num), ins_num).reshape( (ins_num, 1)).astype('int32') states = get_states(idxs, labels, cls_num) metrics = compute_metrics(states, cls_num) @@ -117,10 +117,10 @@ class TestPrecisionRecallOp_1(OpTest): ins_num = 64 cls_num = 10 max_probs = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32') - idxs = np.random.choice(xrange(cls_num), ins_num).reshape( + idxs = np.random.choice(range(cls_num), ins_num).reshape( (ins_num, 1)).astype('int32') weights = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32') - labels = np.random.choice(xrange(cls_num), ins_num).reshape( + labels = np.random.choice(range(cls_num), ins_num).reshape( (ins_num, 1)).astype('int32') states = get_states(idxs, labels, cls_num, weights) @@ -151,10 +151,10 @@ class TestPrecisionRecallOp_2(OpTest): ins_num = 64 cls_num = 10 max_probs = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32') - idxs = np.random.choice(xrange(cls_num), ins_num).reshape( + idxs = np.random.choice(range(cls_num), ins_num).reshape( (ins_num, 1)).astype('int32') weights = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32') - labels = np.random.choice(xrange(cls_num), ins_num).reshape( + labels = np.random.choice(range(cls_num), ins_num).reshape( (ins_num, 1)).astype('int32') states = np.random.randint(0, 30, (cls_num, 4)).astype('float32') diff --git a/python/paddle/fluid/tests/unittests/test_prelu_op.py b/python/paddle/fluid/tests/unittests/test_prelu_op.py index ae19a553bb8..1c7e32ee6f5 100644 --- a/python/paddle/fluid/tests/unittests/test_prelu_op.py +++ b/python/paddle/fluid/tests/unittests/test_prelu_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class PReluTest(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_prior_box_op.py b/python/paddle/fluid/tests/unittests/test_prior_box_op.py index e15554737b9..28400b26b11 100644 --- a/python/paddle/fluid/tests/unittests/test_prior_box_op.py +++ b/python/paddle/fluid/tests/unittests/test_prior_box_op.py @@ -16,7 +16,7 @@ import unittest import numpy as np import sys import math -from op_test import OpTest +from .op_test import OpTest class 
TestPriorBoxOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_protobuf_descs.py b/python/paddle/fluid/tests/unittests/test_protobuf_descs.py index f75a79bfa42..621dd681345 100644 --- a/python/paddle/fluid/tests/unittests/test_protobuf_descs.py +++ b/python/paddle/fluid/tests/unittests/test_protobuf_descs.py @@ -183,7 +183,7 @@ class TestBlockDesc(unittest.TestCase): op2 = block.append_op() op0 = block._prepend_op() all_ops = [] - for idx in xrange(0, block.op_size()): + for idx in range(0, block.op_size()): all_ops.append(block.op(idx)) self.assertEqual(all_ops, [op0, op1, op2]) @@ -205,7 +205,7 @@ class TestBlockDesc(unittest.TestCase): program._sync_with_cpp() all_ops = [] - for idx in xrange(0, block.op_size()): + for idx in range(0, block.op_size()): all_ops.append(block.op(idx)) self.assertEqual(all_ops, [op0, op2]) diff --git a/python/paddle/fluid/tests/unittests/test_proximal_adagrad_op.py b/python/paddle/fluid/tests/unittests/test_proximal_adagrad_op.py index 3c268958506..ddeb2cd6de2 100644 --- a/python/paddle/fluid/tests/unittests/test_proximal_adagrad_op.py +++ b/python/paddle/fluid/tests/unittests/test_proximal_adagrad_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestProximalAdagradOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_proximal_gd_op.py b/python/paddle/fluid/tests/unittests/test_proximal_gd_op.py index 137594b9a08..53ad2a921f0 100644 --- a/python/paddle/fluid/tests/unittests/test_proximal_gd_op.py +++ b/python/paddle/fluid/tests/unittests/test_proximal_gd_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestProximalGDOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_random_crop_op.py b/python/paddle/fluid/tests/unittests/test_random_crop_op.py index 1c708d0386d..396edbba9f3 100644 --- a/python/paddle/fluid/tests/unittests/test_random_crop_op.py +++ b/python/paddle/fluid/tests/unittests/test_random_crop_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest +from .op_test import OpTest class TestRandomCropOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_rank_loss_op.py b/python/paddle/fluid/tests/unittests/test_rank_loss_op.py index 7eba1e2077e..4e5673e84f8 100644 --- a/python/paddle/fluid/tests/unittests/test_rank_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_rank_loss_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestRankLossOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_reader_reset.py b/python/paddle/fluid/tests/unittests/test_reader_reset.py index d35183647ea..3ad85d57485 100644 --- a/python/paddle/fluid/tests/unittests/test_reader_reset.py +++ b/python/paddle/fluid/tests/unittests/test_reader_reset.py @@ -21,7 +21,7 @@ import unittest class TestReaderReset(unittest.TestCase): def prepare_data(self): def fake_data_generator(): - for n in xrange(self.total_ins_num): + for n in range(self.total_ins_num): yield np.ones(self.ins_shape) * n, n # Prepare data diff --git a/python/paddle/fluid/tests/unittests/test_recurrent_op.py b/python/paddle/fluid/tests/unittests/test_recurrent_op.py index d6ff18430e3..2e22df2beba 100644 --- a/python/paddle/fluid/tests/unittests/test_recurrent_op.py +++ b/python/paddle/fluid/tests/unittests/test_recurrent_op.py @@ -203,12 +203,12 @@ class 
RecurrentOpTest1(unittest.TestCase): num_grad[idx], ana_grad[idx], rtol=0.1).all()) def check_forward(self): - print 'test recurrent op forward' + print('test recurrent op forward') pd_output = self.forward() py_output = self.py_rnn.forward() - print 'pd_output', pd_output + print('pd_output', pd_output) print - print 'py_output', py_output + print('py_output', py_output) self.assertEqual(pd_output.shape, py_output.shape) self.assertTrue(np.isclose(pd_output, py_output, rtol=0.1).all()) @@ -445,7 +445,7 @@ class RecurrentOpNoMemBootTest(RecurrentOpTest1): self.py_rnn = RecurrentOpNoMemBootTest.PySimpleRNN4(self.input_shape, self.output_shape) self.output = layers.mean(self.create_rnn_op(), **self.p_info) - print self.main_program + print(self.main_program) def create_rnn_op(self): x = layers.data( diff --git a/python/paddle/fluid/tests/unittests/test_reduce_op.py b/python/paddle/fluid/tests/unittests/test_reduce_op.py index 06d116601bf..1d3cc2f88bd 100644 --- a/python/paddle/fluid/tests/unittests/test_reduce_op.py +++ b/python/paddle/fluid/tests/unittests/test_reduce_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestSumOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_registry.py b/python/paddle/fluid/tests/unittests/test_registry.py index a361c4624e3..8ad7908b1fc 100644 --- a/python/paddle/fluid/tests/unittests/test_registry.py +++ b/python/paddle/fluid/tests/unittests/test_registry.py @@ -15,7 +15,7 @@ import unittest import paddle.fluid as fluid import numpy as np -import decorators +from . import decorators class TestRegistry(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_reshape_op.py b/python/paddle/fluid/tests/unittests/test_reshape_op.py index f51b5a7e990..43d1eab0684 100644 --- a/python/paddle/fluid/tests/unittests/test_reshape_op.py +++ b/python/paddle/fluid/tests/unittests/test_reshape_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestReshapeOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_reverse_op.py b/python/paddle/fluid/tests/unittests/test_reverse_op.py index f845575a028..e0366717b37 100644 --- a/python/paddle/fluid/tests/unittests/test_reverse_op.py +++ b/python/paddle/fluid/tests/unittests/test_reverse_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestReverseOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_rmsprop_op.py b/python/paddle/fluid/tests/unittests/test_rmsprop_op.py index 0d84a5853ea..4c146f9d4b6 100644 --- a/python/paddle/fluid/tests/unittests/test_rmsprop_op.py +++ b/python/paddle/fluid/tests/unittests/test_rmsprop_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestRmspropOp1(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_roi_pool_op.py b/python/paddle/fluid/tests/unittests/test_roi_pool_op.py index df5684ab173..9e4554a8a4c 100644 --- a/python/paddle/fluid/tests/unittests/test_roi_pool_op.py +++ b/python/paddle/fluid/tests/unittests/test_roi_pool_op.py @@ -16,7 +16,7 @@ import unittest import numpy as np import math import sys -from op_test import OpTest +from .op_test import OpTest class TestROIPoolOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_row_conv_op.py b/python/paddle/fluid/tests/unittests/test_row_conv_op.py index 07dcd108689..66262340a22 
100644 --- a/python/paddle/fluid/tests/unittests/test_row_conv_op.py +++ b/python/paddle/fluid/tests/unittests/test_row_conv_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def row_conv_forward(x, lod, wt): diff --git a/python/paddle/fluid/tests/unittests/test_rpn_target_assign_op.py b/python/paddle/fluid/tests/unittests/test_rpn_target_assign_op.py index df6e0faaca6..bb9f1a2f366 100644 --- a/python/paddle/fluid/tests/unittests/test_rpn_target_assign_op.py +++ b/python/paddle/fluid/tests/unittests/test_rpn_target_assign_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest +from .op_test import OpTest def rpn_target_assign(iou, rpn_batch_size_per_im, rpn_positive_overlap, diff --git a/python/paddle/fluid/tests/unittests/test_scale_op.py b/python/paddle/fluid/tests/unittests/test_scale_op.py index 53f59c39905..068b96888c1 100644 --- a/python/paddle/fluid/tests/unittests/test_scale_op.py +++ b/python/paddle/fluid/tests/unittests/test_scale_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestScaleOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_scatter_op.py b/python/paddle/fluid/tests/unittests/test_scatter_op.py index fb172874363..4ea10950182 100644 --- a/python/paddle/fluid/tests/unittests/test_scatter_op.py +++ b/python/paddle/fluid/tests/unittests/test_scatter_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestScatterOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_seq_concat_op.py b/python/paddle/fluid/tests/unittests/test_seq_concat_op.py index 11ffa761a69..d89447d5fe2 100644 --- a/python/paddle/fluid/tests/unittests/test_seq_concat_op.py +++ b/python/paddle/fluid/tests/unittests/test_seq_concat_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np import sys -from op_test import OpTest +from .op_test import OpTest def to_abs_offset_lod(lod): diff --git a/python/paddle/fluid/tests/unittests/test_seq_conv.py b/python/paddle/fluid/tests/unittests/test_seq_conv.py index 9701d9adef1..838f1a5eba1 100644 --- a/python/paddle/fluid/tests/unittests/test_seq_conv.py +++ b/python/paddle/fluid/tests/unittests/test_seq_conv.py @@ -15,7 +15,7 @@ import unittest import numpy as np import random -from op_test import OpTest +from .op_test import OpTest class TestSeqProject(OpTest): @@ -26,9 +26,9 @@ class TestSeqProject(OpTest): if self.context_length == 1 \ and self.context_start == 0 \ and self.padding_trainable: - print "If context_start is 0 " \ + print("If context_start is 0 " \ "and context_length is 1," \ - " padding_trainable should be false." 
+ " padding_trainable should be false.") return # one level, batch size @@ -212,7 +212,7 @@ class TestSeqProjectCase2(TestSeqProject): self.context_stride = 1 self.input_size = [self.input_row, 23] - idx = range(self.input_size[0]) + idx = list(range(self.input_size[0])) del idx[0] offset_lod = [[0] + np.sort(random.sample(idx, 8)).tolist() + [self.input_size[0]]] diff --git a/python/paddle/fluid/tests/unittests/test_seq_pool.py b/python/paddle/fluid/tests/unittests/test_seq_pool.py index 0b3659d7a67..92e08b92b64 100644 --- a/python/paddle/fluid/tests/unittests/test_seq_pool.py +++ b/python/paddle/fluid/tests/unittests/test_seq_pool.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestSeqAvgPool(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_sequence_erase_op.py b/python/paddle/fluid/tests/unittests/test_sequence_erase_op.py index 8f0765277ae..b567534cc8d 100644 --- a/python/paddle/fluid/tests/unittests/test_sequence_erase_op.py +++ b/python/paddle/fluid/tests/unittests/test_sequence_erase_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def sequence_erase(in_seq, lod0, tokens): diff --git a/python/paddle/fluid/tests/unittests/test_sequence_expand.py b/python/paddle/fluid/tests/unittests/test_sequence_expand.py index 0bbd31814ef..51a8392b85a 100644 --- a/python/paddle/fluid/tests/unittests/test_sequence_expand.py +++ b/python/paddle/fluid/tests/unittests/test_sequence_expand.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestSequenceExpand(OpTest): @@ -44,7 +44,7 @@ class TestSequenceExpand(OpTest): out_lod = [[]] offset = 0 - for i in xrange(len(y_lod[ref_level])): + for i in range(len(y_lod[ref_level])): repeat_num = y_lod[ref_level][i] x_len = x_idx[i] @@ -55,7 +55,7 @@ class TestSequenceExpand(OpTest): stacked_x_sub = np.vstack((stacked_x_sub, x_sub)) out = np.vstack((out, stacked_x_sub)) if x_lod is not None: - for j in xrange(repeat_num): + for j in range(repeat_num): out_lod[0].append(x_len) offset += x_len diff --git a/python/paddle/fluid/tests/unittests/test_sequence_reshape.py b/python/paddle/fluid/tests/unittests/test_sequence_reshape.py index 68f2e5eba35..6aacd4f83f2 100644 --- a/python/paddle/fluid/tests/unittests/test_sequence_reshape.py +++ b/python/paddle/fluid/tests/unittests/test_sequence_reshape.py @@ -15,7 +15,7 @@ import unittest import numpy as np import math -from op_test import OpTest +from .op_test import OpTest class TestSequenceReshape(OpTest): @@ -35,7 +35,7 @@ class TestSequenceReshape(OpTest): def compute_output(self, x, x_lod, dimension): x_width = x.shape[1] out_lod = [[]] - for i in xrange(len(x_lod[0])): + for i in range(len(x_lod[0])): seq_len = x_lod[0][i] offset = (seq_len * x_width) / dimension assert int(offset) * dimension == seq_len * x_width diff --git a/python/paddle/fluid/tests/unittests/test_sequence_slice_op.py b/python/paddle/fluid/tests/unittests/test_sequence_slice_op.py index 313e485d1e3..8204153b532 100644 --- a/python/paddle/fluid/tests/unittests/test_sequence_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_sequence_slice_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np import sys -from op_test import OpTest +from .op_test import OpTest class TestSequenceSliceOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_sequence_softmax_op.py 
b/python/paddle/fluid/tests/unittests/test_sequence_softmax_op.py index c4fc8b74cf8..4134a5d4074 100644 --- a/python/paddle/fluid/tests/unittests/test_sequence_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_sequence_softmax_op.py @@ -14,8 +14,8 @@ import unittest import numpy as np -from op_test import OpTest -from test_softmax_op import stable_softmax +from .op_test import OpTest +from .test_softmax_op import stable_softmax import paddle.fluid.core as core diff --git a/python/paddle/fluid/tests/unittests/test_sgd_op.py b/python/paddle/fluid/tests/unittests/test_sgd_op.py index 3126293f9d8..d3e067e67c9 100644 --- a/python/paddle/fluid/tests/unittests/test_sgd_op.py +++ b/python/paddle/fluid/tests/unittests/test_sgd_op.py @@ -16,7 +16,7 @@ import unittest import numpy as np import paddle.fluid.core as core from paddle.fluid.op import Operator -from op_test import OpTest +from .op_test import OpTest class TestSGDOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_shape_op.py b/python/paddle/fluid/tests/unittests/test_shape_op.py index a62ee050075..28677e0a4f7 100644 --- a/python/paddle/fluid/tests/unittests/test_shape_op.py +++ b/python/paddle/fluid/tests/unittests/test_shape_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestShapeOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py b/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py index 6f0e337034d..a994bf181a7 100644 --- a/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py +++ b/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py @@ -48,7 +48,7 @@ class TestShrinkRNNMemoryBase(unittest.TestCase): def sum_lodtensor(self, tensor): sum_res = 0.0 - for i in xrange(np.product(tensor.shape())): + for i in range(np.product(tensor.shape())): sum_res += tensor._get_float_element(i) return sum_res diff --git a/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py b/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py index c435796569c..f74529b4d68 100644 --- a/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py +++ b/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py @@ -13,7 +13,7 @@ # limitations under the License. 
import numpy as np -from op_test import OpTest +from .op_test import OpTest from scipy.special import logit from scipy.special import expit import unittest diff --git a/python/paddle/fluid/tests/unittests/test_sign_op.py b/python/paddle/fluid/tests/unittests/test_sign_op.py index 087a0c575bf..2909c4520ab 100644 --- a/python/paddle/fluid/tests/unittests/test_sign_op.py +++ b/python/paddle/fluid/tests/unittests/test_sign_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestSignOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_slice_op.py b/python/paddle/fluid/tests/unittests/test_slice_op.py index 1a48bce3bb7..eaf33d9f0de 100644 --- a/python/paddle/fluid/tests/unittests/test_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_slice_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestSliceOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_smooth_l1_loss_op.py b/python/paddle/fluid/tests/unittests/test_smooth_l1_loss_op.py index e74664dac4d..9c53a699410 100644 --- a/python/paddle/fluid/tests/unittests/test_smooth_l1_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_smooth_l1_loss_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def smooth_l1_loss_forward(val, sigma2): diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py index 0ab581cfb0e..e42f9a705c7 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest import paddle.fluid.core as core diff --git a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py index c0d9fc8f22a..97f5c4f8df5 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py @@ -15,8 +15,8 @@ import unittest import numpy as np -from op_test import OpTest -from test_softmax_op import stable_softmax +from .op_test import OpTest +from .test_softmax_op import stable_softmax class TestSoftmaxWithCrossEntropyOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_split_ids_op.py b/python/paddle/fluid/tests/unittests/test_split_ids_op.py index e9f0a06a56b..fdc9d90cef0 100644 --- a/python/paddle/fluid/tests/unittests/test_split_ids_op.py +++ b/python/paddle/fluid/tests/unittests/test_split_ids_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestSplitIdsOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_split_op.py b/python/paddle/fluid/tests/unittests/test_split_op.py index eb49a53e54f..919020a9fbd 100644 --- a/python/paddle/fluid/tests/unittests/test_split_op.py +++ b/python/paddle/fluid/tests/unittests/test_split_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestSplitOp(OpTest): @@ -26,7 +26,7 @@ class TestSplitOp(OpTest): self.inputs = {'X': x} self.attrs = {'axis': axis, 'sections': [2, 1, 2]} self.outputs = {'Out': [('out%d' % i, out[i]) \ - for i in xrange(len(out))]} + for i in range(len(out))]} def 
_set_op_type(self): self.op_type = "split" diff --git a/python/paddle/fluid/tests/unittests/test_split_selected_rows_op.py b/python/paddle/fluid/tests/unittests/test_split_selected_rows_op.py index 61040a39ced..2b261820e04 100644 --- a/python/paddle/fluid/tests/unittests/test_split_selected_rows_op.py +++ b/python/paddle/fluid/tests/unittests/test_split_selected_rows_op.py @@ -53,7 +53,7 @@ class TestSpliteSelectedRows(unittest.TestCase): height_sections = [5, 5, 5, 5, 3] # initialize output variables [out0, out1] - outs_name = ["out%d" % i for i in xrange(len(height_sections))] + outs_name = ["out%d" % i for i in range(len(height_sections))] outs = [ scope.var(var_name).get_selected_rows() for var_name in outs_name ] diff --git a/python/paddle/fluid/tests/unittests/test_spp_op.py b/python/paddle/fluid/tests/unittests/test_spp_op.py index f0ab5909df6..efd7f2e1bb4 100644 --- a/python/paddle/fluid/tests/unittests/test_spp_op.py +++ b/python/paddle/fluid/tests/unittests/test_spp_op.py @@ -14,9 +14,9 @@ import unittest import numpy as np -from op_test import OpTest -from test_pool2d_op import max_pool2D_forward_naive -from test_pool2d_op import avg_pool2D_forward_naive +from .op_test import OpTest +from .test_pool2d_op import max_pool2D_forward_naive +from .test_pool2d_op import avg_pool2D_forward_naive class TestSppOp(OpTest): @@ -26,7 +26,7 @@ class TestSppOp(OpTest): input = np.random.random(self.shape).astype("float32") nsize, csize, hsize, wsize = input.shape out_level_flatten = [] - for i in xrange(self.pyramid_height): + for i in range(self.pyramid_height): bins = np.power(2, i) kernel_size = [0, 0] padding = [0, 0] diff --git a/python/paddle/fluid/tests/unittests/test_squared_l2_distance_op.py b/python/paddle/fluid/tests/unittests/test_squared_l2_distance_op.py index 78bc300ebec..1edd243c928 100644 --- a/python/paddle/fluid/tests/unittests/test_squared_l2_distance_op.py +++ b/python/paddle/fluid/tests/unittests/test_squared_l2_distance_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestSquaredL2DistanceOp_f0(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_squared_l2_norm_op.py b/python/paddle/fluid/tests/unittests/test_squared_l2_norm_op.py index 609445d5228..e174d1f86be 100644 --- a/python/paddle/fluid/tests/unittests/test_squared_l2_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_squared_l2_norm_op.py @@ -15,7 +15,7 @@ import numpy as np import unittest from numpy import linalg as LA -from op_test import OpTest +from .op_test import OpTest class TestL2LossOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_squeeze_op.py b/python/paddle/fluid/tests/unittests/test_squeeze_op.py index bca6af2fd5d..725ebdc0642 100644 --- a/python/paddle/fluid/tests/unittests/test_squeeze_op.py +++ b/python/paddle/fluid/tests/unittests/test_squeeze_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest # Correct: General. 
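Note (illustrative, not part of the patch): the recurring `from op_test import OpTest` -> `from .op_test import OpTest` change above is needed because Python 3 removed implicit relative imports (PEP 328); once these files are imported as members of the `unittests` package, a sibling module such as `op_test` can only be reached with an explicit relative import. A minimal sketch of the pattern, with a hypothetical fallback for running a test file directly as a script (the fallback is an assumption for illustration and is not used in this patch):

    # Python 2 resolved this implicitly from the same directory/package:
    #     from op_test import OpTest
    # Python 3 needs the package-relative form when imported as unittests.test_xxx:
    #     from .op_test import OpTest
    try:
        from .op_test import OpTest   # explicit relative import (Python 3 package context)
    except ImportError:
        from op_test import OpTest    # hypothetical fallback for direct script execution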
diff --git a/python/paddle/fluid/tests/unittests/test_sum_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_sum_mkldnn_op.py index 7956897d68a..9984638c1b5 100644 --- a/python/paddle/fluid/tests/unittests/test_sum_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_sum_mkldnn_op.py @@ -14,7 +14,7 @@ import unittest -from test_sum_op import TestSumOp +from .test_sum_op import TestSumOp class TestMKLDNN(TestSumOp): diff --git a/python/paddle/fluid/tests/unittests/test_sum_op.py b/python/paddle/fluid/tests/unittests/test_sum_op.py index 1d90414e137..d1798075b3c 100644 --- a/python/paddle/fluid/tests/unittests/test_sum_op.py +++ b/python/paddle/fluid/tests/unittests/test_sum_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestSumOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_target_assign_op.py b/python/paddle/fluid/tests/unittests/test_target_assign_op.py index bd208897520..67cf60a80b0 100644 --- a/python/paddle/fluid/tests/unittests/test_target_assign_op.py +++ b/python/paddle/fluid/tests/unittests/test_target_assign_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np import random -from op_test import OpTest +from .op_test import OpTest def gen_match_and_neg_indices(num_prior, gt_lod, neg_lod): diff --git a/python/paddle/fluid/tests/unittests/test_top_k_op.py b/python/paddle/fluid/tests/unittests/test_top_k_op.py index cc2fcc5ec0a..eca8cbd7a35 100644 --- a/python/paddle/fluid/tests/unittests/test_top_k_op.py +++ b/python/paddle/fluid/tests/unittests/test_top_k_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestTopkOp(OpTest): @@ -28,7 +28,7 @@ class TestTopkOp(OpTest): self.inputs = {'X': input} self.attrs = {'k': k} - for rowid in xrange(32): + for rowid in range(32): row = input[rowid] output[rowid] = np.sort(row)[-k:] indices[rowid] = row.argsort()[-k:] @@ -52,7 +52,7 @@ class TestTopkOp3d(OpTest): self.inputs = {'X': input_flat_2d} self.attrs = {'k': k} - for rowid in xrange(64): + for rowid in range(64): row = input_flat_2d[rowid] output[rowid] = np.sort(row)[-k:] indices[rowid] = row.argsort()[-k:] diff --git a/python/paddle/fluid/tests/unittests/test_transpose_op.py b/python/paddle/fluid/tests/unittests/test_transpose_op.py index ebd63fbd495..dc4ddecaaf1 100644 --- a/python/paddle/fluid/tests/unittests/test_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_transpose_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestTransposeOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_batch_size_like_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_batch_size_like_op.py index e033e86114f..a40129e74ef 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_batch_size_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_batch_size_like_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest class TestUniformRandomBatchSizeLike(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py index 346a949b6e7..80eb90e98ef 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py @@ -14,7 +14,7 @@ import unittest import numpy 
as np -from op_test import OpTest +from .op_test import OpTest import paddle.fluid.core as core from paddle.fluid.op import Operator diff --git a/python/paddle/fluid/tests/unittests/test_unpool_op.py b/python/paddle/fluid/tests/unittests/test_unpool_op.py index a97d6dfdda9..1c9ca00cdb3 100644 --- a/python/paddle/fluid/tests/unittests/test_unpool_op.py +++ b/python/paddle/fluid/tests/unittests/test_unpool_op.py @@ -14,7 +14,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest def unpool2dmax_forward_naive(input, indices, ksize, strides, paddings): @@ -22,10 +22,10 @@ def unpool2dmax_forward_naive(input, indices, ksize, strides, paddings): out_hsize = (s2 - 1) * strides[0] - 2 * paddings[0] + ksize[0] out_wsize = (s2 - 1) * strides[1] - 2 * paddings[1] + ksize[1] out = np.zeros((s0, s1, out_hsize, out_wsize)) - for nidx in xrange(s0): - for cidx in xrange(s1): - for h in xrange(s2): - for w in xrange(s3): + for nidx in range(s0): + for cidx in range(s1): + for h in range(s2): + for w in range(s3): index = indices[nidx, cidx, h, w] hidx = (index - index % out_wsize) / out_wsize widx = index % out_wsize @@ -47,16 +47,16 @@ class TestUnpoolOp(OpTest): self.strides[1] + 1 input = np.zeros((nsize, csize, hsize_out, wsize_out)) indices = np.zeros((nsize, csize, hsize_out, wsize_out)) - for i in xrange(hsize_out): - for j in xrange(wsize_out): + for i in range(hsize_out): + for j in range(wsize_out): r_start = np.max((i * self.strides[0] - self.paddings[0], 0)) r_end = np.min((i * self.strides[0] + self.ksize[0] - \ self.paddings[0], hsize)) c_start = np.max((j * self.strides[1] - self.paddings[1], 0)) c_end = np.min((j * self.strides[1] + self.ksize[1] - \ self.paddings[1], wsize)) - for nidx in xrange(nsize): - for cidx in xrange(csize): + for nidx in range(nsize): + for cidx in range(csize): x_masked = pre_input[nidx, cidx, r_start:r_end, \ c_start:c_end] input[nidx, cidx, i, j] = x_masked.max() diff --git a/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py b/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py index 7a4aa0a40b5..f0ab1e8f84b 100644 --- a/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py +++ b/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from op_test import OpTest +from .op_test import OpTest # Correct: General. diff --git a/python/paddle/fluid/tests/unittests/test_warpctc_op.py b/python/paddle/fluid/tests/unittests/test_warpctc_op.py index 9f1aaee472f..3e1d99df7fa 100644 --- a/python/paddle/fluid/tests/unittests/test_warpctc_op.py +++ b/python/paddle/fluid/tests/unittests/test_warpctc_op.py @@ -15,8 +15,8 @@ import sys import unittest import numpy as np -from op_test import OpTest -from test_softmax_op import stable_softmax +from .op_test import OpTest +from .test_softmax_op import stable_softmax CUDA_BLOCK_SIZE = 512 diff --git a/python/paddle/fluid/tests/unittests/test_while_op.py b/python/paddle/fluid/tests/unittests/test_while_op.py index fe8808bc044..790e6afe5f0 100644 --- a/python/paddle/fluid/tests/unittests/test_while_op.py +++ b/python/paddle/fluid/tests/unittests/test_while_op.py @@ -66,7 +66,7 @@ class TestWhileOp(unittest.TestCase): exe = Executor(cpu) d = [] - for i in xrange(3): + for i in range(3): d.append(numpy.random.random(size=[10]).astype('float32')) outs = exe.run(feed={'d0': d[0], -- GitLab
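Note (illustrative, not part of the patch): apart from the import rewrites, the hunks above apply three mechanical Python 3 conversions -- `xrange` no longer exists (`range` is already lazy in Python 3), `print` is a function rather than a statement, and `range()` returns an immutable sequence, so it is wrapped in `list()` where a test mutates the result (e.g. `del idx[0]` in TestSeqProjectCase2 of test_seq_conv.py). A short standalone sketch of the three patterns, with made-up values for illustration:

    # 1. xrange -> range: a direct rename, since Python 3's range is lazy.
    total = 0
    for i in range(4):
        total += i

    # 2. print statement -> print function.
    print('pd_output', total)

    # 3. range() is not a list in Python 3; materialize it before mutating.
    idx = list(range(10))
    del idx[0]
    print(idx)  # [1, 2, 3, 4, 5, 6, 7, 8, 9]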