diff --git a/python/paddle/fluid/compat.py b/python/paddle/fluid/compat.py
index 16932a2a0ca5c5a16489b59b26d1b6ff058a1de2..f0ea9d4aac9844e979df609184a7d65ace587258 100644
--- a/python/paddle/fluid/compat.py
+++ b/python/paddle/fluid/compat.py
@@ -183,9 +183,11 @@ def round(x, d=0):
         if x > 0.0:
             p = 10 ** d
             return float(math.floor((x * p) + math.copysign(0.5, x))) / p
-        else:
+        elif x < 0.0:
             p = 10 ** d
             return float(math.ceil((x * p) + math.copysign(0.5, x))) / p
+        else:
+            return math.copysign(0.0, x)
     else:
         import __builtin__
         return __builtin__.round(x, d)
diff --git a/python/paddle/fluid/debugger.py b/python/paddle/fluid/debugger.py
index dd8523f95ba7a6686b1cb4555c0e32ca4d0d088f..ea6c14df72eaf4f24cdf2e5ac37fe2b0bd99b70f 100644
--- a/python/paddle/fluid/debugger.py
+++ b/python/paddle/fluid/debugger.py
@@ -13,6 +13,7 @@
 # limitations under the License.

 import sys
+import six
 import re
 from .graphviz import GraphPreviewGenerator
 from .proto import framework_pb2
diff --git a/python/paddle/fluid/graphviz.py b/python/paddle/fluid/graphviz.py
index 0557d7fd8affed52f53ff72c88144ffd4fe5f5b4..5e823418bd2bb61ec4d1755e6c46582b30904bd0 100644
--- a/python/paddle/fluid/graphviz.py
+++ b/python/paddle/fluid/graphviz.py
@@ -15,6 +15,7 @@
 import os
 import random
 import six
+import functools
 import subprocess
 import logging

diff --git a/python/paddle/fluid/profiler.py b/python/paddle/fluid/profiler.py
index 60e9215457e2a7867d5d9ec69f65dd70bcba9745..5fbb35abddb25e92ef53fe22af372b497f5a63f0 100644
--- a/python/paddle/fluid/profiler.py
+++ b/python/paddle/fluid/profiler.py
@@ -15,6 +15,7 @@
 from . import core
 from contextlib import contextmanager
 import os
+import six

 __all__ = [
     'cuda_profiler', 'reset_profiler', 'profiler', 'start_profiler',
@@ -88,7 +89,7 @@ def cuda_profiler(output_file, output_mode=None, config=None):
     config = NVPROF_CONFIG if config is None else config
     config_file = 'nvprof_config_file'
     with open(config_file, 'wb') as fp:
-        fp.writelines(["%s\n" % item for item in config])
+        fp.writelines([six.b("%s\n" % item) for item in config])
     core.nvprof_init(output_file, output_mode, config_file)
     # Enables profiler collection by the active CUDA profiling tool.
     core.nvprof_start()
diff --git a/python/paddle/fluid/tests/unittests/test_compat.py b/python/paddle/fluid/tests/unittests/test_compat.py
index 0725d2c49aa55f53ef9f9e484b6e35d48288b2d9..20e93515de335890cc6c4d14f230daaee5f74a8c 100644
--- a/python/paddle/fluid/tests/unittests/test_compat.py
+++ b/python/paddle/fluid/tests/unittests/test_compat.py
@@ -440,6 +440,8 @@ class TestCompatible(unittest.TestCase):
         self.assertEqual(3.0, cpt.round(3.4))
         self.assertEqual(4.0, cpt.round(3.5))
         self.assertEqual(0.0, cpt.round(0.1))
+        self.assertEqual(0.0, cpt.round(0.0))
+        self.assertEqual(-0.0, cpt.round(-0.0))
         self.assertEqual(-0.0, cpt.round(-0.1))
         self.assertEqual(-3.0, cpt.round(-3.4))
         self.assertEqual(-4.0, cpt.round(-3.5))
diff --git a/python/paddle/fluid/tests/unittests/test_conv_shift_op.py b/python/paddle/fluid/tests/unittests/test_conv_shift_op.py
index 9fdb7baa90d2184c3c439e76b6bb5f0668f5f9ee..d52483205837ef4d522bb6352d798a086e27ec27 100644
--- a/python/paddle/fluid/tests/unittests/test_conv_shift_op.py
+++ b/python/paddle/fluid/tests/unittests/test_conv_shift_op.py
@@ -21,7 +21,7 @@ def conv_shift_forward(x, y):
     out = np.zeros_like(x)
     M = x.shape[1]
     N = y.shape[1]
-    y_half_width = (N - 1) / 2
+    y_half_width = (N - 1) // 2
     for i in range(M):
         for j in range(N):
             out[:, i] += x[:, (i + j + M - y_half_width) % M] * y[:, j]
diff --git a/python/paddle/fluid/tests/unittests/test_gru_op.py b/python/paddle/fluid/tests/unittests/test_gru_op.py
index 86a2c674d01f45b2b141572c8191d2fba7fa312f..4bbec06a91537b3aa0e84312c54dfee6670a6914 100644
--- a/python/paddle/fluid/tests/unittests/test_gru_op.py
+++ b/python/paddle/fluid/tests/unittests/test_gru_op.py
@@ -15,6 +15,7 @@
 import unittest
 import numpy as np
 import math
+import functools
 from op_test import OpTest
 from test_lstm_op import identity, sigmoid, tanh, relu

@@ -38,7 +39,7 @@ class TestGRUOp(OpTest):
         for i in range(len(seq_lens)):
             seq_starts.append(seq_starts[-1] + seq_lens[i])
         sorted_seqs = sorted(
-            list(range(len(seq_lens))), lambda x, y: seq_lens[y] - seq_lens[x])
+            list(range(len(seq_lens))), key=functools.cmp_to_key(lambda x, y: seq_lens[y] - seq_lens[x]))
         num_batch = seq_lens[sorted_seqs[0]]
         for batch_idx in range(num_batch):
             idx_in_seq = []
diff --git a/python/paddle/fluid/tests/unittests/test_inference_model_io.py b/python/paddle/fluid/tests/unittests/test_inference_model_io.py
index 4cd203155f446df07d2fe6c1d56e0d20f1113679..66cc78e4d4683a844e76408360910b022df4dbd5 100644
--- a/python/paddle/fluid/tests/unittests/test_inference_model_io.py
+++ b/python/paddle/fluid/tests/unittests/test_inference_model_io.py
@@ -14,6 +14,7 @@

 import unittest

+import six
 import numpy as np
 import paddle.fluid.core as core

@@ -48,7 +49,7 @@ class TestBook(unittest.TestCase):

         exe.run(init_program, feed={}, fetch_list=[])

-        for i in range(100):
+        for i in six.moves.xrange(100):
             tensor_x = np.array(
                 [[1, 1], [1, 2], [3, 4], [5, 2]]).astype("float32")
             tensor_y = np.array([[-2], [-3], [-7], [-7]]).astype("float32")
@@ -64,7 +65,7 @@ class TestBook(unittest.TestCase):
                                  'y': tensor_y},
                            fetch_list=[avg_cost])[0]

-        reload(executor)  # reload to build a new scope
+        six.moves.reload_module(executor)  # reload to build a new scope
         exe = executor.Executor(place)

         [infer_prog, feed_var_names, fetch_vars] = load_inference_model(
diff --git a/python/paddle/fluid/tests/unittests/test_pool_max_op.py b/python/paddle/fluid/tests/unittests/test_pool_max_op.py
index e6a9f6f08cf1445c14494506641b0c3502591c37..9a23fde3407562fa2889e98cd4d0a4fbdcaeea47 100644
--- a/python/paddle/fluid/tests/unittests/test_pool_max_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pool_max_op.py
@@ -24,9 +24,9 @@ def max_pool3D_forward_naive(x, ksize, strides, paddings, global_pool=False):
         ksize = [D, H, W]
         paddings = [0, 0, 0]

-    D_out = (D - ksize[0] + 2 * paddings[0]) / strides[0] + 1
-    H_out = (H - ksize[1] + 2 * paddings[1]) / strides[1] + 1
-    W_out = (W - ksize[2] + 2 * paddings[2]) / strides[2] + 1
+    D_out = (D - ksize[0] + 2 * paddings[0]) // strides[0] + 1
+    H_out = (H - ksize[1] + 2 * paddings[1]) // strides[1] + 1
+    W_out = (W - ksize[2] + 2 * paddings[2]) // strides[2] + 1
     out = np.zeros((N, C, D_out, H_out, W_out))
     mask = np.zeros((N, C, D_out, H_out, W_out))
     for k in range(D_out):
@@ -63,8 +63,8 @@ def max_pool2D_forward_naive(x, ksize, strides, paddings, global_pool=False):
         ksize = [H, W]
         paddings = [0, 0]

-    H_out = (H - ksize[0] + 2 * paddings[0]) / strides[0] + 1
-    W_out = (W - ksize[1] + 2 * paddings[1]) / strides[1] + 1
+    H_out = (H - ksize[0] + 2 * paddings[0]) // strides[0] + 1
+    W_out = (W - ksize[1] + 2 * paddings[1]) // strides[1] + 1
     out = np.zeros((N, C, H_out, W_out))
     mask = np.zeros((N, C, H_out, W_out))
     for i in range(H_out):