From 59adf7ced11acbc659953055b9746d3065699058 Mon Sep 17 00:00:00 2001
From: minqiyang
Date: Thu, 9 Aug 2018 23:48:36 +0800
Subject: [PATCH] Fix round(0.0) special issue

---
 python/paddle/fluid/compat.py                          |  4 +++-
 python/paddle/fluid/debugger.py                        |  1 +
 python/paddle/fluid/graphviz.py                        |  1 +
 python/paddle/fluid/profiler.py                        |  3 ++-
 python/paddle/fluid/tests/unittests/test_compat.py     |  2 ++
 .../paddle/fluid/tests/unittests/test_conv_shift_op.py |  2 +-
 python/paddle/fluid/tests/unittests/test_gru_op.py     |  3 ++-
 .../fluid/tests/unittests/test_inference_model_io.py   |  5 +++--
 .../paddle/fluid/tests/unittests/test_pool_max_op.py   | 10 +++++-----
 9 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/python/paddle/fluid/compat.py b/python/paddle/fluid/compat.py
index 16932a2a0ca..f0ea9d4aac9 100644
--- a/python/paddle/fluid/compat.py
+++ b/python/paddle/fluid/compat.py
@@ -183,9 +183,11 @@ def round(x, d=0):
         if x > 0.0:
             p = 10 ** d
             return float(math.floor((x * p) + math.copysign(0.5, x))) / p
-        else:
+        elif x < 0.0:
             p = 10 ** d
             return float(math.ceil((x * p) + math.copysign(0.5, x))) / p
+        else:
+            return math.copysign(0.0, x)
     else:
         import __builtin__
         return __builtin__.round(x, d)
diff --git a/python/paddle/fluid/debugger.py b/python/paddle/fluid/debugger.py
index dd8523f95ba..ea6c14df72e 100644
--- a/python/paddle/fluid/debugger.py
+++ b/python/paddle/fluid/debugger.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import sys
+import six
 import re
 from .graphviz import GraphPreviewGenerator
 from .proto import framework_pb2
diff --git a/python/paddle/fluid/graphviz.py b/python/paddle/fluid/graphviz.py
index 0557d7fd8af..5e823418bd2 100644
--- a/python/paddle/fluid/graphviz.py
+++ b/python/paddle/fluid/graphviz.py
@@ -15,6 +15,7 @@
 import os
 import random
 import six
+import functools
 import subprocess
 import logging
 
diff --git a/python/paddle/fluid/profiler.py b/python/paddle/fluid/profiler.py
index 60e9215457e..5fbb35abddb 100644
--- a/python/paddle/fluid/profiler.py
+++ b/python/paddle/fluid/profiler.py
@@ -15,6 +15,7 @@
 from . import core
 from contextlib import contextmanager
 import os
+import six
 
 __all__ = [
     'cuda_profiler', 'reset_profiler', 'profiler', 'start_profiler',
@@ -88,7 +89,7 @@ def cuda_profiler(output_file, output_mode=None, config=None):
     config = NVPROF_CONFIG if config is None else config
     config_file = 'nvprof_config_file'
     with open(config_file, 'wb') as fp:
-        fp.writelines(["%s\n" % item for item in config])
+        fp.writelines([six.b("%s\n" % item) for item in config])
     core.nvprof_init(output_file, output_mode, config_file)
     # Enables profiler collection by the active CUDA profiling tool.
     core.nvprof_start()
diff --git a/python/paddle/fluid/tests/unittests/test_compat.py b/python/paddle/fluid/tests/unittests/test_compat.py
index 0725d2c49aa..20e93515de3 100644
--- a/python/paddle/fluid/tests/unittests/test_compat.py
+++ b/python/paddle/fluid/tests/unittests/test_compat.py
@@ -440,6 +440,8 @@ class TestCompatible(unittest.TestCase):
         self.assertEqual(3.0, cpt.round(3.4))
         self.assertEqual(4.0, cpt.round(3.5))
         self.assertEqual(0.0, cpt.round(0.1))
+        self.assertEqual(0.0, cpt.round(0.0))
+        self.assertEqual(-0.0, cpt.round(-0.0))
         self.assertEqual(-0.0, cpt.round(-0.1))
         self.assertEqual(-3.0, cpt.round(-3.4))
         self.assertEqual(-4.0, cpt.round(-3.5))
diff --git a/python/paddle/fluid/tests/unittests/test_conv_shift_op.py b/python/paddle/fluid/tests/unittests/test_conv_shift_op.py
index 9fdb7baa90d..d5248320583 100644
--- a/python/paddle/fluid/tests/unittests/test_conv_shift_op.py
+++ b/python/paddle/fluid/tests/unittests/test_conv_shift_op.py
@@ -21,7 +21,7 @@ def conv_shift_forward(x, y):
     out = np.zeros_like(x)
     M = x.shape[1]
     N = y.shape[1]
-    y_half_width = (N - 1) / 2
+    y_half_width = (N - 1) // 2
     for i in range(M):
         for j in range(N):
             out[:, i] += x[:, (i + j + M - y_half_width) % M] * y[:, j]
diff --git a/python/paddle/fluid/tests/unittests/test_gru_op.py b/python/paddle/fluid/tests/unittests/test_gru_op.py
index 86a2c674d01..4bbec06a915 100644
--- a/python/paddle/fluid/tests/unittests/test_gru_op.py
+++ b/python/paddle/fluid/tests/unittests/test_gru_op.py
@@ -15,6 +15,7 @@
 import unittest
 import numpy as np
 import math
+import functools
 from op_test import OpTest
 from test_lstm_op import identity, sigmoid, tanh, relu
 
@@ -38,7 +39,7 @@ class TestGRUOp(OpTest):
         for i in range(len(seq_lens)):
             seq_starts.append(seq_starts[-1] + seq_lens[i])
         sorted_seqs = sorted(
-            list(range(len(seq_lens))), lambda x, y: seq_lens[y] - seq_lens[x])
+            list(range(len(seq_lens))), key=functools.cmp_to_key(lambda x, y: seq_lens[y] - seq_lens[x]))
         num_batch = seq_lens[sorted_seqs[0]]
         for batch_idx in range(num_batch):
             idx_in_seq = []
diff --git a/python/paddle/fluid/tests/unittests/test_inference_model_io.py b/python/paddle/fluid/tests/unittests/test_inference_model_io.py
index 4cd203155f4..66cc78e4d46 100644
--- a/python/paddle/fluid/tests/unittests/test_inference_model_io.py
+++ b/python/paddle/fluid/tests/unittests/test_inference_model_io.py
@@ -14,6 +14,7 @@
 
 import unittest
 
+import six
 import numpy as np
 import paddle.fluid.core as core
 
@@ -48,7 +49,7 @@ class TestBook(unittest.TestCase):
 
         exe.run(init_program, feed={}, fetch_list=[])
 
-        for i in range(100):
+        for i in six.moves.xrange(100):
             tensor_x = np.array(
                 [[1, 1], [1, 2], [3, 4], [5, 2]]).astype("float32")
             tensor_y = np.array([[-2], [-3], [-7], [-7]]).astype("float32")
@@ -64,7 +65,7 @@
                                 'y': tensor_y},
                           fetch_list=[avg_cost])[0]
 
-        reload(executor)  # reload to build a new scope
+        six.moves.reload_module(executor)  # reload to build a new scope
         exe = executor.Executor(place)
 
         [infer_prog, feed_var_names, fetch_vars] = load_inference_model(
diff --git a/python/paddle/fluid/tests/unittests/test_pool_max_op.py b/python/paddle/fluid/tests/unittests/test_pool_max_op.py
index e6a9f6f08cf..9a23fde3407 100644
--- a/python/paddle/fluid/tests/unittests/test_pool_max_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pool_max_op.py
@@ -24,9 +24,9 @@ def max_pool3D_forward_naive(x, ksize, strides, paddings, global_pool=False):
         ksize = [D, H, W]
         paddings = [0, 0, 0]
 
-    D_out = (D - ksize[0] + 2 * paddings[0]) / strides[0] + 1
-    H_out = (H - ksize[1] + 2 * paddings[1]) / strides[1] + 1
-    W_out = (W - ksize[2] + 2 * paddings[2]) / strides[2] + 1
+    D_out = (D - ksize[0] + 2 * paddings[0]) // strides[0] + 1
+    H_out = (H - ksize[1] + 2 * paddings[1]) // strides[1] + 1
+    W_out = (W - ksize[2] + 2 * paddings[2]) // strides[2] + 1
     out = np.zeros((N, C, D_out, H_out, W_out))
     mask = np.zeros((N, C, D_out, H_out, W_out))
     for k in range(D_out):
@@ -63,8 +63,8 @@ def max_pool2D_forward_naive(x, ksize, strides, paddings, global_pool=False):
         ksize = [H, W]
         paddings = [0, 0]
 
-    H_out = (H - ksize[0] + 2 * paddings[0]) / strides[0] + 1
-    W_out = (W - ksize[1] + 2 * paddings[1]) / strides[1] + 1
+    H_out = (H - ksize[0] + 2 * paddings[0]) // strides[0] + 1
+    W_out = (W - ksize[1] + 2 * paddings[1]) // strides[1] + 1
     out = np.zeros((N, C, H_out, W_out))
     mask = np.zeros((N, C, H_out, W_out))
     for i in range(H_out):
-- 
GitLab
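Note (not part of the patch): a minimal standalone sketch of the behaviour the compat.py hunk above fixes. The `_compat_round` helper and the assertions below are illustrative only; the helper simply mirrors the patched Python 3 branch of `paddle.fluid.compat.round`.

```python
import math


def _compat_round(x, d=0):
    """Mirror of the patched Python 3 branch of paddle.fluid.compat.round."""
    p = 10 ** d
    if x > 0.0:
        # Round half away from zero for positive inputs.
        return float(math.floor((x * p) + math.copysign(0.5, x))) / p
    elif x < 0.0:
        # Round half away from zero for negative inputs.
        return float(math.ceil((x * p) + math.copysign(0.5, x))) / p
    else:
        # Branch added by this patch: previously x == 0.0 fell into the ceil
        # branch, so round(0.0) came back as 1.0 and round(-0.0) lost its sign.
        # copysign keeps the sign bit of a signed-zero input.
        return math.copysign(0.0, x)


# The cases added to test_compat.py, checked against the helper.
assert _compat_round(0.0) == 0.0
assert math.copysign(1.0, _compat_round(-0.0)) == -1.0  # sign of zero preserved
assert _compat_round(3.5) == 4.0 and _compat_round(-3.5) == -4.0
```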