diff --git a/python/paddle/fluid/contrib/slim/tests/test_moving_average_abs_max_scale_op.py b/python/paddle/fluid/contrib/slim/tests/test_moving_average_abs_max_scale_op.py
index 1ec463192d9198c527fd84c9d8ee805ce001ca7a..201aa9c1b4ba24276cb38607f0c49f07c40e73a7 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_moving_average_abs_max_scale_op.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_moving_average_abs_max_scale_op.py
@@ -60,7 +60,7 @@ class TestMovingAverageAbsMaxScaleOp(unittest.TestCase):
         moving_average_abs_max_scale_ops = [
             op
             for op in main_program.blocks[0].ops
-            if op.type == u'moving_average_abs_max_scale'
+            if op.type == 'moving_average_abs_max_scale'
         ]
         assert (
             len(moving_average_abs_max_scale_ops) == 1
diff --git a/python/paddle/fluid/data_feeder.py b/python/paddle/fluid/data_feeder.py
index 1f900eff0df87f7636bc5667b547ad61fa9525e7..2728cbfb8d28f77c3e2266347bea983f86eb350f 100644
--- a/python/paddle/fluid/data_feeder.py
+++ b/python/paddle/fluid/data_feeder.py
@@ -50,6 +50,7 @@ def convert_dtype(dtype):
     if dtype in _PADDLE_DTYPE_2_NUMPY_DTYPE:
         return _PADDLE_DTYPE_2_NUMPY_DTYPE[dtype]
     elif isinstance(dtype, type):
+        # This branch is for NumPy scalar types
         if dtype in [
             bool,
             np.float16,
@@ -66,6 +67,7 @@ def convert_dtype(dtype):
         ]:
             return dtype.__name__
     else:
+        # This branch is for np.dtype and str
         if dtype in [
             'bool',
             'float16',
@@ -79,24 +81,10 @@ def convert_dtype(dtype):
             'uint8',
             'complex64',
             'complex128',
-            u'bool',
-            u'float16',
-            u'uint16',
-            u'float32',
-            u'float64',
-            u'int8',
-            u'int16',
-            u'int32',
-            u'int64',
-            u'uint8',
-            u'complex64',
-            u'complex128',
         ]:
-            # this code is a little bit dangerous, since error could happen
-            # when casting no-ascii code to str in python2.
-            # but since the set itself is limited, so currently, it is good.
-            # however, jointly supporting python2 and python3, (as well as python4 maybe)
-            # may still be a long-lasting problem.
+            # NOTE(SigureMo): Since the np.dtype object is not an instance of
+            # type, it will not be handled by the previous branch. We need
+            # to convert it to str here.
             return str(dtype)
     # NOTE(zhangbo): Now numpy does not support bfloat, and paddle use uint16 to represent bfloat16, and there binaries are consistent.
     if dtype in ['bfloat16']:
@@ -104,7 +92,8 @@ def convert_dtype(dtype):
     raise TypeError(
         "dtype must be any of [bool, float16, uint16, float32, float64, int8, int16, "
-        "int32, int64, uint8, complex64, complex128], but received %s" % dtype
+        "int32, int64, uint8, complex64, complex128, bfloat16], but received %s"
+        % dtype
    )
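The NOTE(SigureMo) comment above is easy to verify: np.float32 is a class and therefore an instance of type, while an np.dtype object is not, so only the latter falls through to the str(dtype) branch, where its name string is recovered. A minimal illustration (not part of the patch):

    import numpy as np

    print(isinstance(np.float32, type))           # True  -> handled by the first branch
    print(isinstance(np.dtype('float32'), type))  # False -> falls through to the str branch
    print(np.dtype('float32') == 'float32')       # True, so the membership test still passes
    print(str(np.dtype('float32')))               # 'float32'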
diff --git a/python/paddle/fluid/tests/unittests/test_backward.py b/python/paddle/fluid/tests/unittests/test_backward.py
index 03b2c95105c83e2bf22565c414e2c09556f6fac0..98c2c05e60c58c4bf71de27b526149684a4fcd4c 100644
--- a/python/paddle/fluid/tests/unittests/test_backward.py
+++ b/python/paddle/fluid/tests/unittests/test_backward.py
@@ -188,24 +188,24 @@ class SimpleNet(BackwardNet):
         super().__init__()
         self.stop_gradient_grad_vars = set(
             [
-                u'x_no_grad@GRAD',
-                u'x2_no_grad@GRAD',
-                u'x3_no_grad@GRAD',
-                u'label_no_grad@GRAD',
+                'x_no_grad@GRAD',
+                'x2_no_grad@GRAD',
+                'x3_no_grad@GRAD',
+                'label_no_grad@GRAD',
             ]
         )
         self.no_grad_vars = set()
-        self.params_names = set([u'w2v', u'fc_predict.b_0', u'fc_w'])
+        self.params_names = set(['w2v', 'fc_predict.b_0', 'fc_w'])
         self.op_path = [
-            u'lookup_table_v2',
-            u'lookup_table_v2',  # embedding
-            u'elementwise_add',  # merge
-            u'mul',
-            u'elementwise_add',
-            u'softmax',  # fc
-            u'elementwise_sub',
-            u'square',
-            u'reduce_mean',
+            'lookup_table_v2',
+            'lookup_table_v2',  # embedding
+            'elementwise_add',  # merge
+            'mul',
+            'elementwise_add',
+            'softmax',  # fc
+            'elementwise_sub',
+            'square',
+            'reduce_mean',
         ]  # loss
         self.shape = [16, 50]
diff --git a/python/paddle/fluid/tests/unittests/test_dist_transpiler.py b/python/paddle/fluid/tests/unittests/test_dist_transpiler.py
index 9e2ff12bcad1f160ee939f4a344e14055bf39b47..7b5fe2c11434bf628bd4b4ac475929a0167f0047 100644
--- a/python/paddle/fluid/tests/unittests/test_dist_transpiler.py
+++ b/python/paddle/fluid/tests/unittests/test_dist_transpiler.py
@@ -1471,9 +1471,9 @@ class TestRemoteHsigmoid(TestDistLookupTableBase):
             for name in ["epmap", "table_names", "epmap"]:
                 assert op.has_attr(name)
                 if name == "epmap":
-                    assert op.attr(name)[0] == u'127.0.0.1:6174'
+                    assert op.attr(name)[0] == '127.0.0.1:6174'
                 elif name == "table_names":
-                    assert op.attr(name)[0] == u'hierarchical_sigmoid_0.w_0'
+                    assert op.attr(name)[0] == 'hierarchical_sigmoid_0.w_0'
                 else:
                     assert op.attr(name) == 3
         elif op.type == "lookup_table":
@@ -1484,7 +1484,7 @@ class TestRemoteHsigmoid(TestDistLookupTableBase):
         for op in trainer.blocks[0].ops:
             if op.type == "recv":
                 assert len(op.output("Out")) == 1
-                assert op.output("Out")[0] == u'hierarchical_sigmoid_0.b_0'
+                assert op.output("Out")[0] == 'hierarchical_sigmoid_0.b_0'
                 op_count += 1
         assert op_count == 1
diff --git a/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py b/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py
index 3b885f26683cd45c80c7a25a7f0600d16784933e..764e5e75067480df86d2c408903b2031ac0667d8 100644
--- a/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py
+++ b/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py
@@ -106,7 +106,7 @@ class TestFeedData(unittest.TestCase):
             "The fed Variable %r should have dimensions = %r, "
             "shape = %r, but received fed shape %r on each device"
             % (
-                u'data',
+                'data',
                 len(in_shape_tuple),
                 in_shape_tuple,
                 error_shape_list,
@@ -120,7 +120,7 @@ class TestFeedData(unittest.TestCase):
         self.assertEqual(
             str(dtype_mismatch_err.exception),
             "The data type of fed Variable %r must be 'int64', but "
-            "received 'float64'" % (u'label'),
+            "received 'float64'" % ('label'),
         )

     def _test_feed_data_dtype_mismatch(self, use_cuda, use_parallel_executor):
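All of the test-file changes above are mechanical: under Python 3 the u'' prefix is a no-op kept only for backward compatibility (PEP 414), so dropping it cannot change any comparison or set membership. A quick sanity check, illustrative only:

    assert u'print' == 'print'    # the u prefix is a no-op in Python 3
    assert type(u'print') is str  # there is no separate unicode type anymore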
diff --git a/python/paddle/fluid/tests/unittests/test_print_op.py b/python/paddle/fluid/tests/unittests/test_print_op.py
index 0ce1230f52e868b3e133e9b58f1e20802b6f5f44..372515f4f025d1762daee8d77e3c1647cd337984 100755
--- a/python/paddle/fluid/tests/unittests/test_print_op.py
+++ b/python/paddle/fluid/tests/unittests/test_print_op.py
@@ -125,7 +125,7 @@ class TestPrintOpBackward(unittest.TestCase):
             loss = paddle.static.Print(loss)
             paddle.optimizer.Adam().minimize(loss)

-            print_ops = [op for op in main.blocks[0].ops if op.type == u'print']
+            print_ops = [op for op in main.blocks[0].ops if op.type == 'print']
             assert len(print_ops) == 2, "The number of print op should be 2"

         place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
diff --git a/python/paddle/framework/framework.py b/python/paddle/framework/framework.py
index e366b13077d2a35050ed2c1350e8fd8d1b8d2632..cafabb0d73ba85a0ccea29f236a7ec6138e51528 100644
--- a/python/paddle/framework/framework.py
+++ b/python/paddle/framework/framework.py
@@ -40,6 +40,7 @@ def set_default_dtype(d):
     """

     if isinstance(d, type):
+        # This branch is for NumPy scalar types
         if d in [np.float16, np.float32, np.float64]:
             d = d.__name__
         else:
@@ -48,19 +49,11 @@
                 ", but received %s" % d.__name__
             )
     else:
-        if d in [
-            'float16',
-            'float32',
-            'float64',
-            u'float16',
-            u'float32',
-            u'float64',
-        ]:
-            # this code is a little bit dangerous, since error could happen
-            # when casting no-ascii code to str in python2.
-            # but since the set itself is limited, so currently, it is good.
-            # however, jointly supporting python2 and python3, (as well as python4 maybe)
-            # may still be a long-lasting problem.
+        # This branch is for np.dtype and str
+        if d in ['float16', 'float32', 'float64']:
+            # NOTE(SigureMo): Since the np.dtype object is not an instance of
+            # type, it will not be handled by the previous branch. We need
+            # to convert it to str here.
             d = str(d)
         else:
             raise TypeError(
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 22aef853314c72fdabe57f95cda3533856a1f41f..a47ca0ed06b9cba00c72d6e2ad272f3dbe0023f1 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -1357,14 +1357,6 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
             'int64',
             'complex64',
             'complex128',
-            u'bool',
-            u'float16',
-            u'float32',
-            u'float64',
-            u'int32',
-            u'int64',
-            u'complex64',
-            u'complex128',
         ],
         'sum',
     )
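With this change set_default_dtype still accepts a NumPy scalar type, a plain string, or an np.dtype object; the last of these compares equal to its name string, which is why the simplified membership test keeps working. A hedged usage sketch, assuming the public paddle.set_default_dtype / paddle.get_default_dtype API:

    import numpy as np
    import paddle

    paddle.set_default_dtype(np.float64)           # NumPy scalar type -> first branch
    paddle.set_default_dtype('float32')            # plain str -> second branch
    paddle.set_default_dtype(np.dtype('float16'))  # np.dtype('float16') == 'float16', so it also passes
    print(paddle.get_default_dtype())              # 'float16'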
diff --git a/tools/prune_for_jetson.py b/tools/prune_for_jetson.py
index 8bfe9ebf9f8dd7b7a4c24036ea4a9b8090d37504..14d1fb3ef2e2878c3b7d3a43ade0a6bce6e2021d 100644
--- a/tools/prune_for_jetson.py
+++ b/tools/prune_for_jetson.py
@@ -90,7 +90,7 @@ def prune_phi_kernels():
                 content = content.replace(p, '')

         with open(op_file, 'w', encoding='utf-8') as f:
-            f.write(u'{}'.format(content))
+            f.write(content)

     print('We erase all grad op and kernel for Paddle-Inference lib.')
     print('%50s%10s' % ('type', 'count'))
@@ -137,7 +137,7 @@ def append_fluid_kernels():
                 return False

     with open(file_name, 'w', encoding='utf-8') as f:
-        f.write(u'{}'.format(new_content))
+        f.write(new_content)

     # 2. add op and kernel register
     op_white_list.append("tensorrt_engine")
@@ -170,7 +170,7 @@ def append_fluid_kernels():
                     matches[0], matches[0].replace(k, k + "__")
                 )
         with open(op_file, 'w', encoding='utf-8') as f:
-            f.write(u'{}'.format(content))
+            f.write(content)

     return True
diff --git a/tools/remove_grad_op_and_kernel.py b/tools/remove_grad_op_and_kernel.py
index 8aa456bdb671f6e75d34b5cf58779055a08ad21a..85bfed9640da8406d5522cb1f720016d3eaaeb16 100644
--- a/tools/remove_grad_op_and_kernel.py
+++ b/tools/remove_grad_op_and_kernel.py
@@ -170,7 +170,7 @@ if __name__ == '__main__':
                 content = content.replace(i, '')

             with open(op_file, 'w', encoding='utf-8') as f:
-                f.write(u'{}'.format(content))
+                f.write(content)

     # 2. update operators/CMakeLists.txt
     cmake_file = os.path.join(
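The tools/ scripts benefit from the same simplification: for a Python 3 str, u'{}'.format(content) is an identity operation, so writing content directly is equivalent and clearer. Illustrative only, with a hypothetical file content:

    content = 'REGISTER_OPERATOR(tensorrt_engine, ...);'  # hypothetical stand-in
    assert u'{}'.format(content) == content               # the wrapper added nothing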