diff --git a/python/paddle/fluid/data_feeder.py b/python/paddle/fluid/data_feeder.py
index b4643b3afb55c06d9a69db284909c6f2f0594249..77abddce03d827af37d095db722156b353b29336 100644
--- a/python/paddle/fluid/data_feeder.py
+++ b/python/paddle/fluid/data_feeder.py
@@ -17,6 +17,7 @@ import numpy as np
 import os
 import multiprocessing
 import warnings
+import struct
 
 from .framework import (
     Variable,
@@ -45,6 +46,27 @@ _PADDLE_DTYPE_2_NUMPY_DTYPE = {
 }
 
 
+def copy_bits_from_float_to_uint16(f):
+    # Reinterpret the float32 bit pattern as uint32 and keep the top 16 bits:
+    # that is exactly the bfloat16 (truncation) encoding of the value.
+    return struct.unpack('<I', struct.pack('<f', f))[0] >> 16
+
+
+def convert_float_to_uint16(data, data_format="NCHW"):
+    if data.size == 0:
+        return data.view(np.uint16)
+
+    if data_format == "NHWC":
+        data = np.transpose(data, [0, 3, 1, 2])
+
+    new_data = []
+    for x in np.nditer(data):
+        new_data.append(np.uint16(copy_bits_from_float_to_uint16(x)))
+    new_data = np.reshape(new_data, data.shape).view(np.uint16)
+
+    if data_format == "NHWC":
+        new_data = np.transpose(new_data, [0, 2, 3, 1])
+    return new_data
+
+
 def convert_dtype(dtype):
     if isinstance(dtype, core.VarDesc.VarType):
         if dtype in _PADDLE_DTYPE_2_NUMPY_DTYPE:
@@ -86,7 +108,9 @@ def convert_dtype(dtype):
             # type, so it will not be handled by the previous branch. We need
             # to convert it to str here.
             return str(dtype)
 
-    # NOTE(zhangbo): Now numpy does not support bfloat, and paddle use uint16 to represent bfloat16, and there binaries are consistent.
+    # NOTE(zhangbo): Now numpy does not support bfloat, so numpy.uint16 is used to represent paddle.bfloat16; their binaries are consistent.
+    # When casting an ndarray to uint16 to build a bfloat16 tensor, do not call ndarray.astype('uint16') directly;
+    # use the function 'convert_float_to_uint16' above instead, otherwise the bits are wrong.
     if dtype in ['bfloat16']:
         return 'uint16'
 
diff --git a/python/paddle/fluid/tests/unittests/test_var_base.py b/python/paddle/fluid/tests/unittests/test_var_base.py
index 24920eb375ce1154355d65e119b8b97ade90e0ca..d31ccaf86dd311f342a6791600345841ca98cddf 100644
--- a/python/paddle/fluid/tests/unittests/test_var_base.py
+++ b/python/paddle/fluid/tests/unittests/test_var_base.py
@@ -246,6 +246,26 @@ class TestVarBase(unittest.TestCase):
             np.testing.assert_array_equal(x.numpy(), numpy_array)
             self.assertEqual(x.type, core.VarDesc.VarType.LOD_TENSOR)
 
+            # test dtype bfloat16
+            x = paddle.to_tensor(-1e6, dtype=paddle.bfloat16)
+            self.assertEqual(x.dtype, core.VarDesc.VarType.BF16)
+            self.assertTrue(x == -999424.0)
+
+            x = paddle.to_tensor([-1e6, -1e6, -1e6], dtype='bfloat16')
+            self.assertEqual(x.dtype, core.VarDesc.VarType.BF16)
+            self.assertTrue(x[0] == -999424.0)
+            self.assertTrue(x[1] == -999424.0)
+            self.assertTrue(x[2] == -999424.0)
+
+            x = paddle.to_tensor(
+                -1e6, dtype=paddle.bfloat16, stop_gradient=False
+            )
+            self.assertEqual(x.dtype, core.VarDesc.VarType.BF16)
+            self.assertTrue(x == -999424.0)
+            y = x * x
+            y.backward()
+            self.assertTrue(x.grad == -999424.0 * 2)
+
         with self.assertRaises(ValueError):
             paddle.randn([3, 2, 2]).item()
         with self.assertRaises(ValueError):
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 3248aa30103f7d1ffae9eb586b1e825f91280c0e..d9b22ac045f8da61c21803c4f92c819fc82f9cef 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -28,6 +28,7 @@ from ..fluid.data_feeder import (
     check_type,
     check_variable_and_dtype,
     convert_dtype,
+    convert_float_to_uint16,
 )
 from ..fluid.framework import (
     Variable,
@@ -613,7 +614,11 @@ def _to_tensor_non_static(data, dtype=None, place=None, stop_gradient=True):
             data = data.astype(default_type)
 
     if dtype and convert_dtype(dtype) != data.dtype:
-        data = data.astype(convert_dtype(dtype))
+        if convert_dtype(dtype) in ['uint16']:
+            # do not call ndarray.astype('uint16') directly: that converts the
+            # values, while bfloat16 needs the float32 bits truncated instead.
+            data = convert_float_to_uint16(data.astype('float32'))
+        else:
+            data = data.astype(convert_dtype(dtype))
 
     if _in_eager_without_dygraph_check() and isinstance(data, np.ndarray):
         return core.eager.Tensor(