From b2385821cdb11732fd37fa3c905f10d5f2825334 Mon Sep 17 00:00:00 2001
From: Yiqun Liu
Date: Mon, 20 Mar 2023 13:27:34 +0800
Subject: [PATCH] Add fp16 and bf16 to the checking dtype list of rand apis.
 (#51684)

* Add fp16 and bf16 to the checking dtype list of rand apis.

* Remove the checking of raising TypeError.
---
 .../unittests/test_gaussian_random_op.py      | 16 ++++++++++------
 .../fluid/tests/unittests/test_rand_op.py     | 10 ++++++----
 .../tests/unittests/test_uniform_random_op.py |  8 +++++---
 python/paddle/tensor/random.py                | 20 ++++++++++----------
 4 files changed, 31 insertions(+), 23 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
index 18b08620f96..44a1bf1e038 100644
--- a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
+++ b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
@@ -292,9 +292,8 @@ class TestGaussianRandomAPI(unittest.TestCase):
 
         def test_default_fp16():
             paddle.framework.set_default_dtype('float16')
-            paddle.tensor.random.gaussian([2, 3])
-
-        self.assertRaises(TypeError, test_default_fp16)
+            out = paddle.tensor.random.gaussian([2, 3])
+            self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16)
 
         def test_default_fp32():
             paddle.framework.set_default_dtype('float32')
@@ -306,6 +305,9 @@ class TestGaussianRandomAPI(unittest.TestCase):
             out = paddle.tensor.random.gaussian([2, 3])
             self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)
 
+        if paddle.is_compiled_with_cuda():
+            paddle.set_device('gpu')
+            test_default_fp16()
         test_default_fp64()
         test_default_fp32()
 
@@ -318,9 +320,8 @@ class TestStandardNormalDtype(unittest.TestCase):
 
         def test_default_fp16():
             paddle.framework.set_default_dtype('float16')
-            paddle.tensor.random.standard_normal([2, 3])
-
-        self.assertRaises(TypeError, test_default_fp16)
+            out = paddle.tensor.random.standard_normal([2, 3])
+            self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16)
 
         def test_default_fp32():
             paddle.framework.set_default_dtype('float32')
@@ -332,6 +333,9 @@ class TestStandardNormalDtype(unittest.TestCase):
             out = paddle.tensor.random.standard_normal([2, 3])
             self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)
 
+        if paddle.is_compiled_with_cuda():
+            paddle.set_device('gpu')
+            test_default_fp16()
         test_default_fp64()
         test_default_fp32()
 
diff --git a/python/paddle/fluid/tests/unittests/test_rand_op.py b/python/paddle/fluid/tests/unittests/test_rand_op.py
index 9f59c3d3908..d259f2c9163 100644
--- a/python/paddle/fluid/tests/unittests/test_rand_op.py
+++ b/python/paddle/fluid/tests/unittests/test_rand_op.py
@@ -108,7 +108,7 @@ class TestRandOpForDygraph(unittest.TestCase):
             dim_2 = paddle.tensor.fill_constant([1], "int32", 5)
             rand(shape=[dim_1, dim_2])
 
-            var_shape = fluid.dygraph.to_variable(np.array([3, 4]))
+            var_shape = paddle.to_tensor(np.array([3, 4]))
             rand(var_shape)
 
     def test_run(self):
@@ -123,9 +123,8 @@ class TestRandDtype(unittest.TestCase):
 
         def test_default_fp16():
             paddle.framework.set_default_dtype('float16')
-            paddle.tensor.random.rand([2, 3])
-
-        self.assertRaises(TypeError, test_default_fp16)
+            out = paddle.tensor.random.rand([2, 3])
+            self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16)
 
         def test_default_fp32():
             paddle.framework.set_default_dtype('float32')
@@ -137,6 +136,9 @@ class TestRandDtype(unittest.TestCase):
             out = paddle.tensor.random.rand([2, 3])
             self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)
 
+        if paddle.is_compiled_with_cuda():
+            paddle.set_device('gpu')
+            test_default_fp16()
         test_default_fp64()
         test_default_fp32()
 
diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
index 5ecb6d4b2c6..3a03b7c0dce 100644
--- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
+++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
@@ -570,9 +570,8 @@ class TestUniformDtype(unittest.TestCase):
 
         def test_default_fp16():
             paddle.framework.set_default_dtype('float16')
-            paddle.tensor.random.uniform([2, 3])
-
-        self.assertRaises(TypeError, test_default_fp16)
+            out = paddle.tensor.random.uniform([2, 3])
+            self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16)
 
         def test_default_fp32():
             paddle.framework.set_default_dtype('float32')
@@ -592,6 +591,9 @@ class TestUniformDtype(unittest.TestCase):
             out = paddle.uniform([2, 3], dtype=paddle.float16)
             self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16)
 
+        if paddle.is_compiled_with_cuda():
+            paddle.set_device('gpu')
+            test_default_fp16()
         test_default_fp64()
         test_default_fp32()
         test_dygraph_fp16()
diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py
index 8970abf4be3..e3fc96f3854 100644
--- a/python/paddle/tensor/random.py
+++ b/python/paddle/tensor/random.py
@@ -322,13 +322,14 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None):
         distribution, with ``shape`` and ``dtype``.
     """
     op_type_for_check = 'gaussian/standard_normal/randn/normal'
+    supported_dtypes = ['float32', 'float64', 'float16', 'uint16']
     if dtype is None:
         dtype = paddle.framework.get_default_dtype()
-        if dtype not in ['float32', 'float64']:
+        if dtype not in supported_dtypes:
             raise TypeError(
-                "{} only supports [float32, float64], but the default dtype is {}".format(
-                    op_type_for_check, dtype
+                "{} only supports {}, but the default dtype is {}".format(
+                    op_type_for_check, supported_dtypes, dtype
                 )
             )
     if not isinstance(dtype, core.VarDesc.VarType):
@@ -342,7 +343,7 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None):
         )
     else:
         check_shape(shape, op_type_for_check)
-        check_dtype(dtype, 'dtype', ['float32', 'float64'], op_type_for_check)
+        check_dtype(dtype, 'dtype', supported_dtypes, op_type_for_check)
 
     inputs = {}
     attrs = {
@@ -630,12 +631,13 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
            # [[-0.8517412, -0.4006908,  0.2551912 ],     # random
            #  [ 0.3364414,  0.36278176, -0.16085452]]    # random
     """
+    supported_dtypes = ['float32', 'float64', 'float16', 'uint16']
     if dtype is None:
         dtype = paddle.framework.get_default_dtype()
-        if dtype not in ['float32', 'float64']:
+        if dtype not in supported_dtypes:
             raise TypeError(
-                "uniform/rand only supports [float32, float64], but the default dtype is {}".format(
-                    dtype
+                "uniform/rand only supports {}, but the default dtype is {}".format(
+                    supported_dtypes, dtype
                 )
             )
 
@@ -654,9 +656,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
         )
     else:
         check_type(shape, 'shape', (list, tuple, Variable), 'uniform/rand')
-        check_dtype(
-            dtype, 'dtype', ('float16', 'float32', 'float64'), 'uniform/rand'
-        )
+        check_dtype(dtype, 'dtype', supported_dtypes, 'uniform/rand')
         check_type(min, 'min', (float, int, Variable), 'uniform/rand')
         check_type(max, 'max', (float, int, Variable), 'uniform/rand')
 
--
GitLab
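
For reference, a minimal sketch of the behavior this patch enables. It assumes a
CUDA build of Paddle with a GPU that supports float16, mirroring the updated
tests, which only exercise the fp16 path under is_compiled_with_cuda(). Note
that 'uint16' in supported_dtypes is the dtype string Paddle uses for bfloat16
storage, which is the "bf16" half of the commit title.

    import paddle

    # The updated tests guard the fp16 path on a CUDA build, so do the same here.
    if paddle.is_compiled_with_cuda():
        paddle.set_device('gpu')

        # Before this patch, a float16 default dtype made rand/uniform/gaussian
        # raise TypeError; the widened check now accepts it.
        paddle.framework.set_default_dtype('float16')
        out = paddle.rand([2, 3])
        print(out.dtype)  # paddle.float16

        # An explicit dtype argument also passes the widened check_dtype list.
        out = paddle.uniform([2, 3], dtype='float16')
        print(out.dtype)  # paddle.float16

        # The default dtype is process-global; restore it for unrelated code.
        paddle.framework.set_default_dtype('float32')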