diff --git a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
index 18b08620f963886e91046773b1838707f282c310..44a1bf1e038e478d9558acbe45bea18707e67adf 100644
--- a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
+++ b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
@@ -292,9 +292,8 @@ class TestGaussianRandomAPI(unittest.TestCase):

         def test_default_fp16():
             paddle.framework.set_default_dtype('float16')
-            paddle.tensor.random.gaussian([2, 3])
-
-        self.assertRaises(TypeError, test_default_fp16)
+            out = paddle.tensor.random.gaussian([2, 3])
+            self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16)

         def test_default_fp32():
             paddle.framework.set_default_dtype('float32')
@@ -306,6 +305,9 @@ class TestGaussianRandomAPI(unittest.TestCase):
             out = paddle.tensor.random.gaussian([2, 3])
             self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)

+        if paddle.is_compiled_with_cuda():
+            paddle.set_device('gpu')
+            test_default_fp16()
         test_default_fp64()
         test_default_fp32()

@@ -318,9 +320,8 @@ class TestStandardNormalDtype(unittest.TestCase):

         def test_default_fp16():
             paddle.framework.set_default_dtype('float16')
-            paddle.tensor.random.standard_normal([2, 3])
-
-        self.assertRaises(TypeError, test_default_fp16)
+            out = paddle.tensor.random.standard_normal([2, 3])
+            self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16)

         def test_default_fp32():
             paddle.framework.set_default_dtype('float32')
@@ -332,6 +333,9 @@ class TestStandardNormalDtype(unittest.TestCase):
             out = paddle.tensor.random.standard_normal([2, 3])
             self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)

+        if paddle.is_compiled_with_cuda():
+            paddle.set_device('gpu')
+            test_default_fp16()
         test_default_fp64()
         test_default_fp32()

diff --git a/python/paddle/fluid/tests/unittests/test_rand_op.py b/python/paddle/fluid/tests/unittests/test_rand_op.py
index 9f59c3d39085d64650a6307bc63ad9479182b5a9..d259f2c91630b8baf4eaec7d21f3e1c72a38c405 100644
--- a/python/paddle/fluid/tests/unittests/test_rand_op.py
+++ b/python/paddle/fluid/tests/unittests/test_rand_op.py
@@ -108,7 +108,7 @@ class TestRandOpForDygraph(unittest.TestCase):
             dim_2 = paddle.tensor.fill_constant([1], "int32", 5)
             rand(shape=[dim_1, dim_2])

-            var_shape = fluid.dygraph.to_variable(np.array([3, 4]))
+            var_shape = paddle.to_tensor(np.array([3, 4]))
             rand(var_shape)

     def test_run(self):
@@ -123,9 +123,8 @@ class TestRandDtype(unittest.TestCase):

         def test_default_fp16():
             paddle.framework.set_default_dtype('float16')
-            paddle.tensor.random.rand([2, 3])
-
-        self.assertRaises(TypeError, test_default_fp16)
+            out = paddle.tensor.random.rand([2, 3])
+            self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16)

         def test_default_fp32():
             paddle.framework.set_default_dtype('float32')
@@ -137,6 +136,9 @@ class TestRandDtype(unittest.TestCase):
             out = paddle.tensor.random.rand([2, 3])
             self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)

+        if paddle.is_compiled_with_cuda():
+            paddle.set_device('gpu')
+            test_default_fp16()
         test_default_fp64()
         test_default_fp32()

diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
index 5ecb6d4b2c6c9bebf4fc45f04e58ae9a08704006..3a03b7c0dce4870fd907de481bc3d2ead78fe300 100644
--- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
+++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
@@ -570,9 +570,8 @@ class TestUniformDtype(unittest.TestCase):

         def test_default_fp16():
             paddle.framework.set_default_dtype('float16')
-            paddle.tensor.random.uniform([2, 3])
-
-        self.assertRaises(TypeError, test_default_fp16)
+            out = paddle.tensor.random.uniform([2, 3])
+            self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16)

         def test_default_fp32():
             paddle.framework.set_default_dtype('float32')
@@ -592,6 +591,9 @@ class TestUniformDtype(unittest.TestCase):
             out = paddle.uniform([2, 3], dtype=paddle.float16)
             self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16)

+        if paddle.is_compiled_with_cuda():
+            paddle.set_device('gpu')
+            test_default_fp16()
         test_default_fp64()
         test_default_fp32()
         test_dygraph_fp16()

diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py
index 8970abf4be3e7b0612a97d12b938709b409d9042..e3fc96f3854002c3abd7575c0f11250dfd96c91f 100644
--- a/python/paddle/tensor/random.py
+++ b/python/paddle/tensor/random.py
@@ -322,13 +322,14 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None):
         distribution, with ``shape`` and ``dtype``.
     """

     op_type_for_check = 'gaussian/standard_normal/randn/normal'
+    supported_dtypes = ['float32', 'float64', 'float16', 'uint16']
     if dtype is None:
         dtype = paddle.framework.get_default_dtype()
-        if dtype not in ['float32', 'float64']:
+        if dtype not in supported_dtypes:
             raise TypeError(
-                "{} only supports [float32, float64], but the default dtype is {}".format(
-                    op_type_for_check, dtype
+                "{} only supports {}, but the default dtype is {}".format(
+                    op_type_for_check, supported_dtypes, dtype
                 )
             )
     if not isinstance(dtype, core.VarDesc.VarType):
@@ -342,7 +343,7 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None):
         )
     else:
         check_shape(shape, op_type_for_check)
-        check_dtype(dtype, 'dtype', ['float32', 'float64'], op_type_for_check)
+        check_dtype(dtype, 'dtype', supported_dtypes, op_type_for_check)

         inputs = {}
         attrs = {
@@ -630,12 +631,13 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
             #  [[-0.8517412, -0.4006908,  0.2551912 ],   # random
             #   [ 0.3364414,  0.36278176, -0.16085452]]  # random
     """
+    supported_dtypes = ['float32', 'float64', 'float16', 'uint16']
     if dtype is None:
         dtype = paddle.framework.get_default_dtype()
-        if dtype not in ['float32', 'float64']:
+        if dtype not in supported_dtypes:
             raise TypeError(
-                "uniform/rand only supports [float32, float64], but the default dtype is {}".format(
-                    dtype
+                "uniform/rand only supports {}, but the default dtype is {}".format(
+                    supported_dtypes, dtype
                 )
             )

@@ -654,9 +656,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
         )
     else:
         check_type(shape, 'shape', (list, tuple, Variable), 'uniform/rand')
-        check_dtype(
-            dtype, 'dtype', ('float16', 'float32', 'float64'), 'uniform/rand'
-        )
+        check_dtype(dtype, 'dtype', supported_dtypes, 'uniform/rand')
         check_type(min, 'min', (float, int, Variable), 'uniform/rand')
         check_type(max, 'max', (float, int, Variable), 'uniform/rand')
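Usage sketch (not part of the patch): a minimal illustration of the behavior this change enables, assuming a CUDA build of Paddle, since the updated tests only exercise the fp16 path on GPU. Before the change, a float16 default dtype made gaussian/standard_normal/rand/uniform raise TypeError; after it, these ops return fp16 tensors:

    import paddle

    if paddle.is_compiled_with_cuda():
        paddle.set_device('gpu')                       # fp16 path is exercised on GPU
        paddle.framework.set_default_dtype('float16')

        g = paddle.tensor.random.gaussian([2, 3])      # previously raised TypeError
        u = paddle.uniform([2, 3], dtype='float16')    # explicit fp16 dtype also accepted
        assert g.dtype == paddle.float16
        assert u.dtype == paddle.float16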