Unverified commit b2385821, authored by Yiqun Liu, committed by GitHub

Add fp16 and bf16 to the checking dtype list of rand apis. (#51684)

* Add fp16 and bf16 to the checking dtype list of rand apis.

* Remove the checking of raising TypeError.
Parent commit: 7a79fd88
...@@ -292,9 +292,8 @@ class TestGaussianRandomAPI(unittest.TestCase): ...@@ -292,9 +292,8 @@ class TestGaussianRandomAPI(unittest.TestCase):
def test_default_fp16(): def test_default_fp16():
paddle.framework.set_default_dtype('float16') paddle.framework.set_default_dtype('float16')
paddle.tensor.random.gaussian([2, 3]) out = paddle.tensor.random.gaussian([2, 3])
self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16)
self.assertRaises(TypeError, test_default_fp16)
def test_default_fp32(): def test_default_fp32():
paddle.framework.set_default_dtype('float32') paddle.framework.set_default_dtype('float32')
...@@ -306,6 +305,9 @@ class TestGaussianRandomAPI(unittest.TestCase): ...@@ -306,6 +305,9 @@ class TestGaussianRandomAPI(unittest.TestCase):
out = paddle.tensor.random.gaussian([2, 3]) out = paddle.tensor.random.gaussian([2, 3])
self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64) self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)
if paddle.is_compiled_with_cuda():
paddle.set_device('gpu')
test_default_fp16()
test_default_fp64() test_default_fp64()
test_default_fp32() test_default_fp32()
...@@ -318,9 +320,8 @@ class TestStandardNormalDtype(unittest.TestCase): ...@@ -318,9 +320,8 @@ class TestStandardNormalDtype(unittest.TestCase):
def test_default_fp16(): def test_default_fp16():
paddle.framework.set_default_dtype('float16') paddle.framework.set_default_dtype('float16')
paddle.tensor.random.standard_normal([2, 3]) out = paddle.tensor.random.standard_normal([2, 3])
self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16)
self.assertRaises(TypeError, test_default_fp16)
def test_default_fp32(): def test_default_fp32():
paddle.framework.set_default_dtype('float32') paddle.framework.set_default_dtype('float32')
...@@ -332,6 +333,9 @@ class TestStandardNormalDtype(unittest.TestCase): ...@@ -332,6 +333,9 @@ class TestStandardNormalDtype(unittest.TestCase):
out = paddle.tensor.random.standard_normal([2, 3]) out = paddle.tensor.random.standard_normal([2, 3])
self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64) self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)
if paddle.is_compiled_with_cuda():
paddle.set_device('gpu')
test_default_fp16()
test_default_fp64() test_default_fp64()
test_default_fp32() test_default_fp32()
......
...@@ -108,7 +108,7 @@ class TestRandOpForDygraph(unittest.TestCase): ...@@ -108,7 +108,7 @@ class TestRandOpForDygraph(unittest.TestCase):
dim_2 = paddle.tensor.fill_constant([1], "int32", 5) dim_2 = paddle.tensor.fill_constant([1], "int32", 5)
rand(shape=[dim_1, dim_2]) rand(shape=[dim_1, dim_2])
var_shape = fluid.dygraph.to_variable(np.array([3, 4])) var_shape = paddle.to_tensor(np.array([3, 4]))
rand(var_shape) rand(var_shape)
def test_run(self): def test_run(self):
...@@ -123,9 +123,8 @@ class TestRandDtype(unittest.TestCase): ...@@ -123,9 +123,8 @@ class TestRandDtype(unittest.TestCase):
def test_default_fp16(): def test_default_fp16():
paddle.framework.set_default_dtype('float16') paddle.framework.set_default_dtype('float16')
paddle.tensor.random.rand([2, 3]) out = paddle.tensor.random.rand([2, 3])
self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16)
self.assertRaises(TypeError, test_default_fp16)
def test_default_fp32(): def test_default_fp32():
paddle.framework.set_default_dtype('float32') paddle.framework.set_default_dtype('float32')
...@@ -137,6 +136,9 @@ class TestRandDtype(unittest.TestCase): ...@@ -137,6 +136,9 @@ class TestRandDtype(unittest.TestCase):
out = paddle.tensor.random.rand([2, 3]) out = paddle.tensor.random.rand([2, 3])
self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64) self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)
if paddle.is_compiled_with_cuda():
paddle.set_device('gpu')
test_default_fp16()
test_default_fp64() test_default_fp64()
test_default_fp32() test_default_fp32()
......
...@@ -570,9 +570,8 @@ class TestUniformDtype(unittest.TestCase): ...@@ -570,9 +570,8 @@ class TestUniformDtype(unittest.TestCase):
def test_default_fp16(): def test_default_fp16():
paddle.framework.set_default_dtype('float16') paddle.framework.set_default_dtype('float16')
paddle.tensor.random.uniform([2, 3]) out = paddle.tensor.random.uniform([2, 3])
self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16)
self.assertRaises(TypeError, test_default_fp16)
def test_default_fp32(): def test_default_fp32():
paddle.framework.set_default_dtype('float32') paddle.framework.set_default_dtype('float32')
...@@ -592,6 +591,9 @@ class TestUniformDtype(unittest.TestCase): ...@@ -592,6 +591,9 @@ class TestUniformDtype(unittest.TestCase):
out = paddle.uniform([2, 3], dtype=paddle.float16) out = paddle.uniform([2, 3], dtype=paddle.float16)
self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16) self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16)
if paddle.is_compiled_with_cuda():
paddle.set_device('gpu')
test_default_fp16()
test_default_fp64() test_default_fp64()
test_default_fp32() test_default_fp32()
test_dygraph_fp16() test_dygraph_fp16()
......
...@@ -322,13 +322,14 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None): ...@@ -322,13 +322,14 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None):
distribution, with ``shape`` and ``dtype``. distribution, with ``shape`` and ``dtype``.
""" """
op_type_for_check = 'gaussian/standard_normal/randn/normal' op_type_for_check = 'gaussian/standard_normal/randn/normal'
supported_dtypes = ['float32', 'float64', 'float16', 'uint16']
if dtype is None: if dtype is None:
dtype = paddle.framework.get_default_dtype() dtype = paddle.framework.get_default_dtype()
if dtype not in ['float32', 'float64']: if dtype not in supported_dtypes:
raise TypeError( raise TypeError(
"{} only supports [float32, float64], but the default dtype is {}".format( "{} only supports {}, but the default dtype is {}".format(
op_type_for_check, dtype op_type_for_check, supported_dtypes, dtype
) )
) )
if not isinstance(dtype, core.VarDesc.VarType): if not isinstance(dtype, core.VarDesc.VarType):
...@@ -342,7 +343,7 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None): ...@@ -342,7 +343,7 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None):
) )
else: else:
check_shape(shape, op_type_for_check) check_shape(shape, op_type_for_check)
check_dtype(dtype, 'dtype', ['float32', 'float64'], op_type_for_check) check_dtype(dtype, 'dtype', supported_dtypes, op_type_for_check)
inputs = {} inputs = {}
attrs = { attrs = {
...@@ -630,12 +631,13 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None): ...@@ -630,12 +631,13 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
# [[-0.8517412, -0.4006908, 0.2551912 ], # random # [[-0.8517412, -0.4006908, 0.2551912 ], # random
# [ 0.3364414, 0.36278176, -0.16085452]] # random # [ 0.3364414, 0.36278176, -0.16085452]] # random
""" """
supported_dtypes = ['float32', 'float64', 'float16', 'uint16']
if dtype is None: if dtype is None:
dtype = paddle.framework.get_default_dtype() dtype = paddle.framework.get_default_dtype()
if dtype not in ['float32', 'float64']: if dtype not in supported_dtypes:
raise TypeError( raise TypeError(
"uniform/rand only supports [float32, float64], but the default dtype is {}".format( "uniform/rand only supports {}, but the default dtype is {}".format(
dtype supported_dtypes, dtype
) )
) )
...@@ -654,9 +656,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None): ...@@ -654,9 +656,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
) )
else: else:
check_type(shape, 'shape', (list, tuple, Variable), 'uniform/rand') check_type(shape, 'shape', (list, tuple, Variable), 'uniform/rand')
check_dtype( check_dtype(dtype, 'dtype', supported_dtypes, 'uniform/rand')
dtype, 'dtype', ('float16', 'float32', 'float64'), 'uniform/rand'
)
check_type(min, 'min', (float, int, Variable), 'uniform/rand') check_type(min, 'min', (float, int, Variable), 'uniform/rand')
check_type(max, 'max', (float, int, Variable), 'uniform/rand') check_type(max, 'max', (float, int, Variable), 'uniform/rand')
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.