Unverified · Commit a6ee3bcc authored by pangyoki, committed by GitHub

change uniform_random to uniform and optimize function names in unittest for random ops (#26951) (#27111)

* fix unittest format and extract common function

* change function name
Parent e8edfc7d
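
As a quick orientation before the diff, here is a minimal dygraph sketch of the renamed API that the test hunks below exercise. It assumes a PaddlePaddle build that already includes this commit; only the paddle.tensor.random.uniform name, the default-dtype behavior, and the error text are taken from this diff, the rest is illustrative.

import paddle

paddle.disable_static()

# uniform() replaces the old uniform_random() name (per the commit title).
paddle.framework.set_default_dtype('float64')
out = paddle.tensor.random.uniform([2, 3], min=-1.0, max=1.0)
print(out.dtype)  # VarType.FP64, as TestUniformDtype below expects

# A float16 default dtype is rejected with a TypeError before any op runs.
paddle.framework.set_default_dtype('float16')
try:
    paddle.tensor.random.uniform([2, 3])
except TypeError as err:
    print(err)  # "uniform/rand only supports [float32, float64], ..."

paddle.enable_static()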
@@ -239,24 +239,24 @@ class TestGaussianRandomAPI(unittest.TestCase):
     def test_default_dtype(self):
         paddle.disable_static()
-        def test_default_fp_16():
+        def test_default_fp16():
             paddle.framework.set_default_dtype('float16')
             paddle.tensor.random.gaussian([2, 3])
-        self.assertRaises(TypeError, test_default_fp_16)
+        self.assertRaises(TypeError, test_default_fp16)
-        def test_default_fp_32():
+        def test_default_fp32():
             paddle.framework.set_default_dtype('float32')
             out = paddle.tensor.random.gaussian([2, 3])
             self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP32)
-        def test_default_fp_64():
+        def test_default_fp64():
             paddle.framework.set_default_dtype('float64')
             out = paddle.tensor.random.gaussian([2, 3])
             self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)
-        test_default_fp_64()
-        test_default_fp_32()
+        test_default_fp64()
+        test_default_fp32()
         paddle.enable_static()
@@ -265,24 +265,24 @@ class TestStandardNormalDtype(unittest.TestCase):
     def test_default_dtype(self):
         paddle.disable_static()
-        def test_default_fp_16():
+        def test_default_fp16():
             paddle.framework.set_default_dtype('float16')
             paddle.tensor.random.standard_normal([2, 3])
-        self.assertRaises(TypeError, test_default_fp_16)
+        self.assertRaises(TypeError, test_default_fp16)
-        def test_default_fp_32():
+        def test_default_fp32():
             paddle.framework.set_default_dtype('float32')
             out = paddle.tensor.random.standard_normal([2, 3])
             self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP32)
-        def test_default_fp_64():
+        def test_default_fp64():
             paddle.framework.set_default_dtype('float64')
             out = paddle.tensor.random.standard_normal([2, 3])
             self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)
-        test_default_fp_64()
-        test_default_fp_32()
+        test_default_fp64()
+        test_default_fp32()
         paddle.enable_static()
@@ -120,24 +120,24 @@ class TestRandDtype(unittest.TestCase):
     def test_default_dtype(self):
         paddle.disable_static()
-        def test_default_fp_16():
+        def test_default_fp16():
             paddle.framework.set_default_dtype('float16')
             paddle.tensor.random.rand([2, 3])
-        self.assertRaises(TypeError, test_default_fp_16)
+        self.assertRaises(TypeError, test_default_fp16)
-        def test_default_fp_32():
+        def test_default_fp32():
             paddle.framework.set_default_dtype('float32')
             out = paddle.tensor.random.rand([2, 3])
             self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP32)
-        def test_default_fp_64():
+        def test_default_fp64():
             paddle.framework.set_default_dtype('float64')
             out = paddle.tensor.random.rand([2, 3])
             self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)
-        test_default_fp_64()
-        test_default_fp_32()
+        test_default_fp64()
+        test_default_fp32()
         paddle.enable_static()
@@ -540,24 +540,24 @@ class TestUniformDtype(unittest.TestCase):
     def test_default_dtype(self):
         paddle.disable_static()
-        def test_default_fp_16():
+        def test_default_fp16():
             paddle.framework.set_default_dtype('float16')
             paddle.tensor.random.uniform([2, 3])
-        self.assertRaises(TypeError, test_default_fp_16)
+        self.assertRaises(TypeError, test_default_fp16)
-        def test_default_fp_32():
+        def test_default_fp32():
             paddle.framework.set_default_dtype('float32')
             out = paddle.tensor.random.uniform([2, 3])
             self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP32)
-        def test_default_fp_64():
+        def test_default_fp64():
             paddle.framework.set_default_dtype('float64')
             out = paddle.tensor.random.uniform([2, 3])
             self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)
-        test_default_fp_64()
-        test_default_fp_32()
+        test_default_fp64()
+        test_default_fp32()
         paddle.enable_static()
@@ -388,8 +388,8 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
         dtype = paddle.framework.get_default_dtype()
         if dtype not in ['float32', 'float64']:
             raise TypeError(
-                "uniform only supports [float32, float64], but the default dtype is %s"
-                % dtype)
+                "uniform/rand only supports [float32, float64], but the default dtype is {}".
+                format(dtype))
     if not isinstance(dtype, core.VarDesc.VarType):
         dtype = convert_np_dtype_to_dtype_(dtype)
@@ -400,15 +400,15 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
             float(min), 'max',
             float(max), 'seed', seed, 'dtype', dtype)
-    check_type(shape, 'shape', (list, tuple, Variable), 'uniform_random/rand')
-    check_dtype(dtype, 'dtype', ('float32', 'float64'), 'uniform_random/rand')
+    check_type(shape, 'shape', (list, tuple, Variable), 'uniform/rand')
+    check_dtype(dtype, 'dtype', ('float32', 'float64'), 'uniform/rand')
     inputs = dict()
    attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype}
     utils.get_shape_tensor_inputs(
-        inputs=inputs, attrs=attrs, shape=shape, op_type='uniform_random/rand')
+        inputs=inputs, attrs=attrs, shape=shape, op_type='uniform/rand')
-    helper = LayerHelper("uniform_random", **locals())
+    helper = LayerHelper("uniform", **locals())
     out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="uniform_random", inputs=inputs, attrs=attrs,
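
For the static-graph branch touched in the last hunk, a hedged usage sketch: the type checks now report under the name 'uniform/rand', while the appended operator remains "uniform_random". The paddle.static Program/Executor calls below are standard Paddle 2.x API used only for illustration and are not part of this diff.

import paddle

paddle.enable_static()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    # Builds a "uniform_random" op; errors from check_type/check_dtype
    # are now reported under the 'uniform/rand' name.
    x = paddle.tensor.random.uniform([2, 3], dtype='float32', min=0.0, max=1.0)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
out, = exe.run(main_prog, fetch_list=[x])
print(out.shape)  # (2, 3)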