未验证 提交 6a09b8f1 编写于 作者: Z zhupengyang 提交者: GitHub

erase Raises and refine docs of random functions (#26901)

上级 559d9f2b
......@@ -132,6 +132,28 @@ def check_dtype(input_dtype,
extra_message))
def check_shape(shape,
                op_name,
                expected_shape_type=(list, tuple, Variable),
                expected_element_type=(int, Variable),
                expected_tensor_dtype=('int32', 'int64')):
    """
    Validate the ``shape`` argument passed to operator ``op_name``.

    Accepts a shape given as a list/tuple of ints, a list/tuple that mixes
    ints with Tensors (``Variable``), or a single 1-D Tensor.  Delegates the
    actual checks to ``check_type``/``check_dtype``, which raise on mismatch
    (presumably TypeError — see the randint error tests in this change;
    confirm against check_type's implementation).

    Args:
        shape: The shape value to validate.
        op_name (str): Operator name used in error messages.
        expected_shape_type (tuple): Allowed types for ``shape`` itself.
        expected_element_type (tuple|None): Allowed types for each element
            when ``shape`` is a list/tuple; ``None`` skips element checks.
        expected_tensor_dtype (tuple|None): Allowed dtypes for any Tensor
            appearing as ``shape`` or as one of its elements; ``None`` skips
            dtype checks.

    Returns:
        None. Raises via the delegated checkers on invalid input.
    """
    # See NOTE [ Why skip dynamic graph check ]
    if in_dygraph_mode():
        return
    # Check the container type of `shape` first.
    check_type(shape, 'shape', expected_shape_type, op_name)
    # When `shape` is a list/tuple, validate each element; a Tensor `shape`
    # has no Python-level elements to iterate.
    if expected_element_type is not None and not isinstance(shape, Variable):
        for item in shape:
            check_type(item, 'element of shape', expected_element_type, op_name)
            # A Tensor element must carry an allowed integer dtype.
            if expected_tensor_dtype is not None and isinstance(item, Variable):
                check_dtype(
                    item.dtype, 'element of shape', expected_tensor_dtype,
                    op_name,
                    'If element of shape is Tensor, its data type should be {}'.
                    format(', '.join(expected_tensor_dtype)))
    # When `shape` itself is a single Tensor, check its dtype directly.
    if expected_tensor_dtype is not None and isinstance(shape, Variable):
        check_dtype(shape.dtype, 'shape', expected_tensor_dtype, op_name)
class DataToLoDTensorConverter(object):
def __init__(self, place, lod_level, shape, dtype):
self.place = place
......
......@@ -10610,7 +10610,7 @@ def gaussian_random(shape,
dtype = convert_np_dtype_to_dtype_(dtype)
if in_dygraph_mode():
shape = utils._convert_shape_to_list(shape)
shape = utils.convert_shape_to_list(shape)
return core.ops.gaussian_random('shape', shape, 'mean',
float(mean), 'std',
float(std), 'seed', seed, 'dtype',
......@@ -10627,7 +10627,7 @@ def gaussian_random(shape,
'dtype': dtype,
'use_mkldnn': False
}
utils._get_shape_tensor_inputs(
utils.get_shape_tensor_inputs(
inputs=inputs,
attrs=attrs,
shape=shape,
......@@ -15116,7 +15116,7 @@ def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0,
dtype = convert_np_dtype_to_dtype_(dtype)
if in_dygraph_mode():
shape = utils._convert_shape_to_list(shape)
shape = utils.convert_shape_to_list(shape)
return core.ops.uniform_random('shape', shape, 'min',
float(min), 'max',
float(max), 'seed', seed, 'dtype', dtype)
......@@ -15126,7 +15126,7 @@ def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0,
inputs = dict()
attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype}
utils._get_shape_tensor_inputs(
utils.get_shape_tensor_inputs(
inputs=inputs, attrs=attrs, shape=shape, op_type='uniform_random/rand')
helper = LayerHelper("uniform_random", **locals())
......
......@@ -694,7 +694,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
attrs['str_value'] = str(float(value))
if in_dygraph_mode():
shape = utils._convert_shape_to_list(shape)
shape = utils.convert_shape_to_list(shape)
if out is None:
out = _varbase_creator(dtype=dtype)
......@@ -731,7 +731,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
'fill_constant')
helper = LayerHelper("fill_constant", **locals())
utils._get_shape_tensor_inputs(
utils.get_shape_tensor_inputs(
inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant')
if out is None:
......
......@@ -282,7 +282,7 @@ def _contain_var(list_or_tuple):
return False
def _get_shape_tensor_inputs(inputs, attrs, shape, op_type):
def get_shape_tensor_inputs(inputs, attrs, shape, op_type):
from .tensor import fill_constant, cast
def _get_attr_shape(list_shape):
......@@ -347,7 +347,7 @@ def _convert_to_tensor_list(old_list, dtype="int32"):
return new_list_tensor
def _convert_shape_to_list(shape):
def convert_shape_to_list(shape):
"""
Convert shape(list, tuple, variable) to list in imperative mode
"""
......
......@@ -241,18 +241,18 @@ class TestGaussianRandomAPI(unittest.TestCase):
def test_default_fp_16():
paddle.framework.set_default_dtype('float16')
paddle.tensor.random.gaussian_random([2, 3])
paddle.tensor.random.gaussian([2, 3])
self.assertRaises(TypeError, test_default_fp_16)
def test_default_fp_32():
paddle.framework.set_default_dtype('float32')
out = paddle.tensor.random.gaussian_random([2, 3])
out = paddle.tensor.random.gaussian([2, 3])
self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP32)
def test_default_fp_64():
paddle.framework.set_default_dtype('float64')
out = paddle.tensor.random.gaussian_random([2, 3])
out = paddle.tensor.random.gaussian([2, 3])
self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)
test_default_fp_64()
......
......@@ -58,6 +58,11 @@ class TestRandintOpError(unittest.TestCase):
self.assertRaises(TypeError, paddle.randint, 5, dtype='float32')
self.assertRaises(ValueError, paddle.randint, 5, 5)
self.assertRaises(ValueError, paddle.randint, -5)
self.assertRaises(TypeError, paddle.randint, 5, shape=['2'])
shape_tensor = paddle.static.data('X', [1])
self.assertRaises(TypeError, paddle.randint, 5, shape=shape_tensor)
self.assertRaises(
TypeError, paddle.randint, 5, shape=[shape_tensor])
class TestRandintOp_attr_tensorlist(OpTest):
......
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册