Unverified commit b76f3b27 authored by wangchaochaohu, committed by GitHub

add test for fill_value Tensor and refine the doc of full Op (#23524)

Parent 62aff0a7
......@@ -68,14 +68,18 @@ class TestFullAPI(unittest.TestCase):
out_6 = paddle.full(
shape=shape_tensor_int64, dtype=np.float32, fill_value=1.1)
val = fluid.layers.fill_constant(shape=[1], dtype=np.float32, value=1.1)
out_7 = paddle.full(
shape=shape_tensor_int64, dtype=np.float32, fill_value=val)
exe = fluid.Executor(place=fluid.CPUPlace())
res_1, res_2, res_3, res_4, res_5, res_6 = exe.run(
res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run(
fluid.default_main_program(),
feed={
"shape_tensor_int32": np.array([1, 2]).astype("int32"),
"shape_tensor_int64": np.array([1, 2]).astype("int64"),
},
fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6])
fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7])
assert np.array_equal(res_1, np.full([1, 2], 1.1, dtype="float32"))
assert np.array_equal(res_2, np.full([1, 2], 1.1, dtype="float32"))
......@@ -83,6 +87,7 @@ class TestFullAPI(unittest.TestCase):
assert np.array_equal(res_4, np.full([1, 2], 1.2, dtype="float32"))
assert np.array_equal(res_5, np.full([1, 2], 1.1, dtype="float32"))
assert np.array_equal(res_6, np.full([1, 2], 1.1, dtype="float32"))
assert np.array_equal(res_7, np.full([1, 2], 1.1, dtype="float32"))
class TestFullOpError(unittest.TestCase):
......@@ -90,8 +95,16 @@ class TestFullOpError(unittest.TestCase):
with program_guard(Program(), Program()):
#for ci coverage
x1 = fluid.layers.data(name='x1', shape=[1], dtype="int16")
x2 = np.random.randn(1, 2).astype('int32')
self.assertRaises(
ValueError, paddle.full, shape=[1], fill_value=5, dtype='uint4')
self.assertRaises(
TypeError,
paddle.full,
shape=[1],
fill_value=5,
dtype='int32',
out=x2)
self.assertRaises(
TypeError,
paddle.full,
......
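For reference, below is a minimal standalone sketch of what the new test case above exercises: passing a 1-D Tensor produced by fluid.layers.fill_constant as fill_value to paddle.full, run under the fluid static-graph API used in this diff. The CPU place and variable names are illustrative, not part of the commit.

import numpy as np
import paddle
import paddle.fluid as fluid

# fill_value is itself a 1-D Tensor rather than a Python scalar
val = fluid.layers.fill_constant(shape=[1], dtype="float32", value=1.1)
out = paddle.full(shape=[1, 2], dtype="float32", fill_value=val)

exe = fluid.Executor(place=fluid.CPUPlace())
res, = exe.run(fluid.default_main_program(), fetch_list=[out])
assert np.array_equal(res, np.full([1, 2], 1.1, dtype="float32"))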
......@@ -346,43 +346,57 @@ def full(shape,
stop_gradient=True,
name=None):
"""
This function return a Tensor with the `fill_value` which size is same as `shape`
This Op returns a Tensor with the given `fill_value`, whose shape is specified by `shape`
Args:
shape(list|tuple|Variable): Shape of the Tensor to be created.
The data type is ``int32`` or ``int64``. If ``shape`` is a list or tuple,
the elements of it should be integers or Tensors with shape [1].
If ``shape`` is a Variable, it should be a 1-D Tensor.
value(float): The constant value used to initialize the Tensor to be created.
fill_value(bool|float16|float32|float64|int32|int64|Variable): The constant value
used to initialize the Tensor to be created. If ``fill_value`` is a Variable, it must be a 1-D Tensor.
out(Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of the operation.
If ``out`` is None, a new Variable will be created to store the result.
dtype(np.dtype|core.VarDesc.VarType|str, optional): Data type of the output Tensor,
which can be float16, float32, float64, int32 or int64. If ``dtype`` is None, the data
type of the created Tensor is float32.
device(str, optional): This parameter specifies that the Tensor is created
on the GPU or CPU.
device(str, optional): The device on which to run this Op. :attr:`device` must be
None, 'cpu' or 'gpu'. If :attr:`device` is None, the device set by the user in the
paddle program will be used. Default value is None.
stop_gradient(bool, optional): Whether to stop gradient computation at the output (out) Variable.
Default value is True.
name(str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: Tensor which is created according to ``shape``, ``fill_value`` and ``dtype``.
Raises:
TypeError: The `dtype` must be one of None, bool, float16, float32, float64, int32 and int64.
TypeError: The `out` must be a Variable.
TypeError: The `shape` must be one of Variable, list or tuple.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
data1 = paddle.full(shape=[2,1], full_value=0, dtype='int64') # data1=[[0],[0]]
data2 = paddle.full(shape=[2,1], full_value=5, dtype='int64', device='gpu') # data2=[[5],[5]]
data1 = paddle.full(shape=[2,1], fill_value=0, dtype='int64') # data1=[[0],[0]]
data2 = paddle.full(shape=[2,1], fill_value=5, dtype='int64', device='gpu') # data2=[[5],[5]]
# attr shape is a list which contains a Variable Tensor.
positive_2 = fluid.layers.fill_constant([1], "int32", 2)
data3 = paddle.full(shape=[1, positive_2], dtype='float32', full_value=1.5) # data3=[1.5, 1.5]
data3 = paddle.full(shape=[1, positive_2], dtype='float32', fill_value=1.5) # data3=[1.5, 1.5]
# attr shape is a Variable Tensor.
shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2]
data4 = paddle.full(shape=shape, dtype='bool', full_value=True) # data4=[[True,True],[True,True]]
data4 = paddle.full(shape=shape, dtype='bool', fill_value=True) # data4=[[True,True],[True,True]]
# attr fill_value is a Variable Tensor.
val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
data5 = paddle.full(shape=[2,1], fill_value=val, dtype='float32') #data5=[[2.0],[2.0]]
"""
helper = LayerHelper("full", **locals())
......@@ -394,6 +408,8 @@ def full(shape,
['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
'full')
check_type(shape, 'shape', (Variable, list, tuple), 'full')
if out is not None:
check_type(out, 'out', (Variable), 'full')
if out is None:
out = helper.create_variable_for_type_inference(dtype=dtype)
......
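As a usage note, here is a minimal sketch of the argument validation exercised by TestFullOpError above: an unsupported dtype string is rejected, and a non-Variable `out` is rejected by the check_type call added in this diff. The shapes and values are illustrative; the expected exception types follow the assertions in the test.

import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.program_guard(fluid.Program(), fluid.Program()):
    # an unsupported dtype string raises ValueError
    try:
        paddle.full(shape=[1], fill_value=5, dtype='uint4')
    except ValueError as e:
        print('dtype check:', e)

    # `out` must be a Variable; a numpy array raises TypeError
    bad_out = np.random.randn(1, 2).astype('int32')
    try:
        paddle.full(shape=[1], fill_value=5, dtype='int32', out=bad_out)
    except TypeError as e:
        print('out check:', e)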