提交 30dda5a7 编写于 作者: L liym27 提交者: Aurelius84

[cherry-pick] add fill_constant input(shape) dtype check. (#20620) (#20623)

test=release/1.6
上级 1822f86e
...@@ -326,7 +326,7 @@ paddle.fluid.layers.concat (ArgSpec(args=['input', 'axis', 'name'], varargs=None ...@@ -326,7 +326,7 @@ paddle.fluid.layers.concat (ArgSpec(args=['input', 'axis', 'name'], varargs=None
paddle.fluid.layers.sums (ArgSpec(args=['input', 'out'], varargs=None, keywords=None, defaults=(None,)), ('document', '191164436efbc1b7bccc4190a88e7de2')) paddle.fluid.layers.sums (ArgSpec(args=['input', 'out'], varargs=None, keywords=None, defaults=(None,)), ('document', '191164436efbc1b7bccc4190a88e7de2'))
paddle.fluid.layers.assign (ArgSpec(args=['input', 'output'], varargs=None, keywords=None, defaults=(None,)), ('document', '98ce6e7c3659b8377c04cecfc72c2000')) paddle.fluid.layers.assign (ArgSpec(args=['input', 'output'], varargs=None, keywords=None, defaults=(None,)), ('document', '98ce6e7c3659b8377c04cecfc72c2000'))
paddle.fluid.layers.fill_constant_batch_size_like (ArgSpec(args=['input', 'shape', 'dtype', 'value', 'input_dim_idx', 'output_dim_idx', 'force_cpu'], varargs=None, keywords=None, defaults=(0, 0, False)), ('document', '2bb57637664173fee5f654e55896aec6')) paddle.fluid.layers.fill_constant_batch_size_like (ArgSpec(args=['input', 'shape', 'dtype', 'value', 'input_dim_idx', 'output_dim_idx', 'force_cpu'], varargs=None, keywords=None, defaults=(0, 0, False)), ('document', '2bb57637664173fee5f654e55896aec6'))
paddle.fluid.layers.fill_constant (ArgSpec(args=['shape', 'dtype', 'value', 'force_cpu', 'out'], varargs=None, keywords=None, defaults=(False, None)), ('document', '66e1e468666dd47e5b2715226cebeac0')) paddle.fluid.layers.fill_constant (ArgSpec(args=['shape', 'dtype', 'value', 'force_cpu', 'out'], varargs=None, keywords=None, defaults=(False, None)), ('document', 'fa349c956a3e44297caf07a376eedaad'))
paddle.fluid.layers.argmin (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)), ('document', '53629e27597e5dfb7020aac5bc639ebb')) paddle.fluid.layers.argmin (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)), ('document', '53629e27597e5dfb7020aac5bc639ebb'))
paddle.fluid.layers.argmax (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)), ('document', 'd9a89fbedbaebd5f65897ac75ee636f3')) paddle.fluid.layers.argmax (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)), ('document', 'd9a89fbedbaebd5f65897ac75ee636f3'))
paddle.fluid.layers.argsort (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(-1, None)), ('document', '8c7966eb4b37b2272a16717cac3a876c')) paddle.fluid.layers.argsort (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(-1, None)), ('document', '8c7966eb4b37b2272a16717cac3a876c'))
......
...@@ -524,7 +524,10 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None): ...@@ -524,7 +524,10 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
The attribute `stop_gradient` of the created Tensor is set to True. The attribute `stop_gradient` of the created Tensor is set to True.
Args: Args:
shape(tuple|list): Shape of the Tensor to be created. shape(list|tuple|Variable): Shape of the Tensor to be created.
The data type is ``int32`` or ``int64`` . If ``shape`` is a list or tuple,
the elements of it should be integers or Tensors with shape [1].
If ``shape`` is a Variable, it should be a 1-D Tensor.
dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output tensor which can dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output tensor which can
be float16, float32, float64, int32, int64. be float16, float32, float64, int32, int64.
value(float): The constant value used to initialize the Tensor to be created. value(float): The constant value used to initialize the Tensor to be created.
...@@ -544,9 +547,18 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None): ...@@ -544,9 +547,18 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
.. code-block:: python .. code-block:: python
import paddle.fluid as fluid import paddle.fluid as fluid
data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') #data1=[[0],[0]] # attr shape is a list which doesn't contain Variable Tensor.
data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1) data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') # data1=[[0],[0]]
#data1=[[5], [5]] data2=[[5], [5]] data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
# data1=[[0], [0]] data2=[[5], [5]]
# attr shape is a list which contains Variable Tensor.
positive_2 = fluid.layers.fill_constant([1], "int32", 2)
data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[1.5, 1.5]
# attr shape is a Variable Tensor.
shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2]
data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]
""" """
helper = LayerHelper("fill_constant", **locals()) helper = LayerHelper("fill_constant", **locals())
if convert_dtype(dtype) not in [ if convert_dtype(dtype) not in [
...@@ -585,9 +597,17 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None): ...@@ -585,9 +597,17 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
def _get_shape_tensor(list_shape): def _get_shape_tensor(list_shape):
new_shape_tensor = [] new_shape_tensor = []
for dim in list_shape: for idx, dim in enumerate(list_shape):
if isinstance(dim, Variable): if isinstance(dim, Variable):
dim.stop_gradient = True dim.stop_gradient = True
if convert_dtype(dim.dtype) not in ['int32', 'int64']:
raise TypeError(
"When type of 'shape' in fill_constant is list or tuple, "
"the data type of the element with type Variable must be int32 or int64, "
"but received the data type of shape[%d] is %s." %
(idx, convert_dtype(dim.dtype)))
if convert_dtype(dim.dtype) == 'int64':
dim = cast(x=dim, dtype='int32')
new_shape_tensor.append(dim) new_shape_tensor.append(dim)
else: else:
temp_out = helper.create_variable_for_type_inference('int32') temp_out = helper.create_variable_for_type_inference('int32')
...@@ -597,6 +617,12 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None): ...@@ -597,6 +617,12 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
if isinstance(shape, Variable): if isinstance(shape, Variable):
shape.stop_gradient = True shape.stop_gradient = True
if convert_dtype(shape.dtype) not in ['int32', 'int64']:
raise TypeError(
"When type of 'shape' in fill_constant is Variable, the data type of 'shape' must be int32 or int64, "
"but received %s." % (convert_dtype(shape.dtype)))
if (convert_dtype(shape.dtype) == 'int64'):
shape = cast(shape, 'int32')
inputs["ShapeTensor"] = shape inputs["ShapeTensor"] = shape
elif isinstance(shape, (list, tuple)): elif isinstance(shape, (list, tuple)):
assert len(shape) > 0, ( assert len(shape) > 0, (
......
...@@ -189,33 +189,47 @@ class TestFillConstantOp1_ShapeTensor(OpTest): ...@@ -189,33 +189,47 @@ class TestFillConstantOp1_ShapeTensor(OpTest):
self.check_output() self.check_output()
# # Test python API # Test python API
class TestFillConstantAPI(OpTest): class TestFillConstantAPI(OpTest):
def test_api(self): def test_api(self):
positive_2 = fluid.layers.fill_constant([1], "int32", 2) positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2)
shape_tensor = fluid.layers.data(
name="shape_tensor", positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2)
shape=[2], shape_tensor_int32 = fluid.data(
append_batch_size=False, name="shape_tensor_int32", shape=[2], dtype="int32")
dtype="int32")
shape_tensor_int64 = fluid.data(
name="shape_tensor_int64", shape=[2], dtype="int64")
out_1 = fluid.layers.fill_constant( out_1 = fluid.layers.fill_constant(
shape=[1, 2], dtype="float32", value=1.1) shape=[1, 2], dtype="float32", value=1.1)
out_2 = fluid.layers.fill_constant( out_2 = fluid.layers.fill_constant(
shape=[1, positive_2], dtype="float32", value=1.1) shape=[1, positive_2_int32], dtype="float32", value=1.1)
out_3 = fluid.layers.fill_constant( out_3 = fluid.layers.fill_constant(
shape=shape_tensor, dtype="float32", value=1.1) shape=[1, positive_2_int64], dtype="float32", value=1.1)
out_4 = fluid.layers.fill_constant(
shape=shape_tensor_int32, dtype="float32", value=1.1)
out_5 = fluid.layers.fill_constant(
shape=shape_tensor_int64, dtype="float32", value=1.1)
exe = fluid.Executor(place=fluid.CPUPlace()) exe = fluid.Executor(place=fluid.CPUPlace())
res_1, res_2, res_3 = exe.run( res_1, res_2, res_3, res_4, res_5 = exe.run(
fluid.default_main_program(), fluid.default_main_program(),
feed={"shape_tensor": np.array([1, 2]).astype("int32")}, feed={
fetch_list=[out_1, out_2, out_3]) "shape_tensor_int32": np.array([1, 2]).astype("int32"),
"shape_tensor_int64": np.array([1, 2]).astype("int64"),
},
fetch_list=[out_1, out_2, out_3, out_4, out_5])
assert np.array_equal(res_1, np.full([1, 2], 1.1, dtype="float32")) assert np.array_equal(res_1, np.full([1, 2], 1.1, dtype="float32"))
assert np.array_equal(res_2, np.full([1, 2], 1.1, dtype="float32")) assert np.array_equal(res_2, np.full([1, 2], 1.1, dtype="float32"))
assert np.array_equal(res_3, np.full([1, 2], 1.1, dtype="float32")) assert np.array_equal(res_3, np.full([1, 2], 1.1, dtype="float32"))
assert np.array_equal(res_4, np.full([1, 2], 1.1, dtype="float32"))
assert np.array_equal(res_5, np.full([1, 2], 1.1, dtype="float32"))
class TestFillConstantOpError(OpTest): class TestFillConstantOpError(OpTest):
...@@ -236,7 +250,8 @@ class TestFillConstantOpError(OpTest): ...@@ -236,7 +250,8 @@ class TestFillConstantOpError(OpTest):
value=5, value=5,
dtype='int16', dtype='int16',
out=x1) out=x1)
# The input dtype of fill_constant must be one of bool, float16,
# The argument dtype of fill_constant_op must be one of bool, float16,
#float32, float64, int32 or int64 #float32, float64, int32 or int64
x2 = fluid.layers.data(name='x2', shape=[1], dtype="int32") x2 = fluid.layers.data(name='x2', shape=[1], dtype="int32")
...@@ -254,17 +269,35 @@ class TestFillConstantOpError(OpTest): ...@@ -254,17 +269,35 @@ class TestFillConstantOpError(OpTest):
dtype='float64', dtype='float64',
out=x2) out=x2)
# test Error of Shape # The argument shape's type of fill_constant_op must be list, tuple or Variable.
def test_shape_type(): def test_shape_type():
fluid.layers.fill_constant(shape=1, dtype="float32", value=1) fluid.layers.fill_constant(shape=1, dtype="float32", value=1)
self.assertRaises(TypeError, test_shape_type) self.assertRaises(TypeError, test_shape_type)
# The argument shape's size of fill_constant_op must not be 0.
def test_shape_size(): def test_shape_size():
fluid.layers.fill_constant(shape=[], dtype="float32", value=1) fluid.layers.fill_constant(shape=[], dtype="float32", value=1)
self.assertRaises(AssertionError, test_shape_size) self.assertRaises(AssertionError, test_shape_size)
# The shape dtype of fill_constant_op must be int32 or int64.
def test_shape_tensor_dtype():
shape = fluid.data(
name="shape_tensor", shape=[2], dtype="float32")
fluid.layers.fill_constant(
shape=shape, dtype="float32", value=1)
self.assertRaises(TypeError, test_shape_tensor_dtype)
def test_shape_tensor_list_dtype():
shape = fluid.data(
name="shape_tensor_list", shape=[1], dtype="bool")
fluid.layers.fill_constant(
shape=[shape, 2], dtype="float32", value=1)
self.assertRaises(TypeError, test_shape_tensor_list_dtype)
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册