From ae4ddbc82135a690b02c5b34b776b6af7af5a3e9 Mon Sep 17 00:00:00 2001
From: liym27 <33742067+liym27@users.noreply.github.com>
Date: Tue, 15 Oct 2019 14:40:37 +0800
Subject: [PATCH] add fill_constant input(shape) dtype check. test=develop
 (#20620)

---
 python/paddle/fluid/layers/tensor.py          | 36 +++++++++--
 .../tests/unittests/test_fill_constant_op.py  | 61 ++++++++++++++-----
 2 files changed, 78 insertions(+), 19 deletions(-)

diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index 0641739243a..fe81da1ea2e 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -524,7 +524,10 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
     The attribute `stop_gradient` of the created Tensor is setted to True.
 
     Args:
-        shape(tuple|list): Shape of the Tensor to be created.
+        shape(list|tuple|Variable): Shape of the Tensor to be created.
+            The data type is ``int32`` or ``int64``. If ``shape`` is a list or tuple,
+            its elements should be integers or Tensors with shape [1].
+            If ``shape`` is a Variable, it should be a 1-D Tensor.
         dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output tensor which
             can be float16, float32, float64, int32, int64.
         value(float): The constant value used to initialize the Tensor to be created.
@@ -544,9 +547,18 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
         .. code-block:: python
 
           import paddle.fluid as fluid
-          data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') #data1=[[0],[0]]
-          data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
-          #data1=[[5], [5]] data2=[[5], [5]]
+          # attr shape is a list that doesn't contain any Variable Tensor.
+          data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') # data1=[[0],[0]]
+          data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
+          # data1=[[0], [0]] data2=[[5], [5]]
+
+          # attr shape is a list that contains a Variable Tensor.
+          positive_2 = fluid.layers.fill_constant([1], "int32", 2)
+          data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[1.5, 1.5]
+
+          # attr shape is a Variable Tensor.
+          shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2]
+          data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]
     """
     helper = LayerHelper("fill_constant", **locals())
     if convert_dtype(dtype) not in [
@@ -585,9 +597,17 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
 
     def _get_shape_tensor(list_shape):
         new_shape_tensor = []
-        for dim in list_shape:
+        for idx, dim in enumerate(list_shape):
             if isinstance(dim, Variable):
                 dim.stop_gradient = True
+                if convert_dtype(dim.dtype) not in ['int32', 'int64']:
+                    raise TypeError(
+                        "When type of 'shape' in fill_constant is list or tuple, "
+                        "the data type of the element with type Variable must be int32 or int64, "
+                        "but received the data type of shape[%d] is %s." %
+                        (idx, convert_dtype(dim.dtype)))
+                if convert_dtype(dim.dtype) == 'int64':
+                    dim = cast(x=dim, dtype='int32')
                 new_shape_tensor.append(dim)
             else:
                 temp_out = helper.create_variable_for_type_inference('int32')
@@ -597,6 +617,12 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
 
     if isinstance(shape, Variable):
         shape.stop_gradient = True
+        if convert_dtype(shape.dtype) not in ['int32', 'int64']:
+            raise TypeError(
+                "When type of 'shape' in fill_constant is Variable, the data type of 'shape' must be int32 or int64, "
+                "but received %s." % (convert_dtype(shape.dtype)))
+        if convert_dtype(shape.dtype) == 'int64':
+            shape = cast(shape, 'int32')
         inputs["ShapeTensor"] = shape
     elif isinstance(shape, (list, tuple)):
         assert len(shape) > 0, (
diff --git a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py
index f6e5c2166bc..d6bcd6999ff 100644
--- a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py
@@ -189,33 +189,47 @@ class TestFillConstantOp1_ShapeTensor(OpTest):
         self.check_output()
 
 
-# # Test python API
+# Test python API
class TestFillConstantAPI(OpTest):
     def test_api(self):
-        positive_2 = fluid.layers.fill_constant([1], "int32", 2)
-        shape_tensor = fluid.layers.data(
-            name="shape_tensor",
-            shape=[2],
-            append_batch_size=False,
-            dtype="int32")
+        positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2)
+
+        positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2)
+        shape_tensor_int32 = fluid.data(
+            name="shape_tensor_int32", shape=[2], dtype="int32")
+
+        shape_tensor_int64 = fluid.data(
+            name="shape_tensor_int64", shape=[2], dtype="int64")
 
         out_1 = fluid.layers.fill_constant(
             shape=[1, 2], dtype="float32", value=1.1)
+
         out_2 = fluid.layers.fill_constant(
-            shape=[1, positive_2], dtype="float32", value=1.1)
+            shape=[1, positive_2_int32], dtype="float32", value=1.1)
 
         out_3 = fluid.layers.fill_constant(
-            shape=shape_tensor, dtype="float32", value=1.1)
+            shape=[1, positive_2_int64], dtype="float32", value=1.1)
+
+        out_4 = fluid.layers.fill_constant(
+            shape=shape_tensor_int32, dtype="float32", value=1.1)
+
+        out_5 = fluid.layers.fill_constant(
+            shape=shape_tensor_int64, dtype="float32", value=1.1)
 
         exe = fluid.Executor(place=fluid.CPUPlace())
-        res_1, res_2, res_3 = exe.run(
+        res_1, res_2, res_3, res_4, res_5 = exe.run(
             fluid.default_main_program(),
-            feed={"shape_tensor": np.array([1, 2]).astype("int32")},
-            fetch_list=[out_1, out_2, out_3])
+            feed={
+                "shape_tensor_int32": np.array([1, 2]).astype("int32"),
+                "shape_tensor_int64": np.array([1, 2]).astype("int64"),
+            },
+            fetch_list=[out_1, out_2, out_3, out_4, out_5])
 
         assert np.array_equal(res_1, np.full([1, 2], 1.1, dtype="float32"))
         assert np.array_equal(res_2, np.full([1, 2], 1.1, dtype="float32"))
         assert np.array_equal(res_3, np.full([1, 2], 1.1, dtype="float32"))
+        assert np.array_equal(res_4, np.full([1, 2], 1.1, dtype="float32"))
+        assert np.array_equal(res_5, np.full([1, 2], 1.1, dtype="float32"))
 
 
 class TestFillConstantOpError(OpTest):
@@ -236,7 +250,8 @@ class TestFillConstantOpError(OpTest):
                 value=5,
                 dtype='int16',
                 out=x1)
-            # The input dtype of fill_constant must be one of bool, float16,
+
+            # The argument dtype of fill_constant_op must be one of bool, float16,
             #float32, float64, int32 or int64
             x2 = fluid.layers.data(name='x2', shape=[1], dtype="int32")
 
@@ -254,17 +269,35 @@ class TestFillConstantOpError(OpTest):
                 dtype='float64',
                 out=x2)
 
-            # test Error of Shape
+            # The argument shape's type of fill_constant_op must be list, tuple or Variable.
             def test_shape_type():
                 fluid.layers.fill_constant(shape=1, dtype="float32", value=1)
 
             self.assertRaises(TypeError, test_shape_type)
 
+            # The argument shape's size of fill_constant_op must not be 0.
             def test_shape_size():
                 fluid.layers.fill_constant(shape=[], dtype="float32", value=1)
 
             self.assertRaises(AssertionError, test_shape_size)
 
+            # The shape dtype of fill_constant_op must be int32 or int64.
+            def test_shape_tensor_dtype():
+                shape = fluid.data(
+                    name="shape_tensor", shape=[2], dtype="float32")
+                fluid.layers.fill_constant(
+                    shape=shape, dtype="float32", value=1)
+
+            self.assertRaises(TypeError, test_shape_tensor_dtype)
+
+            def test_shape_tensor_list_dtype():
+                shape = fluid.data(
+                    name="shape_tensor_list", shape=[1], dtype="bool")
+                fluid.layers.fill_constant(
+                    shape=[shape, 2], dtype="float32", value=1)
+
+            self.assertRaises(TypeError, test_shape_tensor_list_dtype)
+
 
 if __name__ == "__main__":
     unittest.main()
-- 
GitLab
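
The behavior enforced by this patch can be exercised end to end. Below is a minimal sketch, not part of the patch, assuming the 1.x fluid API used above and a CPU build of Paddle; the variable names shape_int64 and bad_shape are illustrative only. An int64 shape tensor is accepted (fill_constant casts it to int32 internally), while a float32 shape tensor is rejected with TypeError at graph-construction time.

import numpy as np
import paddle.fluid as fluid

# A minimal sketch (assumed setup, not from the patch): feed an int64 shape
# tensor, which the patched fill_constant accepts and casts to int32.
shape_int64 = fluid.data(name="shape_int64", shape=[2], dtype="int64")
out = fluid.layers.fill_constant(shape=shape_int64, dtype="float32", value=1.1)

exe = fluid.Executor(fluid.CPUPlace())
res, = exe.run(fluid.default_main_program(),
               feed={"shape_int64": np.array([1, 2]).astype("int64")},
               fetch_list=[out])
assert np.array_equal(res, np.full([1, 2], 1.1, dtype="float32"))

# A float32 shape tensor now raises TypeError when the layer is built,
# before any executor run.
try:
    bad_shape = fluid.data(name="bad_shape", shape=[2], dtype="float32")
    fluid.layers.fill_constant(shape=bad_shape, dtype="float32", value=1.1)
except TypeError as e:
    print("rejected as expected:", e)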