diff --git a/paddle/fluid/operators/flatten_op.cc b/paddle/fluid/operators/flatten_op.cc
index 51644c306333f61eb3cabb303c02b4c4c19cbddf..067e00f35bc4605ce004744a276a9588cb9e425d 100644
--- a/paddle/fluid/operators/flatten_op.cc
+++ b/paddle/fluid/operators/flatten_op.cc
@@ -29,17 +29,17 @@ class FlattenOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input (X) of Flatten op should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output (Output) of Flatten op should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Flatten");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Flatten");
     const auto &axis = ctx->Attrs().Get<int>("axis");
     const auto &in_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE_GE(axis, 0,
-                      "The axis should be greater than or equal to 0.");
+                      platform::errors::InvalidArgument(
+                          "The axis should be greater than or equal to 0."));
     PADDLE_ENFORCE_LE(
         axis, in_dims.size(),
-        "The axis should be less than or equal to input tensor's rank.");
+        platform::errors::InvalidArgument(
+            "The axis should be less than or equal to input tensor's rank."));

     const auto &out_dims = GetOutputShape(axis, in_dims);
     ctx->SetOutputDim("Out", framework::make_ddim(out_dims));
@@ -161,17 +161,17 @@ class Flatten2Op : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input (X) of Flatten op should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output (Output) of Flatten op should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Flatten2");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Flatten2");
     const auto &axis = ctx->Attrs().Get<int>("axis");
     const auto &in_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE_GE(axis, 0,
-                      "The axis should be greater than or equal to 0.");
+                      platform::errors::InvalidArgument(
+                          "The axis should be greater than or equal to 0."));
     PADDLE_ENFORCE_LE(
         axis, in_dims.size(),
-        "The axis should be less than or equal to input tensor's rank.");
+        platform::errors::InvalidArgument(
+            "The axis should be less than or equal to input tensor's rank."));

     const auto &out_dims = FlattenOp::GetOutputShape(axis, in_dims);
     ctx->SetOutputDim("Out", framework::make_ddim(out_dims));
@@ -181,8 +181,7 @@ class Flatten2Op : public framework::OperatorWithKernel {
       ctx->ShareLoD("X", "Out");
     }

-    PADDLE_ENFORCE_EQ(ctx->HasOutput("XShape"), true,
-                      "Output (XShape) of Flatten op should not be null.");
+    OP_INOUT_CHECK(ctx->HasOutput("XShape"), "Output", "XShape", "Flatten2");
     std::vector<int64_t> xshape_dims(in_dims.size() + 1);
     xshape_dims[0] = 0;
     for (int i = 0; i < in_dims.size(); ++i) {
@@ -223,10 +222,10 @@ class Flatten2GradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE_EQ(context->HasInput("XShape"), true,
-                      "Input(XShape) shouldn't be null.");
-    PADDLE_ENFORCE_EQ(context->HasInput(framework::GradVarName("Out")), true,
-                      "Input(Out@GRAD) shouldn't be null.");
+    OP_INOUT_CHECK(context->HasInput("XShape"), "Input", "XShape",
+                   "Flatten2Grad");
+    OP_INOUT_CHECK(context->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "Flatten2Grad");
     auto xshape_dims = context->GetInputDim("XShape");
     auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size());
     context->SetOutputDim(framework::GradVarName("X"), x_dims);
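The `axis` bounds enforced above come from `GetOutputShape`, which folds every dimension before `axis` into the first output dimension and everything from `axis` on into the second. A minimal numpy sketch of that shape rule (an illustration of the semantics, not the C++ source; the helper name is made up):

```python
import numpy as np

def flatten_out_shape(in_shape, axis):
    # Dims [0, axis) collapse into the first output dim and dims
    # [axis, rank) into the second, so axis must lie in [0, rank];
    # these are exactly the two PADDLE_ENFORCE bounds checked above.
    assert 0 <= axis <= len(in_shape)
    outer = int(np.prod(in_shape[:axis], dtype=np.int64))
    inner = int(np.prod(in_shape[axis:], dtype=np.int64))
    return [outer, inner]

print(flatten_out_shape([4, 4, 3], axis=2))  # [16, 3], as in the flatten docstring
print(flatten_out_shape([4, 4, 3], axis=0))  # [1, 48]
```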
"Flatten2Grad"); auto xshape_dims = context->GetInputDim("XShape"); auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size()); context->SetOutputDim(framework::GradVarName("X"), x_dims); diff --git a/paddle/fluid/operators/hash_op.cc b/paddle/fluid/operators/hash_op.cc index 5ef91dcb66638d5786e9769802bfc3790ffc6079..b6017a6eafc03921883e9427732cf0c2c769bcab 100644 --- a/paddle/fluid/operators/hash_op.cc +++ b/paddle/fluid/operators/hash_op.cc @@ -26,14 +26,13 @@ class HashOp : public framework::OperatorWithKernel { : OperatorWithKernel(type, inputs, outputs, attrs) {} void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of HashOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of HashOp should not be null."); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Hash"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Hash"); auto dims = ctx->GetInputDim("X"); PADDLE_ENFORCE_EQ(dims.size(), 2UL, - "The input of hash_op's dimensions must be 2"); + platform::errors::InvalidArgument( + "The input of hash_op's dimensions must be 2")); std::vector out_dims; int num_hash = ctx->Attrs().Get("num_hash"); HashOutputSize(dims, out_dims, num_hash); diff --git a/paddle/fluid/operators/is_empty_op.cc b/paddle/fluid/operators/is_empty_op.cc index 1f78675060ecb4335261f96ba0c36ba16aa1f052..2750367dc773925e998507db4690e39c15f985d0 100644 --- a/paddle/fluid/operators/is_empty_op.cc +++ b/paddle/fluid/operators/is_empty_op.cc @@ -25,10 +25,8 @@ class IsEmptyOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of IsEmptyOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of IsEmptyOp should not be null."); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "IsEmpty"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "IsEmpty"); ctx->SetOutputDim("Out", {1}); } diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 8b10d3a438e1ec57f79ee6677dcac9f4872f44b1..a8efe99278b4a81cb88e9bb9ab852b23e97f0182 100755 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -26,7 +26,7 @@ import numpy import warnings import six from functools import reduce, partial -from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type +from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype from ... 
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index 8b10d3a438e1ec57f79ee6677dcac9f4872f44b1..a8efe99278b4a81cb88e9bb9ab852b23e97f0182 100755
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -26,7 +26,7 @@ import numpy
 import warnings
 import six
 from functools import reduce, partial
-from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type
+from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 from ... import compat as cpt
 from ..backward import _infer_var_data_type_shape_
@@ -3725,15 +3725,15 @@ def is_empty(x, cond=None):
             # or:
             # fluid.layers.is_empty(x=input, cond=res)
     """
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             'is_empty')
+    check_type(cond, 'cond', (Variable, type(None)), 'is_empty')
     helper = LayerHelper("is_empty", **locals())
     if cond is None:
         cond = helper.create_variable_for_type_inference(dtype='bool')
         cond.stop_gradient = True
-    elif not isinstance(cond, Variable):
-        raise TypeError("cond takes a variable")
-    elif cond.dtype != 'bool':
-        raise TypeError("The data type of cond must be bool")
-
+    else:
+        check_dtype(cond.dtype, 'cond', ['bool'], 'is_empty')
     helper.append_op(
         type='is_empty', inputs={'X': [x]}, outputs={'Out': [cond]})
     return cond
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 133bcc5381d5970bf33c60e4835f701354d59a20..af5d7c80b1562b1ffee4d6e1678f22a14318ba56 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -9628,6 +9628,8 @@ def flatten(x, axis=1, name=None):
             out = fluid.layers.flatten(x=x, axis=2)
             # out shape is [16, 3]
     """
+    check_variable_and_dtype(
+        x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64'], 'flatten')
     helper = LayerHelper('flatten', **locals())

     if not (isinstance(x, Variable)):
@@ -12466,6 +12468,9 @@ def hash(input, hash_size, num_hash=1, name=None):
             #   [386]
             #   [901]]]
     """
+    check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'hash')
+    check_type(hash_size, 'hash_size', int, 'hash')
+    check_type(num_hash, 'num_hash', int, 'hash')
     helper = LayerHelper('hash', **locals())
     out = helper.create_variable_for_type_inference(
         helper.input_dtype(), stop_gradient=True)
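Note that `check_type` validates Python types rather than tensor dtypes, so `hash_size` and `num_hash` are checked against `int` (a dtype-string list such as `['int32', 'int64']` would make `isinstance` itself throw). The practical effect of all these guards is fail-fast behavior: bad arguments raise `TypeError` when the layer is built, instead of surfacing as an opaque error from C++ `InferShape`. A quick hypothetical session mirroring the unit tests added below:

```python
import numpy as np
import paddle.fluid as fluid

with fluid.program_guard(fluid.Program(), fluid.Program()):
    # A raw numpy array is rejected up front by check_variable_and_dtype.
    try:
        fluid.layers.hash(input=np.zeros((8, 1), dtype='int32'),
                          hash_size=2**32)
    except TypeError as e:
        print('input:', e)

    # A non-int hash_size is rejected by check_type.
    x = fluid.layers.data(name='x', shape=[1], dtype='int32', lod_level=1)
    try:
        fluid.layers.hash(input=x, hash_size=1024.5)
    except TypeError as e:
        print('hash_size:', e)
```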
+                x2 = fluid.layers.data(
+                    name='x2', shape=[3, 2, 4, 5], dtype='float16')
+                fluid.layers.flatten(x2, axis=1)
+
+            self.assertRaises(TypeError, test_type)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_hash_op.py b/python/paddle/fluid/tests/unittests/test_hash_op.py
index 75af02bd5f46ea61f0bf4bc2494cb941fb1f64b4..3fe8bca2f192e959e2364912af657134ee123443 100644
--- a/python/paddle/fluid/tests/unittests/test_hash_op.py
+++ b/python/paddle/fluid/tests/unittests/test_hash_op.py
@@ -15,6 +15,7 @@
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid


 class TestHashOp(OpTest):
@@ -102,5 +103,41 @@ class TestHashOp3(TestHashOp):
         self.check_output()


+class TestHashOpError(unittest.TestCase):
+    def test_errors(self):
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            input_data = np.random.randint(0, 10, (8, 1)).astype("int32")
+
+            def test_Variable():
+                # the input type must be Variable
+                fluid.layers.hash(input=input_data, hash_size=2**32)
+
+            self.assertRaises(TypeError, test_Variable)
+
+            def test_type():
+                # dtype must be int32, int64.
+                x2 = fluid.layers.data(
+                    name='x2', shape=[1], dtype="float32", lod_level=1)
+                fluid.layers.hash(input=x2, hash_size=2**32)
+
+            self.assertRaises(TypeError, test_type)
+
+            def test_hash_size_type():
+                # hash_size must be int.
+                x3 = fluid.layers.data(
+                    name='x3', shape=[1], dtype="int32", lod_level=1)
+                fluid.layers.hash(input=x3, hash_size=1024.5)
+
+            self.assertRaises(TypeError, test_hash_size_type)
+
+            def test_num_hash_type():
+                # num_hash must be int.
+                x4 = fluid.layers.data(
+                    name='x4', shape=[1], dtype="int32", lod_level=1)
+                fluid.layers.hash(input=x4, hash_size=2**32, num_hash=2.5)
+
+            self.assertRaises(TypeError, test_num_hash_type)
+
+
 if __name__ == "__main__":
     unittest.main()
+ x4 = fluid.layers.data(name="x4", shape=[3, 2], dtype="float32") + cond = fluid.layers.data( + name="cond", shape=[1], dtype="float32") + fluid.layers.is_empty(x=x4, cond=cond) + + self.assertRaises(TypeError, test_cond_type) + + if __name__ == "__main__": unittest.main()