diff --git a/paddle/fluid/operators/cum_op.h b/paddle/fluid/operators/cum_op.h
index 3e975420e3ef1d7dd6e442ed093da902e9d88251..e336e25f0f457d600d96c2059762b66e985a65c7 100644
--- a/paddle/fluid/operators/cum_op.h
+++ b/paddle/fluid/operators/cum_op.h
@@ -42,7 +42,9 @@ class CumKernel : public framework::OpKernel<T> {
     }
     PADDLE_ENFORCE_LT(
         axis, x_dims.size(),
-        "axis should be less than the dimensiotn of the input tensor");
+        platform::errors::InvalidArgument("axis(%d) should be less than the "
+                                          "dimension(%d) of the input tensor.",
+                                          axis, x_dims.size()));
     Out.template mutable_data<T>(context.GetPlace());

     int pre = 1;
diff --git a/paddle/fluid/operators/fsp_op.cc b/paddle/fluid/operators/fsp_op.cc
index 38d190b2f21f95a8d38b346575737df0d321b06d..d1aedf41e9a2be3b2970f4ff2032c7e2e3ce3a42 100644
--- a/paddle/fluid/operators/fsp_op.cc
+++ b/paddle/fluid/operators/fsp_op.cc
@@ -23,23 +23,35 @@ class FSPOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of FSPOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) of FSPOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of FSPOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "fsp_op");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "fsp_op");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "fsp_op");

     auto x_dims = ctx->GetInputDim("X");
     auto y_dims = ctx->GetInputDim("Y");

-    PADDLE_ENFORCE(
-        x_dims.size() == 4,
-        "The Input(X) must have shape [batch_size, channel, height, width].");
-    PADDLE_ENFORCE(
-        y_dims.size() == 4,
-        "The Input(Y) must have shape [batch_size, channel, height, width].");
-    PADDLE_ENFORCE(
-        (x_dims[2] == y_dims[2]) && (x_dims[3] == y_dims[3]),
-        "The Input(X) and Input(Y) should have the same height and width.");
+    PADDLE_ENFORCE_EQ(
+        x_dims.size(), 4UL,
+        platform::errors::InvalidArgument(
+            "The Input(X) must have shape [batch_size, channel, height, width]. "
+            "Now the dimension of 'X' is %d.",
+            x_dims.size()));
+    PADDLE_ENFORCE_EQ(
+        y_dims.size(), 4UL,
+        platform::errors::InvalidArgument(
+            "The Input(Y) must have shape [batch_size, channel, height, width]. "
+ "Now the dimension of 'Y' is %d.", + y_dims.size())); + PADDLE_ENFORCE_EQ( + x_dims[2], y_dims[2], + platform::errors::InvalidArgument( + "The Input(X)(%d) and Input(Y)(%d) should have the same height.", + x_dims[2], y_dims[2])); + PADDLE_ENFORCE_EQ( + x_dims[3], y_dims[3], + platform::errors::InvalidArgument( + "The Input(X)(%d) and Input(Y)(%d) should have the same width.", + x_dims[3], y_dims[3])); ctx->SetOutputDim("Out", {x_dims[0], x_dims[1], y_dims[1]}); ctx->ShareLoD("X", "Out"); diff --git a/paddle/fluid/operators/increment_op.cc b/paddle/fluid/operators/increment_op.cc index b359d98e2775a67c1046f47f42d6077a21e9a23e..2001d668654ceda65a5c435adbb918aff1f3f098 100644 --- a/paddle/fluid/operators/increment_op.cc +++ b/paddle/fluid/operators/increment_op.cc @@ -27,11 +27,13 @@ class IncrementOp : public framework::OperatorWithKernel { : OperatorWithKernel(type, inputs, outputs, attrs) {} void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of IncrementOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of IncrementOp should not be null."); - PADDLE_ENFORCE_EQ(1, framework::product(ctx->GetInputDim("X"))); + PADDLE_ENFORCE_EQ(framework::product(ctx->GetInputDim("X")), 1UL, + platform::errors::InvalidArgument( + "The number of elements in Input(X) should be 1." + "Now the number is %d.", + framework::product(ctx->GetInputDim("X")))); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "increment"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "increment"); ctx->SetOutputDim("Out", ctx->GetInputDim("X")); ctx->ShareLoD("X", "Out"); } diff --git a/paddle/fluid/operators/isfinite_op.cc b/paddle/fluid/operators/isfinite_op.cc index 8936a8c9a2a7da1af8df3f199091192ab18e65ce..af737ec42f631c534bb26ad38901e03d804d07b3 100644 --- a/paddle/fluid/operators/isfinite_op.cc +++ b/paddle/fluid/operators/isfinite_op.cc @@ -27,9 +27,8 @@ class OverflowOp : public framework::OperatorWithKernel { : OperatorWithKernel(type, inputs, outputs, attrs) {} void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInputs("X"), "Inputs(X) should not be null"); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of OverflowOp should not be null."); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "isfinite"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "isfinite"); ctx->SetOutputDim("Out", {1}); } diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index fb54bfa4092a3360ca5362bbd172e45aa53ae01b..e80447b53c62924da53d373153603a6c9dd45116 100755 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -1235,6 +1235,8 @@ def increment(x, value=1.0, in_place=True): counter = fluid.layers.zeros(shape=[1], dtype='float32') # [0.] fluid.layers.increment(counter) # [1.] 
""" + check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], + 'increment') helper = LayerHelper("increment", **locals()) if not in_place: out = helper.create_variable_for_type_inference(dtype=x.dtype) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 056ef7d8de458d7aff5c33e434481a801b3ef0e6..1370aedbde0058167b21facee29269892ac6dea5 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -13166,6 +13166,8 @@ def fsp_matrix(x, y): loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1) """ + check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fsp_matrix') + check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'fsp_matrix') helper = LayerHelper('fsp_matrix', **locals()) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype( input_param_name='x')) diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py index 90f979a77f6e2535f8e5bad80c2bac8ae9d24543..d77ed157e722607f56e3462810e7594ec5fb4e1d 100644 --- a/python/paddle/fluid/layers/ops.py +++ b/python/paddle/fluid/layers/ops.py @@ -16,8 +16,8 @@ from __future__ import print_function import os from .layer_function_generator import generate_layer_fn, generate_activation_fn from .. import core -from ..framework import convert_np_dtype_to_dtype_ -from ..data_feeder import check_variable_and_dtype +from ..framework import convert_np_dtype_to_dtype_, Variable +from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype __activations_noattr__ = [ 'sigmoid', @@ -136,6 +136,7 @@ _cum_sum_ = generate_layer_fn('cumsum') def cumsum(x, axis=None, exclusive=None, reverse=None): + check_type(x, 'x', (Variable), 'cumsum') locals_var = locals().copy() kwargs = dict() for name, val in locals_var.items(): diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 5da24ac12b4fb225f63e3b567afdbb85256b2ac3..4be6c29a3076efc501d2c4dbec6c28b7e26c73c9 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -1137,7 +1137,7 @@ def has_inf(x): res = fluid.layers.has_inf(data) """ - # check_type(x, 'x', (Variable), 'has_inf') + check_type(x, 'x', (Variable), 'has_inf') helper = LayerHelper("isinf", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out}) @@ -1162,7 +1162,7 @@ def has_nan(x): res = fluid.layers.has_nan(data) """ - # check_type(x, 'x', (Variable), 'has_nan') + check_type(x, 'x', (Variable), 'has_nan') helper = LayerHelper("isnan", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out}) diff --git a/python/paddle/fluid/tests/unittests/test_cumsum_op.py b/python/paddle/fluid/tests/unittests/test_cumsum_op.py index 68bb8b17c5d85b52a924bc59f4c1fb4eea4aa776..dc023df4ff0782e8362accb046ace5f333bcee71 100644 --- a/python/paddle/fluid/tests/unittests/test_cumsum_op.py +++ b/python/paddle/fluid/tests/unittests/test_cumsum_op.py @@ -17,6 +17,9 @@ from __future__ import print_function import unittest import numpy as np from op_test import OpTest +import paddle.fluid.core as core +import paddle.fluid as fluid +from paddle.fluid import compiler, Program, program_guard class TestSumOp1(OpTest): @@ -125,5 +128,16 @@ class TestSumOp8(OpTest): self.check_grad(['X'], 'Out') +class BadInputTest(unittest.TestCase): + def test_error(self): + with 
+
+            def test_bad_x():
+                data = [1, 2, 3]
+                result = fluid.layers.cumsum(data, axis=0)
+
+            self.assertRaises(TypeError, test_bad_x)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_fsp_op.py b/python/paddle/fluid/tests/unittests/test_fsp_op.py
index 3503c4ade4a5e7274011a7dcc8433672e4bd6aff..7864f4efcdf246eec4803c4fd283bdf3bcf1c6ba 100644
--- a/python/paddle/fluid/tests/unittests/test_fsp_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fsp_op.py
@@ -15,6 +15,9 @@
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid.core as core
+import paddle.fluid as fluid
+from paddle.fluid import compiler, Program, program_guard


 def fsp_matrix(a, b):
@@ -56,5 +59,28 @@ class TestFSPOp(OpTest):
         self.check_grad(['X', 'Y'], 'Out')


+class BadInputTest(unittest.TestCase):
+    def test_error(self):
+        with fluid.program_guard(fluid.Program()):
+
+            def test_bad_x():
+                data = fluid.layers.data(name='data', shape=[3, 32, 32])
+                feature_map_0 = [1, 2, 3]
+                feature_map_1 = fluid.layers.conv2d(
+                    data, num_filters=2, filter_size=3)
+                loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1)
+
+            self.assertRaises(TypeError, test_bad_x)
+
+            def test_bad_y():
+                data = fluid.layers.data(name='data', shape=[3, 32, 32])
+                feature_map_0 = fluid.layers.conv2d(
+                    data, num_filters=2, filter_size=3)
+                feature_map_1 = [1, 2, 3]
+                loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1)
+
+            self.assertRaises(TypeError, test_bad_y)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_isfinite_op.py b/python/paddle/fluid/tests/unittests/test_isfinite_op.py
index 9b682bb62ec5310f2dd3061179cc4d81669b1225..4c7add3f271a2ce8dfec9dea0bac8fad4dd7ca41 100644
--- a/python/paddle/fluid/tests/unittests/test_isfinite_op.py
+++ b/python/paddle/fluid/tests/unittests/test_isfinite_op.py
@@ -17,7 +17,7 @@ import numpy as np
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from op_test import OpTest
-import unittest
+from paddle.fluid import compiler, Program, program_guard


 class TestInf(OpTest):
@@ -116,5 +116,22 @@ class TestFP16Isfinite(TestIsfinite):
         self.dtype = np.float16


+class BadInputTest(unittest.TestCase):
+    def test_error(self):
+        with fluid.program_guard(fluid.Program()):
+
+            def test_has_inf_bad_x():
+                data = [1, 2, 3]
+                result = fluid.layers.has_inf(data)
+
+            self.assertRaises(TypeError, test_has_inf_bad_x)
+
+            def test_has_nan_bad_x():
+                data = [1, 2, 3]
+                result = fluid.layers.has_nan(data)
+
+            self.assertRaises(TypeError, test_has_nan_bad_x)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_while_op.py b/python/paddle/fluid/tests/unittests/test_while_op.py
index dab521290cc3a5396e5f077e6751d6745cd25758..207ff66a0f877598989e47a8632aa783b53bcc67 100644
--- a/python/paddle/fluid/tests/unittests/test_while_op.py
+++ b/python/paddle/fluid/tests/unittests/test_while_op.py
@@ -21,6 +21,7 @@ import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid.backward import append_backward
 import numpy
+from paddle.fluid import compiler, Program, program_guard


 class TestWhileOp(unittest.TestCase):
@@ -122,5 +123,16 @@ class TestWhileOp(unittest.TestCase):
         layers.While(cond=cond)


+class BadInputTest(unittest.TestCase):
+    def test_error(self):
+        with fluid.program_guard(fluid.Program()):
+
+            def test_bad_x():
+                x = [1, 2, 3]
+                fluid.layers.increment(x)
+
+            self.assertRaises(TypeError, test_bad_x)
+
+
 if __name__ == '__main__':
     unittest.main()
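
For reference, a minimal sketch of the user-facing behavior the new unit tests above assert. This is illustrative only, not part of the patch, and assumes a Paddle 1.x installation where the fluid API is available: passing a plain Python list where a Variable is expected now raises a TypeError eagerly, at graph-construction time, instead of failing later inside the operator.

# Illustrative sketch only (not part of the patch); assumes a Paddle 1.x
# build with the fluid API. The new check_type / check_variable_and_dtype
# calls reject non-Variable inputs as soon as the layer is invoked.
import paddle.fluid as fluid

with fluid.program_guard(fluid.Program()):
    try:
        fluid.layers.cumsum([1, 2, 3], axis=0)  # a list, not a Variable
    except TypeError as e:
        print("cumsum rejected non-Variable input:", e)

    try:
        fluid.layers.has_inf([1.0, 2.0])  # a list, not a Variable
    except TypeError as e:
        print("has_inf rejected non-Variable input:", e)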