diff --git a/paddle/fluid/operators/gaussian_random_op.cc b/paddle/fluid/operators/gaussian_random_op.cc
index c70d5b8bc7569c38cbc003aca7c62dc503df11cf..e99da6e3412ba994570c106694df077e8cd14160 100644
--- a/paddle/fluid/operators/gaussian_random_op.cc
+++ b/paddle/fluid/operators/gaussian_random_op.cc
@@ -50,16 +50,20 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of GaussianRandomOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "GaussianRandom");
+
     auto shape = ctx->Attrs().Get<std::vector<int>>("shape");
     std::vector<int64_t> temp;
     temp.reserve(shape.size());
     for (auto dim : shape) {
       temp.push_back(static_cast<int64_t>(dim));
     }
-    PADDLE_ENFORCE(shape.size() > 0UL,
-                   "shape can be one int or array. shape must be set.");
+    PADDLE_ENFORCE_GT(
+        shape.size(), 0UL,
+        platform::errors::InvalidArgument(
+            "Attribute(shape) of GaussianRandomOp must be set "
+            "and shape.size() > 0, but received shape.size() is %d",
+            shape.size()));
     ctx->SetOutputDim("Out", framework::make_ddim(temp));
   }
 
diff --git a/paddle/fluid/operators/mean_iou_op.cc b/paddle/fluid/operators/mean_iou_op.cc
index 4bf46edad61ab988801ccd59589881a939124ae5..ae720e609e1ea2821bc05d1de11d4031848891bd 100644
--- a/paddle/fluid/operators/mean_iou_op.cc
+++ b/paddle/fluid/operators/mean_iou_op.cc
@@ -22,16 +22,14 @@ class MeanIoUOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Predictions"),
-                   "Input (Predictions) of MeanIoU op should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Labels"),
-                   "Input (labels) of MeanIoU op should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("OutMeanIou"),
-                   "Output (OutMeanIou) of MeanIoU op should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("OutWrong"),
-                   "Output (OutWrong) of MeanIoU op should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("OutCorrect"),
-                   "Output (OutWrong) of MeanIoU op should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Predictions"), "Input", "Predictions",
+                   "MeanIoU");
+    OP_INOUT_CHECK(ctx->HasInput("Labels"), "Input", "Labels", "MeanIoU");
+    OP_INOUT_CHECK(ctx->HasOutput("OutMeanIou"), "Output", "OutMeanIou",
+                   "MeanIoU");
+    OP_INOUT_CHECK(ctx->HasOutput("OutWrong"), "Output", "OutWrong", "MeanIoU");
+    OP_INOUT_CHECK(ctx->HasOutput("OutCorrect"), "Output", "OutCorrect",
+                   "MeanIoU");
 
     int64_t num_classes =
         static_cast<int64_t>(ctx->Attrs().Get<int>("num_classes"));
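Reviewer note: the two hunks above move gaussian_random's shape validation and mean_iou's input/output presence checks onto the structured error path (`OP_INOUT_CHECK` / `platform::errors::InvalidArgument`). Below is a minimal sketch of how this surfaces through the Python API, assuming a Paddle 1.x build with this patch applied; the variable names are illustrative only.

```python
import numpy as np
import paddle.fluid as fluid

main_prog, startup_prog = fluid.Program(), fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    # int32/int64 inputs satisfy the dtype contract added to mean_iou in
    # nn.py below; a float input would now fail fast with a TypeError.
    pred = fluid.layers.data(name='pred', shape=[4], dtype='int32')
    label = fluid.layers.data(name='label', shape=[4], dtype='int32')
    miou, wrong, correct = fluid.layers.mean_iou(pred, label, num_classes=5)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_prog)
feed = {'pred': np.zeros((1, 4), dtype='int32'),
        'label': np.zeros((1, 4), dtype='int32')}
print(exe.run(main_prog, feed=feed, fetch_list=[miou])[0])
```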
be null."); - PADDLE_ENFORCE(ctx->HasOutput("Correct"), - "Output (Correct) of AccuracyOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Total"), - "Output (Total) of AccuracyOp should not be null."); + PADDLE_ENFORCE_EQ( + ctx->HasInput("Out"), true, + platform::errors::NotFound("Input (Out) of AccuracyOp is not found.")); + PADDLE_ENFORCE_EQ(ctx->HasInput("Indices"), true, + platform::errors::NotFound( + "Input (Indices) of AccuracyOp is not found.")); + PADDLE_ENFORCE_EQ(ctx->HasInput("Label"), true, + platform::errors::NotFound( + "Input (Label) of AccuracyOp is not found.")); + PADDLE_ENFORCE_EQ(ctx->HasOutput("Accuracy"), true, + platform::errors::NotFound( + "Output (Accuracy) of AccuracyOp is not found.")); + PADDLE_ENFORCE_EQ(ctx->HasOutput("Correct"), true, + platform::errors::NotFound( + "Output (Correct) of AccuracyOp is not found.")); + PADDLE_ENFORCE_EQ(ctx->HasOutput("Total"), true, + platform::errors::NotFound( + "Output (Total) of AccuracyOp is not found.")); + + OP_INOUT_CHECK(ctx->HasInput("Out"), "Input", "Out", "Accuracy"); + OP_INOUT_CHECK(ctx->HasInput("Indices"), "Input", "Indices", "Accuracy"); + OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "Accuracy"); + OP_INOUT_CHECK(ctx->HasOutput("Accuracy"), "Output", "Accuracy", + "Accuracy"); + OP_INOUT_CHECK(ctx->HasOutput("Correct"), "Output", "Correct", "Accuracy"); + OP_INOUT_CHECK(ctx->HasOutput("Total"), "Output", "Total", "Accuracy"); auto inference_dim = ctx->GetInputDim("Out"); auto label_dim = ctx->GetInputDim("Label"); @@ -42,22 +56,26 @@ class AccuracyOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ( label_dim.size(), 2, - "ShapeError: label's dimensions of AccuracyOp must be 2. " - "But received label's dimensions = %d, label's shape = [%s]", - label_dim.size(), label_dim); + platform::errors::InvalidArgument( + "ShapeError: label's dimensions of AccuracyOp must be 2. " + "But received label's dimensions = %d, label's shape = [%s]", + label_dim.size(), label_dim)); if (ctx->IsRuntime()) { PADDLE_ENFORCE_EQ(label_dim[1], 1, - "ShapeError: label's second dimension of " - "AccuracyOp must be 1. But received label's " - "second dimension is = %d, label's shape = [%s]", - label_dim[1], label_dim); + platform::errors::InvalidArgument( + "ShapeError: label's second dimension of " + "AccuracyOp must be 1. But received label's " + "second dimension is = %d, label's shape = [%s]", + label_dim[1], label_dim)); PADDLE_ENFORCE_EQ( inference_dim[0], label_dim[0], - "ShapeError: the output's num_rows of AccuracyOp must be" - " the same as label's num_rows. But received output's " - "shape = [%s], label's shape = [%s], output's num_rows = %d, label's " - "num_rows = %d", - inference_dim, label_dim, inference_dim[0], label_dim[0]); + platform::errors::InvalidArgument( + "ShapeError: the output's num_rows of AccuracyOp must be" + " the same as label's num_rows. 
diff --git a/paddle/fluid/operators/metrics/accuracy_op.cu b/paddle/fluid/operators/metrics/accuracy_op.cu
index 4682940f7e15bc8af5dcda24ea058ac7351887c6..ab5ee745aaf8b68dc5bc9ef06f2be05d7fbbfb99 100644
--- a/paddle/fluid/operators/metrics/accuracy_op.cu
+++ b/paddle/fluid/operators/metrics/accuracy_op.cu
@@ -56,8 +56,6 @@ template <typename T>
 class AccuracyOpCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                   "It must use CUDAPlace.");
     auto* inference = ctx.Input<Tensor>("Out");
     auto* indices = ctx.Input<Tensor>("Indices");
     auto* label = ctx.Input<Tensor>("Label");
diff --git a/paddle/fluid/operators/smooth_l1_loss_op.cc b/paddle/fluid/operators/smooth_l1_loss_op.cc
index 90c4cd96d689a7fea55cae2b5cf1daf4cef22cc9..ea11529c905dc143a4a21e096aa473f89646ca2d 100644
--- a/paddle/fluid/operators/smooth_l1_loss_op.cc
+++ b/paddle/fluid/operators/smooth_l1_loss_op.cc
@@ -23,8 +23,8 @@ class SmoothL1LossOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SmoothL1Loss");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "SmoothL1Loss");
 
     auto x_dims = ctx->GetInputDim("X");
     auto y_dims = ctx->GetInputDim("Y");
@@ -34,14 +34,23 @@ class SmoothL1LossOp : public framework::OperatorWithKernel {
       check = false;
     }
     if (check) {
-      PADDLE_ENFORCE_EQ(x_dims, y_dims);
+      PADDLE_ENFORCE_EQ(
+          x_dims, y_dims,
+          platform::errors::InvalidArgument(
+              "Input(X) and Input(Y) of SmoothL1LossOp should "
+              "have the same size, but received X dim is %s, Y dim is %s",
+              x_dims.to_str(), y_dims.to_str()));
     }
     PADDLE_ENFORCE_GE(x_dims.size(), 2,
-                      "The tensor rank of Input(X) should not be less than 2.");
+                      platform::errors::InvalidArgument(
+                          "The tensor rank of Input(X) of SmoothL1LossOp "
+                          "should not be less than 2, but received %d.",
+                          x_dims.size()));
     if (ctx->HasInput("InsideWeight")) {
-      PADDLE_ENFORCE(ctx->HasInput("OutsideWeight"),
-                     "If weights are provided, must specify both "
-                     "inside and outside weights.");
+      PADDLE_ENFORCE_EQ(ctx->HasInput("OutsideWeight"), true,
+                        platform::errors::InvalidArgument(
+                            "If weights are provided, must specify both "
+                            "inside and outside weights."));
       auto dims = ctx->GetInputDim("InsideWeight");
       bool check = true;
       if ((!ctx->IsRuntime()) &&
@@ -49,7 +58,12 @@ class SmoothL1LossOp : public framework::OperatorWithKernel {
         check = false;
       }
       if (check) {
-        PADDLE_ENFORCE_EQ(dims, x_dims);
+        PADDLE_ENFORCE_EQ(x_dims, dims,
+                          platform::errors::InvalidArgument(
+                              "Input(X) and Input(InsideWeight) of "
+                              "SmoothL1LossOp should have the same size, but "
+                              "received X dim is %s, InsideWeight dim is %s",
+                              x_dims.to_str(), dims.to_str()));
       }
 
       dims = ctx->GetInputDim("OutsideWeight");
@@ -59,7 +73,12 @@ class SmoothL1LossOp : public framework::OperatorWithKernel {
         check = false;
       }
       if (check) {
-        PADDLE_ENFORCE_EQ(dims, x_dims);
+        PADDLE_ENFORCE_EQ(x_dims, dims,
+                          platform::errors::InvalidArgument(
+                              "Input(X) and Input(OutsideWeight) of "
+                              "SmoothL1LossOp should have the same size, but "
+                              "received X dim is %s, OutsideWeight dim is %s",
+                              x_dims.to_str(), dims.to_str()));
       }
     }
 
@@ -133,14 +152,23 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel {
     auto in_dims = ctx->GetInputDim("Diff");
     auto out_dims = ctx->GetInputDim(framework::GradVarName("Out"));
 
-    PADDLE_ENFORCE_GE(out_dims.size(), 2,
-                      "The tensor rank of Input(Out@Grad) should be 2.");
+    PADDLE_ENFORCE_GE(
+        out_dims.size(), 2,
+        platform::errors::InvalidArgument(
+            "The tensor rank of Input(Out@Grad) should be 2, but received %d.",
+            out_dims.size()));
     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(out_dims[0], in_dims[0],
-                        "The 1st dimension of Input(Out@Grad) must be "
-                        "same as input.");
+      PADDLE_ENFORCE_EQ(
+          out_dims[0], in_dims[0],
+          platform::errors::InvalidArgument(
+              "The 1st dimension of Input(Out@Grad) must be the "
+              "same as input in SmoothL1LossGradOp, but received %d and %d.",
+              out_dims[0], in_dims[0]));
       PADDLE_ENFORCE_EQ(out_dims[1], 1,
-                        "The 2nd dimension of Input(Out@Grad) must be 1.");
+                        platform::errors::InvalidArgument(
+                            "The 2nd dimension of Input(Out@Grad) must be 1 "
+                            "in SmoothL1LossGradOp, but received %d.",
+                            out_dims[1]));
     }
 
     auto x_grad_name = framework::GradVarName("X");
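Reviewer note: SmoothL1LossOp now states the pairing rule for the optional weights explicitly: `InsideWeight` and `OutsideWeight` must be given together, and each must match the shape of `X`. A sketch of valid usage (Paddle 1.x build with this patch; names are illustrative):

```python
import numpy as np
import paddle.fluid as fluid

main_prog, startup_prog = fluid.Program(), fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    x = fluid.layers.data(name='x', shape=[3], dtype='float32')
    y = fluid.layers.data(name='y', shape=[3], dtype='float32')
    w = fluid.layers.data(name='w', shape=[3], dtype='float32')
    # Passing inside_weight without outside_weight (or vice versa) now
    # fails with a structured InvalidArgument error.
    loss = fluid.layers.smooth_l1(x, y, inside_weight=w, outside_weight=w)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_prog)
data = np.ones((2, 3), dtype='float32')
print(exe.run(main_prog,
              feed={'x': data, 'y': data, 'w': data},
              fetch_list=[loss])[0])
```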
but " + "received X dim is %s, OutsideWeight dim is %s", + x_dims.to_str(), dims.to_str())); } } @@ -133,14 +152,23 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel { auto in_dims = ctx->GetInputDim("Diff"); auto out_dims = ctx->GetInputDim(framework::GradVarName("Out")); - PADDLE_ENFORCE_GE(out_dims.size(), 2, - "The tensor rank of Input(Out@Grad) should be 2."); + PADDLE_ENFORCE_GE( + out_dims.size(), 2, + platform::errors::InvalidArgument( + "The tensor rank of Input(Out@Grad) should be 2, but received %d.", + out_dims.size())); if (ctx->IsRuntime()) { - PADDLE_ENFORCE_EQ(out_dims[0], in_dims[0], - "The 1st dimension of Input(Out@Grad) must be " - "same as input."); + PADDLE_ENFORCE_EQ( + out_dims[0], in_dims[0], + platform::errors::InvalidArgument( + "The 1st dimension of Input(Out@Grad) must be " + "same as input in SmoothL1LossGradOp, but received %d and %d.", + out_dims[0], in_dims[0])); PADDLE_ENFORCE_EQ(out_dims[1], 1, - "The 2nd dimension of Input(Out@Grad) must be 1."); + platform::errors::InvalidArgument( + "The 2nd dimension of Input(Out@Grad) must be 1 in " + "SmoothL1LossGradOp, but received %d.", + out_dims[1])); } auto x_grad_name = framework::GradVarName("X"); diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc index 9c956131a83bb2acdd8f8317e1890d59d6fa9041..063d390a56d3767e9c1821101845bb890aeda689 100644 --- a/paddle/fluid/operators/uniform_random_op.cc +++ b/paddle/fluid/operators/uniform_random_op.cc @@ -74,12 +74,13 @@ class CPUUniformRandomKernel : public framework::OpKernel { static_cast(ctx.Attr("diag_step")); auto diag_val = static_cast(ctx.Attr("diag_val")); if (diag_num > 0) { - PADDLE_ENFORCE_GT(size, (diag_num - 1) * (diag_step + 1), - "ShapeError: the diagonal's elements is equal (num-1) " - "* (step-1) with num %d, step %d," - "It should be smaller than %d, but received %d", - diag_num, diag_step, (diag_num - 1) * (diag_step + 1), - size); + PADDLE_ENFORCE_GT( + size, (diag_num - 1) * (diag_step + 1), + platform::errors::InvalidArgument( + "ShapeInvalid: the diagonal's elements is equal (num-1) " + "* (step-1) with num %d, step %d," + "It should be smaller than %d, but received %d", + diag_num, diag_step, (diag_num - 1) * (diag_step + 1), size)); for (int64_t i = 0; i < diag_num; ++i) { int64_t pos = i * diag_step + i; data[pos] = diag_val; @@ -117,9 +118,10 @@ class UniformRandomOp : public framework::OperatorWithKernel { auto inputs_name = ctx->Inputs("ShapeTensorList"); PADDLE_ENFORCE_GT( inputs_name.size(), 0, - "Input(ShapeTensorList)'size of Op(uniform_random) can't be zero." - "Please check the Attr(shape)'s size of" - "Op(fluid.layers.uniform_random).)"); + platform::errors::InvalidArgument( + "Input(ShapeTensorList)'size of Op(uniform_random) can't be zero." + "Please check the Attr(shape)'s size of" + "Op(fluid.layers.uniform_random).)")); auto out_dims = std::vector(inputs_name.size(), -1); ctx->SetOutputDim("Out", framework::make_ddim(out_dims)); @@ -130,10 +132,11 @@ class UniformRandomOp : public framework::OperatorWithKernel { auto shape_dims = ctx->GetInputDim("ShapeTensor"); PADDLE_ENFORCE_EQ( shape_dims.size(), 1, - "ShapeError: Input(ShapeTensor)' dimension size of " - "Op(uniform_random) must be 1." - "But received ShapeTensor's dimensions = %d, shape = [%s]", - shape_dims.size(), shape_dims); + platform::errors::InvalidArgument( + "ShapeError: Input(ShapeTensor)' dimension size of " + "Op(uniform_random) must be 1." 
diff --git a/python/paddle/fluid/initializer.py b/python/paddle/fluid/initializer.py
index 5d159b3429054d194b5d0c0d0d3e8af88edd0c94..3f8ef2c9193edec8aa4dd5ff2cd0984df8a8cf14 100644
--- a/python/paddle/fluid/initializer.py
+++ b/python/paddle/fluid/initializer.py
@@ -21,6 +21,7 @@ import numpy as np
 from .wrapped_decorator import signature_safe_contextmanager
 from .core import VarDesc
 from . import unique_name
+from .data_feeder import check_variable_and_dtype, check_type, check_dtype
 
 __all__ = [
     'Constant', 'Uniform', 'Normal', 'TruncatedNormal', 'Xavier', 'Bilinear',
@@ -216,8 +217,10 @@ class UniformInitializer(Initializer):
         Returns:
             the initialization op
         """
-        assert isinstance(var, framework.Variable)
         assert isinstance(block, framework.Block)
+        check_variable_and_dtype(var, "Out", ["float16", "float32", "float64"],
+                                 "uniform_random")
+
         # Initialization Ops should be prepended and not appended
         if self._seed == 0:
             self._seed = block.program.random_seed
@@ -303,8 +306,10 @@ class NormalInitializer(Initializer):
         Returns:
             the initialization op
         """
-        assert isinstance(var, framework.Variable)
         assert isinstance(block, framework.Block)
+
+        check_variable_and_dtype(var, "Out", ["float16", "float32", "float64"],
+                                 "gaussian_random")
         # Initialization Ops should be prepended and not appended
         if self._seed == 0:
             self._seed = block.program.random_seed
@@ -494,8 +499,10 @@ class XavierInitializer(Initializer):
         Returns:
             the initialization op
         """
-        assert isinstance(var, framework.Variable)
         assert isinstance(block, framework.Block)
+        check_variable_and_dtype(var, "Out", ["float16", "float32", "float64"],
+                                 "xavier_init")
+
         f_in, f_out = self._compute_fans(var)
 
         # If fan_in and fan_out are passed, use them
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 3076ddeadecbb0cb273792171c6d328b63f6b6b2..5338a276655e37e4057fd9b30177cb79b8b404a3 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -5626,8 +5626,11 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
             # [0.20541131]], dtype=float32)]
 
     """
+    check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'smooth_l1_loss')
+    check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'smooth_l1_loss')
     helper = LayerHelper('smooth_l1_loss', **locals())
+
     diff = helper.create_variable_for_type_inference(dtype=x.dtype)
     loss = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='smooth_l1_loss',
@@ -8355,6 +8358,9 @@ def mean_iou(input, label, num_classes):
                                               num_classes)
     """
     helper = LayerHelper('mean_iou', **locals())
+    check_variable_and_dtype(input, 'Predictions', ['int32', 'int64'],
+                             'mean_iou')
+    check_variable_and_dtype(label, 'Labels', ['int32', 'int64'], 'mean_iou')
     dtype = helper.input_dtype()
     out_mean_iou = helper.create_variable_for_type_inference(dtype='float32')
     out_wrong = helper.create_variable_for_type_inference(dtype='int32')
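Reviewer note: on the Python side, the bare `isinstance(var, ...)` asserts in the initializers are replaced by `check_variable_and_dtype`, so misuse now raises a `TypeError` naming the allowed dtypes before any op is appended. A sketch mirroring the new unit tests below (Paddle 1.x build with this patch; `int_var` is an illustrative name):

```python
import paddle.fluid as fluid

program = fluid.Program()
with fluid.program_guard(fluid.Program(), program):
    block = program.global_block()
    int_var = block.create_var(name="Out", dtype='int32')
    init = fluid.initializer.NormalInitializer(loc=0.0, scale=1.0, seed=0)
    try:
        # int32 is not in [float16, float32, float64], so the check fires.
        init(int_var, block)
    except TypeError as e:
        print("rejected:", e)
```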
diff --git a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
index 496aa4111056591efce14549011d66f9ae49713a..44df5daab20536a7f244ed465662257a3975d078 100644
--- a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
+++ b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
@@ -72,5 +72,42 @@ class TestGaussianRandomOp(unittest.TestCase):
         pass
 
 
+class TestGaussianRandomOpError(unittest.TestCase):
+    def setUp(self):
+        self.op_type = "gaussian_random"
+        self.inputs = {}
+        self.use_mkldnn = False
+        self.attrs = {
+            "shape": [1000, 784],
+            "mean": .0,
+            "std": 1.,
+            "seed": 10,
+            "use_mkldnn": self.use_mkldnn
+        }
+
+        self.outputs = ["Out"]
+
+    def test_errors(self):
+        program = fluid.Program()
+        with fluid.program_guard(fluid.Program(), program):
+            input_data = numpy.random.random((2, 4)).astype("float32")
+            block = program.global_block()
+            vout = block.create_var(name="Out", dtype='int32')
+            normal_initializer = fluid.initializer.NormalInitializer(
+                loc=0.0, scale=1.0, seed=0)
+
+            def test_Variable():
+                # the input type must be Variable
+                normal_initializer(input_data, block)
+
+            self.assertRaises(TypeError, test_Variable)
+
+            def test_type():
+                # dtype must be float16, float32 or float64
+                normal_initializer(vout, block)
+
+            self.assertRaises(TypeError, test_type)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_mean_iou.py b/python/paddle/fluid/tests/unittests/test_mean_iou.py
index 03e94483178e83adad9886cd7df2107581360dd1..e2e118ac9e3b46499055c2dd46755d5401d5abd5 100644
--- a/python/paddle/fluid/tests/unittests/test_mean_iou.py
+++ b/python/paddle/fluid/tests/unittests/test_mean_iou.py
@@ -18,6 +18,7 @@ from __future__ import division
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid
 
 
 def compute_mean_iou(predictions, labels, num_classes, in_wrongs, in_corrects,
@@ -112,5 +113,20 @@ class TestCase1(TestMeanIOUOp):
         self.in_mean_iou_num = 2
 
 
+class TestMeanIOUOpError(unittest.TestCase):
+    def test_errors(self):
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            # The input type of mean_iou must be Variable.
+            x1 = fluid.create_lod_tensor(
+                np.array([[-1]]), [[1]], fluid.CPUPlace())
+            y1 = fluid.create_lod_tensor(
+                np.array([[-1]]), [[1]], fluid.CPUPlace())
+            self.assertRaises(TypeError, fluid.layers.mean_iou, x1, y1, 2)
+            # The input dtype of mean_iou must be int32 or int64.
+            x2 = fluid.layers.data(name='x2', shape=[4], dtype="float32")
+            y2 = fluid.layers.data(name='y2', shape=[4], dtype="float32")
+            self.assertRaises(TypeError, fluid.layers.mean_iou, x2, y2, 2)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_smooth_l1_loss_op.py b/python/paddle/fluid/tests/unittests/test_smooth_l1_loss_op.py
index 217bae7c06631f137cb976c3fbb665cf146ebf8b..3c825c08e8c3fc00db440a1a43b5de87b01d0c97 100644
--- a/python/paddle/fluid/tests/unittests/test_smooth_l1_loss_op.py
+++ b/python/paddle/fluid/tests/unittests/test_smooth_l1_loss_op.py
@@ -17,6 +17,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid
 
 
 def smooth_l1_loss_forward(val, sigma2):
@@ -105,5 +106,20 @@ class TestSmoothL1LossOp2(OpTest):
             no_grad_set=set(['Y', 'InsideWeight', 'OutsideWeight']))
 
 
+class TestSmoothL1LossOpError(unittest.TestCase):
+    def test_errors(self):
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            # The input type of smooth_l1 must be Variable.
+            x1 = fluid.create_lod_tensor(
+                np.array([[-1]]), [[1]], fluid.CPUPlace())
+            y1 = fluid.create_lod_tensor(
+                np.array([[-1]]), [[1]], fluid.CPUPlace())
+            self.assertRaises(TypeError, fluid.layers.smooth_l1, x1, y1)
+            # The input dtype of smooth_l1 must be float32 or float64.
+            x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
+            y2 = fluid.layers.data(name='y2', shape=[4], dtype="int32")
+            self.assertRaises(TypeError, fluid.layers.smooth_l1, x2, y2)
+
+
 if __name__ == '__main__':
     unittest.main()