diff --git a/paddle/fluid/operators/rank_loss_op.cc b/paddle/fluid/operators/rank_loss_op.cc
index 1cd3f27c1849b4a38e9e16ff9670f305a6b484b3..9b7a923fb4bbcfb6e525f8360cb692c003555260 100644
--- a/paddle/fluid/operators/rank_loss_op.cc
+++ b/paddle/fluid/operators/rank_loss_op.cc
@@ -27,55 +27,87 @@ class RankLossOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Label"), true,
-                      "Input(Label) shouldn't be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Left"), true,
-                      "Input(Left) shouldn't be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Right"), true,
-                      "Input(Right) shouldn't be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "RankLoss");
+    OP_INOUT_CHECK(ctx->HasInput("Left"), "Input", "Left", "RankLoss");
+    OP_INOUT_CHECK(ctx->HasInput("Right"), "Input", "Right", "RankLoss");

     auto label_dims = ctx->GetInputDim("Label");
     auto left_dims = ctx->GetInputDim("Left");
     auto right_dims = ctx->GetInputDim("Right");

     // check label_dims valid
-    PADDLE_ENFORCE_GE(label_dims.size(), 1,
-                      "The dimension size of Input(Label) must be greater than "
-                      "or equal to 1.");
+    PADDLE_ENFORCE_GE(
+        label_dims.size(), 1,
+        platform::errors::InvalidArgument(
+            "The dimension size of Input(Label) must be greater than "
+            "or equal to 1, but received %d.",
+            label_dims.size()));
     PADDLE_ENFORCE_LE(
         label_dims.size(), 2,
-        "The dimension size of Input(Label) must be less than or equal to 2.");
+        platform::errors::InvalidArgument("The dimension size of Input(Label) "
+                                          "must be less than or equal to 2, "
+                                          "but received %d.",
+                                          label_dims.size()));
     if (label_dims.size() == 2U) {
-      PADDLE_ENFORCE_EQ(label_dims[1], 1,
-                        "The last dimension of Input(Label) must be 1.");
+      PADDLE_ENFORCE_EQ(
+          label_dims[1], 1,
+          platform::errors::InvalidArgument(
+              "The last dimension of Input(Label) must be 1, but received %d.",
+              label_dims[1]));
     }
     // check left_dims valid
-    PADDLE_ENFORCE_GE(left_dims.size(), 1,
-                      "The dimension size of Input(Left) must be greater than "
-                      "or equal to 1.");
+    PADDLE_ENFORCE_GE(
+        left_dims.size(), 1,
+        platform::errors::InvalidArgument(
+            "The dimension size of Input(Left) must be greater than "
+            "or equal to 1, but received %d.",
+            left_dims.size()));
     PADDLE_ENFORCE_LE(
         left_dims.size(), 2,
-        "The dimension size of Input(Left) must be less than or equal to 2.");
+        platform::errors::InvalidArgument("The dimension size of Input(Left) "
+                                          "must be less than or equal to 2, "
+                                          "but received %d.",
+                                          left_dims.size()));
     if (left_dims.size() == 2U) {
-      PADDLE_ENFORCE_EQ(left_dims[1], 1,
-                        "The last dimension of Input(Left) must be 1.");
+      PADDLE_ENFORCE_EQ(
+          left_dims[1], 1,
+          platform::errors::InvalidArgument(
+              "The last dimension of Input(Left) must be 1, but received %d.",
+              left_dims[1]));
     }
     // check right_dims valid
-    PADDLE_ENFORCE_GE(right_dims.size(), 1,
-                      "The dimension size of Input(Right) must be greater than "
-                      "or equal to 1.");
+    PADDLE_ENFORCE_GE(
+        right_dims.size(), 1,
+        platform::errors::InvalidArgument(
+            "The dimension size of Input(Right) must be greater than "
+            "or equal to 1, but received %d.",
+            right_dims.size()));
     PADDLE_ENFORCE_LE(
         right_dims.size(), 2,
-        "The dimension size of Input(Right) must be less than or equal to 2.");
+        platform::errors::InvalidArgument("The dimension size of Input(Right) "
+                                          "must be less than or equal to 2, "
+                                          "but received %d.",
+                                          right_dims.size()));
     if (right_dims.size() == 2U) {
-      PADDLE_ENFORCE_EQ(right_dims[1], 1,
-                        "The last dimension of Input(Right) must be 1.");
+      PADDLE_ENFORCE_EQ(
+          right_dims[1], 1,
+          platform::errors::InvalidArgument(
+              "The last dimension of Input(Right) must be 1, but received %d.",
+              right_dims[1]));
     }
-    PADDLE_ENFORCE_EQ(label_dims[0], left_dims[0],
-                      "The first dimension of Input(Label) and Input(Left) "
-                      "must have the same value.");
-    PADDLE_ENFORCE_EQ(label_dims[0], right_dims[0],
-                      "The first dimension of Input(Label) and Input(Right) "
-                      "must have the same value.");
+    PADDLE_ENFORCE_EQ(
+        label_dims[0], left_dims[0],
+        platform::errors::InvalidArgument(
+            "The first dimension of Input(Label) and Input(Left) "
+            "must have the same value. But received Label.dims[0]=%d, "
+            "Left.dims[0]=%d.",
+            label_dims[0], left_dims[0]));
+    PADDLE_ENFORCE_EQ(
+        label_dims[0], right_dims[0],
+        platform::errors::InvalidArgument(
+            "The first dimension of Input(Label) and Input(Right) "
+            "must have the same value. But received Label.dims[0]=%d, "
+            "Right.dims[0]=%d.",
+            label_dims[0], right_dims[0]));

     ctx->SetOutputDim("Out", label_dims);
   }
 };
@@ -133,14 +165,12 @@ class RankLossGradOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Label"), true,
-                      "Input(Label) shouldn't be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Left"), true,
-                      "Input(Left) shouldn't be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Right"), true,
-                      "Input(Right) shouldn't be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
-                      "Input(Out@GRAD) shouldn't be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "RankLossGrad");
+    OP_INOUT_CHECK(ctx->HasInput("Left"), "Input", "Left", "RankLossGrad");
+    OP_INOUT_CHECK(ctx->HasInput("Right"), "Input", "Right", "RankLossGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "RankLossGrad");
+
     auto left_dims = ctx->GetInputDim("Left");
     auto right_dims = ctx->GetInputDim("Right");
     auto left_grad_name = framework::GradVarName("Left");
diff --git a/paddle/fluid/operators/similarity_focus_op.cc b/paddle/fluid/operators/similarity_focus_op.cc
index 3e3c9cf98fd7eb2d3bdbc0e90c95bb15abdd5b76..3d88fdb000a6f8bce7ce0a5f0ea95b49210d01af 100644
--- a/paddle/fluid/operators/similarity_focus_op.cc
+++ b/paddle/fluid/operators/similarity_focus_op.cc
@@ -59,10 +59,15 @@ class SimilarityFocusOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SimilarityFocus");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SimilarityFocus");
+
     auto x_dims = ctx->GetInputDim("X");
-    PADDLE_ENFORCE_EQ(x_dims.size(), 4, "Input(X)'s rank should be 4.");
+    PADDLE_ENFORCE_EQ(
+        x_dims.size(), 4,
+        platform::errors::InvalidArgument(
+            "The dimension size of Input(X) must be 4, but received %d.",
+            x_dims.size()));
     ctx->SetOutputDim("Out", x_dims);
     ctx->ShareLoD("X", /*->*/ "Out");
   }
diff --git a/paddle/fluid/operators/similarity_focus_op.h b/paddle/fluid/operators/similarity_focus_op.h
index bf3fed2aaf2cf92d5619ae5bce6dd70d9dfe9621..4fa4d772aa3a926fa95ba1ea9b45ed2037e1e568 100644
--- a/paddle/fluid/operators/similarity_focus_op.h
+++ b/paddle/fluid/operators/similarity_focus_op.h
@@ -43,13 +43,19 @@ class SimilarityFocusKernel : public framework::OpKernel<T> {
       dim[i] = x->dims()[i];
     }

-    if (indexes.size() < 1) {
-      PADDLE_THROW("Indexes' size can not be 0.");
-    }
-    for (auto index : indexes) {
-      if (dim[axis] < index) {
-        PADDLE_THROW("Index exceeds tensor shape limit.");
-      }
+    PADDLE_ENFORCE_GT(
+        indexes.size(), 0,
+        platform::errors::InvalidArgument("The size of Attr(indexes) must be "
+                                          "greater than 0, but received %d.",
+                                          indexes.size()));
+
+    for (size_t i = 0; i < indexes.size(); i++) {
+      PADDLE_ENFORCE_GT(
+          dim[axis], indexes[i],
+          platform::errors::InvalidArgument(
+              "Each value of Attr(indexes) must be less than X.dim[axis], "
+              "but received indexes[%d] = %d.",
+              i, indexes[i]));
     }

     int64_t array_size = 1;
@@ -72,6 +78,16 @@ class SimilarityFocusKernel : public framework::OpKernel<T> {
              d3 * dim[3] + d4;
     };

+    PADDLE_ENFORCE_GT(
+        axis, 0,
+        platform::errors::InvalidArgument(
+            "The value of Attr(axis) must be 1, 2 or 3, but received %d.",
+            axis));
+    PADDLE_ENFORCE_LT(
+        axis, 4,
+        platform::errors::InvalidArgument(
+            "The value of Attr(axis) must be 1, 2 or 3, but received %d.",
+            axis));
     memset(out_data, 0, sizeof(T) * batch_size * dim[1] * dim[2] * dim[3]);
     for (int i = 0; i < batch_size; ++i) {
       for (auto index : indexes) {
@@ -156,8 +172,6 @@ class SimilarityFocusKernel : public framework::OpKernel<T> {
             break;
           }
         }
-      } else {
-        PADDLE_THROW("Axis must be 1 or 2 or 3");
       }
     }
   }
diff --git a/paddle/fluid/operators/squeeze_op.cc b/paddle/fluid/operators/squeeze_op.cc
index 758f6e1f8844f48d3e2591c4a4aa87274ad3d3aa..6ba56e0064aea56987d51713877fabcc3ab11416 100644
--- a/paddle/fluid/operators/squeeze_op.cc
+++ b/paddle/fluid/operators/squeeze_op.cc
@@ -27,29 +27,19 @@ class SqueezeOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of Squeeze operator should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of Squeeze operator should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Squeeze");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Squeeze");

     const auto &x_dims = ctx->GetInputDim("X");
     // Check input tensor dims (<6) Eigen limit.
     PADDLE_ENFORCE_LE(x_dims.size(), 6,
-                      "ShapeError: the dimensions of Input(X) "
-                      "should be in the range of [1, 6] (Eigen limit)."
-                      "But received X's dimensions = %d, X's shape=[%s].",
-                      x_dims.size(), x_dims);
+                      platform::errors::InvalidArgument(
+                          "The dimensions of Input(X) "
+                          "should be in the range of [1, 6] (Eigen limit). "
+                          "But received X's dimensions = %d, X's shape=[%s].",
+                          x_dims.size(), x_dims));

     const auto &axes = ctx->Attrs().Get<std::vector<int>>("axes");
-    for (int a : axes) {
-      PADDLE_ENFORCE_LT(
-          a, x_dims.size(),
-          "ShapeError: The squeeze axis should be less than input "
-          "tensor's dimensions. But received axis = %d, input "
-          "tensor's dimensions = %d, input tensor's shape = [%s].",
-          a, x_dims.size(), x_dims);
-    }
-
     auto out_dims = GetOutputShape(axes, x_dims);
     ctx->SetOutputDim("Out", out_dims);
     if (x_dims[0] == out_dims[0]) {
@@ -78,10 +68,18 @@ class SqueezeOp : public framework::OperatorWithKernel {
     for (size_t idx = 0; idx < num_squeeze_dims; ++idx) {
       int current = squeeze_dims[idx] < 0 ? squeeze_dims[idx] + in_dims.size()
                                           : squeeze_dims[idx];
-      PADDLE_ENFORCE_GE(current, 0,
-                        "Invalid axis, the axis should >= 0."
-                        "Current axis is:%d, input tensor's shape = [%s].",
-                        current, in_dims);
+      PADDLE_ENFORCE_GE(
+          current, 0,
+          platform::errors::InvalidArgument(
+              "Each axis in Attr(axes) should be in the range of [%d, %d]. "
+              "But received axis = %d, input tensor's shape = [%s].",
+              -in_dims.size(), in_dims.size() - 1, current, in_dims));
+      PADDLE_ENFORCE_LT(
+          current, in_dims.size(),
+          platform::errors::InvalidArgument(
+              "Each axis in Attr(axes) should be in the range of [%d, %d]. "
+              "But received axis = %d, input tensor's shape = [%s].",
+              -in_dims.size(), in_dims.size() - 1, current, in_dims));

       if (!(should_squeeze[current])) {
         ++cnt_squeezed_dims;
@@ -171,28 +169,19 @@ class Squeeze2Op : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of Squeeze operator should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of Squeeze operator should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Squeeze2");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Squeeze2");

     const auto &x_dims = ctx->GetInputDim("X");
     // Check input tensor dims (<6) Eigen limit.
     PADDLE_ENFORCE_LE(x_dims.size(), 6,
-                      "ShapeError: the dimensions of Input(X) "
-                      "should be in the range of [1, 6] (Eigen limit)."
-                      "But received X's dimensions = %d, X's shape = [%s].",
-                      x_dims.size(), x_dims);
+                      platform::errors::InvalidArgument(
+                          "The dimensions of Input(X) "
+                          "should be in the range of [1, 6] (Eigen limit). "
+                          "But received X's dimensions = %d, X's shape = [%s].",
+                          x_dims.size(), x_dims));

     const auto &axes = ctx->Attrs().Get<std::vector<int>>("axes");
-    for (int a : axes) {
-      PADDLE_ENFORCE_LT(
-          a, x_dims.size(),
-          "ShapeError: The squeeze axis should be less than input "
-          "tensor's dimensions. But received axis = %d, input "
-          "tensor's dimensions = %d, input tensor's shape = [%s].",
-          a, x_dims.size(), x_dims);
-    }

     auto out_dims = SqueezeOp::GetOutputShape(axes, x_dims);
     ctx->SetOutputDim("Out", out_dims);
@@ -202,8 +191,8 @@ class Squeeze2Op : public framework::OperatorWithKernel {
       ctx->ShareLoD("X", "Out");
     }

-    PADDLE_ENFORCE_EQ(ctx->HasOutput("XShape"), true,
-                      "Output(XShape) of Squeeze operator should not be null.");
+    OP_INOUT_CHECK(ctx->HasOutput("XShape"), "Output", "XShape", "Squeeze2");
+
     std::vector<int64_t> xshape_dims(x_dims.size() + 1);
     xshape_dims[0] = 0;
     for (int i = 0; i < x_dims.size(); ++i) {
@@ -233,10 +222,10 @@ class Squeeze2GradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE_EQ(context->HasInput("XShape"), true,
-                      "Input(XShape) shouldn't be null.");
-    PADDLE_ENFORCE_EQ(context->HasInput(framework::GradVarName("Out")), true,
-                      "Input(Out@GRAD) shouldn't be null.");
+    OP_INOUT_CHECK(context->HasInput("XShape"), "Input", "XShape",
+                   "Squeeze2Grad");
+    OP_INOUT_CHECK(context->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "Squeeze2Grad");
     auto xshape_dims = context->GetInputDim("XShape");
     auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size());
     context->SetOutputDim(framework::GradVarName("X"), x_dims);
diff --git a/paddle/fluid/operators/squeeze_op.h b/paddle/fluid/operators/squeeze_op.h
index 28fad645769e718e600c2668736a7a130bf0a948..e8e53bb0f4fcd5c71776092ce429be36ac63fc25 100644
--- a/paddle/fluid/operators/squeeze_op.h
+++ b/paddle/fluid/operators/squeeze_op.h
@@ -62,16 +62,25 @@ class SqueezeKernel : public framework::OpKernel<T> {
       int current = squeeze_dims[idx] < 0 ? squeeze_dims[idx] + in_dims.size()
                                           : squeeze_dims[idx];
-      PADDLE_ENFORCE_GE(current, 0,
-                        "Invalid axis, the axis should >= 0."
-                        "Current axis is:%d, input tensor's shape = [%s].",
-                        current, in_dims);
+      PADDLE_ENFORCE_GE(
+          current, 0,
+          platform::errors::InvalidArgument(
+              "Each axis in Attr(axes) should be in the range of [%d, %d]. "
+              "But received axis = %d, input tensor's shape = [%s].",
+              -in_dims.size(), in_dims.size() - 1, current, in_dims));
+      PADDLE_ENFORCE_LT(
+          current, in_dims.size(),
+          platform::errors::InvalidArgument(
+              "Each axis in Attr(axes) should be in the range of [%d, %d]. "
+              "But received axis = %d, input tensor's shape = [%s].",
+              -in_dims.size(), in_dims.size() - 1, current, in_dims));

       PADDLE_ENFORCE_EQ(in_dims[current], 1,
-                        "Invalid axis index, the axis that will be squeezed "
-                        "should be equal to 1. But current axis = %d,"
-                        "input tensor's shape = [%s].",
-                        in_dims[current], in_dims);
+                        platform::errors::InvalidArgument(
+                            "The size of the axis that will be squeezed "
+                            "should be equal to 1. But the size of the "
+                            "current axis = %d, input tensor's shape = [%s].",
+                            in_dims[current], in_dims));

       if (!(should_squeeze[current])) {
         ++cnt_squeezed_dims;
diff --git a/paddle/fluid/operators/warpctc_op.cc b/paddle/fluid/operators/warpctc_op.cc
index 6758987713faf799a70536f99bb2b1e1f9849cd4..aa06b67a3e1c89b0944cd949aff555970ad21fd5 100644
--- a/paddle/fluid/operators/warpctc_op.cc
+++ b/paddle/fluid/operators/warpctc_op.cc
@@ -169,7 +169,7 @@ class WarpCTCGradOp : public framework::OperatorWithKernel {
     OP_INOUT_CHECK(ctx->HasInput("WarpCTCGrad"), "Input", "WarpCTCGrad",
                    "WarpCTCGrad");
     OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("Logits")), "Output",
-                   "WarpCTCGrad", "WarpCTCGrad");
+                   framework::GradVarName("Logits"), "WarpCTCGrad");
     ctx->SetOutputDim(framework::GradVarName("Logits"),
                       ctx->GetInputDim("Logits"));
     ctx->ShareLoD("Logits", /*->*/ framework::GradVarName("Logits"));
diff --git a/python/paddle/fluid/layers/loss.py b/python/paddle/fluid/layers/loss.py
index ea57c16bc5dfaab3044a1a2e534a9a9f9eab9962..f13d2f2d5c1f443d1fb0c4fc67b54b0deb0cd19b 100644
--- a/python/paddle/fluid/layers/loss.py
+++ b/python/paddle/fluid/layers/loss.py
@@ -1329,15 +1329,9 @@ def rank_loss(label, left, right, name=None):
     """
     helper = LayerHelper('rank_loss', **locals())
-
-    if not (isinstance(label, Variable)):
-        raise ValueError("The label should be a Variable")
-
-    if not (isinstance(left, Variable)):
-        raise ValueError("The left should be a Variable")
-
-    if not (isinstance(right, Variable)):
-        raise ValueError("The right should be a Variable")
+    check_variable_and_dtype(label, 'label', ['float32'], "rank_loss")
+    check_variable_and_dtype(left, 'left', ['float32'], "rank_loss")
+    check_variable_and_dtype(right, 'right', ['float32'], "rank_loss")

     out = helper.create_variable_for_type_inference("float32")
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 9c450173fc16e83fdecad2d2051450f02d22f8a8..904b9918b29a049e2188ae6e9a5e160983d5a9c2 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -12355,10 +12355,10 @@ def similarity_focus(input, axis, indexes, name=None):
     """
     helper = LayerHelper('similarity_focus', **locals())
     # check attrs
-    if isinstance(axis, int) is False:
-        raise TypeError("axis must be int type.")
-    if isinstance(indexes, list) is False:
-        raise TypeError("indexes must be list type.")
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
+                             "similarity_focus")
+    check_type(axis, 'axis', int, "similarity_focus")
+    check_type(indexes, 'indexes', list, "similarity_focus")
     if axis != 1 and axis != 2 and axis != 3:
         raise ValueError("axis must be 1, 2 or 3.")
     if len(indexes) == 0:
diff --git a/python/paddle/fluid/tests/unittests/test_rank_loss_op.py b/python/paddle/fluid/tests/unittests/test_rank_loss_op.py
index 994059c4ee0cdc00738c1ed9fe22014043b8e72b..c4851bc274b82a284ba44d447d5a0902785036d5 100644
--- a/python/paddle/fluid/tests/unittests/test_rank_loss_op.py
+++ b/python/paddle/fluid/tests/unittests/test_rank_loss_op.py
@@ -17,6 +17,8 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid
+from paddle.fluid import Program, program_guard


 class TestRankLossOp(OpTest):
@@ -84,5 +86,31 @@ class TestRankLossOp5(TestRankLossOp):
         return (batch_size), (batch_size), (batch_size)


+class TestRankLossOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            label = fluid.data(name="label", shape=[16, 1], dtype="float32")
dtype="float32") + left = fluid.data(name="left", shape=[16, 1], dtype="float32") + right = fluid.data(name="right", shape=[16, 1], dtype="float32") + + def test_label_Variable(): + label_data = np.random.rand(16, 1).astype("float32") + out = fluid.layers.rank_loss(label_data, left, right) + + self.assertRaises(TypeError, test_label_Variable) + + def test_left_Variable(): + left_data = np.random.rand(16, 1).astype("float32") + out = fluid.layers.rank_loss(label, left_data, right) + + self.assertRaises(TypeError, test_left_Variable) + + def test_right_Variable(): + right_data = np.random.rand(16, 1).astype("float32") + out = fluid.layers.rank_loss(label, left, right_data) + + self.assertRaises(TypeError, test_right_Variable) + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_similarity_focus_op.py b/python/paddle/fluid/tests/unittests/test_similarity_focus_op.py index b3833f05f1aa3aac7b5bcc5b6fdc138870cc8844..888bec928ff8d404720201867ec59b56caade0f2 100755 --- a/python/paddle/fluid/tests/unittests/test_similarity_focus_op.py +++ b/python/paddle/fluid/tests/unittests/test_similarity_focus_op.py @@ -18,6 +18,8 @@ import unittest import numpy as np import paddle.fluid.core as core from op_test import OpTest +import paddle.fluid as fluid +from paddle.fluid import Program, program_guard class TestSimilarityFocusOp(OpTest): @@ -213,5 +215,32 @@ class TestSimilarityFocusOp_axis3(OpTest): self.check_output() +class TestSimilarityFocusOpError(unittest.TestCase): + def test_errors(self): + with program_guard(Program(), Program()): + data = fluid.data(name='data', shape=[16, 3, 2, 2], dtype='float32') + + def test_input_Variable(): + input = np.random.rand(16, 3, 2, 2).astype("float32") + out = fluid.layers.similarity_focus( + input=input, axis=1, indexes=[0]) + + self.assertRaises(TypeError, test_input_Variable) + + def test_axis_Int(): + axis = 1.0 + out = fluid.layers.similarity_focus( + input=data, axis=axis, indexes=[0]) + + self.assertRaises(TypeError, test_axis_Int) + + def test_indexes_List(): + indexes = 0 + out = fluid.layers.similarity_focus( + input=data, axis=1, indexes=indexes) + + self.assertRaises(TypeError, test_indexes_List) + + if __name__ == "__main__": unittest.main()