diff --git a/paddle/fluid/operators/center_loss_op.cc b/paddle/fluid/operators/center_loss_op.cc index 19759519eb0529b99dc48c5679b2c44324c7a159..cd1aa9d9c841af8cb4520d2ce52c747c36c99c0d 100644 --- a/paddle/fluid/operators/center_loss_op.cc +++ b/paddle/fluid/operators/center_loss_op.cc @@ -27,29 +27,18 @@ class CenterLossOp : public framework::OperatorWithKernel { : OperatorWithKernel(type, inputs, outputs, attrs) {} void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of CenterLoss should not be null."); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "CenterLoss"); auto x_dims = ctx->GetInputDim("X"); - PADDLE_ENFORCE(ctx->HasInput("CenterUpdateRate"), - "Input(CenterUpdateRate) of CenterLoss should not be null."); - - PADDLE_ENFORCE(ctx->HasInput("Label"), - "Input(Label) of CenterLoss should not be null."); - - PADDLE_ENFORCE(ctx->HasInput("Centers"), - "Input(Centers) of CenterLoss should not be null."); - - PADDLE_ENFORCE( - ctx->HasOutput("SampleCenterDiff"), - "Output(SampleCenterDiff) of CenterLoss should not be null."); - - PADDLE_ENFORCE(ctx->HasOutput("Loss"), - "Output(Loss) of CenterLoss should not be null."); - - PADDLE_ENFORCE( - ctx->HasOutput("CentersOut"), - "Output(CentersOut) of CenterLoss shared data with Centers."); + OP_INOUT_CHECK(ctx->HasInput("CenterUpdateRate"), "Input", + "CenterUpdateRate", "CenterLoss"); + OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "CenterLoss"); + OP_INOUT_CHECK(ctx->HasInput("Centers"), "Input", "Centers", "CenterLoss"); + OP_INOUT_CHECK(ctx->HasOutput("SampleCenterDiff"), "Output", + "SampleCenterDiff", "CenterLoss"); + OP_INOUT_CHECK(ctx->HasOutput("Loss"), "Output", "Loss", "CenterLoss"); + OP_INOUT_CHECK(ctx->HasOutput("CentersOut"), "Output", "CentersOut", + "CenterLoss"); ctx->SetOutputDim("SampleCenterDiff", {x_dims[0], product(x_dims) / x_dims[0]}); @@ -99,12 +88,12 @@ class CenterLossGradOp : public 
framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("SampleCenterDiff"), - "Input(SampleCenterDiff) should not be null"); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Loss")), - "Input(Loss) should not be null"); - PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), - "Output(X) should not be null"); + OP_INOUT_CHECK(ctx->HasInput("SampleCenterDiff"), "Input", + "SampleCenterDiff", "CenterLossGrad"); + OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Loss")), "Input", + framework::GradVarName("Loss"), "CenterLossGrad"); + OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output", + framework::GradVarName("X"), "CenterLossGrad"); auto x_dims = ctx->GetInputDim("X"); auto x_grad_name = framework::GradVarName("X"); diff --git a/paddle/fluid/operators/ctc_align_op.cc b/paddle/fluid/operators/ctc_align_op.cc index 8af29133f1a2b1f790a451ac46932bb8f26b5989..350fff9ecdf511482e575c62fed653c5023d0434 100644 --- a/paddle/fluid/operators/ctc_align_op.cc +++ b/paddle/fluid/operators/ctc_align_op.cc @@ -22,10 +22,8 @@ class CTCAlignOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true, - "Input of CTCAlignOp should not be null."); - PADDLE_ENFORCE_EQ(ctx->HasOutput("Output"), true, - "Output of CTCAlignOp should not be null."); + OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "ctc_align"); + OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output", "ctc_align"); auto input_dims = ctx->GetInputDim("Input"); diff --git a/paddle/fluid/operators/one_hot_v2_op.cc b/paddle/fluid/operators/one_hot_v2_op.cc index 94a42bef5a35864b8a216d468256baa3955d88f7..29fe6f10c72f435627ae9b2ab0ce7c44f5ee9df7 100644 --- a/paddle/fluid/operators/one_hot_v2_op.cc 
+++ b/paddle/fluid/operators/one_hot_v2_op.cc @@ -24,14 +24,13 @@ class OneHotV2Op : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, - "Input(X) of OneHotOp should not be null."); - PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true, - "Output(Out) of OneHotOp should not be null."); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "one_hot_v2"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "one_hot_v2"); auto x_dims = ctx->GetInputDim("X"); PADDLE_ENFORCE_GE(x_dims.size(), 1, - "Rank of Input(X) should be at least 1."); + platform::errors::InvalidArgument( + "Rank of Input(X) should be at least 1.")); int depth = ctx->Attrs().Get<int>("depth"); if (ctx->HasInput("depth_tensor")) { diff --git a/paddle/fluid/operators/prroi_pool_op.cc b/paddle/fluid/operators/prroi_pool_op.cc index 2027f506bdce2f4b018dc73f164235c53cd91655..f03a392bfc7367de5cd50416df3073b500aea139 100644 --- a/paddle/fluid/operators/prroi_pool_op.cc +++ b/paddle/fluid/operators/prroi_pool_op.cc @@ -79,15 +79,10 @@ class PRROIPoolOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, - platform::errors::NotFound( - "Input(X) of op(PRROIPool) should not be null.")); - PADDLE_ENFORCE_EQ(ctx->HasInput("ROIs"), true, - platform::errors::NotFound( - "Input(ROIs) of op(PRROIPool) should not be null.")); - PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true, - platform::errors::NotFound( - "Output(Out) of op(PRROIPool) should not be null.")); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "prroi_pool"); + OP_INOUT_CHECK(ctx->HasInput("ROIs"), "Input", "ROIs", "prroi_pool"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "prroi_pool"); + auto input_dims = 
ctx->GetInputDim("X"); auto rois_dims = ctx->GetInputDim("ROIs"); @@ -148,10 +143,10 @@ class PRROIPoolGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true, - "The gradient of Out should not be null."); - PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")), true, - "The gradient of X should not be null."); + OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input", + framework::GradVarName("Out"), "prroi_pool"); + OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output", + framework::GradVarName("X"), "prroi_pool"); ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); ctx->SetOutputDim(framework::GradVarName("ROIs"), ctx->GetInputDim("ROIs")); } diff --git a/paddle/fluid/operators/roi_pool_op.cc b/paddle/fluid/operators/roi_pool_op.cc index acfb46db65304c952abfbaa2ad7ce22fea5c53ce..01e74068496516be22a102c3824710aaa2cac204 100644 --- a/paddle/fluid/operators/roi_pool_op.cc +++ b/paddle/fluid/operators/roi_pool_op.cc @@ -26,39 +26,58 @@ class ROIPoolOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of ROIPoolOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("ROIs"), - "Input(ROIs) of ROIPoolOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of ROIPoolOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Argmax"), - "Output(Argmax) of ROIPoolOp should not be null."); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "roi_pool"); + OP_INOUT_CHECK(ctx->HasInput("ROIs"), "Input", "ROIs", "roi_pool"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "roi_pool"); + OP_INOUT_CHECK(ctx->HasOutput("Argmax"), "Output", 
"Argmax", "roi_pool"); + auto input_dims = ctx->GetInputDim("X"); auto rois_dims = ctx->GetInputDim("ROIs"); + if (ctx->HasInput("RoisLod")) { auto rois_lod_dims = ctx->GetInputDim("RoisLod"); PADDLE_ENFORCE(rois_lod_dims.size() == 1, ""); } - PADDLE_ENFORCE(input_dims.size() == 4, - "The format of input tensor is NCHW."); - PADDLE_ENFORCE(rois_dims.size() == 2, - "ROIs should be a 2-D LoDTensor of shape (num_rois, 4)" - "given as [[x1, y1, x2, y2], ...]."); - PADDLE_ENFORCE(rois_dims[1] == kROISize, - "ROIs should be a 2-D LoDTensor of shape (num_rois, 4)" - "given as [[x1, y1, x2, y2], ...]."); + PADDLE_ENFORCE_EQ(input_dims.size(), 4, + platform::errors::InvalidArgument( + "The input data should be a four-dimensional " + "tensor with [N,C,H,W], but received input data with " + " %d dimension", + input_dims.size())); + PADDLE_ENFORCE_EQ( + rois_dims.size(), 2, + platform::errors::InvalidArgument( + "ROIs should be a 2-D LoDTensor with shape (num_rois, 4)" + "given as [[x1, y1, x2, y2], ...], but received ROIs is " + "%d-dimensional LoDTensor", + rois_dims.size())); + PADDLE_ENFORCE_EQ( + rois_dims[1], kROISize, + platform::errors::InvalidArgument( + "ROIs should be a 2-D LoDTensor with shape (num_rois, 4)" + "given as [[x1, y1, x2, y2], ...]. 
But the second dimension of " + "the received data is %d", + rois_dims[1])); int pooled_height = ctx->Attrs().Get<int>("pooled_height"); int pooled_width = ctx->Attrs().Get<int>("pooled_width"); float spatial_scale = ctx->Attrs().Get<float>("spatial_scale"); PADDLE_ENFORCE_GT(pooled_height, 0, - "The pooled output height must greater than 0"); + platform::errors::OutOfRange( + "The pooled output height must be greater than 0, " + "but received height is %d", + pooled_height)); PADDLE_ENFORCE_GT(pooled_width, 0, - "The pooled output width must greater than 0"); + platform::errors::OutOfRange( + "The pooled output width must be greater than 0, " + "but received width is %d", + pooled_width)); PADDLE_ENFORCE_GT(spatial_scale, 0.0f, - "The spatial scale must greater than 0"); + platform::errors::OutOfRange( + "The spatial scale must be greater than 0, " + "but received spatial scale is %f", + spatial_scale)); auto out_dims = input_dims; out_dims[0] = rois_dims[0]; @@ -84,10 +103,10 @@ class ROIPoolGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), - "The gradient of Out should not be null."); - PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName("X")), - "The gradient of X should not be null."); + OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input", + framework::GradVarName("Out"), "roi_pool"); + OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output", + framework::GradVarName("X"), "roi_pool"); ctx->SetOutputsDim(framework::GradVarName("X"), ctx->GetInputsDim("X")); } diff --git a/python/paddle/fluid/input.py b/python/paddle/fluid/input.py index bf771d801e63bfe6d77025c51d7d17aa9d7a9fd6..acfc7464439a5acfcb9fdb6899481427420aca10 100644 --- a/python/paddle/fluid/input.py +++ b/python/paddle/fluid/input.py @@ -98,6 +98,7 @@ def one_hot(input, depth, allow_out_of_range=False): 
label = fluid.data(name="label", shape=[4], dtype="int64") one_hot_label = fluid.one_hot(input=label, depth=4) """ + check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'one_hot_v2') helper = LayerHelper("one_hot_v2", **locals()) one_hot_out = helper.create_variable_for_type_inference(dtype='float32') diff --git a/python/paddle/fluid/layers/loss.py b/python/paddle/fluid/layers/loss.py index dc61aa1f4cf4cd9fe2333450cfe1066799db613a..211dc231b21fb73dd99c4e02a217772ec87fc8a4 100644 --- a/python/paddle/fluid/layers/loss.py +++ b/python/paddle/fluid/layers/loss.py @@ -101,6 +101,10 @@ def center_loss(input, """ helper = LayerHelper('center_loss', **locals()) dtype = helper.input_dtype() + check_variable_and_dtype(input, 'input', ['float32', 'float64'], + 'center_loss') + check_variable_and_dtype(label, 'label', ['int32', 'int64'], 'center_loss') + centers_shape = [num_classes, input.shape[1]] centers_param = helper.create_parameter( attr=param_attr, shape=centers_shape, dtype=dtype) @@ -108,6 +112,8 @@ def center_loss(input, if isinstance(alpha, Variable): alpha_param = alpha + check_variable_and_dtype(alpha, 'alpha', ['float32', 'float64'], + 'center_loss') else: assert isinstance(alpha, float) alpha_param = helper.create_variable( diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index ef3ee0a7cd34d6abd19a34d56602911d61756223..8ffd1d39d10f3dd1d8183080a964f573b7f5c85f 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -5235,6 +5235,9 @@ def ctc_greedy_decoder(input, input_length=x_pad_len) """ + check_variable_and_dtype(input, 'input', ['float32', 'float64'], + 'ctc_greedy_decoder') + helper = LayerHelper("ctc_greedy_decoder", **locals()) _, topk_indices = topk(input, k=1) @@ -6715,6 +6718,8 @@ def roi_pool(input, print(out) #array([[[[11.]]], [[[16.]]]], dtype=float32) print(np.array(out).shape) # (2, 1, 1, 1) """ + check_variable_and_dtype(input, 'input', ['float32'], 'roi_pool') + 
check_variable_and_dtype(rois, 'rois', ['float32'], 'roi_pool') helper = LayerHelper('roi_pool', **locals()) dtype = helper.input_dtype() pool_out = helper.create_variable_for_type_inference(dtype) @@ -13096,6 +13101,8 @@ def prroi_pool(input, """ + check_variable_and_dtype(input, 'input', ['float32'], 'prroi_pool') + check_variable_and_dtype(rois, 'rois', ['float32'], 'prroi_pool') helper = LayerHelper('prroi_pool', **locals()) # check attrs if not isinstance(spatial_scale, float): diff --git a/python/paddle/fluid/tests/unittests/test_center_loss.py b/python/paddle/fluid/tests/unittests/test_center_loss.py index 1408fb249ff27b7da8cebfec56d3e8fb8713ef9e..07175579fdd60171c2f9bf97427e394bf69aca40 100644 --- a/python/paddle/fluid/tests/unittests/test_center_loss.py +++ b/python/paddle/fluid/tests/unittests/test_center_loss.py @@ -18,6 +18,7 @@ import unittest import numpy as np from op_test import OpTest import paddle.fluid.core as core +import paddle.fluid as fluid class TestCenterLossOp(OpTest): @@ -91,5 +92,64 @@ class TestCenterLossOpNoUpdate(TestCenterLossOp): self.need_update = False +class BadInputTestCenterLoss(unittest.TestCase): + def test_error(self): + with fluid.program_guard(fluid.Program()): + + def test_bad_x(): + data = [[1, 2, 3, 4], [5, 6, 7, 8]] + label = fluid.layers.data( + name='label', shape=[2, 1], dtype='int32') + res = fluid.layers.center_loss( + data, + label, + num_classes=1000, + alpha=0.2, + param_attr=fluid.initializer.Xavier(uniform=False), + update_center=True) + + self.assertRaises(TypeError, test_bad_x) + + def test_bad_y(): + data = fluid.layers.data( + name='data', shape=[2, 32], dtype='float32') + label = [[2], [3]] + res = fluid.layers.center_loss( + data, + label, + num_classes=1000, + alpha=0.2, + param_attr=fluid.initializer.Xavier(uniform=False), + update_center=True) + + self.assertRaises(TypeError, test_bad_y) + + def test_bad_alpha(): + data = fluid.layers.data( + name='data2', + shape=[2, 32], + dtype='float32', + 
append_batch_size=False) + label = fluid.layers.data( + name='label2', + shape=[2, 1], + dtype='int32', + append_batch_size=False) + alpha = fluid.layers.data( + name='alpha', + shape=[1], + dtype='int64', + append_batch_size=False) + res = fluid.layers.center_loss( + data, + label, + num_classes=1000, + alpha=alpha, + param_attr=fluid.initializer.Xavier(uniform=False), + update_center=True) + + self.assertRaises(TypeError, test_bad_alpha) + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_ctc_align.py b/python/paddle/fluid/tests/unittests/test_ctc_align.py index 2078ff8ef1c32fab7698b2599d7e7c9ab0863082..f5934debfd7b663b24a0949012ea2aa85e07ece8 100644 --- a/python/paddle/fluid/tests/unittests/test_ctc_align.py +++ b/python/paddle/fluid/tests/unittests/test_ctc_align.py @@ -217,5 +217,16 @@ class TestCTCAlignOpApi(unittest.TestCase): return_numpy=False) +class BadInputTestCTCAlignr(unittest.TestCase): + def test_error(self): + with fluid.program_guard(fluid.Program()): + + def test_bad_x(): + x = fluid.layers.data(name='x', shape=[8], dtype='int64') + cost = fluid.layers.ctc_greedy_decoder(input=x, blank=0) + + self.assertRaises(TypeError, test_bad_x) + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py b/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py index 9901cdfef63e498f16662d6bdf0e0396030fe04e..9b6c2b1fd873182e0cf2cc73bcf9cb9533f3a3f1 100644 --- a/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py @@ -204,5 +204,20 @@ class TestOneHotOpApi(unittest.TestCase): return_numpy=False) +class BadInputTestOnehotV2(unittest.TestCase): + def test_error(self): + with fluid.program_guard(fluid.Program()): + + def test_bad_x(): + label = fluid.layers.data( + name="label", + shape=[4], + append_batch_size=False, + dtype="float32") + one_hot_label = fluid.one_hot(input=label, 
depth=4) + + self.assertRaises(TypeError, test_bad_x) + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_prroi_pool_op.py b/python/paddle/fluid/tests/unittests/test_prroi_pool_op.py index f414267a9a2453618d7a5854e0b2052da56b8f50..efb5e05bdebca1cdfd7b13118418318e0dc06452 100644 --- a/python/paddle/fluid/tests/unittests/test_prroi_pool_op.py +++ b/python/paddle/fluid/tests/unittests/test_prroi_pool_op.py @@ -251,6 +251,33 @@ class TestPRROIPoolOpTensorRoIs(OpTest): self.assertRaises(TypeError, fluid.layers.prroi_pool, x, rois, 0.25, 7, 0.7) + def test_bad_x(): + x = fluid.layers.data( + name='data1', + shape=[2, 3, 16, 16], + dtype='int64', + append_batch_size=False) + label = fluid.layers.data( + name='label1', + shape=[2, 4], + dtype='float32', + lod_level=1, + append_batch_size=False) + output = fluid.layers.prroi_pool(x, label, 0.25, 2, 2) + + self.assertRaises(TypeError, test_bad_x) + + def test_bad_y(): + x = fluid.layers.data( + name='data2', + shape=[2, 3, 16, 16], + dtype='float32', + append_batch_size=False) + label = [[1, 2, 3, 4], [2, 3, 4, 5]] + output = fluid.layers.prroi_pool(x, label, 0.25, 2, 2) + + self.assertRaises(TypeError, test_bad_y) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_roi_pool_op.py b/python/paddle/fluid/tests/unittests/test_roi_pool_op.py index 79f9127760bc2978d8771049c7ca66f29fd654c1..1200b0e3470f650dce4365ee46458c8184281292 100644 --- a/python/paddle/fluid/tests/unittests/test_roi_pool_op.py +++ b/python/paddle/fluid/tests/unittests/test_roi_pool_op.py @@ -20,6 +20,7 @@ import math import sys import paddle.compat as cpt from op_test import OpTest +import paddle.fluid as fluid class TestROIPoolOp(OpTest): @@ -115,15 +116,15 @@ class TestROIPoolOp(OpTest): for bno in range(self.batch_size): self.rois_lod[0].append(bno + 1) for i in range(bno + 1): - x1 = np.random.random_integers( + x1 = np.random.randint( 0, self.width // 
self.spatial_scale - self.pooled_width) - y1 = np.random.random_integers( + y1 = np.random.randint( 0, self.height // self.spatial_scale - self.pooled_height) - x2 = np.random.random_integers(x1 + self.pooled_width, - self.width // self.spatial_scale) - y2 = np.random.random_integers( - y1 + self.pooled_height, self.height // self.spatial_scale) + x2 = np.random.randint(x1 + self.pooled_width, + self.width // self.spatial_scale) + y2 = np.random.randint(y1 + self.pooled_height, + self.height // self.spatial_scale) roi = [bno, x1, y1, x2, y2] rois.append(roi) @@ -141,6 +142,31 @@ class TestROIPoolOp(OpTest): self.check_grad(['X'], 'Out') +class BadInputTestRoiPool(unittest.TestCase): + def test_error(self): + with fluid.program_guard(fluid.Program()): + + def test_bad_x(): + x = fluid.layers.data( + name='data1', shape=[2, 1, 4, 4], dtype='int64') + label = fluid.layers.data( + name='label', shape=[2, 4], dtype='float32', lod_level=1) + output = fluid.layers.roi_pool(x, label, 1, 1, 1.0) + + self.assertRaises(TypeError, test_bad_x) + + def test_bad_y(): + x = fluid.layers.data( + name='data2', + shape=[2, 1, 4, 4], + dtype='float32', + append_batch_size=False) + label = [[1, 2, 3, 4], [2, 3, 4, 5]] + output = fluid.layers.roi_pool(x, label, 1, 1, 1.0) + + self.assertRaises(TypeError, test_bad_y) + + class TestROIPoolInLodOp(TestROIPoolOp): def set_data(self): self.init_test_case()