Unverified commit b4daea13, authored by Double_V, committed by GitHub


API/OP (center_loss, fluid.one_hot, prroi_pool, roi_pool, ctc_greedy_decoder) error message enhancement (#23794)

* error message enhanced, test=develop

* error message enhanced for APIs, test=develop

* error message enhanced for roi_pool, test=develop

* update added code, test=develop

* update fluid/input.py, test=develop

* update fluid/input.py, test=develop

* fix code style, test=develop

* fix low coverage, test=develop

* error message enhanced for roi pool, test=develop
Parent 56c54ccc
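Summary of the change: the C++ InferShape checks below are rewritten with the OP_INOUT_CHECK macro and typed platform::errors, and each Python API now validates its Variable arguments up front with check_variable_and_dtype. A minimal sketch of the resulting user-facing behavior (illustrative only; assumes a Paddle 1.x fluid environment and is not part of this commit):

    import paddle.fluid as fluid

    with fluid.program_guard(fluid.Program()):
        # one_hot expects an int32/int64 Variable; a float32 input now fails
        # fast in Python with a TypeError naming the op, the argument, and the
        # received dtype, instead of a bare C++ enforce failure at runtime.
        label = fluid.layers.data(name="label", shape=[4], dtype="float32")
        try:
            fluid.one_hot(input=label, depth=4)
        except TypeError as e:
            print(e)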
@@ -27,29 +27,18 @@ class CenterLossOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of CenterLoss should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "CenterLoss");
     auto x_dims = ctx->GetInputDim("X");
-    PADDLE_ENFORCE(ctx->HasInput("CenterUpdateRate"),
-                   "Input(CenterUpdateRate) of CenterLoss should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Label"),
-                   "Input(Label) of CenterLoss should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Centers"),
-                   "Input(Centers) of CenterLoss should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasOutput("SampleCenterDiff"),
-        "Output(SampleCenterDiff) of CenterLoss should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Loss"),
-                   "Output(Loss) of CenterLoss should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasOutput("CentersOut"),
-        "Output(CentersOut) of CenterLoss shared data with Centers.");
+    OP_INOUT_CHECK(ctx->HasInput("CenterUpdateRate"), "Input",
+                   "CenterUpdateRate", "CenterLoss");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "CenterLoss");
+    OP_INOUT_CHECK(ctx->HasInput("Centers"), "Input", "Centers", "CenterLoss");
+    OP_INOUT_CHECK(ctx->HasOutput("SampleCenterDiff"), "Output",
+                   "SampleCenterDiff", "CenterLoss");
+    OP_INOUT_CHECK(ctx->HasOutput("Loss"), "Output", "Loss", "CenterLoss");
+    OP_INOUT_CHECK(ctx->HasOutput("CentersOut"), "Output", "CentersOut",
+                   "CenterLoss");

     ctx->SetOutputDim("SampleCenterDiff",
                       {x_dims[0], product(x_dims) / x_dims[0]});
@@ -99,12 +88,12 @@ class CenterLossGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("SampleCenterDiff"),
-                   "Input(SampleCenterDiff) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Loss")),
-                   "Input(Loss) should not be null");
-    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
-                   "Output(X) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("SampleCenterDiff"), "Input",
+                   "SampleCenterDiff", "CenterLossGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Loss")), "Input",
+                   framework::GradVarName("Loss"), "CenterLossGrad");
+    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
+                   framework::GradVarName("X"), "CenterLossGrad");
     auto x_dims = ctx->GetInputDim("X");
     auto x_grad_name = framework::GradVarName("X");
...
@@ -22,10 +22,8 @@ class CTCAlignOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true,
-                      "Input of CTCAlignOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Output"), true,
-                      "Output of CTCAlignOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "ctc_align");
+    OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output", "ctc_align");

     auto input_dims = ctx->GetInputDim("Input");
...
@@ -24,14 +24,13 @@ class OneHotV2Op : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of OneHotOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of OneHotOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "one_hot_v2");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "one_hot_v2");

     auto x_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE_GE(x_dims.size(), 1,
-                      "Rank of Input(X) should be at least 1.");
+                      platform::errors::InvalidArgument(
+                          "Rank of Input(X) should be at least 1."));

     int depth = ctx->Attrs().Get<int>("depth");
     if (ctx->HasInput("depth_tensor")) {
...
@@ -79,15 +79,10 @@ class PRROIPoolOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      platform::errors::NotFound(
-                          "Input(X) of op(PRROIPool) should not be null."));
-    PADDLE_ENFORCE_EQ(ctx->HasInput("ROIs"), true,
-                      platform::errors::NotFound(
-                          "Input(ROIs) of op(PRROIPool) should not be null."));
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      platform::errors::NotFound(
-                          "Output(Out) of op(PRROIPool) should not be null."));
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "prroi_pool");
+    OP_INOUT_CHECK(ctx->HasInput("ROIs"), "Input", "ROIs", "prroi_pool");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "prroi_pool");
     auto input_dims = ctx->GetInputDim("X");
     auto rois_dims = ctx->GetInputDim("ROIs");
@@ -148,10 +143,10 @@ class PRROIPoolGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
-                      "The gradient of Out should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")), true,
-                      "The gradient of X should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "prroi_pool");
+    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
+                   framework::GradVarName("X"), "prroi_pool");
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
     ctx->SetOutputDim(framework::GradVarName("ROIs"), ctx->GetInputDim("ROIs"));
   }
...
@@ -26,39 +26,58 @@ class ROIPoolOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of ROIPoolOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("ROIs"),
-                   "Input(ROIs) of ROIPoolOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of ROIPoolOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Argmax"),
-                   "Output(Argmax) of ROIPoolOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "roi_pool");
+    OP_INOUT_CHECK(ctx->HasInput("ROIs"), "Input", "ROIs", "roi_pool");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "roi_pool");
+    OP_INOUT_CHECK(ctx->HasOutput("Argmax"), "Output", "Argmax", "roi_pool");
     auto input_dims = ctx->GetInputDim("X");
     auto rois_dims = ctx->GetInputDim("ROIs");

     if (ctx->HasInput("RoisLod")) {
       auto rois_lod_dims = ctx->GetInputDim("RoisLod");
       PADDLE_ENFORCE(rois_lod_dims.size() == 1, "");
     }
-    PADDLE_ENFORCE(input_dims.size() == 4,
-                   "The format of input tensor is NCHW.");
-    PADDLE_ENFORCE(rois_dims.size() == 2,
-                   "ROIs should be a 2-D LoDTensor of shape (num_rois, 4)"
-                   "given as [[x1, y1, x2, y2], ...].");
-    PADDLE_ENFORCE(rois_dims[1] == kROISize,
-                   "ROIs should be a 2-D LoDTensor of shape (num_rois, 4)"
-                   "given as [[x1, y1, x2, y2], ...].");
+    PADDLE_ENFORCE_EQ(input_dims.size(), 4,
+                      platform::errors::InvalidArgument(
+                          "The input data should be a four-dimensional "
+                          "tensor with [N,C,H,W], but received input data "
+                          "with %d dimension(s).",
+                          input_dims.size()));
+    PADDLE_ENFORCE_EQ(
+        rois_dims.size(), 2,
+        platform::errors::InvalidArgument(
+            "ROIs should be a 2-D LoDTensor with shape (num_rois, 4) "
+            "given as [[x1, y1, x2, y2], ...], but received a "
+            "%d-dimensional LoDTensor.",
+            rois_dims.size()));
+    PADDLE_ENFORCE_EQ(
+        rois_dims[1], kROISize,
+        platform::errors::InvalidArgument(
+            "ROIs should be a 2-D LoDTensor with shape (num_rois, 4) "
+            "given as [[x1, y1, x2, y2], ...], but the second dimension "
+            "of the received data is %d.",
+            rois_dims[1]));
     int pooled_height = ctx->Attrs().Get<int>("pooled_height");
     int pooled_width = ctx->Attrs().Get<int>("pooled_width");
     float spatial_scale = ctx->Attrs().Get<float>("spatial_scale");
-    PADDLE_ENFORCE_GT(pooled_height, 0,
-                      "The pooled output height must greater than 0");
-    PADDLE_ENFORCE_GT(pooled_width, 0,
-                      "The pooled output width must greater than 0");
-    PADDLE_ENFORCE_GT(spatial_scale, 0.0f,
-                      "The spatial scale must greater than 0");
+    PADDLE_ENFORCE_GT(pooled_height, 0,
+                      platform::errors::OutOfRange(
+                          "The pooled output height must be greater than 0, "
+                          "but received height is %d.",
+                          pooled_height));
+    PADDLE_ENFORCE_GT(pooled_width, 0,
+                      platform::errors::OutOfRange(
+                          "The pooled output width must be greater than 0, "
+                          "but received width is %d.",
+                          pooled_width));
+    PADDLE_ENFORCE_GT(spatial_scale, 0.0f,
+                      platform::errors::OutOfRange(
+                          "The spatial scale must be greater than 0, "
+                          "but received spatial scale is %f.",
+                          spatial_scale));

     auto out_dims = input_dims;
     out_dims[0] = rois_dims[0];
@@ -84,10 +103,10 @@ class ROIPoolGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "The gradient of Out should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName("X")),
-                   "The gradient of X should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "roi_pool");
+    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
+                   framework::GradVarName("X"), "roi_pool");
     ctx->SetOutputsDim(framework::GradVarName("X"), ctx->GetInputsDim("X"));
   }
...
@@ -98,6 +98,7 @@ def one_hot(input, depth, allow_out_of_range=False):
             label = fluid.data(name="label", shape=[4], dtype="int64")
             one_hot_label = fluid.one_hot(input=label, depth=4)
     """
+    check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'one_hot_v2')
     helper = LayerHelper("one_hot_v2", **locals())
     one_hot_out = helper.create_variable_for_type_inference(dtype='float32')
...
@@ -101,6 +101,10 @@ def center_loss(input,
     """
     helper = LayerHelper('center_loss', **locals())
     dtype = helper.input_dtype()
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
+                             'center_loss')
+    check_variable_and_dtype(label, 'label', ['int32', 'int64'], 'center_loss')
+
     centers_shape = [num_classes, input.shape[1]]
     centers_param = helper.create_parameter(
         attr=param_attr, shape=centers_shape, dtype=dtype)
@@ -108,6 +112,8 @@ def center_loss(input,
     if isinstance(alpha, Variable):
         alpha_param = alpha
+        check_variable_and_dtype(alpha, 'alpha', ['float32', 'float64'],
+                                 'center_loss')
     else:
         assert isinstance(alpha, float)
         alpha_param = helper.create_variable(
...
@@ -5235,6 +5235,9 @@ def ctc_greedy_decoder(input,
                              input_length=x_pad_len)
     """
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
+                             'ctc_greedy_decoder')
+
    helper = LayerHelper("ctc_greedy_decoder", **locals())
    _, topk_indices = topk(input, k=1)
...
@@ -6715,6 +6718,8 @@ def roi_pool(input,
             print(out)  # array([[[[11.]]], [[[16.]]]], dtype=float32)
             print(np.array(out).shape)  # (2, 1, 1, 1)
     """
+    check_variable_and_dtype(input, 'input', ['float32'], 'roi_pool')
+    check_variable_and_dtype(rois, 'rois', ['float32'], 'roi_pool')
     helper = LayerHelper('roi_pool', **locals())
     dtype = helper.input_dtype()
     pool_out = helper.create_variable_for_type_inference(dtype)
...
@@ -13096,6 +13101,8 @@ def prroi_pool(input,
     """
+    check_variable_and_dtype(input, 'input', ['float32'], 'prroi_pool')
+    check_variable_and_dtype(rois, 'rois', ['float32'], 'prroi_pool')
     helper = LayerHelper('prroi_pool', **locals())
     # check attrs
     if not isinstance(spatial_scale, float):
...
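Each Python-side guard above follows the same pattern. A minimal standalone sketch of how check_variable_and_dtype behaves (illustrative only; the op name 'my_layer' is hypothetical, and the import path assumes paddle.fluid.data_feeder as in Paddle 1.x):

    from paddle.fluid.data_feeder import check_variable_and_dtype
    import paddle.fluid as fluid

    with fluid.program_guard(fluid.Program()):
        x = fluid.layers.data(name='x', shape=[2, 32], dtype='float32')
        # Passes silently: x is a Variable with an allowed dtype.
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'my_layer')
        # Raises TypeError: a plain Python list is not a Variable.
        try:
            check_variable_and_dtype([[1.0, 2.0]], 'x', ['float32'], 'my_layer')
        except TypeError as e:
            print(e)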
@@ -18,6 +18,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 import paddle.fluid.core as core
+import paddle.fluid as fluid


 class TestCenterLossOp(OpTest):
@@ -91,5 +92,64 @@ class TestCenterLossOpNoUpdate(TestCenterLossOp):
         self.need_update = False


+class BadInputTestCenterLoss(unittest.TestCase):
+    def test_error(self):
+        with fluid.program_guard(fluid.Program()):
+
+            def test_bad_x():
+                data = [[1, 2, 3, 4], [5, 6, 7, 8]]
+                label = fluid.layers.data(
+                    name='label', shape=[2, 1], dtype='int32')
+                res = fluid.layers.center_loss(
+                    data,
+                    label,
+                    num_classes=1000,
+                    alpha=0.2,
+                    param_attr=fluid.initializer.Xavier(uniform=False),
+                    update_center=True)
+
+            self.assertRaises(TypeError, test_bad_x)
+
+            def test_bad_y():
+                data = fluid.layers.data(
+                    name='data', shape=[2, 32], dtype='float32')
+                label = [[2], [3]]
+                res = fluid.layers.center_loss(
+                    data,
+                    label,
+                    num_classes=1000,
+                    alpha=0.2,
+                    param_attr=fluid.initializer.Xavier(uniform=False),
+                    update_center=True)
+
+            self.assertRaises(TypeError, test_bad_y)
+
+            def test_bad_alpha():
+                data = fluid.layers.data(
+                    name='data2',
+                    shape=[2, 32],
+                    dtype='float32',
+                    append_batch_size=False)
+                label = fluid.layers.data(
+                    name='label2',
+                    shape=[2, 1],
+                    dtype='int32',
+                    append_batch_size=False)
+                alpha = fluid.layers.data(
+                    name='alpha',
+                    shape=[1],
+                    dtype='int64',
+                    append_batch_size=False)
+                res = fluid.layers.center_loss(
+                    data,
+                    label,
+                    num_classes=1000,
+                    alpha=alpha,
+                    param_attr=fluid.initializer.Xavier(uniform=False),
+                    update_center=True)
+
+            self.assertRaises(TypeError, test_bad_alpha)
+
+
 if __name__ == "__main__":
     unittest.main()
@@ -217,5 +217,16 @@ class TestCTCAlignOpApi(unittest.TestCase):
                       return_numpy=False)


+class BadInputTestCTCAlign(unittest.TestCase):
+    def test_error(self):
+        with fluid.program_guard(fluid.Program()):
+
+            def test_bad_x():
+                x = fluid.layers.data(name='x', shape=[8], dtype='int64')
+                cost = fluid.layers.ctc_greedy_decoder(input=x, blank=0)
+
+            self.assertRaises(TypeError, test_bad_x)
+
+
 if __name__ == "__main__":
     unittest.main()
@@ -204,5 +204,20 @@ class TestOneHotOpApi(unittest.TestCase):
                       return_numpy=False)


+class BadInputTestOnehotV2(unittest.TestCase):
+    def test_error(self):
+        with fluid.program_guard(fluid.Program()):
+
+            def test_bad_x():
+                label = fluid.layers.data(
+                    name="label",
+                    shape=[4],
+                    append_batch_size=False,
+                    dtype="float32")
+                one_hot_label = fluid.one_hot(input=label, depth=4)
+
+            self.assertRaises(TypeError, test_bad_x)
+
+
 if __name__ == '__main__':
     unittest.main()
@@ -251,6 +251,33 @@ class TestPRROIPoolOpTensorRoIs(OpTest):
         self.assertRaises(TypeError, fluid.layers.prroi_pool, x, rois, 0.25,
                           7, 0.7)

+        def test_bad_x():
+            x = fluid.layers.data(
+                name='data1',
+                shape=[2, 3, 16, 16],
+                dtype='int64',
+                append_batch_size=False)
+            label = fluid.layers.data(
+                name='label1',
+                shape=[2, 4],
+                dtype='float32',
+                lod_level=1,
+                append_batch_size=False)
+            output = fluid.layers.prroi_pool(x, label, 0.25, 2, 2)
+
+        self.assertRaises(TypeError, test_bad_x)
+
+        def test_bad_y():
+            x = fluid.layers.data(
+                name='data2',
+                shape=[2, 3, 16, 16],
+                dtype='float32',
+                append_batch_size=False)
+            label = [[1, 2, 3, 4], [2, 3, 4, 5]]
+            output = fluid.layers.prroi_pool(x, label, 0.25, 2, 2)
+
+        self.assertRaises(TypeError, test_bad_y)
+

 if __name__ == '__main__':
     unittest.main()
@@ -20,6 +20,7 @@ import math
 import sys
 import paddle.compat as cpt
 from op_test import OpTest
+import paddle.fluid as fluid


 class TestROIPoolOp(OpTest):
@@ -115,15 +116,15 @@ class TestROIPoolOp(OpTest):
         for bno in range(self.batch_size):
             self.rois_lod[0].append(bno + 1)
             for i in range(bno + 1):
-                x1 = np.random.random_integers(
-                    0, self.width // self.spatial_scale - self.pooled_width)
-                y1 = np.random.random_integers(
-                    0, self.height // self.spatial_scale - self.pooled_height)
+                x1 = np.random.randint(
+                    0, self.width // self.spatial_scale - self.pooled_width)
+                y1 = np.random.randint(
+                    0, self.height // self.spatial_scale - self.pooled_height)

-                x2 = np.random.random_integers(x1 + self.pooled_width,
-                                               self.width // self.spatial_scale)
-                y2 = np.random.random_integers(
-                    y1 + self.pooled_height, self.height // self.spatial_scale)
+                x2 = np.random.randint(x1 + self.pooled_width,
+                                       self.width // self.spatial_scale)
+                y2 = np.random.randint(y1 + self.pooled_height,
+                                       self.height // self.spatial_scale)

                 roi = [bno, x1, y1, x2, y2]
                 rois.append(roi)
@@ -141,6 +142,31 @@ class TestROIPoolOp(OpTest):
         self.check_grad(['X'], 'Out')


+class BadInputTestRoiPool(unittest.TestCase):
+    def test_error(self):
+        with fluid.program_guard(fluid.Program()):
+
+            def test_bad_x():
+                x = fluid.layers.data(
+                    name='data1', shape=[2, 1, 4, 4], dtype='int64')
+                label = fluid.layers.data(
+                    name='label', shape=[2, 4], dtype='float32', lod_level=1)
+                output = fluid.layers.roi_pool(x, label, 1, 1, 1.0)
+
+            self.assertRaises(TypeError, test_bad_x)
+
+            def test_bad_y():
+                x = fluid.layers.data(
+                    name='data2',
+                    shape=[2, 1, 4, 4],
+                    dtype='float32',
+                    append_batch_size=False)
+                label = [[1, 2, 3, 4], [2, 3, 4, 5]]
+                output = fluid.layers.roi_pool(x, label, 1, 1, 1.0)
+
+            self.assertRaises(TypeError, test_bad_y)
+
+
 class TestROIPoolInLodOp(TestROIPoolOp):
     def set_data(self):
         self.init_test_case()
...