Unverified commit b4be5ef5, authored by FlyingQianMM, committed by GitHub

OP(retinanet_detection_output, retinanet_target_assign, sigmoid_focal_loss, deformable_roi_pooling) error message enhancement. test=develop (#23726)
Parent 2383a9f7
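The whole change follows one pattern: bare PADDLE_ENFORCE calls with terse messages are replaced by typed comparison macros (PADDLE_ENFORCE_EQ/GT/GE/LE) whose platform::errors::InvalidArgument payload reports both the expected and the received values, and null input/output checks collapse into the OP_INOUT_CHECK helper. A condensed before/after sketch, excerpted from the deformable_psroi_pooling hunks below:

// Before: generic enforce, no error class, received value not reported.
PADDLE_ENFORCE(trans_dims.size() == 4,
               "The format of Input Trans is (N, 2, H, W).");

// After: typed comparison plus InvalidArgument, echoing rank and shape.
PADDLE_ENFORCE_EQ(
    trans_dims.size(), 4,
    platform::errors::InvalidArgument("The rank of Input(Trans) should be "
                                      "4 and the shape of Trans should be "
                                      "(N, 2, H, W), but received Trans "
                                      "rank is:%d and Trans shape is:[%s].",
                                      trans_dims.size(), trans_dims));

// Before: per-op null check with a hand-written message.
PADDLE_ENFORCE(ctx->HasInput("Trans"),
               "Input(Trans) of DeformablePSROIPoolOp should not be null.");

// After: shared helper taking (condition, "Input"/"Output", name, op type).
OP_INOUT_CHECK(ctx->HasInput("Trans"), "Input", "Trans",
               "deformable_psroi_pooling");

On the Python side, the layers gain check_variable_and_dtype/check_type calls so that type and dtype misuse raises TypeError eagerly, which the new unit tests below assert with assertRaises.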
@@ -126,29 +126,33 @@ class DeformablePSROIPoolOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("Input"),
"Input(Input) of DeformablePSROIPoolOp"
"should not be null.");
PADDLE_ENFORCE(ctx->HasInput("ROIs"),
"Input(ROIs) of DeformablePSROIPoolOp "
"should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Trans"),
"Input(Trans) of DeformablePSROIPoolOp "
"should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Output"),
"Output(Output) of DeformablePSROIPoolOp "
"should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("TopCount"),
"Output(TopCount) of DeformablePSROIPoolOp "
"should not be null.");
OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input",
"deformable_psroi_pooling");
OP_INOUT_CHECK(ctx->HasInput("ROIs"), "Input", "ROIs",
"deformable_psroi_pooling");
OP_INOUT_CHECK(ctx->HasInput("Trans"), "Input", "Trans",
"deformable_psroi_pooling");
OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output",
"deformable_psroi_pooling");
OP_INOUT_CHECK(ctx->HasOutput("TopCount"), "Output", "TopCount",
"deformable_psroi_pooling");
auto input_dims = ctx->GetInputDim("Input");
auto rois_dims = ctx->GetInputDim("ROIs");
auto trans_dims = ctx->GetInputDim("Trans");
PADDLE_ENFORCE(rois_dims.size() == 2,
"ROIs should be a 2-D LoDTensor of shape (num_rois, 4)"
"given as [[ x1, y1, x2, y2], ...].");
PADDLE_ENFORCE(trans_dims.size() == 4,
"The format of Input Trans is (N, 2, H, W).");
PADDLE_ENFORCE_EQ(
rois_dims.size(), 2,
platform::errors::InvalidArgument(
"Input(ROIs) should be a 2-D LoDTensor of shape (num_rois, 4) "
"given as [[ x1, y1, x2, y2], ...]. The rank of Input(ROIs) should "
"be 2, but received ROIs rank is:%d, ROIs shape is:[%s].",
rois_dims.size(), rois_dims));
PADDLE_ENFORCE_EQ(
trans_dims.size(), 4,
platform::errors::InvalidArgument("The rank of Input(Trans) should be "
"4 and the shape of Trans should be "
"(N, 2, H, W), but received Trans "
"rank is:%d and Trans shape is:[%s].",
trans_dims.size(), trans_dims));
auto pooled_height = ctx->Attrs().Get<int>("pooled_height");
auto pooled_width = ctx->Attrs().Get<int>("pooled_width");
auto spatial_scale = ctx->Attrs().Get<float>("spatial_scale");
@@ -161,32 +165,92 @@ class DeformablePSROIPoolOp : public framework::OperatorWithKernel {
auto part_width = part_size[1];
auto sample_per_part = ctx->Attrs().Get<int>("sample_per_part");
auto trans_std = ctx->Attrs().Get<float>("trans_std");
PADDLE_ENFORCE(trans_std >= 0.0f, "trans_std must greater than 0.0");
PADDLE_ENFORCE(input_dims[1] >= output_channels,
"input channels must greater than out_channels");
PADDLE_ENFORCE_GT(pooled_height, 0,
"The pooled height must greater than 0");
PADDLE_ENFORCE_GT(pooled_width, 0, "The pooled width must greater than 0");
PADDLE_ENFORCE_GT(spatial_scale, 0.0f,
"The spatial scale must greater than 0");
PADDLE_ENFORCE_EQ(group_size.size(), 2,
"The size of group_size should be 2.");
PADDLE_ENFORCE_GT(group_height, 0,
"The group_height in group_size must greater than 0");
PADDLE_ENFORCE_GT(group_width, 0,
"The group_width in group_size must greater than 0");
PADDLE_ENFORCE_EQ(part_size.size(), 2,
"The size of part_size should be 2.");
PADDLE_ENFORCE_GT(part_height, 0,
"The part_height in part_size must greater than 0");
PADDLE_ENFORCE_GT(part_width, 0,
"The part_width in part_size must greater than 0");
PADDLE_ENFORCE(part_height <= trans_dims[2],
"The height of trans must greater than part_height");
PADDLE_ENFORCE(part_width <= trans_dims[3],
"The width of trans must greater than part_width");
PADDLE_ENFORCE_GT(sample_per_part, 0,
"The sample_per_part must greater than 0");
PADDLE_ENFORCE_GE(trans_std, 0., platform::errors::InvalidArgument(
"Input(trans_std) should not be lower "
"than 0.0, but received trans_std "
"is:%f",
trans_std));
PADDLE_ENFORCE_GE(
input_dims[1], output_channels,
platform::errors::InvalidArgument(
"The channel of Input(Input) should not be lower than "
"Input(output_dim), "
"but received Input channel is:%d and output_dim is:%d.",
input_dims[1], output_channels));
PADDLE_ENFORCE_GT(
pooled_height, 0,
platform::errors::InvalidArgument(
"Input(pooled_height) should be greater than 0, but received "
"pooled_height is:%d.",
pooled_height));
PADDLE_ENFORCE_GT(
pooled_width, 0,
platform::errors::InvalidArgument(
"Input(pooled_width) should be greater than 0, but received "
"pooled_width is:%d.",
pooled_width));
PADDLE_ENFORCE_GT(
spatial_scale, 0.,
platform::errors::InvalidArgument(
"Input(spatial_scale) should be greater than 0., but received "
"spatial_scale is:%f.",
spatial_scale));
PADDLE_ENFORCE_EQ(
group_size.size(), 2,
platform::errors::InvalidArgument(
"The length of Input(group_size) should be 2, but received "
"group_size length is:%d.",
group_size.size()));
PADDLE_ENFORCE_GT(
group_height, 0,
platform::errors::InvalidArgument(
"group_height in Input(group_size) should be greater than 0, "
"but received group_height is:%d.",
group_height));
PADDLE_ENFORCE_GT(
group_width, 0,
platform::errors::InvalidArgument(
"group_width in Input(group_size) should be greater than 0 "
"but received group_width is:%d.",
group_width));
PADDLE_ENFORCE_EQ(
part_size.size(), 2,
platform::errors::InvalidArgument(
"The length of Input(part_size) should be 2, but received "
"part_size length is:%d.",
part_size.size()));
PADDLE_ENFORCE_GT(
part_height, 0,
platform::errors::InvalidArgument(
"part_height in Input(part_size) should be greater than 0 "
"but received part_height is:%d.",
part_height));
PADDLE_ENFORCE_GT(
part_width, 0,
platform::errors::InvalidArgument(
"part_width in Input(part_size) should be greater than 0 "
"but received part_width is:%d.",
part_width));
PADDLE_ENFORCE_LE(
part_height, trans_dims[2],
platform::errors::InvalidArgument(
"part_height in Input(part_size) should not be greater than "
"the height of Input(Trans), but received part_height is:%d, "
"the height of Input(Trans) is:%d.",
part_height, trans_dims[2]));
PADDLE_ENFORCE_LE(
part_width, trans_dims[3],
platform::errors::InvalidArgument(
"part_width in Input(part_size) should not be greater than "
"the width of Input(Trans), but received part_width is:%d, "
"the width of Input(Trans) is:%d.",
part_width, trans_dims[3]));
PADDLE_ENFORCE_GT(
sample_per_part, 0,
platform::errors::InvalidArgument(
"Input(sample_per_part) should be greater than 0, but received "
"sample_per_part is:%d.",
sample_per_part));
auto out_dims = input_dims;
out_dims[0] = rois_dims[0];
out_dims[1] = output_channels;
@@ -230,8 +294,8 @@ class DeformablePSROIPoolGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Output")),
"The gradient of Output should not be null.");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Output")), "Input",
"Output@GRAD", "deformable_psroi_pooling");
if (ctx->HasOutput(framework::GradVarName("Input"))) {
ctx->SetOutputDim(framework::GradVarName("Input"),
ctx->GetInputDim("Input"));
@@ -200,14 +200,22 @@ class DeformablePSROIPoolCUDAKernel : public framework::OpKernel<T> {
const int width = static_cast<int>(input->dims()[3]);
const int channels_trans = no_trans ? 2 : trans->dims()[1];
const int num_rois = rois->dims()[0];
PADDLE_ENFORCE_EQ(num_rois, out->dims()[0],
"number of rois should be same with number of output");
PADDLE_ENFORCE_EQ(
num_rois, out->dims()[0],
platform::errors::InvalidArgument(
"The number of Input(ROIs) should be same with the number of "
"Ouput(Output), but received ROIs number is:%d, Output number "
"is:%d.",
num_rois, out->dims()[0]));
const int count = num_rois * output_dim * pooled_height * pooled_width;
const int num_classes = no_trans ? 1 : channels_trans / 2;
const int channels_each_class =
no_trans ? output_dim : output_dim / num_classes;
PADDLE_ENFORCE(channels_each_class >= 1,
"channels_each must greater than 1");
PADDLE_ENFORCE_GE(channels_each_class, 1,
platform::errors::InvalidArgument(
"channels_each_class should not be lower than 1, but "
"channels_each_class is:%d.",
channels_each_class));
const T* bottom_data = input->data<T>();
const T* bottom_rois = rois->data<T>();
@@ -221,10 +229,16 @@ class DeformablePSROIPoolCUDAKernel : public framework::OpKernel<T> {
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch,
"The rois_batch_size and imgs batch_size must be the same.");
platform::errors::InvalidArgument(
"rois_batch_size should be equal to the batch_size, but "
"rois_batch_size is:%d, batch_size is:%d.",
rois_batch_size, batch));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(num_rois, rois_num_with_lod,
"The rois_num from input and lod must be the same.");
platform::errors::InvalidArgument(
"The rois_num from input and lod must be same, but"
"rois_num from input is:%d, rois_num from lod is:%d.",
num_rois, rois_num_with_lod));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
@@ -486,12 +500,17 @@ class DeformablePSROIPoolGradCUDAKernel : public framework::OpKernel<T> {
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch,
"The rois_batch_size and imgs batch_size must be the same.");
platform::errors::InvalidArgument(
"rois_batch_size should be equal to the batch_size, but "
"rois_batch_size is:%d, batch_size is:%d.",
rois_batch_size, batch));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(num_rois, rois_num_with_lod,
"The rois_num from input and lod must be the same.");
platform::errors::InvalidArgument(
"The rois_num from input and lod must be same, but"
"rois_num from input is:%d, rois_num from lod is:%d.",
num_rois, rois_num_with_lod));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
@@ -171,9 +171,13 @@ class DeformablePSROIPoolCPUKernel : public framework::OpKernel<T> {
set_zero(dev_ctx, top_count, static_cast<T>(0));
const int num_rois = rois->dims()[0];
PADDLE_ENFORCE_EQ(num_rois, out->dims()[0],
"number of rois should be same with number of output");
PADDLE_ENFORCE_EQ(
num_rois, out->dims()[0],
platform::errors::InvalidArgument(
"The number of Input(ROIs) should be same with the number of "
"Ouput(Output), but received ROIs number is:%d, Output number "
"is:%d.",
num_rois, out->dims()[0]));
framework::Tensor roi_batch_id_list;
roi_batch_id_list.Resize({num_rois});
int* roi_batch_id_data =
@@ -200,8 +204,11 @@ class DeformablePSROIPoolCPUKernel : public framework::OpKernel<T> {
auto count = num_rois * output_dim * pooled_height * pooled_width;
auto num_classes = no_trans ? 1 : channels_trans / 2;
auto channels_each_class = no_trans ? output_dim : output_dim / num_classes;
PADDLE_ENFORCE(channels_each_class >= 1,
"channels_each must greater than 1");
PADDLE_ENFORCE_GE(channels_each_class, 1,
platform::errors::InvalidArgument(
"channels_each_class should not be lower than 1, but "
"channels_each_class is:%d.",
channels_each_class));
const T* bottom_data = input->data<T>();
const T* bottom_rois = rois->data<T>();
@@ -212,11 +219,18 @@ class DeformablePSROIPoolCPUKernel : public framework::OpKernel<T> {
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(rois_batch_size, batch,
"The rois_batch_size must equal to batch_size of img.");
PADDLE_ENFORCE_EQ(
rois_batch_size, batch,
platform::errors::InvalidArgument(
"rois_batch_size should be equal to the batch_size, but "
"rois_batch_size is:%d, batch_size is:%d.",
rois_batch_size, batch));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(num_rois, rois_num_with_lod,
"The rois_num from input and lod must be the same.");
platform::errors::InvalidArgument(
"The rois_num from input and lod must be same, but"
"rois_num from input is:%d, rois_num from lod is:%d.",
num_rois, rois_num_with_lod));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
@@ -467,7 +481,10 @@ class DeformablePSROIPoolGradCPUKernel : public framework::OpKernel<T> {
int rois_batch_size = rois_lod.size() - 1;
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(num_rois, rois_num_with_lod,
"The rois_num from input and lod must be the same.");
platform::errors::InvalidArgument(
"The rois_num from input and lod must be same, but"
"rois_num from input is:%d, rois_num from lod is:%d.",
num_rois, rois_num_with_lod));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
@@ -27,25 +27,38 @@ class RetinanetDetectionOutputOp : public framework::OperatorWithKernel {
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE_GE(
ctx->Inputs("BBoxes").size(), 1UL,
"Input(BBoxes) of RetinanetDetectionOutput should not be null.");
platform::errors::InvalidArgument("The length of Input(BBoxes) should "
"be greater than 0, but received "
"BBoxes length is:%d.",
ctx->Inputs("BBoxes").size()));
PADDLE_ENFORCE_GE(
ctx->Inputs("Scores").size(), 1UL,
"Input(Scores) of RetinanetDetectionOutput should not be null.");
platform::errors::InvalidArgument("The length of Input(Scores) should "
"be greater than 0, but received "
"Scores length is:%d.",
ctx->Inputs("Scores").size()));
PADDLE_ENFORCE_GE(
ctx->Inputs("Anchors").size(), 1UL,
"Input(Anchors) of RetinanetDetectionOutput should not be null.");
platform::errors::InvalidArgument("The length of Input(Anchors) should "
"be greater than 0, but received "
"Anchors length is:%d.",
ctx->Inputs("Anchors").size()));
PADDLE_ENFORCE_EQ(
ctx->Inputs("BBoxes").size(), ctx->Inputs("Scores").size(),
"Input tensors(BBoxes and Scores) should have the same size.");
platform::errors::InvalidArgument(
"Input(BBoxes) and Input(Scores) should have the same length, but "
"received BBoxes length is:%d, Scores length is:%d.",
ctx->Inputs("BBoxes").size(), ctx->Inputs("Scores").size()));
PADDLE_ENFORCE_EQ(
ctx->Inputs("BBoxes").size(), ctx->Inputs("Anchors").size(),
"Input tensors(BBoxes and Anchors) should have the same size.");
PADDLE_ENFORCE(
ctx->HasInput("ImInfo"),
"Input(ImInfo) of RetinanetDetectionOutput should not be null");
PADDLE_ENFORCE(
ctx->HasOutput("Out"),
"Output(Out) of RetinanetDetectionOutput should not be null.");
platform::errors::InvalidArgument(
"Input(BBoxes) and Input(Anchors) should have the same length, but "
"received BBoxes length is:%d, Anchors length is:%d.",
ctx->Inputs("BBoxes").size(), ctx->Inputs("Anchors").size()));
OP_INOUT_CHECK(ctx->HasInput("ImInfo"), "Input", "ImInfo",
"retinanet_detection_output");
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out",
"retinanet_detection_output");
auto bboxes_dims = ctx->GetInputsDim("BBoxes");
auto scores_dims = ctx->GetInputsDim("Scores");
@@ -53,37 +66,77 @@ class RetinanetDetectionOutputOp : public framework::OperatorWithKernel {
auto im_info_dims = ctx->GetInputDim("ImInfo");
const size_t b_n = bboxes_dims.size();
PADDLE_ENFORCE_GT(b_n, 0, "Input bbox tensors count should > 0.");
PADDLE_ENFORCE_GT(b_n, 0, platform::errors::InvalidArgument(
"The number of Variables in Input(BBoxes) "
"should be greater than 0, "
"but received number is:%d.",
b_n));
const size_t s_n = scores_dims.size();
PADDLE_ENFORCE_GT(s_n, 0, "Input score tensors count should > 0.");
PADDLE_ENFORCE_GT(s_n, 0, platform::errors::InvalidArgument(
"The number of Variables in Input(Scores) "
"should be greater than 0, "
"but received number is:%d.",
s_n));
const size_t a_n = anchors_dims.size();
PADDLE_ENFORCE_GT(a_n, 0, "Input anchor tensors count should > 0.");
PADDLE_ENFORCE_GT(a_n, 0, platform::errors::InvalidArgument(
"The number of Variables in Input(Anchors) "
"should be greater than 0, "
"but received number is:%d.",
a_n));
auto bbox_dims = bboxes_dims[0];
auto score_dims = scores_dims[0];
auto anchor_dims = anchors_dims[0];
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(score_dims.size(), 3,
"The rank of Input(Scores) must be 3");
PADDLE_ENFORCE_EQ(bbox_dims.size(), 3,
"The rank of Input(BBoxes) must be 3");
PADDLE_ENFORCE_EQ(anchor_dims.size(), 2,
"The rank of Input(Anchors) must be 2");
PADDLE_ENFORCE(bbox_dims[2] == 4,
"The last dimension of Input(BBoxes) must be 4, "
"represents the layout of coordinate "
"[xmin, ymin, xmax, ymax]");
PADDLE_ENFORCE_EQ(
score_dims.size(), 3,
platform::errors::InvalidArgument(
"The rank of each Variable in Input(Scores) must be 3, "
"but received rank is:%d.",
score_dims.size()));
PADDLE_ENFORCE_EQ(
bbox_dims.size(), 3,
platform::errors::InvalidArgument(
"The rank of each Variable in Input(BBoxes) must be 3, "
"but received rank is:%d.",
bbox_dims.size()));
PADDLE_ENFORCE_EQ(
anchor_dims.size(), 2,
platform::errors::InvalidArgument(
"The rank of each Variable in Input(Anchors) must be 2, "
"but received rank is:%d.",
anchor_dims.size()));
PADDLE_ENFORCE_EQ(
bbox_dims[2], 4,
platform::errors::InvalidArgument(
"The last dimension of each Variable in Input(BBoxes) must be 4 "
"representing the layout of coordinate [xmin, ymin, xmax, ymax], "
"but received dimension is:%d.",
bbox_dims[2]));
PADDLE_ENFORCE_EQ(bbox_dims[1], score_dims[1],
"The 2nd dimension of Input(BBoxes) must be equal to "
"2nd dimension of Input(Scores), which represents the "
"number of the predicted boxes.");
PADDLE_ENFORCE_EQ(anchor_dims[0], bbox_dims[1],
"The 1st dimension of Input(Anchors) must be equal to "
"2nd dimension of Input(BBoxes), which represents the "
"number of the predicted boxes.");
platform::errors::InvalidArgument(
"The 2nd dimension of Variables in Input(BBoxes) "
"and Input(Scores) "
"must be same, which represents the number of the "
"predicted boxes, "
"but received BBoxes 2nd dimension is:%d, Scores "
"2nd dimension is:%d.",
bbox_dims[1], score_dims[1]));
PADDLE_ENFORCE_EQ(
anchor_dims[0], bbox_dims[1],
platform::errors::InvalidArgument(
"The 1st dimension of each Variables in Input(Anchors) must be "
"equal "
"to the 2nd dimension of corresponding Variables in "
"Input(BBoxes), "
"which represents the number of the predicted boxes, but "
"received "
"Anchors 1st dimension is:%d, BBoxes 2nd dimension is:%d.",
anchor_dims[0], bbox_dims[1]));
PADDLE_ENFORCE_EQ(im_info_dims.size(), 2,
"The rank of Input(ImInfo) must be 2.");
platform::errors::InvalidArgument(
"The rank of Input(ImInfo) must be 2, but "
"received ImInfo rank is:%d.",
im_info_dims.size()));
}
// Here the box_dims[0] is not the real dimension of output.
// It will be rewritten in the computing kernel.
@@ -665,54 +665,58 @@ class RetinanetTargetAssignOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(
ctx->HasInput("Anchor"),
"Input(Anchor) of RetinanetTargetAssignOp should not be null");
PADDLE_ENFORCE(
ctx->HasInput("GtBoxes"),
"Input(GtBoxes) of RetinanetTargetAssignOp should not be null");
PADDLE_ENFORCE(
ctx->HasInput("GtLabels"),
"Input(GtLabels) of RetinanetTargetAssignOp should not be null");
PADDLE_ENFORCE(
ctx->HasInput("IsCrowd"),
"Input(Anchor) of RetinanetTargetAssignOp should not be null");
PADDLE_ENFORCE(
ctx->HasInput("ImInfo"),
"Input(ImInfo) of RetinanetTargetAssignOp should not be null");
PADDLE_ENFORCE(
ctx->HasOutput("LocationIndex"),
"Output(LocationIndex) of RetinanetTargetAssignOp should not be null");
PADDLE_ENFORCE(
ctx->HasOutput("ScoreIndex"),
"Output(ScoreIndex) of RetinanetTargetAssignOp should not be null");
PADDLE_ENFORCE(
ctx->HasOutput("TargetLabel"),
"Output(TargetLabel) of RetinanetTargetAssignOp should not be null");
PADDLE_ENFORCE(
ctx->HasOutput("TargetBBox"),
"Output(TargetBBox) of RetinanetTargetAssignOp should not be null");
PADDLE_ENFORCE(ctx->HasOutput("BBoxInsideWeight"),
"Output(BBoxInsideWeight) of RetinanetTargetAssignOp should "
"not be null");
PADDLE_ENFORCE(ctx->HasOutput("ForegroundNumber"),
"Output(ForegroundNumber) of RetinanetTargetAssignOp should "
"not be null");
OP_INOUT_CHECK(ctx->HasInput("Anchor"), "Input", "Anchor",
"retinanet_target_assign");
OP_INOUT_CHECK(ctx->HasInput("GtBoxes"), "Input", "GtBoxes",
"retinanet_target_assign");
OP_INOUT_CHECK(ctx->HasInput("GtLabels"), "Input", "GtLabels",
"retinanet_target_assign");
OP_INOUT_CHECK(ctx->HasInput("IsCrowd"), "Input", "IsCrowd",
"retinanet_target_assign");
OP_INOUT_CHECK(ctx->HasInput("ImInfo"), "Input", "ImInfo",
"retinanet_target_assign");
OP_INOUT_CHECK(ctx->HasOutput("LocationIndex"), "Output", "LocationIndex",
"retinanet_target_assign");
OP_INOUT_CHECK(ctx->HasOutput("ScoreIndex"), "Output", "ScoreIndex",
"retinanet_target_assign");
OP_INOUT_CHECK(ctx->HasOutput("TargetLabel"), "Output", "TargetLabel",
"retinanet_target_assign");
OP_INOUT_CHECK(ctx->HasOutput("TargetBBox"), "Output", "TargetBBox",
"retinanet_target_assign");
OP_INOUT_CHECK(ctx->HasOutput("BBoxInsideWeight"), "Output",
"BBoxInsideWeight", "retinanet_target_assign");
OP_INOUT_CHECK(ctx->HasOutput("ForegroundNumber"), "Output",
"ForegroundNumber", "retinanet_target_assign");
auto anchor_dims = ctx->GetInputDim("Anchor");
auto gt_boxes_dims = ctx->GetInputDim("GtBoxes");
auto gt_labels_dims = ctx->GetInputDim("GtLabels");
auto im_info_dims = ctx->GetInputDim("ImInfo");
PADDLE_ENFORCE_EQ(anchor_dims.size(), 2,
"The rank of Input(Anchor) must be 2.");
PADDLE_ENFORCE_EQ(gt_boxes_dims.size(), 2,
"The rank of Input(GtBoxes) must be 2.");
PADDLE_ENFORCE_EQ(gt_labels_dims.size(), 2,
"The rank of Input(GtLabels) must be 2.");
PADDLE_ENFORCE_EQ(im_info_dims.size(), 2,
"The rank of Input(ImInfo) must be 2.");
PADDLE_ENFORCE_EQ(
anchor_dims.size(), 2,
platform::errors::InvalidArgument(
"The rank of Input(Anchor) should be 2, but received Anchor "
"rank is :%d, Anchor shape is:[%s].",
anchor_dims.size(), anchor_dims));
PADDLE_ENFORCE_EQ(
gt_boxes_dims.size(), 2,
platform::errors::InvalidArgument(
"The rank of Input(GtBoxes) should be 2, but received GtBoxes "
"rank is :%d, GtBoxes shape is:[%s].",
gt_boxes_dims.size(), gt_boxes_dims));
PADDLE_ENFORCE_EQ(
gt_labels_dims.size(), 2,
platform::errors::InvalidArgument(
"The rank of Input(GtLabels) should be 2, but received GtLabels "
"rank is :%d, GtLabels shape is:[%s].",
gt_labels_dims.size(), gt_labels_dims));
PADDLE_ENFORCE_EQ(
im_info_dims.size(), 2,
platform::errors::InvalidArgument(
"The rank of Input(ImInfo) should be 2, but received ImInfo "
"rank is :%d, ImInfo shape is:[%s].",
im_info_dims.size(), im_info_dims));
ctx->SetOutputDim("LocationIndex", {gt_labels_dims[0]});
ctx->SetOutputDim("ScoreIndex", {gt_labels_dims[0]});
@@ -862,12 +866,24 @@ class RetinanetTargetAssignKernel : public framework::OpKernel<T> {
auto* bbox_inside_weight = context.Output<LoDTensor>("BBoxInsideWeight");
auto* fg_num = context.Output<LoDTensor>("ForegroundNumber");
PADDLE_ENFORCE_EQ(gt_boxes->lod().size(), 1UL,
"RetinanetTargetAssignOp gt_boxes needs 1 level of LoD");
PADDLE_ENFORCE_EQ(gt_labels->lod().size(), 1UL,
"RetinanetTargetAssignOp gt_boxes needs 1 level of LoD");
PADDLE_ENFORCE_EQ(is_crowd->lod().size(), 1UL,
"RetinanetTargetAssignOp is_crowd needs 1 level of LoD");
PADDLE_ENFORCE_EQ(
gt_boxes->lod().size(), 1UL,
platform::errors::InvalidArgument(
"The LoD level of Input(GtBoxes) should be 1, but received GtBoxes "
"LoD level is :%d.",
gt_boxes->lod().size()));
PADDLE_ENFORCE_EQ(
gt_labels->lod().size(), 1UL,
platform::errors::InvalidArgument("The LoD level of Input(GtLabels) "
"should be 1, but received GtLabels "
"LoD level is :%d.",
gt_labels->lod().size()));
PADDLE_ENFORCE_EQ(
is_crowd->lod().size(), 1UL,
platform::errors::InvalidArgument(
"The LoD level of Input(IsCrowd) should be 1, but received IsCrowd "
"LoD level is :%d.",
is_crowd->lod().size()));
int64_t anchor_num = static_cast<int64_t>(anchor->dims()[0]);
int64_t batch_num = static_cast<int64_t>(gt_boxes->lod().back().size() - 1);
@@ -996,9 +1012,26 @@ class RetinanetTargetAssignKernel : public framework::OpKernel<T> {
lod0_fg.emplace_back(total_fg_num);
}
PADDLE_ENFORCE_LE(total_loc_num, max_num);
PADDLE_ENFORCE_LE(total_score_num, max_num);
PADDLE_ENFORCE_LE(total_fg_num, batch_num);
PADDLE_ENFORCE_LE(
total_loc_num, max_num,
platform::errors::InvalidArgument(
"The number of sampled bboxes should not be greater than the "
"number of all anchor boxes(%d), but the number of sampled "
"bboxes is :%d.",
max_num, total_loc_num));
PADDLE_ENFORCE_LE(
total_score_num, max_num,
platform::errors::InvalidArgument(
"The number of sampled scores should not be greater than the "
"number of all anchor boxes(%d), but the number of sampled "
"scores is :%d.",
max_num, total_score_num));
PADDLE_ENFORCE_LE(
total_fg_num, batch_num,
platform::errors::InvalidArgument(
"The number of foreground numbers should not be greater than the "
"batch size(%d), but the number of foreground numbers is :%d.",
batch_num, total_fg_num));
lod_loc.emplace_back(lod0_loc);
loc_score.emplace_back(lod0_score);
@@ -27,19 +27,32 @@ class SigmoidFocalLossOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should not be null.");
PADDLE_ENFORCE(ctx->HasInput("FgNum"), "Input(FgNum) should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null.");
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "sigmoid_focal_loss");
OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label",
"sigmoid_focal_loss");
OP_INOUT_CHECK(ctx->HasInput("FgNum"), "Input", "FgNum",
"sigmoid_focal_loss");
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out",
"sigmoid_focal_loss");
auto x_dims = ctx->GetInputDim("X");
auto labels_dims = ctx->GetInputDim("Label");
auto fg_dims = ctx->GetInputDim("FgNum");
int rank = x_dims.size();
PADDLE_ENFORCE_EQ(rank, labels_dims.size(),
"Input(X) and Input(Label) shall have the same rank.");
PADDLE_ENFORCE_EQ(fg_dims.size(), 1, "The rank of Input(FgNum) must be 1.");
PADDLE_ENFORCE_EQ(
rank, labels_dims.size(),
platform::errors::InvalidArgument(
"The rank of Input(X) should be equal to the rank of Input(Label), "
"but received X rank is:%d, X shape is:[%s], "
"Label rank is:%d, Label shape is:[%s].",
rank, x_dims, labels_dims.size(), labels_dims));
PADDLE_ENFORCE_EQ(
fg_dims.size(), 1,
platform::errors::InvalidArgument(
"The rank of Input(FgNum) must be 1, but received FgNum rank is "
":%d, FgNum shape is:[%s].",
fg_dims.size(), fg_dims));
bool check = true;
if ((!ctx->IsRuntime()) && (framework::product(x_dims) <= 0 ||
framework::product(labels_dims) <= 0)) {
@@ -47,14 +60,22 @@ class SigmoidFocalLossOp : public framework::OperatorWithKernel {
}
if (check) {
PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1),
framework::slice_ddim(labels_dims, 0, rank - 1),
"Input(X) and Input(Label) shall have the same shape "
"except the last dimension.");
PADDLE_ENFORCE_EQ(
framework::slice_ddim(x_dims, 0, rank - 1),
framework::slice_ddim(labels_dims, 0, rank - 1),
platform::errors::InvalidArgument(
"Input(X) and Input(Label) should have the same shape "
"except the last dimension, but received X shape is:[%s], "
"Label shape is:[%s].",
x_dims, labels_dims));
}
PADDLE_ENFORCE_EQ(labels_dims[rank - 1], 1UL,
"The last dimension of input(Label) should be 1.");
PADDLE_ENFORCE_EQ(
labels_dims[rank - 1], 1UL,
platform::errors::InvalidArgument(
"The last dimension of Input(Label) should be 1, but received "
"Label shape is:[%s].",
labels_dims));
ctx->ShareDim("X", /*->*/ "Out");
ctx->ShareLoD("X", /*->*/ "Out");
@@ -74,13 +95,15 @@ class SigmoidFocalLossGradOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should not be null.");
PADDLE_ENFORCE(ctx->HasInput("FgNum"), "Input(FgNum) should not be null.");
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
"Input(Out@GRAD) should not be null.");
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
"Output(X@GRAD) should not be null.");
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "sigmoid_focal_loss");
OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label",
"sigmoid_focal_loss");
OP_INOUT_CHECK(ctx->HasInput("FgNum"), "Input", "FgNum",
"sigmoid_focal_loss");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
"Out@GRAD", "sigmoid_focal_loss");
OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
"X@GRAD", "sigmoid_focal_loss");
auto x_dims = ctx->GetInputDim("X");
auto labels_dims = ctx->GetInputDim("Label");
@@ -88,9 +111,19 @@ class SigmoidFocalLossGradOp : public framework::OperatorWithKernel {
auto dout_dims = ctx->GetInputDim(framework::GradVarName("Out"));
int rank = x_dims.size();
PADDLE_ENFORCE_EQ(rank, labels_dims.size(),
"Input(X) and Input(Label) shall have the same rank.");
PADDLE_ENFORCE_EQ(fg_dims.size(), 1, "The rank of Input(FgNum) must be 1.");
PADDLE_ENFORCE_EQ(
rank, labels_dims.size(),
platform::errors::InvalidArgument(
"The rank of Input(X) should be equal to the rank of Input(Label), "
"but received X rank is:%d, X shape is:[%s], "
"Label rank is:%d, Label shape is:[%s].",
rank, x_dims, labels_dims.size(), labels_dims));
PADDLE_ENFORCE_EQ(
fg_dims.size(), 1,
platform::errors::InvalidArgument(
"The rank of Input(FgNum) must be 1, but received FgNum rank is "
":%d, FgNum shape is:[%s].",
fg_dims.size(), fg_dims));
bool check = true;
if ((!ctx->IsRuntime()) && (framework::product(x_dims) <= 0 ||
framework::product(labels_dims) <= 0)) {
@@ -98,17 +131,29 @@ class SigmoidFocalLossGradOp : public framework::OperatorWithKernel {
}
if (check) {
PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1),
framework::slice_ddim(labels_dims, 0, rank - 1),
"Input(X) and Input(Label) shall have the same shape.");
PADDLE_ENFORCE_EQ(labels_dims[rank - 1], 1UL,
"The last dimension of input(Label) should be 1.");
PADDLE_ENFORCE_EQ(
framework::slice_ddim(x_dims, 0, rank - 1),
framework::slice_ddim(labels_dims, 0, rank - 1),
platform::errors::InvalidArgument(
"Input(X) and Input(Label) should have the same shape "
"except the last dimension, but received X shape is:[%s], "
"Label shape is:[%s].",
x_dims, labels_dims));
PADDLE_ENFORCE_EQ(
framework::slice_ddim(x_dims, 0, rank),
framework::slice_ddim(dout_dims, 0, rank),
"Input(X) and Input(Out@Grad) shall have the same shape.");
labels_dims[rank - 1], 1UL,
platform::errors::InvalidArgument(
"The last dimension of Input(Label) should be 1, but received "
"Label shape is:[%s].",
labels_dims));
PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank),
framework::slice_ddim(dout_dims, 0, rank),
platform::errors::InvalidArgument(
"Input(X) and Input(Out@Grad) should have the same "
"shape, but received "
"X shape is:[%s], Out@Grad shape is:[%s].",
x_dims, dout_dims));
}
ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
@@ -26,9 +26,10 @@ from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import six
import numpy
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
@@ -240,6 +241,23 @@ def retinanet_target_assign(bbox_pred,
"""
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
@@ -500,6 +518,11 @@ def sigmoid_focal_loss(x, label, fg_num, gamma=2, alpha=0.25):
alpha=0.25)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'sigmoid_focal_loss')
check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')
helper = LayerHelper("sigmoid_focal_loss", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -2984,6 +3007,24 @@ def retinanet_detection_output(bboxes,
nms_eta=1.)
"""
check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
for i, bbox in enumerate(bboxes):
check_variable_and_dtype(bbox, 'bbox{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(scores, 'scores', (list), 'retinanet_detection_output')
for i, score in enumerate(scores):
check_variable_and_dtype(score, 'score{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
for i, anchor in enumerate(anchors):
check_variable_and_dtype(anchor, 'anchor{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_detection_output')
helper = LayerHelper('retinanet_detection_output', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('scores'))
@@ -13833,6 +13833,18 @@ def deformable_roi_pooling(input,
position_sensitive=False)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'deformable_roi_pooling')
check_variable_and_dtype(rois, 'rois', ['float32', 'float64'],
'deformable_roi_pooling')
check_variable_and_dtype(trans, 'trans', ['float32', 'float64'],
'deformable_roi_pooling')
check_type(group_size, 'group_size', (list, tuple),
'deformable_roi_pooling')
if part_size is not None:
check_type(part_size, 'part_size', (list, tuple),
'deformable_roi_pooling')
input_channels = input.shape[1]
if position_sensitive == False:
output_channels = input_channels
@@ -16,6 +16,9 @@ from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid import Program, program_guard
def set_input(input, rois, trans):
@@ -249,14 +252,14 @@ class TestDeformablePSROIPoolOp(OpTest):
for bno in range(self.batch_size):
self.rois_lod[0].append(bno + 1)
for i in range(bno + 1):
x_1 = np.random.random_integers(
x_1 = np.random.randint(
0, self.width // self.spatial_scale - self.pooled_width)
y_1 = np.random.random_integers(
y_1 = np.random.randint(
0, self.height // self.spatial_scale - self.pooled_height)
x_2 = np.random.random_integers(
x_1 + self.pooled_width, self.width // self.spatial_scale)
y_2 = np.random.random_integers(
y_1 + self.pooled_height, self.height // self.spatial_scale)
x_2 = np.random.randint(x_1 + self.pooled_width,
self.width // self.spatial_scale)
y_2 = np.random.randint(y_1 + self.pooled_height,
self.height // self.spatial_scale)
roi = [bno, x_1, y_1, x_2, y_2]
rois.append(roi)
self.rois_num = len(rois)
@@ -365,5 +368,134 @@ class TestDeformablePSROIPoolOp(OpTest):
self.check_grad(['Input'], 'Output')
class TestDeformablePSROIPoolOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
input1 = fluid.data(
name="input1", shape=[2, 192, 64, 64], dtype='float32')
rois1 = fluid.data(
name="rois1", shape=[-1, 4], dtype='float32', lod_level=1)
trans1 = fluid.data(
name="trans1", shape=[2, 384, 64, 64], dtype='float32')
# The `input` must be Variable and the data type of `input` Tensor must be one of float32 and float64.
def test_input_type():
fluid.layers.deformable_roi_pooling(
input=[3, 4],
rois=rois1,
trans=trans1,
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
position_sensitive=True)
self.assertRaises(TypeError, test_input_type)
def test_input_tensor_dtype():
input2 = fluid.data(
name="input2", shape=[2, 192, 64, 64], dtype='int32')
fluid.layers.deformable_roi_pooling(
input=input2,
rois=rois1,
trans=trans1,
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
position_sensitive=True)
self.assertRaises(TypeError, test_input_tensor_dtype)
# The `rois` must be Variable and the data type of `rois` Tensor must be one of float32 and float64.
def test_rois_type():
fluid.layers.deformable_roi_pooling(
input=input1,
rois=2,
trans=trans1,
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
position_sensitive=True)
self.assertRaises(TypeError, test_rois_type)
def test_rois_tensor_dtype():
rois2 = fluid.data(
name="rois2", shape=[-1, 4], dtype='int32', lod_level=1)
fluid.layers.deformable_roi_pooling(
input=input1,
rois=rois2,
trans=trans1,
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
position_sensitive=True)
self.assertRaises(TypeError, test_rois_tensor_dtype)
# The `trans` must be Variable and the data type of `trans` Tensor must be one of float32 and float64.
def test_trans_type():
fluid.layers.deformable_roi_pooling(
input=input1,
rois=rois1,
trans=[2],
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
position_sensitive=True)
self.assertRaises(TypeError, test_trans_type)
def test_trans_tensor_dtype():
trans2 = fluid.data(
name="trans2", shape=[2, 384, 64, 64], dtype='int32')
fluid.layers.deformable_roi_pooling(
input=input1,
rois=rois1,
trans=trans2,
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
position_sensitive=True)
self.assertRaises(TypeError, test_trans_tensor_dtype)
# The `group_size` must be one of list and tuple.
# Each element must be int.
def test_group_size_type():
fluid.layers.deformable_roi_pooling(
input=input1,
rois=rois1,
trans=trans1,
group_size=1,
pooled_height=8,
pooled_width=8,
part_size=(8, 8),
sample_per_part=4,
position_sensitive=True)
self.assertRaises(TypeError, test_group_size_type)
# The `part_size` must be one of list, tuple and None.
# Each element must be int.
def test_part_size_type():
fluid.layers.deformable_roi_pooling(
input=input1,
rois=rois1,
trans=trans1,
pooled_height=8,
pooled_width=8,
part_size=8,
sample_per_part=4,
position_sensitive=True)
self.assertRaises(TypeError, test_part_size_type)
if __name__ == '__main__':
unittest.main()
@@ -3432,12 +3432,12 @@ class TestBook(LayerTest):
name='gt_labels',
shape=[10, 1],
append_batch_size=False,
dtype='float32')
dtype='int32')
is_crowd = layers.data(
name='is_crowd',
shape=[1],
append_batch_size=False,
dtype='float32')
dtype='int32')
im_info = layers.data(
name='im_info',
shape=[1, 3],
@@ -21,6 +21,8 @@ from op_test import OpTest
from test_anchor_generator_op import anchor_generator_in_python
from test_multiclass_nms_op import iou
from test_multiclass_nms_op import nms
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
def multiclass_nms(prediction, class_num, keep_top_k, nms_threshold):
@@ -408,5 +410,112 @@ class TestRetinanetDetectionOutOpNo5(TestRetinanetDetectionOutOp1):
self.layer_w.append(2**(num_levels - i))
class TestRetinanetDetectionOutOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
bboxes_low1 = fluid.data(
name='bboxes_low1', shape=[1, 44, 4], dtype='float32')
bboxes_high1 = fluid.data(
name='bboxes_high1', shape=[1, 11, 4], dtype='float32')
scores_low1 = fluid.data(
name='scores_low1', shape=[1, 44, 10], dtype='float32')
scores_high1 = fluid.data(
name='scores_high1', shape=[1, 11, 10], dtype='float32')
anchors_low1 = fluid.data(
name='anchors_low1', shape=[44, 4], dtype='float32')
anchors_high1 = fluid.data(
name='anchors_high1', shape=[11, 4], dtype='float32')
im_info1 = fluid.data(
name="im_info1", shape=[1, 3], dtype='float32')
# The `bboxes` must be list, each element must be Variable and
# its Tensor data type must be one of float32 and float64.
def test_bboxes_type():
fluid.layers.retinanet_detection_output(
bboxes=bboxes_low1,
scores=[scores_low1, scores_high1],
anchors=[anchors_low1, anchors_high1],
im_info=im_info1)
self.assertRaises(TypeError, test_bboxes_type)
def test_bboxes_tensor_dtype():
bboxes_high2 = fluid.data(
name='bboxes_high2', shape=[1, 11, 4], dtype='int32')
fluid.layers.retinanet_detection_output(
bboxes=[bboxes_high2, 5],
scores=[scores_low1, scores_high1],
anchors=[anchors_low1, anchors_high1],
im_info=im_info1)
self.assertRaises(TypeError, test_bboxes_tensor_dtype)
# The `scores` must be list, each element must be Variable and its
# Tensor data type must be one of float32 and float64.
def test_scores_type():
fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low1, bboxes_high1],
scores=scores_low1,
anchors=[anchors_low1, anchors_high1],
im_info=im_info1)
self.assertRaises(TypeError, test_scores_type)
def test_scores_tensor_dtype():
scores_high2 = fluid.data(
name='scores_high2', shape=[1, 11, 10], dtype='int32')
fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low1, bboxes_high1],
scores=[scores_high2, 5],
anchors=[anchors_low1, anchors_high1],
im_info=im_info1)
self.assertRaises(TypeError, test_scores_tensor_dtype)
# The `anchors` must be list, each element must be Variable and its
# Tensor data type must be one of float32 and float64.
def test_anchors_type():
fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low1, bboxes_high1],
scores=[scores_low1, scores_high1],
anchors=anchors_low1,
im_info=im_info1)
self.assertRaises(TypeError, test_anchors_type)
def test_anchors_tensor_dtype():
anchors_high2 = fluid.data(
name='anchors_high2', shape=[11, 4], dtype='int32')
fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low1, bboxes_high1],
scores=[scores_low1, scores_high1],
anchors=[anchors_high2, 5],
im_info=im_info1)
self.assertRaises(TypeError, test_anchors_tensor_dtype)
# The `im_info` must be Variable and the data type of `im_info`
# Tensor must be one of float32 and float64.
def test_iminfo_type():
fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low1, bboxes_high1],
scores=[scores_low1, scores_high1],
anchors=[anchors_low1, anchors_high1],
im_info=[2, 3, 4])
self.assertRaises(TypeError, test_iminfo_type)
def test_iminfo_tensor_dtype():
im_info2 = fluid.data(
name='im_info2', shape=[1, 3], dtype='int32')
fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low1, bboxes_high1],
scores=[scores_low1, scores_high1],
anchors=[anchors_low1, anchors_high1],
im_info=im_info2)
self.assertRaises(TypeError, test_iminfo_tensor_dtype)
if __name__ == '__main__':
unittest.main()
@@ -16,7 +16,9 @@ from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from op_test import OpTest
from test_anchor_generator_op import anchor_generator_in_python
from test_generate_proposal_labels_op import _generate_groundtruth
@@ -393,5 +395,169 @@ class TestRetinanetTargetAssignOp(OpTest):
self.check_output()
class TestRetinanetTargetAssignOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
bbox_pred1 = fluid.data(
name='bbox_pred1', shape=[1, 100, 4], dtype='float32')
cls_logits1 = fluid.data(
name='cls_logits1', shape=[1, 100, 10], dtype='float32')
anchor_box1 = fluid.data(
name='anchor_box1', shape=[100, 4], dtype='float32')
anchor_var1 = fluid.data(
name='anchor_var1', shape=[100, 4], dtype='float32')
gt_boxes1 = fluid.data(
name='gt_boxes1', shape=[10, 4], dtype='float32')
gt_labels1 = fluid.data(
name='gt_labels1', shape=[10, 1], dtype='int32')
is_crowd1 = fluid.data(name='is_crowd1', shape=[1], dtype='int32')
im_info1 = fluid.data(
name='im_info1', shape=[1, 3], dtype='float32')
# The `bbox_pred` must be Variable and the data type of `bbox_pred` Tensor
# must be one of float32 and float64.
def test_bbox_pred_type():
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
fluid.layers.retinanet_target_assign([1], cls_logits1, anchor_box1,
anchor_var1, gt_boxes1, gt_labels1, is_crowd1, im_info1, 10)
self.assertRaises(TypeError, test_bbox_pred_type)
def test_bbox_pred_tensor_dtype():
bbox_pred2 = fluid.data(
name='bbox_pred2', shape=[1, 100, 4], dtype='int32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
fluid.layers.retinanet_target_assign(bbox_pred2, cls_logits1, anchor_box1,
anchor_var1, gt_boxes1, gt_labels1, is_crowd1, im_info1, 10)
self.assertRaises(TypeError, test_bbox_pred_tensor_dtype)
# The `cls_logits` must be Variable and the data type of `cls_logits` Tensor
# must be one of float32 and float64.
def test_cls_logits_type():
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
fluid.layers.retinanet_target_assign(bbox_pred1, 2, anchor_box1,
anchor_var1, gt_boxes1, gt_labels1, is_crowd1, im_info1, 10)
self.assertRaises(TypeError, test_cls_logits_type)
def test_cls_logits_tensor_dtype():
cls_logits2 = fluid.data(
name='cls_logits2', shape=[1, 100, 10], dtype='int32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits2, anchor_box1,
anchor_var1, gt_boxes1, gt_labels1, is_crowd1, im_info1, 10)
self.assertRaises(TypeError, test_cls_logits_tensor_dtype)
# The `anchor_box` must be Variable and the data type of `anchor_box` Tensor
# must be one of float32 and float64.
def test_anchor_box_type():
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, [5],
anchor_var1, gt_boxes1, gt_labels1, is_crowd1, im_info1, 10)
self.assertRaises(TypeError, test_anchor_box_type)
def test_anchor_box_tensor_dtype():
anchor_box2 = fluid.data(
name='anchor_box2', shape=[100, 4], dtype='int32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box2,
anchor_var1, gt_boxes1, gt_labels1, is_crowd1, im_info1, 10)
self.assertRaises(TypeError, test_anchor_box_tensor_dtype)
# The `anchor_var` must be Variable and the data type of `anchor_var` Tensor
# must be one of float32 and float64.
def test_anchor_var_type():
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1,
5, gt_boxes1, gt_labels1, is_crowd1, im_info1, 10)
self.assertRaises(TypeError, test_anchor_var_type)
def test_anchor_var_tensor_dtype():
anchor_var2 = fluid.data(
name='anchor_var2', shape=[100, 4], dtype='int32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1,
anchor_var2, gt_boxes1, gt_labels1, is_crowd1, im_info1, 10)
self.assertRaises(TypeError, test_anchor_var_tensor_dtype)
# The `gt_boxes` must be Variable and the data type of `gt_boxes` Tensor
# must be one of float32 and float64.
def test_gt_boxes_type():
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1,
anchor_var1, [4], gt_labels1, is_crowd1, im_info1, 10)
self.assertRaises(TypeError, test_gt_boxes_type)
def test_gt_boxes_tensor_dtype():
gt_boxes2 = fluid.data(
name='gt_boxes2', shape=[10, 4], dtype='int32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1,
anchor_var1, gt_boxes2, gt_labels1, is_crowd1, im_info1, 10)
self.assertRaises(TypeError, test_gt_boxes_tensor_dtype)
# The `gt_label` must be Variable and the data type of `gt_label` Tensor
# must be int32.
def test_gt_label_type():
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1,
anchor_var1, gt_boxes1, 9, is_crowd1, im_info1, 10)
self.assertRaises(TypeError, test_gt_label_type)
def test_gt_label_tensor_dtype():
gt_labels2 = fluid.data(
name='label2', shape=[10, 1], dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1,
anchor_var1, gt_boxes1, gt_labels2, is_crowd1, im_info1, 10)
self.assertRaises(TypeError, test_gt_label_tensor_dtype)
# The `is_crowd` must be Variable and the data type of `is_crowd` Tensor
# must be int32.
def test_is_crowd_type():
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1,
anchor_var1, gt_boxes1, gt_labels1, [10], im_info1, 10)
self.assertRaises(TypeError, test_is_crowd_type)
def test_is_crowd_tensor_dtype():
is_crowd2 = fluid.data(
name='is_crowd2', shape=[10, 1], dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1,
anchor_var1, gt_boxes1, gt_labels1, is_crowd2, im_info1, 10)
self.assertRaises(TypeError, test_is_crowd_tensor_dtype)
# The `im_info` must be Variable and the data type of `im_info` Tensor
# must be one of float32 and float64.
def test_im_info_type():
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1,
anchor_var1, gt_boxes1, gt_labels1, is_crowd1, 1, 10)
self.assertRaises(TypeError, test_im_info_type)
def test_im_info_tensor_dtype():
im_info2 = fluid.data(
name='im_info2', shape=[1, 3], dtype='int32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1,
anchor_var1, gt_boxes1, gt_labels1, is_crowd1, im_info2, 10)
self.assertRaises(TypeError, test_im_info_tensor_dtype)
if __name__ == '__main__':
unittest.main()
@@ -19,7 +19,9 @@ import numpy as np
import math
import copy
from op_test import OpTest
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid import Program, program_guard
def sigmoid_focal_loss_forward(x_data, label_data, fg_num_data, gamma, alpha,
@@ -128,5 +130,63 @@ class TestSigmoidFocalLossOp4(TestSigmoidFocalLossOp3):
place, ['X'], 'Out', max_relative_error=0.002)
class TestSigmoidFocalLossOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
label1 = fluid.layers.fill_constant(
shape=[10, 1], dtype="int32", value=1)
fg_num1 = fluid.layers.fill_constant(
shape=[1], dtype="int32", value=5)
# The `x` must be Variable and the data type of `x` Tensor must be one of float32 and float64.
def test_x_type():
x1 = [2]
fluid.layers.sigmoid_focal_loss(
x=x1, label=label1, fg_num=fg_num1, gamma=2., alpha=0.25)
self.assertRaises(TypeError, test_x_type)
def test_x_tensor_dtype():
x2 = fluid.layers.data(name='x2', shape=[10, 10], dtype="int16")
fluid.layers.sigmoid_focal_loss(
x=x2, label=label1, fg_num=fg_num1, gamma=2., alpha=0.25)
self.assertRaises(TypeError, test_x_tensor_dtype)
x3 = fluid.layers.data(name='x3', shape=[10, 10], dtype="float64")
# The `label` must be Variable and the data type of `label` Tensor must be int32.
def test_label_type():
label2 = [2]
fluid.layers.sigmoid_focal_loss(
x=x3, label=label2, fg_num=fg_num1, gamma=2., alpha=0.25)
self.assertRaises(TypeError, test_label_type)
def test_label_tensor_dtype():
label3 = fluid.layers.fill_constant(
shape=[10, 1], dtype="float32", value=1.)
fluid.layers.sigmoid_focal_loss(
x=x3, label=label3, fg_num=fg_num1, gamma=2., alpha=0.25)
self.assertRaises(TypeError, test_label_tensor_dtype)
# The `fg_num` must be Variable and the data type of `fg_num` Tensor must be int32.
def test_fgnum_type():
fg_num2 = [2]
fluid.layers.sigmoid_focal_loss(
x=x3, label=label1, fg_num=fg_num2, gamma=2., alpha=0.25)
self.assertRaises(TypeError, test_fgnum_type)
def test_fgnum_tensor_dtype():
fg_num3 = fluid.layers.fill_constant(
shape=[1], dtype="float32", value=5.)
fluid.layers.sigmoid_focal_loss(
x=x3, label=label1, fg_num=fg_num3, gamma=2., alpha=0.25)
self.assertRaises(TypeError, test_fgnum_tensor_dtype)
if __name__ == '__main__':
unittest.main()