Unverified commit ba5fa2c2, authored by Bai Yifan, committed by GitHub

enhance some error messages, test=release/2.0 (#23840)

Parent d4b4fa04
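The patch follows one pattern throughout: on the C++ side, bare PADDLE_ENFORCE(cond, msg) checks become OP_INOUT_CHECK calls for input/output presence plus PADDLE_ENFORCE_EQ checks whose messages are wrapped in platform::errors::InvalidArgument and carry the offending values; on the Python side, check_variable_and_dtype and check_type guards reject invalid arguments with a TypeError before the C++ operator ever runs. A minimal sketch of the Python-visible effect (illustrative only, not part of the patch):

    import paddle.fluid as fluid

    # With the new check_variable_and_dtype guard, passing a plain list
    # where a Variable is expected fails fast with a TypeError instead of
    # a less specific failure later inside the operator.
    label = fluid.data(name='label', shape=[None, 3], dtype='float32')
    try:
        loss = fluid.layers.mse_loss([256, 3], label)  # a list, not a Variable
    except TypeError as e:
        print(e)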
@@ -109,21 +109,14 @@ class DeformableConvOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Input"),
-                   "Input(Input) of DeformableConvOp "
-                   "should not be null");
-    PADDLE_ENFORCE(ctx->HasInput("Offset"),
-                   "Input(Offset) of DeformableConvOp "
-                   "should not be null");
-    PADDLE_ENFORCE(ctx->HasInput("Mask"),
-                   "Input(Mask) of DeformableConvOp "
-                   "should not be null");
-    PADDLE_ENFORCE(ctx->HasInput("Filter"),
-                   "Input(Filter) of DeformableConvOp "
-                   "should not be null");
-    PADDLE_ENFORCE(ctx->HasOutput("Output"),
-                   "Output(Output) of DeformableConvOp "
-                   "should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "deformable_conv");
+    OP_INOUT_CHECK(ctx->HasInput("Offset"), "Input", "Offset",
+                   "deformable_conv");
+    OP_INOUT_CHECK(ctx->HasInput("Mask"), "Input", "Mask", "deformable_conv");
+    OP_INOUT_CHECK(ctx->HasInput("Filter"), "Input", "Filter",
+                   "deformable_conv");
+    OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output",
+                   "deformable_conv");

     auto in_dims = ctx->GetInputDim("Input");
     auto filter_dims = ctx->GetInputDim("Filter");
@@ -138,39 +131,62 @@ class DeformableConvOp : public framework::OperatorWithKernel {
     int deformable_groups = ctx->Attrs().Get<int>("deformable_groups");
     int im2col_step = ctx->Attrs().Get<int>("im2col_step");

-    PADDLE_ENFORCE(in_dims.size() == 4,
-                   "Conv input should be 4-D tensor, get %u", in_dims.size());
-    PADDLE_ENFORCE_EQ(
-        in_dims.size(), filter_dims.size(),
-        "Conv input dimension and filter dimension should be the same.");
-    PADDLE_ENFORCE_EQ(
-        in_dims.size() - strides.size(), 2U,
-        "Conv input dimension and strides dimension should be consistent.");
-    PADDLE_ENFORCE_EQ(paddings.size(), strides.size(),
-                      "Conv paddings dimension and Conv strides dimension "
-                      "should be the same.");
-    PADDLE_ENFORCE_EQ(in_dims[1], filter_dims[1] * groups,
-                      "The number of input channels should be equal to filter "
-                      "channels * groups.");
-    PADDLE_ENFORCE_EQ(
-        filter_dims[0] % groups, 0,
-        "The number of output channels should be divided by groups.");
-    PADDLE_ENFORCE_EQ(filter_dims[0] % deformable_groups, 0,
-                      "The number of output channels should be "
-                      "divided by deformable groups.");
+    PADDLE_ENFORCE_EQ(
+        in_dims.size(), 4,
+        platform::errors::InvalidArgument(
+            "Conv input should be 4-D tensor. But received %u-D tensor",
+            in_dims.size()));
+    PADDLE_ENFORCE_EQ(in_dims.size(), filter_dims.size(),
+                      platform::errors::InvalidArgument(
+                          "Conv input dimension and filter dimension should be "
+                          "the same. But received [%d]: [%d]",
+                          in_dims.size(), filter_dims.size()));
+    PADDLE_ENFORCE_EQ(in_dims.size() - strides.size(), 2U,
+                      platform::errors::InvalidArgument(
+                          "Conv input dimension and strides "
+                          "dimension should be consistent. But received input "
+                          "dimension: [%d], strides dimension: [%d]",
+                          in_dims.size(), strides.size()));
+    PADDLE_ENFORCE_EQ(paddings.size(), strides.size(),
+                      platform::errors::InvalidArgument(
+                          "Conv paddings dimension and Conv strides dimension "
+                          "should be the same. But received [%d]: [%d]",
+                          paddings.size(), strides.size()));
+    PADDLE_ENFORCE_EQ(
+        in_dims[1], filter_dims[1] * groups,
+        platform::errors::InvalidArgument(
+            "The number of input channels should be equal to filter "
+            "channels * groups. But received [%d]: [%d]",
+            in_dims[1], filter_dims[1] * groups));
+    PADDLE_ENFORCE_EQ(
+        filter_dims[0] % groups, 0,
+        platform::errors::InvalidArgument(
+            "The number of output channels should be divisible by groups. But "
+            "received output channels: [%d], groups: [%d]",
+            filter_dims[0], groups));
+    PADDLE_ENFORCE_EQ(
+        filter_dims[0] % deformable_groups, 0,
+        platform::errors::InvalidArgument(
+            "The number of output channels should be "
+            "divisible by deformable groups. But received [%d]: [%d]",
+            filter_dims[0], deformable_groups));
     if (in_dims[0] > im2col_step) {
       PADDLE_ENFORCE_EQ(
           in_dims[0] % im2col_step, 0U,
-          "Input batchsize must be smaller than or divide im2col_step");
+          platform::errors::InvalidArgument(
+              "Input batchsize must be smaller than or divide im2col_step. But "
+              "received Input batchsize: [%d], im2col_step: [%d]",
+              in_dims[0], im2col_step));
     }
     for (size_t i = 0; i < strides.size(); ++i) {
-      PADDLE_ENFORCE_GT(strides[i], 0U, "stride %d size incorrect", i);
+      PADDLE_ENFORCE_GT(strides[i], 0U, platform::errors::InvalidArgument(
+                                            "stride %d size incorrect", i));
     }
     for (size_t i = 0; i < dilations.size(); ++i) {
-      PADDLE_ENFORCE_GT(dilations[i], 0U, "dilation %d size incorrect", i);
+      PADDLE_ENFORCE_GT(dilations[i], 0U, platform::errors::InvalidArgument(
+                                              "dilation %d size incorrect", i));
     }

     std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
@@ -185,29 +201,61 @@ class DeformableConvOp : public framework::OperatorWithKernel {
       }
     }

-    PADDLE_ENFORCE_EQ(output_shape[1] % deformable_groups, 0U,
-                      "output num_filter must divide deformable group size.");
+    PADDLE_ENFORCE_EQ(
+        output_shape[1] % deformable_groups, 0U,
+        platform::errors::InvalidArgument(
+            "output num_filter must be divisible by deformable group size. But "
+            "received output num_filter: [%d], deformable group size: [%d]",
+            output_shape[1], deformable_groups));
     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(output_shape[2], offset_dims[2],
-                        "output height must equal to offset map height.");
-      PADDLE_ENFORCE_EQ(output_shape[3], offset_dims[3],
-                        "output width must equal to offset map width.");
-      PADDLE_ENFORCE_EQ(offset_dims[1] % (filter_dims[2] * filter_dims[3]), 0U,
-                        "offset filter must divide deformable group size.");
-      PADDLE_ENFORCE_EQ(offset_dims[1] / (2 * filter_dims[2] * filter_dims[3]),
-                        deformable_groups,
-                        "offset filter must divide deformable group size.");
-      PADDLE_ENFORCE_EQ(output_shape[2], mask_dims[2],
-                        "output height must equal to mask map height.");
-      PADDLE_ENFORCE_EQ(output_shape[3], mask_dims[3],
-                        "output width must equal to mask map width.");
-      PADDLE_ENFORCE_EQ(mask_dims[1] % (filter_dims[2] * filter_dims[3]), 0U,
-                        "mask filter must divide deformable group size.");
-      PADDLE_ENFORCE_EQ(mask_dims[1] / (filter_dims[2] * filter_dims[3]),
-                        deformable_groups,
-                        "mask filter must divide deformable group size.");
+      PADDLE_ENFORCE_EQ(output_shape[2], offset_dims[2],
+                        platform::errors::InvalidArgument(
+                            "output height must equal offset map height. "
+                            "But received [%d]: [%d]",
+                            output_shape[2], offset_dims[2]));
+      PADDLE_ENFORCE_EQ(output_shape[3], offset_dims[3],
+                        platform::errors::InvalidArgument(
+                            "output width must equal offset map width. "
+                            "But received [%d]: [%d]",
+                            output_shape[3], offset_dims[3]));
+      PADDLE_ENFORCE_EQ(offset_dims[1] % (filter_dims[2] * filter_dims[3]), 0U,
+                        platform::errors::InvalidArgument(
+                            "offset filter must be divisible by deformable "
+                            "group size. But received [%d]: [%d]",
+                            offset_dims[1], filter_dims[2] * filter_dims[3]));
+      PADDLE_ENFORCE_EQ(
+          offset_dims[1] / (2 * filter_dims[2] * filter_dims[3]),
+          deformable_groups,
+          platform::errors::InvalidArgument(
+              "offset filter must divide deformable group size. But received "
+              "[%d]: [%d]",
+              offset_dims[1] / (2 * filter_dims[2] * filter_dims[3]),
+              deformable_groups));
+      PADDLE_ENFORCE_EQ(output_shape[2], mask_dims[2],
+                        platform::errors::InvalidArgument(
+                            "output height must equal mask map height. "
+                            "But received [%d]: [%d]",
+                            output_shape[2], mask_dims[2]));
+      PADDLE_ENFORCE_EQ(output_shape[3], mask_dims[3],
+                        platform::errors::InvalidArgument(
+                            "output width must equal mask map width. "
+                            "But received [%d]: [%d]",
+                            output_shape[3], mask_dims[3]));
+      PADDLE_ENFORCE_EQ(mask_dims[1] % (filter_dims[2] * filter_dims[3]), 0U,
+                        platform::errors::InvalidArgument(
+                            "mask filter must be divisible by deformable "
+                            "group size. But received [%d]: [%d]",
+                            mask_dims[1], filter_dims[2] * filter_dims[3]));
+      PADDLE_ENFORCE_EQ(mask_dims[1] / (filter_dims[2] * filter_dims[3]),
+                        deformable_groups,
+                        platform::errors::InvalidArgument(
+                            "mask filter must divide deformable group size. "
+                            "But received [%d]: [%d]",
+                            mask_dims[1] / (filter_dims[2] * filter_dims[3]),
+                            deformable_groups));
     }

     ctx->SetOutputDim("Output", framework::make_ddim(output_shape));
@@ -255,8 +303,8 @@ class DeformableConvGradOp : public framework::OperatorWithKernel {
     auto offset_dims = ctx->GetInputDim("Offset");
     auto mask_dims = ctx->GetInputDim("Mask");

-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Output")),
-                   "the gradient of output(Out) must not be null");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Output")), "Input",
+                   "Output@Grad", "deformable_conv_grad");
     if (ctx->HasOutput(framework::GradVarName("Input"))) {
       ctx->SetOutputDim(framework::GradVarName("Input"), in_dims);
     }
......
@@ -114,18 +114,14 @@ class DeformableConvV1Op : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true,
-                      "Input(Input) of DeformableConvOp "
-                      "should not be null");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Offset"), true,
-                      "Input(Offset) of DeformableConvOp "
-                      "should not be null");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Filter"), true,
-                      "Input(Filter) of DeformableConvOp "
-                      "should not be null");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Output"), true,
-                      "Output(Output) of DeformableConvOp "
-                      "should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input",
+                   "deformable_conv_v1");
+    OP_INOUT_CHECK(ctx->HasInput("Offset"), "Input", "Offset",
+                   "deformable_conv_v1");
+    OP_INOUT_CHECK(ctx->HasInput("Filter"), "Input", "Filter",
+                   "deformable_conv_v1");
+    OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output",
+                   "deformable_conv_v1");

     auto in_dims = ctx->GetInputDim("Input");
     auto filter_dims = ctx->GetInputDim("Filter");
@@ -139,40 +135,61 @@ class DeformableConvV1Op : public framework::OperatorWithKernel {
     int deformable_groups = ctx->Attrs().Get<int>("deformable_groups");
     int im2col_step = ctx->Attrs().Get<int>("im2col_step");

-    PADDLE_ENFORCE_EQ(in_dims.size(), 4,
-                      "Conv input should be 4-D tensor, get %u",
-                      in_dims.size());
-    PADDLE_ENFORCE_EQ(
-        in_dims.size(), filter_dims.size(),
-        "Conv input dimension and filter dimension should be the same.");
-    PADDLE_ENFORCE_EQ(
-        in_dims.size() - strides.size(), 2U,
-        "Conv input dimension and strides dimension should be consistent.");
-    PADDLE_ENFORCE_EQ(paddings.size(), strides.size(),
-                      "Conv paddings dimension and Conv strides dimension "
-                      "should be the same.");
-    PADDLE_ENFORCE_EQ(in_dims[1], filter_dims[1] * groups,
-                      "The number of input channels should be equal to filter "
-                      "channels * groups.");
-    PADDLE_ENFORCE_EQ(
-        filter_dims[0] % groups, 0,
-        "The number of output channels should be divided by groups.");
-    PADDLE_ENFORCE_EQ(filter_dims[0] % deformable_groups, 0,
-                      "The number of output channels should be "
-                      "divided by deformable groups.");
+    PADDLE_ENFORCE_EQ(
+        in_dims.size(), 4,
+        platform::errors::InvalidArgument(
+            "Conv input should be 4-D tensor. But received %u-D tensor",
+            in_dims.size()));
+    PADDLE_ENFORCE_EQ(in_dims.size(), filter_dims.size(),
+                      platform::errors::InvalidArgument(
+                          "Conv input dimension and filter dimension should be "
+                          "the same. But received [%d]: [%d]",
+                          in_dims.size(), filter_dims.size()));
+    PADDLE_ENFORCE_EQ(
+        in_dims.size() - strides.size(), 2U,
+        platform::errors::InvalidArgument(
+            "Conv input dimension and strides "
+            "dimension should be consistent. But received [%d]: [%d]",
+            in_dims.size(), strides.size()));
+    PADDLE_ENFORCE_EQ(paddings.size(), strides.size(),
+                      platform::errors::InvalidArgument(
+                          "Conv paddings dimension and Conv strides dimension "
+                          "should be the same. But received [%d]: [%d]",
+                          paddings.size(), strides.size()));
+    PADDLE_ENFORCE_EQ(
+        in_dims[1], filter_dims[1] * groups,
+        platform::errors::InvalidArgument(
+            "The number of input channels should be equal to filter "
+            "channels * groups. But received [%d]: [%d]",
+            in_dims[1], filter_dims[1] * groups));
+    PADDLE_ENFORCE_EQ(
+        filter_dims[0] % groups, 0,
+        platform::errors::InvalidArgument(
+            "The number of output channels should be divisible by groups. But "
+            "received output channels: [%d], groups: [%d]",
+            filter_dims[0], groups));
+    PADDLE_ENFORCE_EQ(
+        filter_dims[0] % deformable_groups, 0,
+        platform::errors::InvalidArgument(
+            "The number of output channels should be "
+            "divisible by deformable groups. But received [%d]: [%d]",
+            filter_dims[0], deformable_groups));
     if (in_dims[0] > im2col_step) {
-      PADDLE_ENFORCE_EQ(
-          in_dims[0] % im2col_step, 0U,
-          "Input batchsize must be smaller than or divide im2col_step");
+      PADDLE_ENFORCE_EQ(in_dims[0] % im2col_step, 0U,
+                        platform::errors::InvalidArgument(
+                            "Input batchsize must be smaller than or divide "
+                            "im2col_step. But received [%d]: [%d]",
+                            in_dims[0], im2col_step));
     }
     for (size_t i = 0; i < strides.size(); ++i) {
-      PADDLE_ENFORCE_GT(strides[i], 0U, "stride %d size incorrect", i);
+      PADDLE_ENFORCE_GT(strides[i], 0U, platform::errors::InvalidArgument(
+                                            "stride %d size incorrect", i));
     }
     for (size_t i = 0; i < dilations.size(); ++i) {
-      PADDLE_ENFORCE_GT(dilations[i], 0U, "dilation %d size incorrect", i);
+      PADDLE_ENFORCE_GT(dilations[i], 0U, platform::errors::InvalidArgument(
+                                              "dilation %d size incorrect", i));
     }

     std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
@@ -187,25 +204,34 @@ class DeformableConvV1Op : public framework::OperatorWithKernel {
       }
     }
     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(
-          output_shape[1] % deformable_groups, 0U,
-          platform::errors::InvalidArgument(
-              "output num_filter must divide deformable group size."));
+      PADDLE_ENFORCE_EQ(output_shape[1] % deformable_groups, 0U,
+                        platform::errors::InvalidArgument(
+                            "output num_filter must be divisible by deformable "
+                            "group size. But received [%d]: [%d]",
+                            output_shape[1], deformable_groups));
-      PADDLE_ENFORCE_EQ(output_shape[2], offset_dims[2],
-                        platform::errors::InvalidArgument(
-                            "output height must equal to offset map height."));
+      PADDLE_ENFORCE_EQ(output_shape[2], offset_dims[2],
+                        platform::errors::InvalidArgument(
+                            "output height must equal offset map height. "
+                            "But received [%d]: [%d]",
+                            output_shape[2], offset_dims[2]));
-      PADDLE_ENFORCE_EQ(output_shape[3], offset_dims[3],
-                        platform::errors::InvalidArgument(
-                            "output width must equal to offset map width."));
+      PADDLE_ENFORCE_EQ(output_shape[3], offset_dims[3],
+                        platform::errors::InvalidArgument(
+                            "output width must equal offset map width. "
+                            "But received [%d]: [%d]",
+                            output_shape[3], offset_dims[3]));
-      PADDLE_ENFORCE_EQ(
-          offset_dims[1] % (filter_dims[2] * filter_dims[3]), 0U,
-          platform::errors::InvalidArgument(
-              "offset filter must divide deformable group size."));
+      PADDLE_ENFORCE_EQ(offset_dims[1] % (filter_dims[2] * filter_dims[3]), 0U,
+                        platform::errors::InvalidArgument(
+                            "offset filter must be divisible by deformable "
+                            "group size. But received [%d]: [%d]",
+                            offset_dims[1], filter_dims[2] * filter_dims[3]));
       PADDLE_ENFORCE_EQ(
           offset_dims[1] / (2 * filter_dims[2] * filter_dims[3]),
           deformable_groups,
           platform::errors::InvalidArgument(
-              "offset filter must divide deformable group size."));
+              "offset filter must divide deformable group size. But received "
+              "[%d]: [%d]",
+              offset_dims[1] / (2 * filter_dims[2] * filter_dims[3]),
+              deformable_groups));
     }
     ctx->SetOutputDim("Output", framework::make_ddim(output_shape));
   }
@@ -249,8 +275,8 @@ class DeformableConvV1GradOp : public framework::OperatorWithKernel {
     auto filter_dims = ctx->GetInputDim("Filter");
     auto offset_dims = ctx->GetInputDim("Offset");

-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Output")), true,
-                      "the gradient of output(Out) must not be null");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Output")), "Input",
+                   "Output@Grad", "deformable_conv_v1_grad");
     if (ctx->HasOutput(framework::GradVarName("Input"))) {
       ctx->SetOutputDim(framework::GradVarName("Input"), in_dims);
     }
......
@@ -23,8 +23,9 @@ template <typename DeviceContext, typename T>
 class PolygonBoxTransformCPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_cpu_place(ctx.GetPlace()), true,
+        platform::errors::InvalidArgument("It must use CPUPlace."));
     auto* in = ctx.Input<Tensor>("Input");
     auto in_dims = in->dims();
     const T* in_data = in->data<T>();
@@ -56,18 +57,23 @@ class PolygonBoxTransformOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(
-        ctx->HasInput("Input"),
-        "Input (Input) of polygon_box transform op should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasOutput("Output"),
-        "Output (Output) of polygon_box transform op should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input",
+                   "polygon_box_transform");
+    OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output",
+                   "polygon_box_transform");

     auto in_dim = ctx->GetInputDim("Input");

-    PADDLE_ENFORCE_EQ(in_dim.size(), 4, "input's rank must be 4.");
-    PADDLE_ENFORCE_EQ(in_dim[1] % 2, 0,
-                      "input's second dimension must be even.");
+    PADDLE_ENFORCE_EQ(
+        in_dim.size(), 4,
+        platform::errors::InvalidArgument(
+            "input's rank must be 4. But received: Input rank is [%d]",
+            in_dim.size()));
+    PADDLE_ENFORCE_EQ(in_dim[1] % 2, 0,
+                      platform::errors::InvalidArgument(
+                          "input's second dimension must be even. But "
+                          "received: Input 2nd dimension is [%d]",
+                          in_dim[1]));

     ctx->SetOutputDim("Output", in_dim);
   }
......
@@ -43,8 +43,9 @@ template <typename T>
 class PolygonBoxTransformOpCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                   "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        platform::errors::InvalidArgument("It must use CUDAPlace."));
     auto* in = ctx.Input<Tensor>("Input");
     auto in_dims = in->dims();
     const T* in_data = in->data<T>();
......
@@ -28,22 +28,38 @@ class TeacherStudentSigmoidLossOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X",
+                   "teacher_student_sigmoid_loss");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label",
+                   "teacher_student_sigmoid_loss");
+    OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y",
+                   "teacher_student_sigmoid_loss");

     auto x_dims = ctx->GetInputDim("X");
     auto label_dims = ctx->GetInputDim("Label");
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, "Input(X)'s rank should be 2.");
-    PADDLE_ENFORCE_EQ(label_dims.size(), 2UL,
-                      "Input(Label)'s rank should be 2.");
+    PADDLE_ENFORCE_EQ(x_dims.size(), 2UL,
+                      platform::errors::InvalidArgument(
+                          "Input(X)'s rank should be 2. But received: "
+                          "Input(X)'s rank is [%d]",
+                          x_dims.size()));
+    PADDLE_ENFORCE_EQ(label_dims.size(), 2UL,
+                      platform::errors::InvalidArgument(
+                          "Input(Label)'s rank should be 2. But "
+                          "received Input(Label)'s rank is [%d]",
+                          label_dims.size()));
     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(x_dims[0], label_dims[0],
-                        "The 1st dimension of Input(X) and Input(Label) should "
-                        "be equal.");
-      PADDLE_ENFORCE_EQ(label_dims[1], 1UL,
-                        "The 2nd dimension of "
-                        "Input(Label) should be 1.");
+      PADDLE_ENFORCE_EQ(
+          x_dims[0], label_dims[0],
+          platform::errors::InvalidArgument(
+              "The 1st dimension of Input(X) and Input(Label) should "
+              "be equal. But received [%d]: [%d]",
+              x_dims[0], label_dims[0]));
+      PADDLE_ENFORCE_EQ(label_dims[1], 1UL,
+                        platform::errors::InvalidArgument(
+                            "The 2nd dimension of "
+                            "Input(Label) should be 1. But received "
+                            "Input(Label)'s 2nd dimension is [%d]",
+                            label_dims[1]));
     }
     ctx->SetOutputDim("Y", {x_dims[0], 1});
     ctx->ShareLoD("X", /*->*/ "Y");
@@ -87,32 +103,60 @@ class TeacherStudentSigmoidLossGradientOp
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")),
-                   "Input(Y@GRAD) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
-                   "Output(X@GRAD) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X",
+                   "teacher_student_sigmoid_loss_grad");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label",
+                   "teacher_student_sigmoid_loss_grad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")), "Input",
+                   "Y@Grad", "teacher_student_sigmoid_loss_grad");
+    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
+                   "X@Grad", "teacher_student_sigmoid_loss_grad");

     auto x_dims = ctx->GetInputDim("X");
     auto label_dims = ctx->GetInputDim("Label");
     auto dy_dims = ctx->GetInputDim(framework::GradVarName("Y"));
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2.");
-    PADDLE_ENFORCE_EQ(dy_dims.size(), 2, "Input(Y@Grad)'s rank should be 2.");
-    PADDLE_ENFORCE_EQ(label_dims.size(), 2, "Input(Label)'s rank should be 2.");
+    PADDLE_ENFORCE_EQ(
+        x_dims.size(), 2,
+        platform::errors::InvalidArgument(
+            "Input(X)'s rank should be 2. But received Input(X)'s rank is [%d]",
+            x_dims.size()));
+    PADDLE_ENFORCE_EQ(dy_dims.size(), 2,
+                      platform::errors::InvalidArgument(
+                          "Input(Y@Grad)'s rank should be 2. But received "
+                          "Input(Y@Grad)'s rank is [%d]",
+                          dy_dims.size()));
+    PADDLE_ENFORCE_EQ(label_dims.size(), 2,
+                      platform::errors::InvalidArgument(
+                          "Input(Label)'s rank should be 2. But received "
+                          "Input(Label)'s rank is [%d]",
+                          label_dims.size()));
     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(x_dims[0], label_dims[0],
-                        "The 1st dimension of Input(X) and Input(Label) should "
-                        "be equal.");
-      PADDLE_ENFORCE_EQ(
-          x_dims[0], dy_dims[0],
-          "The 1st dimension of Input(X) and Input(Y@Grad) should "
-          "be equal.");
-      PADDLE_ENFORCE_EQ(dy_dims[1], 1,
-                        "The 2nd dimension of Input(Y@Grad) should be 1.");
-      PADDLE_ENFORCE_EQ(label_dims[1], 1,
-                        "When Attr(soft_label) == false, the 2nd dimension of "
-                        "Input(Label) should be 1.");
+      PADDLE_ENFORCE_EQ(
+          x_dims[0], label_dims[0],
+          platform::errors::InvalidArgument(
+              "The 1st dimension of Input(X) and Input(Label) should "
+              "be equal. But received [%d]: [%d]",
+              x_dims[0], label_dims[0]));
+      PADDLE_ENFORCE_EQ(
+          x_dims[0], dy_dims[0],
+          platform::errors::InvalidArgument(
+              "The 1st dimension of Input(X) and Input(Y@Grad) should "
+              "be equal. But received [%d]: [%d]",
+              x_dims[0], dy_dims[0]));
+      PADDLE_ENFORCE_EQ(
+          dy_dims[1], 1,
+          platform::errors::InvalidArgument(
+              "The 2nd dimension of Input(Y@Grad) should be 1. "
+              "But received Input(Y@Grad)'s 2nd dimension is [%d]",
+              dy_dims[1]));
+      PADDLE_ENFORCE_EQ(
+          label_dims[1], 1,
+          platform::errors::InvalidArgument(
+              "When Attr(soft_label) == false, the 2nd dimension of "
+              "Input(Label) should be 1. But received Input(Label)'s 2nd "
+              "dimension is [%d]",
+              label_dims[1]));
     }
     ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
     ctx->ShareLoD("X", framework::GradVarName("X"));
......
@@ -891,6 +891,8 @@ def polygon_box_transform(input, name=None):
             input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
             out = fluid.layers.polygon_box_transform(input)
     """
+    check_variable_and_dtype(input, "input", ['float32', 'float64'],
+                             'polygon_box_transform')
     helper = LayerHelper("polygon_box_transform", **locals())
     output = helper.create_variable_for_type_inference(dtype=input.dtype)
......
@@ -33,6 +33,7 @@ from ..framework import default_main_program, Parameter, unique_name, name_scope
 from ..framework import Variable
 from ..framework import in_dygraph_mode
 from ..dygraph import learning_rate_scheduler as imperate_lr
+from ..data_feeder import check_variable_and_dtype, check_type

 __all__ = [
     'exponential_decay', 'natural_exp_decay', 'inverse_time_decay',
@@ -449,6 +450,8 @@ def cosine_decay(learning_rate, step_each_epoch, epochs):
             lr = fluid.layers.cosine_decay(
                 learning_rate = base_lr, step_each_epoch=10000, epochs=120)
     """
+    check_type(learning_rate, 'learning_rate', (float, tensor.Variable),
+               'cosine_decay')
     with default_main_program()._lr_schedule_guard():
         if in_dygraph_mode():
......
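With the check_type guard above, cosine_decay now rejects a learning_rate that is neither a float nor a Variable as soon as it is called. A small illustrative test in the same style as the ones added later in this patch (hypothetical, not part of the commit):

    import unittest
    import paddle.fluid as fluid

    class TestCosineDecayInvalidLr(unittest.TestCase):
        def test_error(self):
            def test_invalid_lr():
                # a string is neither float nor Variable, so the new
                # check_type guard raises TypeError
                lr = fluid.layers.cosine_decay(
                    learning_rate="0.1", step_each_epoch=10000, epochs=120)

            self.assertRaises(TypeError, test_invalid_lr)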
@@ -334,6 +334,10 @@ def square_error_cost(input, label):
             # [0.04000002]
     """
+    check_variable_and_dtype(input, "input", ['float32', 'float64'],
+                             'square_error_cost')
+    check_variable_and_dtype(label, "label", ['float32', 'float64'],
+                             'square_error_cost')
     helper = LayerHelper('square_error_cost', **locals())
     minus_out = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
@@ -1481,6 +1485,11 @@ def teacher_student_sigmoid_loss(input,
             cost = fluid.layers.teacher_student_sigmoid_loss(input=similarity, label=label)
     """
+    check_variable_and_dtype(input, "input", ['float32', 'float64'],
+                             'teacher_student_sigmoid_loss')
+    check_variable_and_dtype(label, "label", ['float32', 'float64'],
+                             'teacher_student_sigmoid_loss')
     helper = LayerHelper('teacher_student_sigmoid_loss', **locals())
     out = helper.create_variable(dtype=input.dtype)
     helper.append_op(
@@ -1715,4 +1724,6 @@ def mse_loss(input, label):
             # [0.04000002]
     """
+    check_variable_and_dtype(input, "input", ['float32', 'float64'], 'mse_loss')
+    check_variable_and_dtype(label, "label", ['float32', 'float64'], 'mse_loss')
     return nn.reduce_mean(square_error_cost(input, label))
@@ -13554,6 +13554,12 @@ def deformable_conv(input,
             num_filters=2, filter_size=filter_size, padding=1, modulated=False)
     """
+    check_variable_and_dtype(input, "input", ['float32', 'float64'],
+                             'deformable_conv')
+    check_variable_and_dtype(offset, "offset", ['float32', 'float64'],
+                             'deformable_conv')
+    check_type(mask, 'mask', (Variable, type(None)), 'deformable_conv')
+
     num_channels = input.shape[1]
     assert param_attr is not False, "param_attr should not be False here."
......
@@ -18,6 +18,7 @@ import unittest

 import numpy as np
 import paddle.fluid.core as core
+import paddle.fluid as fluid
 from op_test import OpTest
@@ -256,5 +257,31 @@ class TestWithGroup(TestModulatedDeformableConvOp):
         self.groups = 2


+class TestModulatedDeformableConvInvalidInput(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            input = [1, 3, 32, 32]
+            offset = fluid.data(
+                name='offset', shape=[None, 3, 32, 32], dtype='float32')
+            mask = fluid.data(
+                name='mask', shape=[None, 3, 32, 32], dtype='float32')
+            loss = fluid.layers.deformable_conv(
+                input, offset, mask, num_filters=4, filter_size=1)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+        def test_invalid_offset():
+            input = fluid.data(
+                name='input', shape=[None, 3, 32, 32], dtype='int32')
+            offset = fluid.data(
+                name='offset', shape=[None, 3, 32, 32], dtype='float32')
+            mask = fluid.data(
+                name='mask', shape=[None, 3, 32, 32], dtype='float32')
+            loss = fluid.layers.deformable_conv(
+                input, offset, mask, num_filters=4, filter_size=1)
+
+        self.assertRaises(TypeError, test_invalid_offset)
+
+
 if __name__ == '__main__':
     unittest.main()
@@ -18,6 +18,7 @@ import unittest

 import numpy as np
 import paddle.fluid.core as core
+import paddle.fluid as fluid
 from op_test import OpTest
@@ -252,5 +253,37 @@ class TestWithGroup(TestModulatedDeformableConvOp):
         self.groups = 2


+class TestModulatedDeformableConvV1InvalidInput(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            input = [1, 3, 32, 32]
+            offset = fluid.data(
+                name='offset', shape=[None, 3, 32, 32], dtype='float32')
+            loss = fluid.layers.deformable_conv(
+                input,
+                offset,
+                mask=None,
+                num_filters=4,
+                filter_size=1,
+                modulated=False)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+        def test_invalid_offset():
+            input = fluid.data(
+                name='input', shape=[None, 3, 32, 32], dtype='int32')
+            offset = fluid.data(
+                name='offset', shape=[None, 3, 32, 32], dtype='float32')
+            loss = fluid.layers.deformable_conv(
+                input,
+                offset,
+                mask=None,
+                num_filters=4,
+                filter_size=1,
+                modulated=False)
+
+        self.assertRaises(TypeError, test_invalid_offset)
+
+
 if __name__ == '__main__':
     unittest.main()
@@ -47,5 +47,22 @@ class TestMseLoss(unittest.TestCase):
         self.assertTrue(np.isclose(np_result, result).all())


+class TestMseInvalidInput(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            input = [256, 3]
+            label = fluid.data(name='label', shape=[None, 3], dtype='float32')
+            loss = fluid.layers.mse_loss(input, label)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+        def test_invalid_label():
+            input = fluid.data(name='input1', shape=[None, 3], dtype='float32')
+            label = [256, 3]
+            loss = fluid.layers.mse_loss(input, label)
+
+        self.assertRaises(TypeError, test_invalid_label)
+
+
 if __name__ == "__main__":
     unittest.main()
@@ -16,6 +16,7 @@ from __future__ import print_function

 import unittest
 import numpy as np
+import paddle.fluid as fluid
 from op_test import OpTest
@@ -66,5 +67,15 @@ class TestCase2(TestPolygonBoxRestoreOp):
         self.input_shape = (3, 12, 4, 5)


+class TestPolygonBoxInvalidInput(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            input = fluid.data(
+                name='input', shape=[None, 3, 32, 32], dtype='int64')
+            out = fluid.layers.polygon_box_transform(input)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+
 if __name__ == '__main__':
     unittest.main()
@@ -48,5 +48,22 @@ class TestSquareErrorCost(unittest.TestCase):
         self.assertTrue(np.isclose(np_result, result).all())


+class TestSquareErrorInvalidInput(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            input = [256, 3]
+            label = fluid.data(name='label1', shape=[None, 3], dtype='float32')
+            loss = fluid.layers.square_error_cost(input, label)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+        def test_invalid_label():
+            input = fluid.data(name='input2', shape=[None, 3], dtype='float32')
+            label = [256, 3]
+            loss = fluid.layers.square_error_cost(input, label)
+
+        self.assertRaises(TypeError, test_invalid_label)
+
+
 if __name__ == "__main__":
     unittest.main()
@@ -19,6 +19,7 @@ from op_test import OpTest
 from scipy.special import logit
 from scipy.special import expit
 import unittest
+import paddle.fluid as fluid


 class TestTeacherStudentSigmoidLossOp(OpTest):
@@ -57,3 +58,20 @@ class TestTeacherStudentSigmoidLossOp(OpTest):

     def test_check_grad(self):
         self.check_grad(["X"], "Y", numeric_grad_delta=0.005)
+
+
+class TestTeacherStudentSigmoidLossInvalidInput(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            input = [512, 1]
+            label = fluid.data(name='label', shape=[None, 1], dtype='float32')
+            loss = fluid.layers.teacher_student_sigmoid_loss(input, label)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+        def test_invalid_label():
+            input = fluid.data(name='input1', shape=[None, 1], dtype='float32')
+            label = [512, 1]
+            loss = fluid.layers.teacher_student_sigmoid_loss(input, label)
+
+        self.assertRaises(TypeError, test_invalid_label)