Unverified commit 05c9642d, authored by suytingwan, committed by GitHub

Update paddle enforce message (#24498)

* test=develop error message update
Parent 9f83f0fe
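The commit applies one mechanical pattern across every file below: each bare PADDLE_ENFORCE(cond, "msg") becomes an explicit comparison macro (PADDLE_ENFORCE_EQ / _NE / _GT / _GE / _LT) against the expected value, and each message string is wrapped in a typed error from platform::errors (mostly InvalidArgument, with NotFound and Unimplemented where those fit better). A minimal before/after sketch of the pattern, built only from checks that appear in the diff itself:

    // Before: raw boolean condition plus a bare message string.
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");

    // After: an explicit comparison against the expected value, with the
    // message wrapped in a typed error object.
    PADDLE_ENFORCE_EQ(
        ctx->HasInput("X"), true,
        platform::errors::InvalidArgument("Input(X) should not be null."));

    // Comparison variants pass printf-style arguments through the error type:
    PADDLE_ENFORCE_GT(anchor_sizes[i], 0.0,
                      platform::errors::InvalidArgument(
                          "anchor_sizes[%d] must be positive.", i));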
@@ -28,39 +28,61 @@ class BilinearTensorProductOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Weight"),
-                   "Input(Weight) should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("X"), true,
+        platform::errors::InvalidArgument("Input(X) should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Y"), true,
+        platform::errors::InvalidArgument("Input(Y) should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Weight"), true,
+        platform::errors::InvalidArgument("Input(Weight) should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Out"), true,
+        platform::errors::InvalidArgument("Output(Out) should not be null."));
     auto x_dims = ctx->GetInputDim("X");
     auto y_dims = ctx->GetInputDim("Y");
     auto weight_dims = ctx->GetInputDim("Weight");
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, "The input(X) must be a 2D Tensor.");
-    PADDLE_ENFORCE_EQ(y_dims.size(), 2UL, "The input(Y) must be a 2D Tensor.");
+    PADDLE_ENFORCE_EQ(
+        x_dims.size(), 2UL,
+        platform::errors::InvalidArgument("The input(X) must be a 2D Tensor."));
+    PADDLE_ENFORCE_EQ(
+        y_dims.size(), 2UL,
+        platform::errors::InvalidArgument("The input(Y) must be a 2D Tensor."));
     PADDLE_ENFORCE_EQ(weight_dims.size(), 3UL,
-                      "The input(Weight) must be a 3D tensor.");
+                      platform::errors::InvalidArgument(
+                          "The input(Weight) must be a 3D tensor."));
     if (ctx->IsRuntime() || (x_dims[0] > 0 && y_dims[0] > 0)) {
-      PADDLE_ENFORCE_EQ(x_dims[0], y_dims[0],
-                        "The first dimension(batch_size) of input(X) must be "
-                        "equal to the first dimension of the input(Y).");
+      PADDLE_ENFORCE_EQ(
+          x_dims[0], y_dims[0],
+          platform::errors::InvalidArgument(
+              "The first dimension(batch_size) of input(X) must be "
+              "equal to the first dimension of the input(Y)."));
     }
     PADDLE_ENFORCE_EQ(x_dims[1], weight_dims[1],
-                      "The second dimension of input(X) must be equal to "
-                      "the second dimension of the input(Weight).");
+                      platform::errors::InvalidArgument(
+                          "The second dimension of input(X) must be equal to "
+                          "the second dimension of the input(Weight)."));
     PADDLE_ENFORCE_EQ(y_dims[1], weight_dims[2],
-                      "The second dimension of input(Y) must be equal to "
-                      "the third dimension of the input(Weight).");
+                      platform::errors::InvalidArgument(
+                          "The second dimension of input(Y) must be equal to "
+                          "the third dimension of the input(Weight)."));
     if (ctx->HasInput("Bias")) {
       auto bias_dims = ctx->GetInputDim("Bias");
-      PADDLE_ENFORCE(bias_dims.size() == 2UL && bias_dims[0] == 1UL,
-                     "The Input(Bias) must be a 2-D tensor with "
-                     "the 2nd dimension fixed to 1 (a row vector).");
+      PADDLE_ENFORCE_EQ(bias_dims.size(), 2UL,
+                        platform::errors::InvalidArgument(
+                            "The Input(Bias) must be a 2-D tensor with "
+                            "the 2nd dimension fixed to 1 (a row vector)."));
+      PADDLE_ENFORCE_EQ(bias_dims[0], 1UL,
+                        platform::errors::InvalidArgument(
+                            "The Input(Bias) must be a 2-D tensor with "
+                            "the 2nd dimension fixed to 1 (a row vector)."));
       PADDLE_ENFORCE_EQ(bias_dims[1], weight_dims[0],
-                        "The second dimension of input(Bias) must be equal "
-                        "to the first dimension of the input(Weight).");
+                        platform::errors::InvalidArgument(
+                            "The second dimension of input(Bias) must be equal "
+                            "to the first dimension of the input(Weight)."));
     }
     ctx->SetOutputDim("Out", {x_dims[0], weight_dims[0]});
@@ -104,27 +126,36 @@ class BilinearTensorProductOpGrad : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Weight"),
-                   "Input(Weight) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("X"), true,
+        platform::errors::InvalidArgument("Input(X) should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Y"), true,
+        platform::errors::InvalidArgument("Input(Y) should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Weight"), true,
+        platform::errors::InvalidArgument("Input(Weight) should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
+                      platform::errors::InvalidArgument(
+                          "Input(Out@GRAD) should not be null."));
     auto x_dims = ctx->GetInputDim("X");
     auto y_dims = ctx->GetInputDim("Y");
     auto weight_dims = ctx->GetInputDim("Weight");
     auto out_dims = ctx->GetInputDim(framework::GradVarName("Out"));
     PADDLE_ENFORCE_EQ(out_dims.size(), 2UL,
-                      "The input(Out@GRAD) must be a 2D Tensor.");
+                      platform::errors::InvalidArgument(
+                          "The input(Out@GRAD) must be a 2D Tensor."));
     PADDLE_ENFORCE_EQ(
         x_dims[0], out_dims[0],
-        "The first dimension(batch_size) of input(Out@GRAD) must be "
-        "equal to the first dimension of the Input(X).");
+        platform::errors::InvalidArgument(
+            "The first dimension(batch_size) of input(Out@GRAD) must be "
+            "equal to the first dimension of the Input(X)."));
     PADDLE_ENFORCE_EQ(
         weight_dims[0], out_dims[1],
-        "The second dimension of input(Out@GRAD) must be equal to "
-        "the third dimension of the Input(Weight).");
+        platform::errors::InvalidArgument(
+            "The second dimension of input(Out@GRAD) must be equal to "
+            "the third dimension of the Input(Weight)."));
     auto bias_grad_name = framework::GradVarName("Bias");
     if (ctx->HasOutput(bias_grad_name)) {
...
@@ -22,16 +22,23 @@ class AnchorGeneratorOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Input"),
-                   "Input(Input) of AnchorGeneratorOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Anchors"),
-                   "Output(Anchors) of AnchorGeneratorOp should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasOutput("Variances"),
-        "Output(Variances) of AnchorGeneratorOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Input"), true,
+        platform::errors::InvalidArgument(
+            "Input(Input) of AnchorGeneratorOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Anchors"), true,
+        platform::errors::InvalidArgument(
+            "Output(Anchors) of AnchorGeneratorOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Variances"), true,
+        platform::errors::InvalidArgument(
+            "Output(Variances) of AnchorGeneratorOp should not be null."));
     auto input_dims = ctx->GetInputDim("Input");
-    PADDLE_ENFORCE(input_dims.size() == 4, "The layout of input is NCHW.");
+    PADDLE_ENFORCE_EQ(
+        input_dims.size(), 4,
+        platform::errors::InvalidArgument("The layout of input is NCHW."));
     auto anchor_sizes = ctx->Attrs().Get<std::vector<float>>("anchor_sizes");
     auto aspect_ratios = ctx->Attrs().Get<std::vector<float>>("aspect_ratios");
@@ -87,10 +94,12 @@ class AnchorGeneratorOpMaker : public framework::OpProtoAndCheckerMaker {
                  "equals to 64**2.")
         .AddCustomChecker([](const std::vector<float>& anchor_sizes) {
           PADDLE_ENFORCE_GT(anchor_sizes.size(), 0UL,
-                            "Size of anchor_sizes must be at least 1.");
+                            platform::errors::InvalidArgument(
+                                "Size of anchor_sizes must be at least 1."));
           for (size_t i = 0; i < anchor_sizes.size(); ++i) {
             PADDLE_ENFORCE_GT(anchor_sizes[i], 0.0,
-                              "anchor_sizes[%d] must be positive.", i);
+                              platform::errors::InvalidArgument(
+                                  "anchor_sizes[%d] must be positive.", i));
           }
         });
     AddAttr<std::vector<float>>(
@@ -105,10 +114,12 @@ class AnchorGeneratorOpMaker : public framework::OpProtoAndCheckerMaker {
                  "in box regression deltas")
         .AddCustomChecker([](const std::vector<float>& variances) {
           PADDLE_ENFORCE_EQ(variances.size(), 4UL,
-                            "Must and only provide 4 variance.");
+                            platform::errors::InvalidArgument(
+                                "Must provide 4 variances only."));
           for (size_t i = 0; i < variances.size(); ++i) {
             PADDLE_ENFORCE_GT(variances[i], 0.0,
-                              "variance[%d] must be greater than 0.", i);
+                              platform::errors::InvalidArgument(
+                                  "variance[%d] must be greater than 0.", i));
           }
         });
@@ -119,10 +130,12 @@ class AnchorGeneratorOpMaker : public framework::OpProtoAndCheckerMaker {
         .AddCustomChecker([](const std::vector<float>& stride) {
           PADDLE_ENFORCE_EQ(
               stride.size(), 2UL,
-              "Must and only provide 2 stride for width and height.");
+              platform::errors::InvalidArgument(
+                  "Must provide 2 strides for width and height only."));
           for (size_t i = 0; i < stride.size(); ++i) {
             PADDLE_ENFORCE_GT(stride[i], 0.0,
-                              "stride[%d] should be larger than 0.", i);
+                              platform::errors::InvalidArgument(
+                                  "stride[%d] should be larger than 0.", i));
           }
         });
...
@@ -26,17 +26,23 @@ class BipartiteMatchOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("DistMat"),
-                   "Input(DistMat) of BipartiteMatch should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasOutput("ColToRowMatchIndices"),
-        "Output(ColToRowMatchIndices) of BipartiteMatch should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasOutput("ColToRowMatchDist"),
-        "Output(ColToRowMatchDist) of BipartiteMatch should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("DistMat"), true,
+        platform::errors::InvalidArgument(
+            "Input(DistMat) of BipartiteMatch should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("ColToRowMatchIndices"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(ColToRowMatchIndices) of BipartiteMatch "
+                          "should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("ColToRowMatchDist"), true,
+        platform::errors::InvalidArgument(
+            "Output(ColToRowMatchDist) of BipartiteMatch should not be null."));
     auto dims = ctx->GetInputDim("DistMat");
-    PADDLE_ENFORCE_EQ(dims.size(), 2, "The rank of Input(DistMat) must be 2.");
+    PADDLE_ENFORCE_EQ(dims.size(), 2,
+                      platform::errors::InvalidArgument(
+                          "The rank of Input(DistMat) must be 2."));
     ctx->SetOutputDim("ColToRowMatchIndices", dims);
     ctx->SetOutputDim("ColToRowMatchDist", dims);
@@ -64,7 +70,9 @@ class BipartiteMatchKernel : public framework::OpKernel<T> {
   // The match_dist must be initialized to 0 at first.
   void BipartiteMatch(const Tensor& dist, int* match_indices,
                       T* match_dist) const {
-    PADDLE_ENFORCE_EQ(dist.dims().size(), 2, "The rank of dist must be 2.");
+    PADDLE_ENFORCE_EQ(
+        dist.dims().size(), 2,
+        platform::errors::InvalidArgument("The rank of dist must be 2."));
     int64_t row = dist.dims()[0];
     int64_t col = dist.dims()[1];
     auto* dist_data = dist.data<T>();
@@ -127,7 +135,11 @@ class BipartiteMatchKernel : public framework::OpKernel<T> {
         // Cannot find good match.
         break;
       } else {
-        PADDLE_ENFORCE_EQ(match_indices[max_idx], -1);
+        PADDLE_ENFORCE_EQ(
+            match_indices[max_idx], -1,
+            platform::errors::InvalidArgument(
+                "The match_indices must be initialized to -1 at [%d].",
+                max_idx));
         match_indices[max_idx] = max_row_idx;
         match_dist[max_idx] = max_dist;
         // Erase the row index.
@@ -163,7 +175,10 @@ class BipartiteMatchKernel : public framework::OpKernel<T> {
         }
       }
       if (max_row_idx != -1) {
-        PADDLE_ENFORCE_EQ(match_indices[j], -1);
+        PADDLE_ENFORCE_EQ(
+            match_indices[j], -1,
+            platform::errors::InvalidArgument(
+                "The match_indices must be initialized to -1 at [%d].", j));
         match_indices[j] = max_row_idx;
         match_dist[j] = max_dist;
       }
@@ -183,8 +198,9 @@ class BipartiteMatchKernel : public framework::OpKernel<T> {
                 ? 1
                 : static_cast<int64_t>(dist_mat->lod().back().size() - 1);
     if (dist_mat->lod().size()) {
-      PADDLE_ENFORCE_EQ(dist_mat->lod().size(), 1UL,
-                        "Only support 1 level of LoD.");
+      PADDLE_ENFORCE_EQ(
+          dist_mat->lod().size(), 1UL,
+          platform::errors::InvalidArgument("Only support 1 level of LoD."));
     }
     match_indices->mutable_data<int>({n, col}, context.GetPlace());
     match_dist->mutable_data<T>({n, col}, context.GetPlace());
...
@@ -40,35 +40,49 @@ class GenerateMaskLabelsOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("ImInfo"), "Input(ImInfo) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasInput("GtClasses"),
-                   "Input(GtClasses) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasInput("IsCrowd"),
-                   "Input(IsCrowd) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasInput("GtSegms"),
-                   "Input(GtSegms) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Rois"), "Input(Rois) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasInput("LabelsInt32"),
-                   "Input(LabelsInt32) shouldn't be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("ImInfo"), true,
+        platform::errors::InvalidArgument("Input(ImInfo) shouldn't be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasInput("GtClasses"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(GtClasses) shouldn't be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("IsCrowd"), true,
+        platform::errors::InvalidArgument("Input(IsCrowd) shouldn't be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("GtSegms"), true,
+        platform::errors::InvalidArgument("Input(GtSegms) shouldn't be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Rois"), true,
+        platform::errors::InvalidArgument("Input(Rois) shouldn't be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasInput("LabelsInt32"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(LabelsInt32) shouldn't be null."));
 
-    PADDLE_ENFORCE(
-        ctx->HasOutput("MaskRois"),
-        "Output(MaskRois) of GenerateMaskLabelsOp should not be null");
-    PADDLE_ENFORCE(
-        ctx->HasOutput("RoiHasMaskInt32"),
-        "Output(RoiHasMaskInt32) of GenerateMaskLabelsOp should not be null");
-    PADDLE_ENFORCE(
-        ctx->HasOutput("MaskInt32"),
-        "Output(MaskInt32) of GenerateMaskLabelsOp should not be null");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("MaskRois"), true,
+        platform::errors::InvalidArgument(
+            "Output(MaskRois) of GenerateMaskLabelsOp should not be null"));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("RoiHasMaskInt32"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(RoiHasMaskInt32) of GenerateMaskLabelsOp "
+                          "should not be null"));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("MaskInt32"), true,
+        platform::errors::InvalidArgument(
+            "Output(MaskInt32) of GenerateMaskLabelsOp should not be null"));
     auto im_info_dims = ctx->GetInputDim("ImInfo");
     auto gt_segms_dims = ctx->GetInputDim("GtSegms");
     PADDLE_ENFORCE_EQ(im_info_dims.size(), 2,
-                      "The rank of Input(ImInfo) must be 2.");
+                      platform::errors::InvalidArgument(
+                          "The rank of Input(ImInfo) must be 2."));
     PADDLE_ENFORCE_EQ(gt_segms_dims.size(), 2,
-                      "The rank of Input(GtSegms) must be 2.");
+                      platform::errors::InvalidArgument(
+                          "The rank of Input(GtSegms) must be 2."));
     PADDLE_ENFORCE_EQ(gt_segms_dims[1], 2,
-                      "The second dim of Input(GtSegms) must be 2.");
+                      platform::errors::InvalidArgument(
+                          "The second dim of Input(GtSegms) must be 2."));
     int num_classes = ctx->Attrs().Get<int>("num_classes");
     int resolution = ctx->Attrs().Get<int>("resolution");
@@ -134,7 +148,11 @@ std::vector<Tensor> SampleMaskForOneImage(
   const int* gt_classes_data = gt_classes.data<int>();
   const int* is_crowd_data = is_crowd.data<int>();
   const int* label_int32_data = label_int32.data<int>();
-  PADDLE_ENFORCE_EQ(roi_size, label_int32.dims()[0]);
+  PADDLE_ENFORCE_EQ(roi_size, label_int32.dims()[0],
+                    platform::errors::InvalidArgument(
+                        "The first dim of label [%d] is different from "
+                        "roi_size [%d], they should be the same.",
+                        label_int32.dims()[0], roi_size));
   std::vector<int> mask_gt_inds, fg_inds;
   std::vector<std::vector<std::vector<T>>> gt_polys;
@@ -155,7 +173,12 @@ std::vector<Tensor> SampleMaskForOneImage(
     for (int j = 0; j < poly_num; ++j) {
       int s = lod2[s_idx + j];
      int e = lod2[s_idx + j + 1];
-      PADDLE_ENFORCE_NE(s, e);
+      PADDLE_ENFORCE_NE(s, e,
+                        platform::errors::InvalidArgument(
+                            "The start point and the end point in the poly "
+                            "segment [%d] should not be the same, but received "
+                            "the start point [%d] and the end point [%d].",
+                            i, s, e));
       std::vector<T> plts(polys_data + s * 2, polys_data + e * 2);
       polys.push_back(plts);
     }
@@ -295,19 +318,34 @@ class GenerateMaskLabelsKernel : public framework::OpKernel<T> {
     int num_classes = ctx.Attr<int>("num_classes");
     int resolution = ctx.Attr<int>("resolution");
-    PADDLE_ENFORCE_EQ(gt_classes->lod().size(), 1UL,
-                      "GenerateMaskLabelsOp gt_classes needs 1 level of LoD");
-    PADDLE_ENFORCE_EQ(is_crowd->lod().size(), 1UL,
-                      "GenerateMaskLabelsOp is_crowd needs 1 level of LoD");
+    PADDLE_ENFORCE_EQ(
+        gt_classes->lod().size(), 1UL,
+        platform::errors::InvalidArgument(
+            "GenerateMaskLabelsOp gt_classes needs 1 level of LoD"));
+    PADDLE_ENFORCE_EQ(
+        is_crowd->lod().size(), 1UL,
+        platform::errors::InvalidArgument(
+            "GenerateMaskLabelsOp is_crowd needs 1 level of LoD"));
     PADDLE_ENFORCE_EQ(rois->lod().size(), 1UL,
-                      "GenerateMaskLabelsOp rois needs 1 level of LoD");
-    PADDLE_ENFORCE_EQ(label_int32->lod().size(), 1UL,
-                      "GenerateMaskLabelsOp label_int32 needs 1 level of LoD");
-
-    PADDLE_ENFORCE_EQ(gt_segms->lod().size(), 3UL);
+                      platform::errors::InvalidArgument(
+                          "GenerateMaskLabelsOp rois needs 1 level of LoD"));
+    PADDLE_ENFORCE_EQ(
+        label_int32->lod().size(), 1UL,
+        platform::errors::InvalidArgument(
+            "GenerateMaskLabelsOp label_int32 needs 1 level of LoD"));
+    PADDLE_ENFORCE_EQ(
+        gt_segms->lod().size(), 3UL,
+        platform::errors::InvalidArgument(
+            "GenerateMaskLabelsOp gt_segms needs 3 levels of LoD"));
     int64_t n = static_cast<int64_t>(gt_classes->lod().back().size() - 1);
-    PADDLE_ENFORCE_EQ(gt_segms->lod()[0].size() - 1, n);
+    PADDLE_ENFORCE_EQ(
+        gt_segms->lod()[0].size() - 1, n,
+        platform::errors::InvalidArgument(
+            "Batchsize of Input(gt_segms) and Input(gt_classes) should be "
+            "the same, but received gt_segms[%d], gt_classes[%d].",
+            gt_segms->lod()[0].size() - 1, n));
     int mask_dim = num_classes * resolution * resolution;
     int roi_num = rois->lod().back()[n];
...
@@ -22,29 +22,41 @@ class TargetAssignOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of TargetAssignOp should not be null");
-    PADDLE_ENFORCE(ctx->HasInput("MatchIndices"),
-                   "Input(MatchIndices) of TargetAssignOp should not be null");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(X) of TargetAssignOp should not be null"));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("MatchIndices"), true,
+        platform::errors::InvalidArgument(
+            "Input(MatchIndices) of TargetAssignOp should not be null"));
 
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of TargetAssignOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("OutWeight"),
-                   "Output(OutWeight) of TargetAssignOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(Out) of TargetAssignOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("OutWeight"), true,
+        platform::errors::InvalidArgument(
+            "Output(OutWeight) of TargetAssignOp should not be null."));
     auto in_dims = ctx->GetInputDim("X");
     auto mi_dims = ctx->GetInputDim("MatchIndices");
-    PADDLE_ENFORCE_EQ(in_dims.size(), 3, "The rank of Input(X) must be 3.");
+    PADDLE_ENFORCE_EQ(
+        in_dims.size(), 3,
+        platform::errors::InvalidArgument("The rank of Input(X) must be 3."));
     PADDLE_ENFORCE_EQ(mi_dims.size(), 2,
-                      "The rank of Input(MatchIndices) must be 2.");
+                      platform::errors::InvalidArgument(
+                          "The rank of Input(MatchIndices) must be 2."));
     if (ctx->HasInput("NegIndices")) {
       auto neg_dims = ctx->GetInputDim("NegIndices");
       PADDLE_ENFORCE_EQ(neg_dims.size(), 2,
-                        "The rank of Input(NegIndices) must be 2.");
-      PADDLE_ENFORCE_EQ(neg_dims[1], 1,
-                        "The last dimension of Out(NegIndices) must be 1.");
+                        platform::errors::InvalidArgument(
+                            "The rank of Input(NegIndices) must be 2."));
+      PADDLE_ENFORCE_EQ(
+          neg_dims[1], 1,
+          platform::errors::InvalidArgument(
+              "The last dimension of Out(NegIndices) must be 1."));
     }
     auto n = mi_dims[0];
...
@@ -90,7 +90,9 @@ class TargetAssignKernel : public framework::OpKernel<T> {
     auto* out = ctx.Output<framework::Tensor>("Out");
     auto* out_wt = ctx.Output<framework::Tensor>("OutWeight");
-    PADDLE_ENFORCE_EQ(x->lod().size(), 1UL);
+    PADDLE_ENFORCE_EQ(x->lod().size(), 1UL,
+                      platform::errors::InvalidArgument(
+                          "TargetAssignOp input(X) needs 1 level of LoD"));
     int mismatch_value = ctx.Attr<int>("mismatch_value");
     const T* x_data = x->data<T>();
@@ -121,7 +123,10 @@ class TargetAssignKernel : public framework::OpKernel<T> {
     auto* neg_indices = ctx.Input<framework::LoDTensor>("NegIndices");
     if (neg_indices) {
-      PADDLE_ENFORCE_EQ(neg_indices->lod().size(), 1UL);
+      PADDLE_ENFORCE_EQ(
+          neg_indices->lod().size(), 1UL,
+          platform::errors::InvalidArgument(
+              "TargetAssignOp input(NegIndices) needs 1 level of LoD"));
       const int* neg_idx_data = neg_indices->data<int>();
       auto neg_lod = neg_indices->lod().back();
 #if defined(PADDLE_WITH_CUDA)
...
@@ -24,19 +24,25 @@ class FilterByInstagOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Ins"), true,
-                      "Input(Ins) should be not null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Ins_tag"), true,
-                      "Input(Ins_tag) should be not null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Filter_tag"), true,
-                      "Input(Filter_tag) should be not null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) should be not null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("LossWeight"), true,
-                      "Output(LossWeight) shoudl not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("IndexMap"), true,
-                      "Output(IndexMap) should be not null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Ins"), true,
+        platform::errors::InvalidArgument("Input(Ins) should be not null."));
+    PADDLE_ENFORCE_EQ(ctx->HasInput("Ins_tag"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(Ins_tag) should be not null."));
+    PADDLE_ENFORCE_EQ(ctx->HasInput("Filter_tag"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(Filter_tag) should be not null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Out"), true,
+        platform::errors::InvalidArgument("Output(Out) should be not null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("LossWeight"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(LossWeight) should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("IndexMap"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(IndexMap) should be not null."));
     auto x1_dims = ctx->GetInputDim("Ins");  // batch_size * vec
@@ -85,15 +91,20 @@ class FilterByInstagOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("IndexMap"), true,
-                      "Input(IndexMap) should be not null");
-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
-                      "Grad Input(Out) should be not null");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Ins"), true,
-                      "Input(Ins) should be not null");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("LossWeight"), true,
-                      "Input(LossWeight) should be not null");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("Ins")), true,
-                      "Grad Output(Ins) should be not null");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("IndexMap"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(IndexMap) should be not null"));
+    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
+                      platform::errors::InvalidArgument(
+                          "Grad Input(Out) should be not null"));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Ins"), true,
+        platform::errors::InvalidArgument("Input(Ins) should be not null"));
+    PADDLE_ENFORCE_EQ(ctx->HasInput("LossWeight"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(LossWeight) should be not null"));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("Ins")), true,
+                      platform::errors::InvalidArgument(
+                          "Grad Output(Ins) should be not null"));
     auto grad_out_dims = ctx->GetInputDim(framework::GradVarName("Out"));
     auto x1_dims = ctx->GetInputDim("Ins");
...
@@ -51,11 +51,19 @@ struct OneHotV2OpFunctor {
       }
     } else {
       for (int i = 0; i < numel; ++i) {
-        PADDLE_ENFORCE_GE(p_in_data[i], 0,
-                          "Illegal index value, should be at least 0.");
+        PADDLE_ENFORCE_GE(
+            p_in_data[i], 0,
+            platform::errors::InvalidArgument(
+                "Illegal index value, Input(input) value should be at least 0, "
+                "but received input (%d) less than 0",
+                p_in_data[i]));
         PADDLE_ENFORCE_LT(
             p_in_data[i], depth_,
-            "Illegal index value, should be less than depth (%d).", depth_);
+            platform::errors::InvalidArgument(
+                "Illegal index value, Input(input) value should be less than "
+                "Input(depth), "
+                "but received input (%d) not less than depth (%d)",
+                p_in_data[i], depth_));
         *(p_out_data + i * depth_ + p_in_data[i]) = 1.0;
       }
     }
...
@@ -29,12 +29,15 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of Pooling should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of Pooling should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Mask"),
-                   "Output(Mask) of Pooling should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(X) of Pooling should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(Out) of Pooling should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Mask"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(Mask) of Pooling should not be null."));
     auto in_x_dims = ctx->GetInputDim("X");
@@ -54,12 +57,16 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
       }
     }
-    PADDLE_ENFORCE(in_x_dims.size() - ksize.size() == 2U,
-                   "Input size and pooling size should be consistent.");
+    PADDLE_ENFORCE_EQ(in_x_dims.size() - ksize.size(), 2U,
+                      platform::errors::InvalidArgument(
+                          "Input size and pooling size should be consistent."));
     PADDLE_ENFORCE_EQ(ksize.size(), strides.size(),
-                      "Strides size and pooling size should be the same.");
-    PADDLE_ENFORCE_EQ(ksize.size(), paddings.size(),
-                      "Paddings size and pooling size should be the same.");
+                      platform::errors::InvalidArgument(
+                          "Strides size and pooling size should be the same."));
+    PADDLE_ENFORCE_EQ(
+        ksize.size(), paddings.size(),
+        platform::errors::InvalidArgument(
+            "Paddings size and pooling size should be the same."));
     std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
     if (adaptive) {
@@ -90,15 +97,16 @@ class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE_EQ(
         ctx->HasInput("Mask"), true,
-        platform::errors::NotFound("Input(Mask) must not be null."));
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      platform::errors::NotFound("Input(X) must not be null."));
-    PADDLE_ENFORCE_EQ(
-        ctx->HasInput(framework::GradVarName("Out")), true,
-        platform::errors::NotFound("Input(Out@GRAD) should not be null."));
-    PADDLE_ENFORCE_EQ(
-        ctx->HasOutput(framework::GradVarName("X")), true,
-        platform::errors::NotFound("Output(X@GRAD) should not be null."));
+        platform::errors::InvalidArgument("Input(Mask) must not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("X"), true,
+        platform::errors::InvalidArgument("Input(X) must not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
+                      platform::errors::InvalidArgument(
+                          "Input(Out@GRAD) should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")), true,
+                      platform::errors::InvalidArgument(
+                          "Output(X@GRAD) should not be null."));
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
   }
...
@@ -81,43 +81,57 @@ class PSROIPoolOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of PSROIPoolOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("ROIs"),
-                   "Input(ROIs) of PSROIPoolOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of PSROIPoolOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(X) of PSROIPoolOp should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasInput("ROIs"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(ROIs) of PSROIPoolOp should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(Out) of PSROIPoolOp should not be null."));
     auto input_dims = ctx->GetInputDim("X");
     auto rois_dims = ctx->GetInputDim("ROIs");
-    PADDLE_ENFORCE(input_dims.size() == 4,
-                   "The format of input tensor is NCHW");
-    PADDLE_ENFORCE(rois_dims.size() == 2,
-                   "ROIs should be a 2-D LoDTensor of shape (num_rois, 4) "
-                   "given as [(x1, y1, x2, y2), ...]");
-    PADDLE_ENFORCE(rois_dims[1] == 4,
-                   "ROIs should be a 2-D LoDTensor of shape (num_rois, 4) "
-                   "given as [(x1, y1, x2, y2), ...]");
+    PADDLE_ENFORCE_EQ(input_dims.size(), 4,
+                      platform::errors::InvalidArgument(
+                          "The format of input tensor is NCHW"));
+    PADDLE_ENFORCE_EQ(
+        rois_dims.size(), 2,
+        platform::errors::InvalidArgument(
+            "ROIs should be a 2-D LoDTensor of shape (num_rois, 4) "
+            "given as [(x1, y1, x2, y2), ...]"));
+    PADDLE_ENFORCE_EQ(
+        rois_dims[1], 4,
+        platform::errors::InvalidArgument(
+            "ROIs should be a 2-D LoDTensor of shape (num_rois, 4) "
+            "given as [(x1, y1, x2, y2), ...]"));
     int pooled_height = ctx->Attrs().Get<int>("pooled_height");
     int pooled_width = ctx->Attrs().Get<int>("pooled_width");
     int output_channels = ctx->Attrs().Get<int>("output_channels");
     float spatial_scale = ctx->Attrs().Get<float>("spatial_scale");
-    PADDLE_ENFORCE(
-        input_dims[1] == output_channels * pooled_height * pooled_width,
-        "the channel of X(%d) should be equal to the product of "
-        "output_channels(%d), pooled_height(%d) and pooled_width(%d)",
-        input_dims[1], output_channels, pooled_height, pooled_width);
+    PADDLE_ENFORCE_EQ(
+        input_dims[1], output_channels * pooled_height * pooled_width,
+        platform::errors::InvalidArgument(
+            "the channel of X(%d) "
+            "should be equal to the product of "
+            "output_channels(%d), pooled_height(%d) and pooled_width(%d)",
+            input_dims[1], output_channels, pooled_height, pooled_width));
     PADDLE_ENFORCE_GT(pooled_height, 0,
-                      "The pooled output height must be greater than 0");
+                      platform::errors::InvalidArgument(
+                          "The pooled output height must be greater than 0"));
     PADDLE_ENFORCE_GT(pooled_width, 0,
-                      "The pooled output width must be greater than 0");
+                      platform::errors::InvalidArgument(
+                          "The pooled output width must be greater than 0"));
     PADDLE_ENFORCE_GT(output_channels, 1,
-                      "The pooled output channels must greater than 1");
+                      platform::errors::InvalidArgument(
+                          "The pooled output channels must be greater than 1"));
     PADDLE_ENFORCE_GT(spatial_scale, 0.0f,
-                      "The spatial scale must greater than 0.");
+                      platform::errors::InvalidArgument(
+                          "The spatial scale must be greater than 0."));
     auto out_dims = input_dims;
     out_dims[0] = rois_dims[0];
@@ -142,10 +156,12 @@ class PSROIPoolGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "The gradient of Out should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
-                   "The gradient of X should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
+                      platform::errors::InvalidArgument(
+                          "The gradient of Out should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")), true,
+                      platform::errors::InvalidArgument(
+                          "The gradient of X should not be null."));
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
   }
...
@@ -54,15 +54,19 @@ class CPUPSROIPoolOpKernel : public framework::OpKernel<T> {
     int rois_batch_size = rois_lod.size() - 1;
     PADDLE_ENFORCE_EQ(
         rois_batch_size, batch_size,
-        "the rois_batch_size and input(X) batch_size should be the same.");
+        platform::errors::InvalidArgument("the rois_batch_size and input(X) "
+                                          "batch_size should be the same."));
     int rois_num_with_lod = rois_lod[rois_batch_size];
     PADDLE_ENFORCE_EQ(rois_num_with_lod, rois_num,
-                      "the rois_num from input and lod must be the same");
+                      platform::errors::InvalidArgument(
+                          "the rois_num from input and lod must be the same"));
     PADDLE_ENFORCE_EQ(input_channels,
                       output_channels * pooled_height * pooled_width,
-                      "the channels of input X should equal the product of "
-                      "output_channels x pooled_height x pooled_width");
+                      platform::errors::InvalidArgument(
+                          "the channels of input "
+                          "X should equal the product of "
+                          "output_channels x pooled_height x pooled_width"));
     // calculate batch id index for each roi according to LoD
     for (int n = 0; n < rois_batch_size; ++n) {
...
@@ -36,7 +36,10 @@ class ROIPoolOp : public framework::OperatorWithKernel {
     if (ctx->HasInput("RoisLod")) {
       auto rois_lod_dims = ctx->GetInputDim("RoisLod");
-      PADDLE_ENFORCE(rois_lod_dims.size() == 1, "");
+      PADDLE_ENFORCE_EQ(rois_lod_dims.size(), 1,
+                        platform::errors::InvalidArgument(
+                            "The lod information tensor of ROIs should "
+                            "be one-dimensional"));
     }
     PADDLE_ENFORCE_EQ(input_dims.size(), 4,
                       platform::errors::InvalidArgument(
...
@@ -63,7 +63,8 @@ class CPUROIPoolOpKernel : public framework::OpKernel<T> {
       rois_batch_size = rois_lod_t->numel();
       PADDLE_ENFORCE_EQ(
           rois_batch_size - 1, batch_size,
-          "The rois_batch_size and imgs batch_size must be the same.");
+          platform::errors::InvalidArgument("The rois_batch_size and imgs "
+                                            "batch_size must be the same."));
       auto* rois_lod = rois_lod_t->data<int64_t>();
       for (int n = 0; n < rois_batch_size - 1; ++n) {
         for (int i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
@@ -75,10 +76,13 @@ class CPUROIPoolOpKernel : public framework::OpKernel<T> {
       rois_batch_size = rois_lod.size() - 1;
       PADDLE_ENFORCE_EQ(
           rois_batch_size, batch_size,
-          "The rois_batch_size and imgs batch_size must be the same.");
+          platform::errors::InvalidArgument("The rois_batch_size and imgs "
+                                            "batch_size must be the same."));
       int rois_num_with_lod = rois_lod[rois_batch_size];
-      PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod,
-                        "The rois_num from input and lod must be the same.");
+      PADDLE_ENFORCE_EQ(
+          rois_num, rois_num_with_lod,
+          platform::errors::InvalidArgument("The rois_num from input "
+                                            "and lod must be the same."));
       for (int n = 0; n < rois_batch_size; ++n) {
         for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
           roi_batch_id_data[i] = n;
...
@@ -34,21 +34,30 @@ class SoftmaxOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SoftmaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SoftmaxOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("X"), true,
+        platform::errors::NotFound("Input(X) of SoftmaxOp is not found."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Out"), true,
+        platform::errors::NotFound("Output(Out) of SoftmaxOp is not found."));
     auto dim_x = ctx->GetInputDim("X");
     auto rank_x = dim_x.size();
     auto axis = ctx->Attrs().Get<int>("axis");
-    PADDLE_ENFORCE(axis >= -rank_x && axis < rank_x,
-                   "Attr(axis) value should be in range [-R, R-1], "
-                   "R is the rank of Input(X).");
+    PADDLE_ENFORCE_GE(axis, -rank_x,
+                      platform::errors::InvalidArgument(
+                          "Attr(axis) value should be in range [-R, R-1], "
+                          "R is the rank of Input(X)."));
+    PADDLE_ENFORCE_LT(axis, rank_x,
+                      platform::errors::InvalidArgument(
+                          "Attr(axis) value should be in range [-R, R-1], "
+                          "R is the rank of Input(X)."));
     auto use_cudnn = ctx->Attrs().Get<bool>("use_cudnn");
     if (axis != rank_x - 1 && axis != -1) {
-      PADDLE_ENFORCE(!use_cudnn, "CUDNN kernel only support axis as -1.");
+      PADDLE_ENFORCE_EQ(use_cudnn, false,
+                        platform::errors::InvalidArgument(
+                            "CUDNN kernel only supports axis as -1."));
     }
     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
@@ -78,8 +87,9 @@ class SoftmaxOp : public framework::OperatorWithKernel {
     auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
     if (input_data_type == framework::proto::VarType::FP16) {
-      PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                     "float16 can only be used on GPU place");
+      PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
+                        platform::errors::InvalidArgument(
+                            "float16 can only be used on GPU place"));
     }
     return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_,
@@ -157,12 +167,17 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Out"), "Input(Out) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should be not null.");
-    PADDLE_ENFORCE_EQ(ctx->GetInputDim("Out"),
-                      ctx->GetInputDim(framework::GradVarName("Out")),
-                      "Input(Out) and its gradients should have a same shape.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Out"), true,
+        platform::errors::InvalidArgument("Input(Out) is not found."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput(framework::GradVarName("Out")), true,
+        platform::errors::InvalidArgument("Input(Out@GRAD) is not found."));
+    PADDLE_ENFORCE_EQ(
+        ctx->GetInputDim("Out"),
+        ctx->GetInputDim(framework::GradVarName("Out")),
+        platform::errors::InvalidArgument("Input(Out) and its gradients "
+                                          "should have the same shape."));
     ctx->SetOutputDim(framework::GradVarName("X"),
                       ctx->GetInputDim(framework::GradVarName("Out")));
@@ -191,8 +206,9 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
     auto input_data_type = OperatorWithKernel::IndicateVarDataType(
         ctx, framework::GradVarName("Out"));
     if (input_data_type == framework::proto::VarType::FP16) {
-      PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                     "float16 can only be used on GPU place");
+      PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
+                        platform::errors::InvalidArgument(
+                            "float16 can only be used on GPU place"));
     }
     return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_,
...
...@@ -108,39 +108,51 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel { ...@@ -108,39 +108,51 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel; using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override { void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("Logits"), PADDLE_ENFORCE_EQ(
"Input(Logits) should be not null."); ctx->HasInput("Logits"), true,
PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null."); platform::errors::InvalidArgument("Input(Logits) should be not null."));
PADDLE_ENFORCE_EQ(
PADDLE_ENFORCE(ctx->HasOutput("Softmax"), ctx->HasInput("Label"), true,
"Output(Softmax) should be not null."); platform::errors::InvalidArgument("Input(Label) should be not null."));
PADDLE_ENFORCE(ctx->HasOutput("Loss"), "Output(Loss) should be not null.");
PADDLE_ENFORCE_EQ(ctx->HasOutput("Softmax"), true,
platform::errors::InvalidArgument(
"Output(Softmax) should be not null."));
PADDLE_ENFORCE_EQ(
ctx->HasOutput("Loss"), true,
platform::errors::InvalidArgument("Output(Loss) should be not null."));
auto axis = ctx->Attrs().Get<int>("axis"); auto axis = ctx->Attrs().Get<int>("axis");
auto logits_dims = ctx->GetInputDim("Logits"); auto logits_dims = ctx->GetInputDim("Logits");
auto labels_dims = ctx->GetInputDim("Label"); auto labels_dims = ctx->GetInputDim("Label");
auto logits_rank = logits_dims.size(); auto logits_rank = logits_dims.size();
PADDLE_ENFORCE(axis >= -logits_rank && axis < logits_rank, PADDLE_ENFORCE_GE(axis, -logits_rank,
"Attr(axis) value should be in range [-R, R-1], " platform::errors::InvalidArgument(
"R is the rank of Input(Logits)."); "Attr(axis) value should be in range [-R, R-1], "
"R is the rank of Input(Logits)."));
PADDLE_ENFORCE_LT(axis, logits_rank,
platform::errors::InvalidArgument(
"Attr(axis) value should be in range [-R, R-1], "
"R is the rank of Input(Logits)."));
axis = CanonicalAxis(axis, logits_rank); axis = CanonicalAxis(axis, logits_rank);
for (int i = 0; i < logits_rank; i++) { for (int i = 0; i < logits_rank; i++) {
if (i != axis) { if (i != axis) {
if (ctx->IsRuntime() || (logits_dims[i] > 0 && labels_dims[i] > 0)) { if (ctx->IsRuntime() || (logits_dims[i] > 0 && labels_dims[i] > 0)) {
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(logits_dims[i], labels_dims[i],
logits_dims[i], labels_dims[i], platform::errors::InvalidArgument(
"Input(Logits) and Input(Label) should in same shape in " "Input(Logits) and Input(Label) should in "
"dimensions except axis."); "same shape in dimensions except axis."));
} }
} }
} }
auto numeric_stable_mode = ctx->Attrs().Get<bool>("numeric_stable_mode"); auto numeric_stable_mode = ctx->Attrs().Get<bool>("numeric_stable_mode");
if (axis != logits_rank - 1) { if (axis != logits_rank - 1) {
PADDLE_ENFORCE( PADDLE_ENFORCE_EQ(numeric_stable_mode, true,
numeric_stable_mode, platform::errors::InvalidArgument(
"Attr(axis) can only be -1 when not in numeric_stable_mode."); "Attr(axis) can only be -1 "
"when not in numeric_stable_mode."));
} }
bool soft_label = ctx->Attrs().Get<bool>("soft_label"); bool soft_label = ctx->Attrs().Get<bool>("soft_label");
...@@ -148,14 +160,18 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel { ...@@ -148,14 +160,18 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel {
if (ctx->IsRuntime() || if (ctx->IsRuntime() ||
(logits_dims[axis] > 0 && labels_dims[axis] > 0)) { (logits_dims[axis] > 0 && labels_dims[axis] > 0)) {
PADDLE_ENFORCE_EQ(logits_dims[axis], labels_dims[axis], PADDLE_ENFORCE_EQ(logits_dims[axis], labels_dims[axis],
"If Attr(soft_label) == true, the axis dimension of " platform::errors::InvalidArgument(
"Input(X) and Input(Label) should be equal."); "If Attr(soft_label) == true, "
"the axis dimension of "
"Input(X) and Input(Label) should be equal."));
} }
} else { } else {
if (ctx->IsRuntime() || labels_dims[axis] > 0) { if (ctx->IsRuntime() || labels_dims[axis] > 0) {
PADDLE_ENFORCE_EQ(labels_dims[axis], 1UL, PADDLE_ENFORCE_EQ(
"If Attr(soft_label) == false, the axis dimension of " labels_dims[axis], 1UL,
"Input(Label) should be 1."); platform::errors::InvalidArgument("If Attr(soft_label) == false, "
"the axis dimension of "
"Input(Label) should be 1."));
} }
} }
...@@ -182,21 +198,31 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel { ...@@ -182,21 +198,31 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel; using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override { void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Loss")), PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Loss")), true,
"Input(Loss@Grad) should not be null."); platform::errors::InvalidArgument(
PADDLE_ENFORCE(ctx->HasInput("Softmax"), "Input(Loss@Grad) should not be null."));
"Input(Softmax) should be not null."); PADDLE_ENFORCE_EQ(ctx->HasInput("Softmax"), true,
PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null."); platform::errors::InvalidArgument(
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("Logits")), "Input(Softmax) should be not null."));
"Output(Logits@Grad) should be not null."); PADDLE_ENFORCE_EQ(
ctx->HasInput("Label"), true,
platform::errors::InvalidArgument("Input(Label) should be not null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("Logits")), true,
platform::errors::InvalidArgument(
"Output(Logits@Grad) should be not null."));
auto axis = ctx->Attrs().Get<int>("axis"); auto axis = ctx->Attrs().Get<int>("axis");
auto softmax_dims = ctx->GetInputDim("Softmax"); auto softmax_dims = ctx->GetInputDim("Softmax");
auto labels_dims = ctx->GetInputDim("Label"); auto labels_dims = ctx->GetInputDim("Label");
auto softmax_rank = softmax_dims.size(); auto softmax_rank = softmax_dims.size();
PADDLE_ENFORCE(axis >= -softmax_rank && axis < softmax_rank, PADDLE_ENFORCE_GE(axis, -softmax_rank,
"Attr(axis) value should be in range [-R, R-1], " platform::errors::InvalidArgument(
"R is the rank of Input(Logits)."); "Attr(axis) value should be in range [-R, R-1], "
"R is the rank of Input(Logits)."));
PADDLE_ENFORCE_LT(axis, softmax_rank,
platform::errors::InvalidArgument(
"Attr(axis) value should be in range [-R, R-1], "
"R is the rank of Input(Logits)."));
axis = CanonicalAxis(axis, softmax_rank); axis = CanonicalAxis(axis, softmax_rank);
for (int i = 0; i < softmax_rank; i++) { for (int i = 0; i < softmax_rank; i++) {
...@@ -204,8 +230,9 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel { ...@@ -204,8 +230,9 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel {
if (ctx->IsRuntime() || (softmax_dims[i] > 0 && labels_dims[i] > 0)) { if (ctx->IsRuntime() || (softmax_dims[i] > 0 && labels_dims[i] > 0)) {
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
softmax_dims[i], labels_dims[i], softmax_dims[i], labels_dims[i],
"Input(Logits) and Input(Label) should in same shape in " platform::errors::InvalidArgument(
"dimensions except axis."); "Input(Logits) and Input(Label) should in same shape in "
"dimensions except axis."));
} }
} }
} }
...@@ -215,14 +242,18 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel { ...@@ -215,14 +242,18 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel {
if (ctx->IsRuntime() || if (ctx->IsRuntime() ||
(softmax_dims[axis] > 0 && labels_dims[axis] > 0)) { (softmax_dims[axis] > 0 && labels_dims[axis] > 0)) {
PADDLE_ENFORCE_EQ(softmax_dims[axis], labels_dims[axis], PADDLE_ENFORCE_EQ(softmax_dims[axis], labels_dims[axis],
"If Attr(soft_label) == true, the axis dimension of " platform::errors::InvalidArgument(
"Input(X) and Input(Label) should be equal."); "If Attr(soft_label) == true, "
"the axis dimension of "
"Input(X) and Input(Label) should be equal."));
} }
} else { } else {
if (ctx->IsRuntime() || labels_dims[axis] > 0) { if (ctx->IsRuntime() || labels_dims[axis] > 0) {
PADDLE_ENFORCE_EQ(labels_dims[axis], 1UL, PADDLE_ENFORCE_EQ(
"If Attr(soft_label) == false, the axis dimension of " labels_dims[axis], 1UL,
"Input(Label) should be 1."); platform::errors::InvalidArgument("If Attr(soft_label) == false, "
"the axis dimension of "
"Input(Label) should be 1."));
} }
} }
......
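Note how the single range check axis >= -softmax_rank && axis < softmax_rank is split into a PADDLE_ENFORCE_GE and a PADDLE_ENFORCE_LT, so each violated bound now fails with its own report. After the checks, CanonicalAxis folds a negative axis into the non-negative range; a sketch of the intended behavior (only the call site appears in this diff, so the body below is an assumption):

// Maps axis in [-rank, rank) onto [0, rank); e.g. axis = -1, rank = 4 -> 3.
static int CanonicalAxis(const int axis, const int rank) {
  return axis < 0 ? axis + rank : axis;
}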
...@@ -31,8 +31,9 @@ template <typename T> ...@@ -31,8 +31,9 @@ template <typename T>
class SoftmaxWithCrossEntropyKernel : public framework::OpKernel<T> { class SoftmaxWithCrossEntropyKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
PADDLE_ENFORCE(platform::is_cpu_place(context.GetPlace()), PADDLE_ENFORCE_EQ(
"This kernel only runs on CPU."); platform::is_cpu_place(context.GetPlace()), true,
platform::errors::Unimplemented("This kernel only runs on CPU."));
const Tensor* logits = context.Input<Tensor>("Logits"); const Tensor* logits = context.Input<Tensor>("Logits");
const Tensor* labels = context.Input<Tensor>("Label"); const Tensor* labels = context.Input<Tensor>("Label");
Tensor* softmax = context.Output<Tensor>("Softmax"); Tensor* softmax = context.Output<Tensor>("Softmax");
......
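The error class here is platform::errors::Unimplemented rather than InvalidArgument: failing the place check means this kernel has no implementation for the requested device, not that the caller passed a malformed input. The guard in isolation:

// Reject non-CPU places up front; a GPU kernel, if any, is registered separately.
PADDLE_ENFORCE_EQ(
    platform::is_cpu_place(context.GetPlace()), true,
    platform::errors::Unimplemented("This kernel only runs on CPU."));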
...@@ -62,15 +62,17 @@ class SppOp : public framework::OperatorWithKernel { ...@@ -62,15 +62,17 @@ class SppOp : public framework::OperatorWithKernel {
public: public:
using framework::OperatorWithKernel::OperatorWithKernel; using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override { void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
"Input(X) of SppOp" platform::errors::InvalidArgument(
"should not be null."); "Input(X) of SppOp should not be null."));
PADDLE_ENFORCE(ctx->HasOutput("Out"), PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
"Output(Out) of SppOp should not be null."); platform::errors::InvalidArgument(
"Output(Out) of SppOp should not be null."));
auto in_x_dims = ctx->GetInputDim("X"); auto in_x_dims = ctx->GetInputDim("X");
int pyramid_height = ctx->Attrs().Get<int>("pyramid_height"); int pyramid_height = ctx->Attrs().Get<int>("pyramid_height");
PADDLE_ENFORCE(in_x_dims.size() == 4, PADDLE_ENFORCE_EQ(in_x_dims.size(), 4,
"Spping intput must be of 4-dimensional."); platform::errors::InvalidArgument(
"Spping intput must be of 4-dimensional."));
int outlen = ((std::pow(4, pyramid_height) - 1) / (4 - 1)) * in_x_dims[1]; int outlen = ((std::pow(4, pyramid_height) - 1) / (4 - 1)) * in_x_dims[1];
std::vector<int64_t> output_shape({in_x_dims[0], outlen}); std::vector<int64_t> output_shape({in_x_dims[0], outlen});
ctx->SetOutputDim("Out", framework::make_ddim(output_shape)); ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
...@@ -81,9 +83,12 @@ class SppOpGrad : public framework::OperatorWithKernel { ...@@ -81,9 +83,12 @@ class SppOpGrad : public framework::OperatorWithKernel {
public: public:
using framework::OperatorWithKernel::OperatorWithKernel; using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override { void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null."); PADDLE_ENFORCE_EQ(
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), ctx->HasInput("X"), true,
"Input(X@GRAD) should not be null."); platform::errors::InvalidArgument("Input(X) must not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasOutput(framework::GradVarName("X")), true,
platform::errors::InvalidArgument("Input(X@GRAD) should not be null."));
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
} }
}; };
......
...@@ -27,16 +27,22 @@ class UnsqueezeOp : public framework::OperatorWithKernel { ...@@ -27,16 +27,22 @@ class UnsqueezeOp : public framework::OperatorWithKernel {
void InferShape(framework::InferShapeContext *ctx) const override { void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
"Input(X) of Unsqueeze operator should not be null."); platform::errors::InvalidArgument(
"Input(X) of "
"Unsqueeze operator should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true, PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
"Output(Out) of Unsqueeze operator should not be null."); platform::errors::InvalidArgument(
"Output(Out) of "
"Unsqueeze operator should not be null."));
const auto &axes = ctx->Attrs().Get<std::vector<int>>("axes"); const auto &axes = ctx->Attrs().Get<std::vector<int>>("axes");
const auto &x_dims = ctx->GetInputDim("X"); const auto &x_dims = ctx->GetInputDim("X");
// Validity Check: input tensor dims (<= 6). // Validity Check: input tensor dims (<= 6).
PADDLE_ENFORCE_LE(x_dims.size(), 6, PADDLE_ENFORCE_LE(x_dims.size(), 6,
"Invalid dimensions, the rank of Input(X) " platform::errors::InvalidArgument(
"should be in the range of [1, 6] (Eigen limit)"); "Invalid "
"dimensions, the rank of Input(X) "
"should be in the range of [1, 6] (Eigen limit)"));
if (!axes.empty()) { if (!axes.empty()) {
auto out_dims = GetOutputShape(axes, x_dims); auto out_dims = GetOutputShape(axes, x_dims);
ctx->SetOutputDim("Out", out_dims); ctx->SetOutputDim("Out", out_dims);
...@@ -49,24 +55,29 @@ class UnsqueezeOp : public framework::OperatorWithKernel { ...@@ -49,24 +55,29 @@ class UnsqueezeOp : public framework::OperatorWithKernel {
auto AxesTensorList = ctx->Inputs("AxesTensorList"); auto AxesTensorList = ctx->Inputs("AxesTensorList");
int output_size = x_dims.size() + static_cast<int>(AxesTensorList.size()); int output_size = x_dims.size() + static_cast<int>(AxesTensorList.size());
PADDLE_ENFORCE_LE(output_size, 6, PADDLE_ENFORCE_LE(output_size, 6,
"The output tensor's rank should be less than 6."); platform::errors::InvalidArgument(
"The output tensor's rank should be less than 6."));
std::vector<int> vec_out_dims(output_size, -1); std::vector<int> vec_out_dims(output_size, -1);
ctx->SetOutputDim("Out", framework::make_ddim(vec_out_dims)); ctx->SetOutputDim("Out", framework::make_ddim(vec_out_dims));
} else if (ctx->HasInput("AxesTensor")) { } else if (ctx->HasInput("AxesTensor")) {
auto axes_dims = ctx->GetInputDim("AxesTensor"); auto axes_dims = ctx->GetInputDim("AxesTensor");
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(axes_dims.size(), 1,
axes_dims.size(), 1, platform::errors::InvalidArgument(
"Input(AxesTensor)'s dimension of Op(unsqueeze) must be 1. " "Input(AxesTensor)'s dimension of "
"But received AxesTensor's shape = [%s], " "Op(unsqueeze) must be 1. "
"AxesTensor's dimension = %d.", "But received AxesTensor's shape = [%s], "
axes_dims, axes_dims.size()); "AxesTensor's dimension = %d.",
PADDLE_ENFORCE_GE(axes_dims[0], 0, axes_dims, axes_dims.size()));
"Input(AxesTensor)'s shape must be known. But received " PADDLE_ENFORCE_GE(
"AxesTensor's shape = [%s]", axes_dims[0], 0,
axes_dims); platform::errors::InvalidArgument(
"Input(AxesTensor)'s shape must be known. But received "
"AxesTensor's shape = [%s]",
axes_dims));
int output_size = x_dims.size() + static_cast<int>(axes_dims[0]); int output_size = x_dims.size() + static_cast<int>(axes_dims[0]);
PADDLE_ENFORCE_LE(output_size, 6, PADDLE_ENFORCE_LE(output_size, 6,
"The output tensor's rank should be less than 6."); platform::errors::InvalidArgument(
"The output tensor's rank should be less than 6."));
std::vector<int> vec_out_dims(output_size, -1); std::vector<int> vec_out_dims(output_size, -1);
ctx->SetOutputDim("Out", framework::make_ddim(vec_out_dims)); ctx->SetOutputDim("Out", framework::make_ddim(vec_out_dims));
} }
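When the axes arrive as AxesTensorList or AxesTensor rather than a constant attribute, their values are unknown at compile time, so InferShape can only pin down the output rank and fills every dimension with the unknown marker -1, to be resolved at run time. The fallback in isolation:

// Only the rank (input rank + number of inserted axes) is known here; each
// dimension stays -1 until the axes values become available at run time.
std::vector<int64_t> vec_out_dims(output_size, -1);
ctx->SetOutputDim("Out", framework::make_ddim(vec_out_dims));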
...@@ -80,13 +91,19 @@ class UnsqueezeOp : public framework::OperatorWithKernel { ...@@ -80,13 +91,19 @@ class UnsqueezeOp : public framework::OperatorWithKernel {
// Validity Check: rank range. // Validity Check: rank range.
PADDLE_ENFORCE_LE(output_size, 6, PADDLE_ENFORCE_LE(output_size, 6,
"The output tensor's rank should be less than 6."); platform::errors::InvalidArgument(
"The output tensor's rank should be less than 6."));
for (int axis : unsqz_dims) { for (int axis : unsqz_dims) {
int cur = axis < 0 ? axis + cur_output_size + 1 : axis; int cur = axis < 0 ? axis + cur_output_size + 1 : axis;
// Validity Check: the axis bound // Validity Check: the axis bound
PADDLE_ENFORCE_GE(cur, 0); PADDLE_ENFORCE_GE(cur, 0, platform::errors::InvalidArgument(
PADDLE_ENFORCE_LE(cur, cur_output_size); "The insert dimension value should "
"not be less than 0."));
PADDLE_ENFORCE_LE(cur, cur_output_size,
platform::errors::InvalidArgument(
"The insert dimension value should not be larger "
"than the dimension size of the input tensor."));
// Move old axis, and insert new axis // Move old axis, and insert new axis
for (int i = cur_output_size; i >= cur; --i) { for (int i = cur_output_size; i >= cur; --i) {
if (output_shape[i] == 1) { if (output_shape[i] == 1) {
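A worked pass through the insertion loop above, with assumed values: for input shape [3, 4] and unsqz_dims = {-1}, cur_output_size starts at 2, so cur = -1 + 2 + 1 = 2, which satisfies both bound checks (0 <= 2 <= 2) and appends the new unit dimension at the end. The same index arithmetic as a standalone sketch (not the operator code):

std::vector<int64_t> shape = {3, 4};
int axis = -1;                                            // assumed attribute value
int cur_output_size = static_cast<int>(shape.size());    // 2
int cur = axis < 0 ? axis + cur_output_size + 1 : axis;  // -1 + 2 + 1 = 2
shape.insert(shape.begin() + cur, 1);                    // shape is now {3, 4, 1}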
...@@ -151,13 +168,17 @@ class UnsqueezeOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -151,13 +168,17 @@ class UnsqueezeOpMaker : public framework::OpProtoAndCheckerMaker {
.AddCustomChecker([](const std::vector<int> &axes) { .AddCustomChecker([](const std::vector<int> &axes) {
// Validity Check: axes dims (<6). // Validity Check: axes dims (<6).
PADDLE_ENFORCE_LT(static_cast<int>(axes.size()), 6, PADDLE_ENFORCE_LT(static_cast<int>(axes.size()), 6,
"Invalid dimensions, dynamic dimensions should be " platform::errors::InvalidArgument(
"within [1, 6] dimensions (Eigen limit)."); "Invalid "
"dimensions, dynamic dimensions should be "
"within [1, 6] dimensions (Eigen limit)."));
// Validity Check: the range of unsqueeze axis. // Validity Check: the range of unsqueeze axis.
for (int axis : axes) { for (int axis : axes) {
PADDLE_ENFORCE_LT(axis, 6, PADDLE_ENFORCE_LT(axis, 6,
"Invalid dimensions, input axis should be" platform::errors::InvalidArgument(
" within [1, 6] dimensions (Eigen limit)."); "Invalid "
"dimensions, input axis should be"
"within [1, 6] dimensions (Eigen limit)."));
} }
}); });
AddComment(R"DOC( AddComment(R"DOC(
...@@ -219,7 +240,8 @@ class Unsqueeze2Op : public UnsqueezeOp { ...@@ -219,7 +240,8 @@ class Unsqueeze2Op : public UnsqueezeOp {
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
ctx->HasOutput("XShape"), true, ctx->HasOutput("XShape"), true,
"Output(XShape) of Unsqueeze operator should not be null."); platform::errors::InvalidArgument("Output(XShape) of Unsqueeze "
"operator should not be null."));
std::vector<int64_t> xshape_dims(x_dims.size() + 1); std::vector<int64_t> xshape_dims(x_dims.size() + 1);
xshape_dims[0] = 0; xshape_dims[0] = 0;
for (int i = 0; i < x_dims.size(); ++i) { for (int i = 0; i < x_dims.size(); ++i) {
...@@ -259,10 +281,12 @@ class Unsqueeze2GradOp : public framework::OperatorWithKernel { ...@@ -259,10 +281,12 @@ class Unsqueeze2GradOp : public framework::OperatorWithKernel {
public: public:
using framework::OperatorWithKernel::OperatorWithKernel; using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *context) const override { void InferShape(framework::InferShapeContext *context) const override {
PADDLE_ENFORCE_EQ(context->HasInput("XShape"), true, PADDLE_ENFORCE_EQ(
"Input(XShape) shouldn't be null."); context->HasInput("XShape"), true,
platform::errors::InvalidArgument("Input(XShape) shouldn't be null."));
PADDLE_ENFORCE_EQ(context->HasInput(framework::GradVarName("Out")), true, PADDLE_ENFORCE_EQ(context->HasInput(framework::GradVarName("Out")), true,
"Input(Out@GRAD) shouldn't be null."); platform::errors::InvalidArgument(
"Input(Out@GRAD) shouldn't be null."));
auto xshape_dims = context->GetInputDim("XShape"); auto xshape_dims = context->GetInputDim("XShape");
auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size()); auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size());
context->SetOutputDim(framework::GradVarName("X"), x_dims); context->SetOutputDim(framework::GradVarName("X"), x_dims);
......
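Unsqueeze2's XShape output exists so the grad op can recover the forward input's shape without keeping X itself alive: the forward pass stores x_dims behind a dummy leading 0, and Unsqueeze2GradOp slices that 0 off again. A sketch of the round trip under assumed dims:

std::vector<int64_t> saved = {0, 3, 4};  // assumed: 0 marker + x_dims {3, 4}
auto xshape_dims = framework::make_ddim(saved);
// The grad op drops the leading marker to get Input(X)'s dims back: [3, 4].
auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size());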
...@@ -66,13 +66,20 @@ class UnsqueezeKernel : public framework::OpKernel<T> { ...@@ -66,13 +66,20 @@ class UnsqueezeKernel : public framework::OpKernel<T> {
// Validity Check: rank range. // Validity Check: rank range.
PADDLE_ENFORCE_LE(output_size, 6, PADDLE_ENFORCE_LE(output_size, 6,
"The output tensor's rank should be less than 6."); platform::errors::InvalidArgument(
"The output "
"tensor's rank should be less than 6."));
for (int axis : unsqz_dims) { for (int axis : unsqz_dims) {
int cur = axis < 0 ? axis + cur_output_size + 1 : axis; int cur = axis < 0 ? axis + cur_output_size + 1 : axis;
// Validity Check: the axis bound // Validity Check: the axis bound
PADDLE_ENFORCE_GE(cur, 0); PADDLE_ENFORCE_GE(cur, 0, platform::errors::InvalidArgument(
PADDLE_ENFORCE_LE(cur, cur_output_size); "The insert dimension value should "
"not be less than 0."));
PADDLE_ENFORCE_LE(cur, cur_output_size,
platform::errors::InvalidArgument(
"The insert dimension value should not be larger "
"than the dimension size of the input tensor."));
// Move old axis, and insert new axis // Move old axis, and insert new axis
for (int i = cur_output_size; i >= cur; --i) { for (int i = cur_output_size; i >= cur; --i) {
if (output_shape[i] == 1) { if (output_shape[i] == 1) {
......
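The kernel mirrors the InferShape fix above: the old PADDLE_ENFORCE_GE(cur, 0) and PADDLE_ENFORCE_LE(cur, cur_output_size) carried no message at all, so a violation surfaced as a bare failed comparison. Before/after in isolation:

PADDLE_ENFORCE_GE(cur, 0);  // before: fails with no context
PADDLE_ENFORCE_GE(cur, 0,   // after: names the violated constraint
                  platform::errors::InvalidArgument(
                      "The insert dimension value should not be less than 0."));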