Unverified · Commit 05c9642d authored by suytingwan, committed by GitHub

Update paddle enforce message (#24498)

* test=develop error message update
Parent 9f83f0fe
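The pattern applied throughout this commit replaces the older two-argument PADDLE_ENFORCE(cond, "msg") form with an explicit comparison macro (PADDLE_ENFORCE_EQ / _GT / _LT / ...) whose message is wrapped in a typed error from platform::errors. A minimal before/after sketch of the change, using the Input(X) check from the first hunk below:

// Before: boolean enforce with a bare message string.
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");

// After: explicit comparison against true, plus a categorized error
// (platform::errors::InvalidArgument) carrying the same message.
PADDLE_ENFORCE_EQ(
    ctx->HasInput("X"), true,
    platform::errors::InvalidArgument("Input(X) should not be null."));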
......@@ -28,39 +28,61 @@ class BilinearTensorProductOp : public framework::OperatorWithKernel {
protected:
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Weight"),
"Input(Weight) should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null.");
PADDLE_ENFORCE_EQ(
ctx->HasInput("X"), true,
platform::errors::InvalidArgument("Input(X) should not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasInput("Y"), true,
platform::errors::InvalidArgument("Input(Y) should not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasInput("Weight"), true,
platform::errors::InvalidArgument("Input(Weight) should not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasOutput("Out"), true,
platform::errors::InvalidArgument("Output(Out) should not be null."));
auto x_dims = ctx->GetInputDim("X");
auto y_dims = ctx->GetInputDim("Y");
auto weight_dims = ctx->GetInputDim("Weight");
PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, "The input(X) must be a 2D Tensor.");
PADDLE_ENFORCE_EQ(y_dims.size(), 2UL, "The input(Y) must be a 2D Tensor.");
PADDLE_ENFORCE_EQ(
x_dims.size(), 2UL,
platform::errors::InvalidArgument("The input(X) must be a 2D Tensor."));
PADDLE_ENFORCE_EQ(
y_dims.size(), 2UL,
platform::errors::InvalidArgument("The input(Y) must be a 2D Tensor."));
PADDLE_ENFORCE_EQ(weight_dims.size(), 3UL,
"The input(Weight) must be a 3D tensor.");
platform::errors::InvalidArgument(
"The input(Weight) must be a 3D tensor."));
if (ctx->IsRuntime() || (x_dims[0] > 0 && y_dims[0] > 0)) {
PADDLE_ENFORCE_EQ(x_dims[0], y_dims[0],
"The first dimension(batch_size) of input(X) must be "
"equal to the first dimension of the input(Y).");
PADDLE_ENFORCE_EQ(
x_dims[0], y_dims[0],
platform::errors::InvalidArgument(
"The first dimension(batch_size) of input(X) must be "
"equal to the first dimension of the input(Y)."));
}
PADDLE_ENFORCE_EQ(x_dims[1], weight_dims[1],
"The second dimension of input(X) must be equal to "
"the second dimension of the input(Weight).");
platform::errors::InvalidArgument(
"The second dimension of input(X) must be equal to "
"the second dimension of the input(Weight)."));
PADDLE_ENFORCE_EQ(y_dims[1], weight_dims[2],
"The second dimension of input(Y) must be equal to "
"the third dimension of the input(Weight).");
platform::errors::InvalidArgument(
"The second dimension of input(Y) must be equal to "
"the third dimension of the input(Weight)."));
if (ctx->HasInput("Bias")) {
auto bias_dims = ctx->GetInputDim("Bias");
PADDLE_ENFORCE(bias_dims.size() == 2UL && bias_dims[0] == 1UL,
"The Input(Bias) must be a 2-D tensor with "
"the 2nd dimension fixed to 1 (a row vector).");
PADDLE_ENFORCE_EQ(bias_dims.size(), 2UL,
platform::errors::InvalidArgument(
"The Input(Bias) must be a 2-D tensor with "
"the 2nd dimension fixed to 1 (a row vector)."));
PADDLE_ENFORCE_EQ(bias_dims[0], 1UL,
platform::errors::InvalidArgument(
"The Input(Bias) must be a 2-D tensor with "
"the 2nd dimension fixed to 1 (a row vector)."));
PADDLE_ENFORCE_EQ(bias_dims[1], weight_dims[0],
"The second dimension of input(Bias) must be equal "
"to the first dimension of the input(Weight).");
platform::errors::InvalidArgument(
"The second dimension of input(Bias) must be equal "
"to the first dimension of the input(Weight)."));
}
ctx->SetOutputDim("Out", {x_dims[0], weight_dims[0]});
......@@ -104,27 +126,36 @@ class BilinearTensorProductOpGrad : public framework::OperatorWithKernel {
protected:
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Weight"),
"Input(Weight) should not be null.");
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
"Input(Out@GRAD) should not be null.");
PADDLE_ENFORCE_EQ(
ctx->HasInput("X"), true,
platform::errors::InvalidArgument("Input(X) should not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasInput("Y"), true,
platform::errors::InvalidArgument("Input(Y) should not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasInput("Weight"), true,
platform::errors::InvalidArgument("Input(Weight) should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
platform::errors::InvalidArgument(
"Input(Out@GRAD) should not be null."));
auto x_dims = ctx->GetInputDim("X");
auto y_dims = ctx->GetInputDim("Y");
auto weight_dims = ctx->GetInputDim("Weight");
auto out_dims = ctx->GetInputDim(framework::GradVarName("Out"));
PADDLE_ENFORCE_EQ(out_dims.size(), 2UL,
"The input(Out@GRAD) must be a 2D Tensor.");
platform::errors::InvalidArgument(
"The input(Out@GRAD) must be a 2D Tensor."));
PADDLE_ENFORCE_EQ(
x_dims[0], out_dims[0],
"The first dimension(batch_size) of input(Out@GRAD) must be "
"equal to the first dimension of the Input(X).");
platform::errors::InvalidArgument(
"The first dimension(batch_size) of input(Out@GRAD) must be "
"equal to the first dimension of the Input(X)."));
PADDLE_ENFORCE_EQ(
weight_dims[0], out_dims[1],
"The second dimension of input(Out@GRAD) must be equal to "
"the third dimension of the Input(Weight).");
platform::errors::InvalidArgument(
"The second dimension of input(Out@GRAD) must be equal to "
"the third dimension of the Input(Weight)."));
auto bias_grad_name = framework::GradVarName("Bias");
if (ctx->HasOutput(bias_grad_name)) {
......
......@@ -22,16 +22,23 @@ class AnchorGeneratorOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("Input"),
"Input(Input) of AnchorGeneratorOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Anchors"),
"Output(Anchors) of AnchorGeneratorOp should not be null.");
PADDLE_ENFORCE(
ctx->HasOutput("Variances"),
"Output(Variances) of AnchorGeneratorOp should not be null.");
PADDLE_ENFORCE_EQ(
ctx->HasInput("Input"), true,
platform::errors::InvalidArgument(
"Input(Input) of AnchorGeneratorOp should not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasOutput("Anchors"), true,
platform::errors::InvalidArgument(
"Output(Anchors) of AnchorGeneratorOp should not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasOutput("Variances"), true,
platform::errors::InvalidArgument(
"Output(Variances) of AnchorGeneratorOp should not be null."));
auto input_dims = ctx->GetInputDim("Input");
PADDLE_ENFORCE(input_dims.size() == 4, "The layout of input is NCHW.");
PADDLE_ENFORCE_EQ(
input_dims.size(), 4,
platform::errors::InvalidArgument("The layout of input is NCHW."));
auto anchor_sizes = ctx->Attrs().Get<std::vector<float>>("anchor_sizes");
auto aspect_ratios = ctx->Attrs().Get<std::vector<float>>("aspect_ratios");
......@@ -87,10 +94,12 @@ class AnchorGeneratorOpMaker : public framework::OpProtoAndCheckerMaker {
"equals to 64**2.")
.AddCustomChecker([](const std::vector<float>& anchor_sizes) {
PADDLE_ENFORCE_GT(anchor_sizes.size(), 0UL,
"Size of anchor_sizes must be at least 1.");
platform::errors::InvalidArgument(
"Size of anchor_sizes must be at least 1."));
for (size_t i = 0; i < anchor_sizes.size(); ++i) {
PADDLE_ENFORCE_GT(anchor_sizes[i], 0.0,
"anchor_sizes[%d] must be positive.", i);
platform::errors::InvalidArgument(
"anchor_sizes[%d] must be positive.", i));
}
});
AddAttr<std::vector<float>>(
......@@ -105,10 +114,12 @@ class AnchorGeneratorOpMaker : public framework::OpProtoAndCheckerMaker {
"in box regression deltas")
.AddCustomChecker([](const std::vector<float>& variances) {
PADDLE_ENFORCE_EQ(variances.size(), 4UL,
"Must and only provide 4 variance.");
platform::errors::InvalidArgument(
"Must provide 4 variance only."));
for (size_t i = 0; i < variances.size(); ++i) {
PADDLE_ENFORCE_GT(variances[i], 0.0,
"variance[%d] must be greater than 0.", i);
platform::errors::InvalidArgument(
"variance[%d] must be greater than 0.", i));
}
});
......@@ -119,10 +130,12 @@ class AnchorGeneratorOpMaker : public framework::OpProtoAndCheckerMaker {
.AddCustomChecker([](const std::vector<float>& stride) {
PADDLE_ENFORCE_EQ(
stride.size(), 2UL,
"Must and only provide 2 stride for width and height.");
platform::errors::InvalidArgument(
"Must provide 2 stride for width and height only."));
for (size_t i = 0; i < stride.size(); ++i) {
PADDLE_ENFORCE_GT(stride[i], 0.0,
"stride[%d] should be larger than 0.", i);
platform::errors::InvalidArgument(
"stride[%d] should be larger than 0.", i));
}
});
......
......@@ -26,17 +26,23 @@ class BipartiteMatchOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("DistMat"),
"Input(DistMat) of BipartiteMatch should not be null.");
PADDLE_ENFORCE(
ctx->HasOutput("ColToRowMatchIndices"),
"Output(ColToRowMatchIndices) of BipartiteMatch should not be null.");
PADDLE_ENFORCE(
ctx->HasOutput("ColToRowMatchDist"),
"Output(ColToRowMatchDist) of BipartiteMatch should not be null.");
PADDLE_ENFORCE_EQ(
ctx->HasInput("DistMat"), true,
platform::errors::InvalidArgument(
"Input(DistMat) of BipartiteMatch should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput("ColToRowMatchIndices"), true,
platform::errors::InvalidArgument(
"Output(ColToRowMatchIndices) of BipartiteMatch "
"should not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasOutput("ColToRowMatchDist"), true,
platform::errors::InvalidArgument(
"Output(ColToRowMatchDist) of BipartiteMatch should not be null."));
auto dims = ctx->GetInputDim("DistMat");
PADDLE_ENFORCE_EQ(dims.size(), 2, "The rank of Input(DistMat) must be 2.");
PADDLE_ENFORCE_EQ(dims.size(), 2,
platform::errors::InvalidArgument(
"The rank of Input(DistMat) must be 2."));
ctx->SetOutputDim("ColToRowMatchIndices", dims);
ctx->SetOutputDim("ColToRowMatchDist", dims);
......@@ -64,7 +70,9 @@ class BipartiteMatchKernel : public framework::OpKernel<T> {
// The match_dist must be initialized to 0 at first.
void BipartiteMatch(const Tensor& dist, int* match_indices,
T* match_dist) const {
PADDLE_ENFORCE_EQ(dist.dims().size(), 2, "The rank of dist must be 2.");
PADDLE_ENFORCE_EQ(
dist.dims().size(), 2,
platform::errors::InvalidArgument("The rank of dist must be 2."));
int64_t row = dist.dims()[0];
int64_t col = dist.dims()[1];
auto* dist_data = dist.data<T>();
......@@ -127,7 +135,11 @@ class BipartiteMatchKernel : public framework::OpKernel<T> {
// Cannot find good match.
break;
} else {
PADDLE_ENFORCE_EQ(match_indices[max_idx], -1);
PADDLE_ENFORCE_EQ(
match_indices[max_idx], -1,
platform::errors::InvalidArgument(
"The match_indices must be initialized to -1 at [%d].",
max_idx));
match_indices[max_idx] = max_row_idx;
match_dist[max_idx] = max_dist;
// Erase the row index.
......@@ -163,7 +175,10 @@ class BipartiteMatchKernel : public framework::OpKernel<T> {
}
}
if (max_row_idx != -1) {
PADDLE_ENFORCE_EQ(match_indices[j], -1);
PADDLE_ENFORCE_EQ(
match_indices[j], -1,
platform::errors::InvalidArgument(
"The match_indices must be initialized to -1 at [%d].", j));
match_indices[j] = max_row_idx;
match_dist[j] = max_dist;
}
......@@ -183,8 +198,9 @@ class BipartiteMatchKernel : public framework::OpKernel<T> {
? 1
: static_cast<int64_t>(dist_mat->lod().back().size() - 1);
if (dist_mat->lod().size()) {
PADDLE_ENFORCE_EQ(dist_mat->lod().size(), 1UL,
"Only support 1 level of LoD.");
PADDLE_ENFORCE_EQ(
dist_mat->lod().size(), 1UL,
platform::errors::InvalidArgument("Only support 1 level of LoD."));
}
match_indices->mutable_data<int>({n, col}, context.GetPlace());
match_dist->mutable_data<T>({n, col}, context.GetPlace());
......
......@@ -40,35 +40,49 @@ class GenerateMaskLabelsOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("ImInfo"), "Input(ImInfo) shouldn't be null.");
PADDLE_ENFORCE(ctx->HasInput("GtClasses"),
"Input(GtClasses) shouldn't be null.");
PADDLE_ENFORCE(ctx->HasInput("IsCrowd"),
"Input(IsCrowd) shouldn't be null.");
PADDLE_ENFORCE(ctx->HasInput("GtSegms"),
"Input(GtSegms) shouldn't be null.");
PADDLE_ENFORCE(ctx->HasInput("Rois"), "Input(Rois) shouldn't be null.");
PADDLE_ENFORCE(ctx->HasInput("LabelsInt32"),
"Input(LabelsInt32) shouldn't be null.");
PADDLE_ENFORCE(
ctx->HasOutput("MaskRois"),
"Output(MaskRois) of GenerateMaskLabelsOp should not be null");
PADDLE_ENFORCE(
ctx->HasOutput("RoiHasMaskInt32"),
"Output(RoiHasMaskInt32) of GenerateMaskLabelsOp should not be null");
PADDLE_ENFORCE(
ctx->HasOutput("MaskInt32"),
"Output(MaskInt32) of GenerateMaskLabelsOp should not be null");
PADDLE_ENFORCE_EQ(
ctx->HasInput("ImInfo"), true,
platform::errors::InvalidArgument("Input(ImInfo) shouldn't be null."));
PADDLE_ENFORCE_EQ(ctx->HasInput("GtClasses"), true,
platform::errors::InvalidArgument(
"Input(GtClasses) shouldn't be null."));
PADDLE_ENFORCE_EQ(
ctx->HasInput("IsCrowd"), true,
platform::errors::InvalidArgument("Input(IsCrowd) shouldn't be null."));
PADDLE_ENFORCE_EQ(
ctx->HasInput("GtSegms"), true,
platform::errors::InvalidArgument("Input(GtSegms) shouldn't be null."));
PADDLE_ENFORCE_EQ(
ctx->HasInput("Rois"), true,
platform::errors::InvalidArgument("Input(Rois) shouldn't be null."));
PADDLE_ENFORCE_EQ(ctx->HasInput("LabelsInt32"), true,
platform::errors::InvalidArgument(
"Input(LabelsInt32) shouldn't be null."));
PADDLE_ENFORCE_EQ(
ctx->HasOutput("MaskRois"), true,
platform::errors::InvalidArgument(
"Output(MaskRois) of GenerateMaskLabelsOp should not be null"));
PADDLE_ENFORCE_EQ(ctx->HasOutput("RoiHasMaskInt32"), true,
platform::errors::InvalidArgument(
"Output(RoiHasMaskInt32) of GenerateMaskLabelsOp "
"should not be null"));
PADDLE_ENFORCE_EQ(
ctx->HasOutput("MaskInt32"), true,
platform::errors::InvalidArgument(
"Output(MaskInt32) of GenerateMaskLabelsOp should not be null"));
auto im_info_dims = ctx->GetInputDim("ImInfo");
auto gt_segms_dims = ctx->GetInputDim("GtSegms");
PADDLE_ENFORCE_EQ(im_info_dims.size(), 2,
"The rank of Input(ImInfo) must be 2.");
platform::errors::InvalidArgument(
"The rank of Input(ImInfo) must be 2."));
PADDLE_ENFORCE_EQ(gt_segms_dims.size(), 2,
"The rank of Input(GtSegms) must be 2.");
platform::errors::InvalidArgument(
"The rank of Input(GtSegms) must be 2."));
PADDLE_ENFORCE_EQ(gt_segms_dims[1], 2,
"The second dim of Input(GtSegms) must be 2.");
platform::errors::InvalidArgument(
"The second dim of Input(GtSegms) must be 2."));
int num_classes = ctx->Attrs().Get<int>("num_classes");
int resolution = ctx->Attrs().Get<int>("resolution");
......@@ -134,7 +148,11 @@ std::vector<Tensor> SampleMaskForOneImage(
const int* gt_classes_data = gt_classes.data<int>();
const int* is_crowd_data = is_crowd.data<int>();
const int* label_int32_data = label_int32.data<int>();
PADDLE_ENFORCE_EQ(roi_size, label_int32.dims()[0]);
PADDLE_ENFORCE_EQ(roi_size, label_int32.dims()[0],
platform::errors::InvalidArgument(
"The first dim of label [%d] is the different from "
"roi_size [%d], they should be same.",
label_int32.dims()[0], roi_size));
std::vector<int> mask_gt_inds, fg_inds;
std::vector<std::vector<std::vector<T>>> gt_polys;
......@@ -155,7 +173,12 @@ std::vector<Tensor> SampleMaskForOneImage(
for (int j = 0; j < poly_num; ++j) {
int s = lod2[s_idx + j];
int e = lod2[s_idx + j + 1];
PADDLE_ENFORCE_NE(s, e);
PADDLE_ENFORCE_NE(s, e,
platform::errors::InvalidArgument(
"The start point and the end point in the poly "
"segment [%d] should not be same, but received "
"the start point [%d] and the end point [%d].",
i, s, e));
std::vector<T> plts(polys_data + s * 2, polys_data + e * 2);
polys.push_back(plts);
}
......@@ -295,19 +318,34 @@ class GenerateMaskLabelsKernel : public framework::OpKernel<T> {
int num_classes = ctx.Attr<int>("num_classes");
int resolution = ctx.Attr<int>("resolution");
PADDLE_ENFORCE_EQ(gt_classes->lod().size(), 1UL,
"GenerateMaskLabelsOp gt_classes needs 1 level of LoD");
PADDLE_ENFORCE_EQ(is_crowd->lod().size(), 1UL,
"GenerateMaskLabelsOp is_crowd needs 1 level of LoD");
PADDLE_ENFORCE_EQ(
gt_classes->lod().size(), 1UL,
platform::errors::InvalidArgument(
"GenerateMaskLabelsOp gt_classes needs 1 level of LoD"));
PADDLE_ENFORCE_EQ(
is_crowd->lod().size(), 1UL,
platform::errors::InvalidArgument(
"GenerateMaskLabelsOp is_crowd needs 1 level of LoD"));
PADDLE_ENFORCE_EQ(rois->lod().size(), 1UL,
"GenerateMaskLabelsOp rois needs 1 level of LoD");
PADDLE_ENFORCE_EQ(label_int32->lod().size(), 1UL,
"GenerateMaskLabelsOp label_int32 needs 1 level of LoD");
PADDLE_ENFORCE_EQ(gt_segms->lod().size(), 3UL);
platform::errors::InvalidArgument(
"GenerateMaskLabelsOp rois needs 1 level of LoD"));
PADDLE_ENFORCE_EQ(
label_int32->lod().size(), 1UL,
platform::errors::InvalidArgument(
"GenerateMaskLabelsOp label_int32 needs 1 level of LoD"));
PADDLE_ENFORCE_EQ(
gt_segms->lod().size(), 3UL,
platform::errors::InvalidArgument(
"GenerateMaskLabelsOp gt_segms needs 3 level of LoD"));
int64_t n = static_cast<int64_t>(gt_classes->lod().back().size() - 1);
PADDLE_ENFORCE_EQ(gt_segms->lod()[0].size() - 1, n);
PADDLE_ENFORCE_EQ(
gt_segms->lod()[0].size() - 1, n,
platform::errors::InvalidArgument(
"Batchsize of Input(gt_segms) and Input(gt_classes) should be "
"same, but received gt_segms[%d], gt_classes[%d].",
gt_segms->lod()[0].size() - 1, n));
int mask_dim = num_classes * resolution * resolution;
int roi_num = rois->lod().back()[n];
......
......@@ -22,29 +22,41 @@ class TargetAssignOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of TargetAssignOp should not be null");
PADDLE_ENFORCE(ctx->HasInput("MatchIndices"),
"Input(MatchIndices) of TargetAssignOp should not be null");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of TargetAssignOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("OutWeight"),
"Output(OutWeight) of TargetAssignOp should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
platform::errors::InvalidArgument(
"Input(X) of TargetAssignOp should not be null"));
PADDLE_ENFORCE_EQ(
ctx->HasInput("MatchIndices"), true,
platform::errors::InvalidArgument(
"Input(MatchIndices) of TargetAssignOp should not be null"));
PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
platform::errors::InvalidArgument(
"Output(Out) of TargetAssignOp should not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasOutput("OutWeight"), true,
platform::errors::InvalidArgument(
"Output(OutWeight) of TargetAssignOp should not be null."));
auto in_dims = ctx->GetInputDim("X");
auto mi_dims = ctx->GetInputDim("MatchIndices");
PADDLE_ENFORCE_EQ(in_dims.size(), 3, "The rank of Input(X) must be 3.");
PADDLE_ENFORCE_EQ(
in_dims.size(), 3,
platform::errors::InvalidArgument("The rank of Input(X) must be 3."));
PADDLE_ENFORCE_EQ(mi_dims.size(), 2,
"The rank of Input(MatchIndices) must be 2.");
platform::errors::InvalidArgument(
"The rank of Input(MatchIndices) must be 2."));
if (ctx->HasInput("NegIndices")) {
auto neg_dims = ctx->GetInputDim("NegIndices");
PADDLE_ENFORCE_EQ(neg_dims.size(), 2,
"The rank of Input(NegIndices) must be 2.");
PADDLE_ENFORCE_EQ(neg_dims[1], 1,
"The last dimension of Out(NegIndices) must be 1.");
platform::errors::InvalidArgument(
"The rank of Input(NegIndices) must be 2."));
PADDLE_ENFORCE_EQ(
neg_dims[1], 1,
platform::errors::InvalidArgument(
"The last dimension of Out(NegIndices) must be 1."));
}
auto n = mi_dims[0];
......
......@@ -90,7 +90,9 @@ class TargetAssignKernel : public framework::OpKernel<T> {
auto* out = ctx.Output<framework::Tensor>("Out");
auto* out_wt = ctx.Output<framework::Tensor>("OutWeight");
PADDLE_ENFORCE_EQ(x->lod().size(), 1UL);
PADDLE_ENFORCE_EQ(x->lod().size(), 1UL,
platform::errors::InvalidArgument(
"TargetAssignOp input(X) needs 1 level of LoD"));
int mismatch_value = ctx.Attr<int>("mismatch_value");
const T* x_data = x->data<T>();
......@@ -121,7 +123,10 @@ class TargetAssignKernel : public framework::OpKernel<T> {
auto* neg_indices = ctx.Input<framework::LoDTensor>("NegIndices");
if (neg_indices) {
PADDLE_ENFORCE_EQ(neg_indices->lod().size(), 1UL);
PADDLE_ENFORCE_EQ(
neg_indices->lod().size(), 1UL,
platform::errors::InvalidArgument(
"TargetAssignOp input(NegIndices) needs 1 level of LoD"));
const int* neg_idx_data = neg_indices->data<int>();
auto neg_lod = neg_indices->lod().back();
#if defined(PADDLE_WITH_CUDA)
......
......@@ -24,19 +24,25 @@ class FilterByInstagOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE_EQ(ctx->HasInput("Ins"), true,
"Input(Ins) should be not null.");
PADDLE_ENFORCE_EQ(
ctx->HasInput("Ins"), true,
platform::errors::InvalidArgument("Input(Ins) should be not null."));
PADDLE_ENFORCE_EQ(ctx->HasInput("Ins_tag"), true,
"Input(Ins_tag) should be not null.");
platform::errors::InvalidArgument(
"Input(Ins_tag) should be not null."));
PADDLE_ENFORCE_EQ(ctx->HasInput("Filter_tag"), true,
"Input(Filter_tag) should be not null.");
platform::errors::InvalidArgument(
"Input(Filter_tag) should be not null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
"Output(Out) should be not null.");
PADDLE_ENFORCE_EQ(
ctx->HasOutput("Out"), true,
platform::errors::InvalidArgument("Output(Out) should be not null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput("LossWeight"), true,
"Output(LossWeight) shoudl not be null.");
platform::errors::InvalidArgument(
"Output(LossWeight) shoudl not be null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput("IndexMap"), true,
"Output(IndexMap) should be not null.");
platform::errors::InvalidArgument(
"Output(IndexMap) should be not null."));
auto x1_dims = ctx->GetInputDim("Ins"); // batch_size * vec
......@@ -85,15 +91,20 @@ class FilterByInstagOpGrad : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE_EQ(ctx->HasInput("IndexMap"), true,
"Input(IndexMap) should be not null");
platform::errors::InvalidArgument(
"Input(IndexMap) should be not null"));
PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
"Grad Input(Out) should be not null");
PADDLE_ENFORCE_EQ(ctx->HasInput("Ins"), true,
"Input(Ins) should be not null");
platform::errors::InvalidArgument(
"Grad Input(Out) should be not null"));
PADDLE_ENFORCE_EQ(
ctx->HasInput("Ins"), true,
platform::errors::InvalidArgument("Input(Ins) should be not null"));
PADDLE_ENFORCE_EQ(ctx->HasInput("LossWeight"), true,
"Input(LossWeight) should be not null");
platform::errors::InvalidArgument(
"Input(LossWeight) should be not null"));
PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("Ins")), true,
"Grad Output(Ins) should be not null");
platform::errors::InvalidArgument(
"Grad Output(Ins) should be not null"));
auto grad_out_dims = ctx->GetInputDim(framework::GradVarName("Out"));
auto x1_dims = ctx->GetInputDim("Ins");
......
......@@ -51,11 +51,19 @@ struct OneHotV2OpFunctor {
}
} else {
for (int i = 0; i < numel; ++i) {
PADDLE_ENFORCE_GE(p_in_data[i], 0,
"Illegal index value, should be at least 0.");
PADDLE_ENFORCE_GE(
p_in_data[i], 0,
platform::errors::InvalidArgument(
"Illegal index value, Input(input) value should be at least 0, "
"but received input (%d) less than 0",
p_in_data[i]));
PADDLE_ENFORCE_LT(
p_in_data[i], depth_,
"Illegal index value, should be less than depth (%d).", depth_);
platform::errors::InvalidArgument(
"Illegal index value, Input(input) value should be less than "
"Input(depth), "
"but received input (%d) not less than depth (%d)",
p_in_data[i], depth_));
*(p_out_data + i * depth_ + p_in_data[i]) = 1.0;
}
}
......
......@@ -29,12 +29,15 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of Pooling should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of Pooling should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Mask"),
"Output(Mask) of Pooling should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
platform::errors::InvalidArgument(
"Input(X) of Pooling should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
platform::errors::InvalidArgument(
"Output(Out) of Pooling should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput("Mask"), true,
platform::errors::InvalidArgument(
"Output(Mask) of Pooling should not be null."));
auto in_x_dims = ctx->GetInputDim("X");
......@@ -54,12 +57,16 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
}
}
PADDLE_ENFORCE(in_x_dims.size() - ksize.size() == 2U,
"Input size and pooling size should be consistent.");
PADDLE_ENFORCE_EQ(in_x_dims.size() - ksize.size(), 2U,
platform::errors::InvalidArgument(
"Input size and pooling size should be consistent."));
PADDLE_ENFORCE_EQ(ksize.size(), strides.size(),
"Strides size and pooling size should be the same.");
PADDLE_ENFORCE_EQ(ksize.size(), paddings.size(),
"Paddings size and pooling size should be the same.");
platform::errors::InvalidArgument(
"Strides size and pooling size should be the same."));
PADDLE_ENFORCE_EQ(
ksize.size(), paddings.size(),
platform::errors::InvalidArgument(
"Paddings size and pooling size should be the same."));
std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
if (adaptive) {
......@@ -90,15 +97,16 @@ class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel {
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE_EQ(
ctx->HasInput("Mask"), true,
platform::errors::NotFound("Input(Mask) must not be null."));
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
platform::errors::NotFound("Input(X) must not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasInput(framework::GradVarName("Out")), true,
platform::errors::NotFound("Input(Out@GRAD) should not be null."));
platform::errors::InvalidArgument("Input(Mask) must not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasOutput(framework::GradVarName("X")), true,
platform::errors::NotFound("Output(X@GRAD) should not be null."));
ctx->HasInput("X"), true,
platform::errors::InvalidArgument("Input(X) must not be null."));
PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
platform::errors::InvalidArgument(
"Input(Out@GRAD) should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")), true,
platform::errors::InvalidArgument(
"Output(X@GRAD) should not be null."));
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
}
......
......@@ -81,43 +81,57 @@ class PSROIPoolOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of PSROIPoolOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput("ROIs"),
"Input(ROIs) of PSROIPoolOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of PSROIPoolOp should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
platform::errors::InvalidArgument(
"Input(X) of PSROIPoolOp should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasInput("ROIs"), true,
platform::errors::InvalidArgument(
"Input(ROIs) of PSROIPoolOp should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
platform::errors::InvalidArgument(
"Output(Out) of PSROIPoolOp should not be null."));
auto input_dims = ctx->GetInputDim("X");
auto rois_dims = ctx->GetInputDim("ROIs");
PADDLE_ENFORCE(input_dims.size() == 4,
"The format of input tensor is NCHW");
PADDLE_ENFORCE(rois_dims.size() == 2,
"ROIs should be a 2-D LoDTensor of shape (num_rois, 4) "
"given as [(x1, y1, x2, y2), ...]");
PADDLE_ENFORCE(rois_dims[1] == 4,
"ROIs should be a 2-D LoDTensor of shape (num_rois, 4) "
"given as [(x1, y1, x2, y2), ...]");
PADDLE_ENFORCE_EQ(input_dims.size(), 4,
platform::errors::InvalidArgument(
"The format of input tensor is NCHW"));
PADDLE_ENFORCE_EQ(
rois_dims.size(), 2,
platform::errors::InvalidArgument(
"ROIs should be a 2-D LoDTensor of shape (num_rois, 4) "
"given as [(x1, y1, x2, y2), ...]"));
PADDLE_ENFORCE_EQ(
rois_dims[1], 4,
platform::errors::InvalidArgument(
"ROIs should be a 2-D LoDTensor of shape (num_rois, 4) "
"given as [(x1, y1, x2, y2), ...]"));
int pooled_height = ctx->Attrs().Get<int>("pooled_height");
int pooled_width = ctx->Attrs().Get<int>("pooled_width");
int output_channels = ctx->Attrs().Get<int>("output_channels");
float spatial_scale = ctx->Attrs().Get<float>("spatial_scale");
PADDLE_ENFORCE(
input_dims[1] == output_channels * pooled_height * pooled_width,
"the channel of X(%d) should be equal to the product of "
"output_channels(%d), pooled_height(%d) and pooled_width(%d)",
input_dims[1], output_channels, pooled_height, pooled_width);
PADDLE_ENFORCE_EQ(
input_dims[1], output_channels * pooled_height * pooled_width,
platform::errors::InvalidArgument(
"the channel of X(%d) "
"should be equal to the product of "
"output_channels(%d), pooled_height(%d) and pooled_width(%d)",
input_dims[1], output_channels, pooled_height, pooled_width));
PADDLE_ENFORCE_GT(pooled_height, 0,
"The pooled output height must be greater than 0");
platform::errors::InvalidArgument(
"The pooled output height must be greater than 0"));
PADDLE_ENFORCE_GT(pooled_width, 0,
"The pooled output width must be greater than 0");
platform::errors::InvalidArgument(
"The pooled output width must be greater than 0"));
PADDLE_ENFORCE_GT(output_channels, 1,
"The pooled output channels must greater than 1");
platform::errors::InvalidArgument(
"The pooled output channels must greater than 1"));
PADDLE_ENFORCE_GT(spatial_scale, 0.0f,
"The spatial scale must greater than 0.");
platform::errors::InvalidArgument(
"The spatial scale must greater than 0."));
auto out_dims = input_dims;
out_dims[0] = rois_dims[0];
......@@ -142,10 +156,12 @@ class PSROIPoolGradOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
"The gradient of Out should not be null.");
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
"The gradient of X should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
platform::errors::InvalidArgument(
"The gradient of Out should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")), true,
platform::errors::InvalidArgument(
"The gradient of X should not be null."));
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
}
......
......@@ -54,15 +54,19 @@ class CPUPSROIPoolOpKernel : public framework::OpKernel<T> {
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
"the rois_batch_size and input(X) batch_size should be the same.");
platform::errors::InvalidArgument("the rois_batch_size and input(X) "
"batch_size should be the same."));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(rois_num_with_lod, rois_num,
"the rois_num from input and lod must be the same");
platform::errors::InvalidArgument(
"the rois_num from input and lod must be the same"));
PADDLE_ENFORCE_EQ(input_channels,
output_channels * pooled_height * pooled_width,
"the channels of input X should equal the product of "
"output_channels x pooled_height x pooled_width");
platform::errors::InvalidArgument(
"the channels of input "
"X should equal the product of "
"output_channels x pooled_height x pooled_width"));
// calculate batch id index for each roi according to LoD
for (int n = 0; n < rois_batch_size; ++n) {
......
......@@ -36,7 +36,10 @@ class ROIPoolOp : public framework::OperatorWithKernel {
if (ctx->HasInput("RoisLod")) {
auto rois_lod_dims = ctx->GetInputDim("RoisLod");
PADDLE_ENFORCE(rois_lod_dims.size() == 1, "");
PADDLE_ENFORCE_EQ(rois_lod_dims.size(), 1,
platform::errors::InvalidArgument(
"The lod information tensor of ROIs should "
"be one-dimensional"));
}
PADDLE_ENFORCE_EQ(input_dims.size(), 4,
platform::errors::InvalidArgument(
......
......@@ -63,7 +63,8 @@ class CPUROIPoolOpKernel : public framework::OpKernel<T> {
rois_batch_size = rois_lod_t->numel();
PADDLE_ENFORCE_EQ(
rois_batch_size - 1, batch_size,
"The rois_batch_size and imgs batch_size must be the same.");
platform::errors::InvalidArgument("The rois_batch_size and imgs "
"batch_size must be the same."));
auto* rois_lod = rois_lod_t->data<int64_t>();
for (int n = 0; n < rois_batch_size - 1; ++n) {
for (int i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
......@@ -75,10 +76,13 @@ class CPUROIPoolOpKernel : public framework::OpKernel<T> {
rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
"The rois_batch_size and imgs batch_size must be the same.");
platform::errors::InvalidArgument("The rois_batch_size and imgs "
"batch_size must be the same."));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod,
"The rois_num from input and lod must be the same.");
PADDLE_ENFORCE_EQ(
rois_num, rois_num_with_lod,
platform::errors::InvalidArgument("The rois_num from input "
"and lod must be the same."));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
......
......@@ -34,21 +34,30 @@ class SoftmaxOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of SoftmaxOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of SoftmaxOp should not be null.");
PADDLE_ENFORCE_EQ(
ctx->HasInput("X"), true,
platform::errors::NotFound("Input(X) of SoftmaxOp is not found."));
PADDLE_ENFORCE_EQ(
ctx->HasOutput("Out"), true,
platform::errors::NotFound("Output(Out) of SoftmaxOp is not found."));
auto dim_x = ctx->GetInputDim("X");
auto rank_x = dim_x.size();
auto axis = ctx->Attrs().Get<int>("axis");
PADDLE_ENFORCE(axis >= -rank_x && axis < rank_x,
"Attr(axis) value should be in range [-R, R-1], "
"R is the rank of Input(X).");
PADDLE_ENFORCE_GE(axis, -rank_x,
platform::errors::InvalidArgument(
"Attr(axis) value should be in range [-R, R-1], "
"R is the rank of Input(X)."));
PADDLE_ENFORCE_LT(axis, rank_x,
platform::errors::InvalidArgument(
"Attr(axis) value should be in range [-R, R-1], "
"R is the rank of Input(X)."));
auto use_cudnn = ctx->Attrs().Get<bool>("use_cudnn");
if (axis != rank_x - 1 && axis != -1) {
PADDLE_ENFORCE(!use_cudnn, "CUDNN kernel only support axis as -1.");
PADDLE_ENFORCE_EQ(use_cudnn, false,
platform::errors::InvalidArgument(
"CUDNN kernel only support axis as -1."));
}
ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
......@@ -78,8 +87,9 @@ class SoftmaxOp : public framework::OperatorWithKernel {
auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
if (input_data_type == framework::proto::VarType::FP16) {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"float16 can only be used on GPU place");
PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
platform::errors::InvalidArgument(
"float16 can only be used on GPU place"));
}
return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_,
......@@ -157,12 +167,17 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("Out"), "Input(Out) should be not null.");
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
"Input(Out@GRAD) should be not null.");
PADDLE_ENFORCE_EQ(ctx->GetInputDim("Out"),
ctx->GetInputDim(framework::GradVarName("Out")),
"Input(Out) and its gradients should have a same shape.");
PADDLE_ENFORCE_EQ(
ctx->HasInput("Out"), true,
platform::errors::InvalidArgument("Input(Out) is not found."));
PADDLE_ENFORCE_EQ(
ctx->HasInput(framework::GradVarName("Out")), true,
platform::errors::InvalidArgument("Input(Out@GRAD) is not found."));
PADDLE_ENFORCE_EQ(
ctx->GetInputDim("Out"),
ctx->GetInputDim(framework::GradVarName("Out")),
platform::errors::InvalidArgument("Input(Out) and its gradients "
"should have a same shape."));
ctx->SetOutputDim(framework::GradVarName("X"),
ctx->GetInputDim(framework::GradVarName("Out")));
......@@ -191,8 +206,9 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
auto input_data_type = OperatorWithKernel::IndicateVarDataType(
ctx, framework::GradVarName("Out"));
if (input_data_type == framework::proto::VarType::FP16) {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"float16 can only be used on GPU place");
PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
platform::errors::InvalidArgument(
"float16 can only be used on GPU place"));
}
return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_,
......
......@@ -108,39 +108,51 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("Logits"),
"Input(Logits) should be not null.");
PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
PADDLE_ENFORCE(ctx->HasOutput("Softmax"),
"Output(Softmax) should be not null.");
PADDLE_ENFORCE(ctx->HasOutput("Loss"), "Output(Loss) should be not null.");
PADDLE_ENFORCE_EQ(
ctx->HasInput("Logits"), true,
platform::errors::InvalidArgument("Input(Logits) should be not null."));
PADDLE_ENFORCE_EQ(
ctx->HasInput("Label"), true,
platform::errors::InvalidArgument("Input(Label) should be not null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput("Softmax"), true,
platform::errors::InvalidArgument(
"Output(Softmax) should be not null."));
PADDLE_ENFORCE_EQ(
ctx->HasOutput("Loss"), true,
platform::errors::InvalidArgument("Output(Loss) should be not null."));
auto axis = ctx->Attrs().Get<int>("axis");
auto logits_dims = ctx->GetInputDim("Logits");
auto labels_dims = ctx->GetInputDim("Label");
auto logits_rank = logits_dims.size();
PADDLE_ENFORCE(axis >= -logits_rank && axis < logits_rank,
"Attr(axis) value should be in range [-R, R-1], "
"R is the rank of Input(Logits).");
PADDLE_ENFORCE_GE(axis, -logits_rank,
platform::errors::InvalidArgument(
"Attr(axis) value should be in range [-R, R-1], "
"R is the rank of Input(Logits)."));
PADDLE_ENFORCE_LT(axis, logits_rank,
platform::errors::InvalidArgument(
"Attr(axis) value should be in range [-R, R-1], "
"R is the rank of Input(Logits)."));
axis = CanonicalAxis(axis, logits_rank);
for (int i = 0; i < logits_rank; i++) {
if (i != axis) {
if (ctx->IsRuntime() || (logits_dims[i] > 0 && labels_dims[i] > 0)) {
PADDLE_ENFORCE_EQ(
logits_dims[i], labels_dims[i],
"Input(Logits) and Input(Label) should in same shape in "
"dimensions except axis.");
PADDLE_ENFORCE_EQ(logits_dims[i], labels_dims[i],
platform::errors::InvalidArgument(
"Input(Logits) and Input(Label) should in "
"same shape in dimensions except axis."));
}
}
}
auto numeric_stable_mode = ctx->Attrs().Get<bool>("numeric_stable_mode");
if (axis != logits_rank - 1) {
PADDLE_ENFORCE(
numeric_stable_mode,
"Attr(axis) can only be -1 when not in numeric_stable_mode.");
PADDLE_ENFORCE_EQ(numeric_stable_mode, true,
platform::errors::InvalidArgument(
"Attr(axis) can only be -1 "
"when not in numeric_stable_mode."));
}
bool soft_label = ctx->Attrs().Get<bool>("soft_label");
......@@ -148,14 +160,18 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel {
if (ctx->IsRuntime() ||
(logits_dims[axis] > 0 && labels_dims[axis] > 0)) {
PADDLE_ENFORCE_EQ(logits_dims[axis], labels_dims[axis],
"If Attr(soft_label) == true, the axis dimension of "
"Input(X) and Input(Label) should be equal.");
platform::errors::InvalidArgument(
"If Attr(soft_label) == true, "
"the axis dimension of "
"Input(X) and Input(Label) should be equal."));
}
} else {
if (ctx->IsRuntime() || labels_dims[axis] > 0) {
PADDLE_ENFORCE_EQ(labels_dims[axis], 1UL,
"If Attr(soft_label) == false, the axis dimension of "
"Input(Label) should be 1.");
PADDLE_ENFORCE_EQ(
labels_dims[axis], 1UL,
platform::errors::InvalidArgument("If Attr(soft_label) == false, "
"the axis dimension of "
"Input(Label) should be 1."));
}
}
......@@ -182,21 +198,31 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Loss")),
"Input(Loss@Grad) should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Softmax"),
"Input(Softmax) should be not null.");
PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("Logits")),
"Output(Logits@Grad) should be not null.");
PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Loss")), true,
platform::errors::InvalidArgument(
"Input(Loss@Grad) should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasInput("Softmax"), true,
platform::errors::InvalidArgument(
"Input(Softmax) should be not null."));
PADDLE_ENFORCE_EQ(
ctx->HasInput("Label"), true,
platform::errors::InvalidArgument("Input(Label) should be not null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("Logits")), true,
platform::errors::InvalidArgument(
"Output(Logits@Grad) should be not null."));
auto axis = ctx->Attrs().Get<int>("axis");
auto softmax_dims = ctx->GetInputDim("Softmax");
auto labels_dims = ctx->GetInputDim("Label");
auto softmax_rank = softmax_dims.size();
PADDLE_ENFORCE(axis >= -softmax_rank && axis < softmax_rank,
"Attr(axis) value should be in range [-R, R-1], "
"R is the rank of Input(Logits).");
PADDLE_ENFORCE_GE(axis, -softmax_rank,
platform::errors::InvalidArgument(
"Attr(axis) value should be in range [-R, R-1], "
"R is the rank of Input(Logits)."));
PADDLE_ENFORCE_LT(axis, softmax_rank,
platform::errors::InvalidArgument(
"Attr(axis) value should be in range [-R, R-1], "
"R is the rank of Input(Logits)."));
axis = CanonicalAxis(axis, softmax_rank);
for (int i = 0; i < softmax_rank; i++) {
......@@ -204,8 +230,9 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel {
if (ctx->IsRuntime() || (softmax_dims[i] > 0 && labels_dims[i] > 0)) {
PADDLE_ENFORCE_EQ(
softmax_dims[i], labels_dims[i],
"Input(Logits) and Input(Label) should in same shape in "
"dimensions except axis.");
platform::errors::InvalidArgument(
"Input(Logits) and Input(Label) should in same shape in "
"dimensions except axis."));
}
}
}
......@@ -215,14 +242,18 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel {
if (ctx->IsRuntime() ||
(softmax_dims[axis] > 0 && labels_dims[axis] > 0)) {
PADDLE_ENFORCE_EQ(softmax_dims[axis], labels_dims[axis],
"If Attr(soft_label) == true, the axis dimension of "
"Input(X) and Input(Label) should be equal.");
platform::errors::InvalidArgument(
"If Attr(soft_label) == true, "
"the axis dimension of "
"Input(X) and Input(Label) should be equal."));
}
} else {
if (ctx->IsRuntime() || labels_dims[axis] > 0) {
PADDLE_ENFORCE_EQ(labels_dims[axis], 1UL,
"If Attr(soft_label) == false, the axis dimension of "
"Input(Label) should be 1.");
PADDLE_ENFORCE_EQ(
labels_dims[axis], 1UL,
platform::errors::InvalidArgument("If Attr(soft_label) == false, "
"the axis dimension of "
"Input(Label) should be 1."));
}
}
......
......@@ -31,8 +31,9 @@ template <typename T>
class SoftmaxWithCrossEntropyKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
PADDLE_ENFORCE(platform::is_cpu_place(context.GetPlace()),
"This kernel only runs on CPU.");
PADDLE_ENFORCE_EQ(
platform::is_cpu_place(context.GetPlace()), true,
platform::errors::Unimplemented("This kernel only runs on CPU."));
const Tensor* logits = context.Input<Tensor>("Logits");
const Tensor* labels = context.Input<Tensor>("Label");
Tensor* softmax = context.Output<Tensor>("Softmax");
......
......@@ -62,15 +62,17 @@ class SppOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of SppOp"
"should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of SppOp should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
platform::errors::InvalidArgument(
"Input(X) of SppOp should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
platform::errors::InvalidArgument(
"Output(Out) of SppOp should not be null."));
auto in_x_dims = ctx->GetInputDim("X");
int pyramid_height = ctx->Attrs().Get<int>("pyramid_height");
PADDLE_ENFORCE(in_x_dims.size() == 4,
"Spping intput must be of 4-dimensional.");
PADDLE_ENFORCE_EQ(in_x_dims.size(), 4,
platform::errors::InvalidArgument(
"Spping intput must be of 4-dimensional."));
int outlen = ((std::pow(4, pyramid_height) - 1) / (4 - 1)) * in_x_dims[1];
std::vector<int64_t> output_shape({in_x_dims[0], outlen});
ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
......@@ -81,9 +83,12 @@ class SppOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
"Input(X@GRAD) should not be null.");
PADDLE_ENFORCE_EQ(
ctx->HasInput("X"), true,
platform::errors::InvalidArgument("Input(X) must not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasOutput(framework::GradVarName("X")), true,
platform::errors::InvalidArgument("Input(X@GRAD) should not be null."));
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
}
};
......
......@@ -27,16 +27,22 @@ class UnsqueezeOp : public framework::OperatorWithKernel {
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
"Input(X) of Unsqueeze operator should not be null.");
platform::errors::InvalidArgument(
"Input(X) of "
"Unsqueeze operator should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
"Output(Out) of Unsqueeze operator should not be null.");
platform::errors::InvalidArgument(
"Output(Out) of "
"Unsqueeze operator should not be null."));
const auto &axes = ctx->Attrs().Get<std::vector<int>>("axes");
const auto &x_dims = ctx->GetInputDim("X");
// Validity Check: input tensor dims (<6).
PADDLE_ENFORCE_LE(x_dims.size(), 6,
"Invalid dimensions, the rank of Input(X) "
"should be in the range of [1, 6] (Eigen limit)");
platform::errors::InvalidArgument(
"Invalid "
"dimensions, the rank of Input(X) "
"should be in the range of [1, 6] (Eigen limit)"));
if (!axes.empty()) {
auto out_dims = GetOutputShape(axes, x_dims);
ctx->SetOutputDim("Out", out_dims);
......@@ -49,24 +55,29 @@ class UnsqueezeOp : public framework::OperatorWithKernel {
auto AxesTensorList = ctx->Inputs("AxesTensorList");
int output_size = x_dims.size() + static_cast<int>(AxesTensorList.size());
PADDLE_ENFORCE_LE(output_size, 6,
"The output tensor's rank should be less than 6.");
platform::errors::InvalidArgument(
"The output tensor's rank should be less than 6."));
std::vector<int> vec_out_dims(output_size, -1);
ctx->SetOutputDim("Out", framework::make_ddim(vec_out_dims));
} else if (ctx->HasInput("AxesTensor")) {
auto axes_dims = ctx->GetInputDim("AxesTensor");
PADDLE_ENFORCE_EQ(
axes_dims.size(), 1,
"Input(AxesTensor)'s dimension of Op(unsqueeze) must be 1. "
"But received AxesTensor's shape = [%s], "
"AxesTensor's dimension = %d.",
axes_dims, axes_dims.size());
PADDLE_ENFORCE_GE(axes_dims[0], 0,
"Input(AxesTensor)'s shape must be known. But received "
"AxesTensor's shape = [%s]",
axes_dims);
PADDLE_ENFORCE_EQ(axes_dims.size(), 1,
platform::errors::InvalidArgument(
"Input(AxesTensor)'s dimension of "
"Op(unsqueeze) must be 1. "
"But received AxesTensor's shape = [%s], "
"AxesTensor's dimension = %d.",
axes_dims, axes_dims.size()));
PADDLE_ENFORCE_GE(
axes_dims[0], 0,
platform::errors::InvalidArgument(
"Input(AxesTensor)'s shape must be known. But received "
"AxesTensor's shape = [%s]",
axes_dims));
int output_size = x_dims.size() + static_cast<int>(axes_dims[0]);
PADDLE_ENFORCE_LE(output_size, 6,
"The output tensor's rank should be less than 6.");
platform::errors::InvalidArgument(
"The output tensor's rank should be less than 6."));
std::vector<int> vec_out_dims(output_size, -1);
ctx->SetOutputDim("Out", framework::make_ddim(vec_out_dims));
}
......@@ -80,13 +91,19 @@ class UnsqueezeOp : public framework::OperatorWithKernel {
// Validity Check: rank range.
PADDLE_ENFORCE_LE(output_size, 6,
"The output tensor's rank should be less than 6.");
platform::errors::InvalidArgument(
"The output tensor's rank should be less than 6."));
for (int axis : unsqz_dims) {
int cur = axis < 0 ? axis + cur_output_size + 1 : axis;
// Validity Check: the axis bound
PADDLE_ENFORCE_GE(cur, 0);
PADDLE_ENFORCE_LE(cur, cur_output_size);
PADDLE_ENFORCE_GE(cur, 0, platform::errors::InvalidArgument(
"The insert dimension value should "
"not be less than 0"));
PADDLE_ENFORCE_LE(cur, cur_output_size,
platform::errors::InvalidArgument(
"The insert dimension value shoud not be larger "
"than the dimension size of input tensor"));
// Move old axis, and insert new axis
for (int i = cur_output_size; i >= cur; --i) {
if (output_shape[i] == 1) {
......@@ -151,13 +168,17 @@ class UnsqueezeOpMaker : public framework::OpProtoAndCheckerMaker {
.AddCustomChecker([](const std::vector<int> &axes) {
// Validity Check: axes dims (<6).
PADDLE_ENFORCE_LT(static_cast<int>(axes.size()), 6,
"Invalid dimensions, dynamic dimensions should be "
"within [1, 6] dimensions (Eigen limit).");
platform::errors::InvalidArgument(
"Invalid "
"dimensions, dynamic dimensions should be "
"within [1, 6] dimensions (Eigen limit)."));
// Validity Check: the range of unsqueeze axis.
for (int axis : axes) {
PADDLE_ENFORCE_LT(axis, 6,
"Invalid dimensions, input axis should be"
" within [1, 6] dimensions (Eigen limit).");
platform::errors::InvalidArgument(
"Invalid "
"dimensions, input axis should be"
"within [1, 6] dimensions (Eigen limit)."));
}
});
AddComment(R"DOC(
......@@ -219,7 +240,8 @@ class Unsqueeze2Op : public UnsqueezeOp {
PADDLE_ENFORCE_EQ(
ctx->HasOutput("XShape"), true,
"Output(XShape) of Unsqueeze operator should not be null.");
platform::errors::InvalidArgument("Output(XShape) of Unsqueeze "
"operator should not be null."));
std::vector<int64_t> xshape_dims(x_dims.size() + 1);
xshape_dims[0] = 0;
for (int i = 0; i < x_dims.size(); ++i) {
......@@ -259,10 +281,12 @@ class Unsqueeze2GradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *context) const override {
PADDLE_ENFORCE_EQ(context->HasInput("XShape"), true,
"Input(XShape) shouldn't be null.");
PADDLE_ENFORCE_EQ(
context->HasInput("XShape"), true,
platform::errors::InvalidArgument("Input(XShape) shouldn't be null."));
PADDLE_ENFORCE_EQ(context->HasInput(framework::GradVarName("Out")), true,
"Input(Out@GRAD) shouldn't be null.");
platform::errors::InvalidArgument(
"Input(Out@GRAD) shouldn't be null."));
auto xshape_dims = context->GetInputDim("XShape");
auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size());
context->SetOutputDim(framework::GradVarName("X"), x_dims);
......
......@@ -66,13 +66,20 @@ class UnsqueezeKernel : public framework::OpKernel<T> {
// Validity Check: rank range.
PADDLE_ENFORCE_LE(output_size, 6,
"The output tensor's rank should be less than 6.");
platform::errors::InvalidArgument(
"The output "
"tensor's rank should be less than 6."));
for (int axis : unsqz_dims) {
int cur = axis < 0 ? axis + cur_output_size + 1 : axis;
// Validity Check: the axis bound
PADDLE_ENFORCE_GE(cur, 0);
PADDLE_ENFORCE_LE(cur, cur_output_size);
PADDLE_ENFORCE_GE(cur, 0, platform::errors::InvalidArgument(
"The insert dimension value should "
"not be less than 0"));
PADDLE_ENFORCE_LE(cur, cur_output_size,
platform::errors::InvalidArgument(
"The insert dimension value shoule not be larger "
"than the dimension size of input tensor"));
// Move old axis, and insert new axis
for (int i = cur_output_size; i >= cur; --i) {
if (output_shape[i] == 1) {
......