Unverified · Commit 27dee221 · Authored by Yibing Liu · Committed by GitHub

Format error message for ops (#24482) (#24532)

* Format error message for ops, test=release/1.8

* Fix check in sequence_expand, test=release/1.8
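
Every hunk below applies the same two mechanical rewrites, so the convention is worth stating once. A minimal before/after sketch of the pattern (the op name "MyOp" and input "X" are illustrative, not taken from any one hunk):

    // Before: bare assertion with a raw message string and no context
    // about the offending value.
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
    PADDLE_ENFORCE_EQ(x_dims.size(), 2,
                      "The Input(X) should be a 2-D tensor.");

    // After: presence checks go through OP_INOUT_CHECK, which, as used
    // throughout this diff, builds a uniform not-found message from the
    // slot kind, variable name, and op type; value checks wrap the message
    // in platform::errors::InvalidArgument and append what was actually
    // received via printf-style placeholders.
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "MyOp");
    PADDLE_ENFORCE_EQ(x_dims.size(), 2,
                      platform::errors::InvalidArgument(
                          "The Input(X) should be a 2-D tensor. But received: "
                          "input rank %u, input shape [%s].",
                          x_dims.size(), x_dims));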
@@ -89,41 +89,57 @@ class CRFDecodingOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Emission"), true,
-                      "Input(Emission) should be not null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Transition"), true,
-                      "Input(Transition) should be not null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("ViterbiPath"), true,
-                      "Output(ViterbiPath) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("Emission"), "Input", "Emission",
+                   "CRFDecoding");
+    OP_INOUT_CHECK(ctx->HasInput("Transition"), "Input", "Transition",
+                   "CRFDecoding");
+    OP_INOUT_CHECK(ctx->HasOutput("ViterbiPath"), "Output", "ViterbiPath",
+                   "CRFDecoding");
     auto emission_dims = ctx->GetInputDim("Emission");
     bool has_length = ctx->HasInput("Length");
     if (has_length) {
       PADDLE_ENFORCE_EQ(emission_dims.size(), 3,
-                        "The Input(Emission) should be a 3-D tensor.");
+                        platform::errors::InvalidArgument(
+                            "The Input(Emission) should be a 3-D tensor. But "
+                            "received: input rank %u, input shape [%s].",
+                            emission_dims.size(), emission_dims));
     } else {
       PADDLE_ENFORCE_EQ(emission_dims.size(), 2,
-                        "The Input(Emission) should be a 2-D tensor.");
+                        platform::errors::InvalidArgument(
+                            "The Input(Emission) should be a 2-D tensor. But "
+                            "received: input rank %u, input shape [%s].",
+                            emission_dims.size(), emission_dims));
     }
     PADDLE_ENFORCE_NE(emission_dims[0], 0,
                       "An empty mini-batch is not allowed.");
     auto transition_dims = ctx->GetInputDim("Transition");
     PADDLE_ENFORCE_EQ(transition_dims.size(), 2UL,
-                      "The Input(Transition) should be a 2-D tensor.");
+                      platform::errors::InvalidArgument(
+                          "The Input(Transition) should be a 2-D tensor. But "
+                          "received: input rank %u, input shape [%s].",
+                          transition_dims.size(), transition_dims));
     PADDLE_ENFORCE_EQ(
         transition_dims[0] - 2, transition_dims[1],
-        "An invalid dimension for the Input(Transition), which should "
-        "be a 2-D tensor with shape [(D + 2) x D].");
+        platform::errors::InvalidArgument(
+            "An invalid dimension for the Input(Transition), which should "
+            "be a 2-D tensor with shape [(D + 2) x D]. But received: input "
+            "rank %u, input shape [%s].",
+            transition_dims.size(), transition_dims));
     if (ctx->IsRuntime() || (emission_dims[emission_dims.size() - 1] > 0 &&
                              transition_dims[transition_dims.size() - 1] > 0)) {
-      PADDLE_ENFORCE_EQ(
-          emission_dims[emission_dims.size() - 1],
-          transition_dims[transition_dims.size() - 1],
-          "The last dimension of the Input(Emission) and the Input(Transition) "
-          "should be equal to the tag number.");
+      PADDLE_ENFORCE_EQ(emission_dims[emission_dims.size() - 1],
+                        transition_dims[transition_dims.size() - 1],
+                        platform::errors::InvalidArgument(
+                            "The last dimension of the Input(Emission) and "
+                            "the Input(Transition) should be equal to the tag "
+                            "number. But received Input(Emission): rank %u, "
+                            "shape [%s]; received Input(Transition): rank %u, "
+                            "shape [%s].",
+                            emission_dims.size(), emission_dims,
+                            transition_dims.size(), transition_dims));
     }
     if (ctx->HasInput("Label")) {
       auto label_dims = ctx->GetInputDim("Label");
@@ -132,20 +148,31 @@ class CRFDecodingOp : public framework::OperatorWithKernel {
           (label_dims.size() == 3UL && label_dims[2] == 1) ||
               label_dims.size() == 2UL,
           true,
-          "The Input(Label) should be a 3-D tensor with last dimension "
-          "fixed to 1 or a 2-D tensor in padding mode.");
+          platform::errors::InvalidArgument(
+              "The Input(Label) should be a 3-D tensor with last dimension "
+              "fixed to 1 or a 2-D tensor in padding mode. But received: "
+              "input rank %u, input shape [%s].",
+              label_dims.size(), label_dims));
     } else {
-      PADDLE_ENFORCE_EQ((label_dims.size() == 2UL && label_dims[1] == 1) ||
-                            label_dims.size() == 1UL,
-                        true,
-                        "The Input(Label) should be a 2-D tensor with last "
-                        "dimension fixed to 1 or a 1-D tensor.");
+      PADDLE_ENFORCE_EQ(
+          (label_dims.size() == 2UL && label_dims[1] == 1) ||
+              label_dims.size() == 1UL,
+          true, platform::errors::InvalidArgument(
+                    "The Input(Label) should be a 2-D tensor with last "
+                    "dimension fixed to 1 or a 1-D tensor. But received: "
+                    "input rank %u, input shape [%s].",
+                    label_dims.size(), label_dims));
     }
     if (ctx->IsRuntime() || (emission_dims[0] > 0 && label_dims[0] > 0)) {
       PADDLE_ENFORCE_EQ(
           emission_dims[0], label_dims[0],
-          "The first dimension of Input(Emission) and Input(Label) "
-          "should be the same.");
+          platform::errors::InvalidArgument(
+              "The first dimension of Input(Emission) and Input(Label) "
+              "should be the same. But received Input(Emission): rank %u, "
+              "shape [%s]; received Input(Label): rank %u, shape [%s].",
+              emission_dims.size(), emission_dims, label_dims.size(),
+              label_dims));
     }
   }
@@ -76,9 +76,16 @@ class CRFDecodingOpKernel : public framework::OpKernel<T> {
       }
     } else {
       PADDLE_ENFORCE_EQ(emission_weights->NumLevels(), 1UL,
-                        "The Input(Emission) should be a sequence.");
+                        platform::errors::InvalidArgument(
+                            "The Input(Emission) should be a sequence with lod "
+                            "level 1. But received: lod level %u.",
+                            emission_weights->NumLevels()));
       auto lod = emission_weights->lod();
-      PADDLE_ENFORCE_GT(lod.size(), 0, "Input(Emission) must be a sequence.");
+      PADDLE_ENFORCE_GT(
+          lod.size(), 0,
+          platform::errors::InvalidArgument(
+              "Input(Emission) must be a sequence. But received: lod level %u.",
+              lod.size()));
       const size_t level = 0;
       const size_t seq_num = lod[level].size() - 1;
@@ -92,7 +99,10 @@ class CRFDecodingOpKernel : public framework::OpKernel<T> {
       }
       if (label) {
         PADDLE_ENFORCE_EQ(label->NumLevels(), 1UL,
-                          "The Input(Label) should be a sequence.");
+                          platform::errors::InvalidArgument(
+                              "The Input(Label) should be a sequence with lod "
+                              "level 1. But received: lod level %u.",
+                              label->NumLevels()));
         const int64_t* label_value = label->data<int64_t>();
         size_t numel = label->numel();
         for (size_t i = 0; i < numel; ++i) {
@@ -22,11 +22,11 @@ class EditDistanceOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Hyps"), "Input(Hyps) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Refs"), "Input(Refs) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("SequenceNum"),
-                   "Output(SequenceNum) shouldn't be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Hyps"), "Input", "Hyps", "EditDistance");
+    OP_INOUT_CHECK(ctx->HasInput("Refs"), "Input", "Refs", "EditDistance");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "EditDistance");
+    OP_INOUT_CHECK(ctx->HasOutput("SequenceNum"), "Output", "SequenceNum",
+                   "EditDistance");
     auto hyp_dims = ctx->GetInputDim("Hyps");
     auto ref_dims = ctx->GetInputDim("Refs");
@@ -34,23 +34,41 @@ class EditDistanceOp : public framework::OperatorWithKernel {
       auto hyp_length_dims = ctx->GetInputDim("HypsLength");
       auto ref_length_dims = ctx->GetInputDim("RefsLength");
-      PADDLE_ENFORCE(hyp_dims.size() == 2 && ref_dims.size() == 2 &&
-                         hyp_dims[0] == ref_dims[0],
-                     "Input(Hyps) and Input(Refs) must be 2-D Tensors with "
-                     "identical first dimension");
-      PADDLE_ENFORCE(hyp_length_dims[0] == ref_length_dims[0] &&
-                         hyp_length_dims[0] == hyp_dims[0],
-                     "Input(HypsLength), Input(RefsLength) and Input(Hyps) "
-                     "should have identical first dimension");
+      PADDLE_ENFORCE_EQ(
+          hyp_dims.size() == 2 && ref_dims.size() == 2 &&
+              hyp_dims[0] == ref_dims[0],
+          true, platform::errors::InvalidArgument(
+                    "Input(Hyps) and Input(Refs) must be 2-D Tensors with "
+                    "identical first dimension. But received Input(Hyps): "
+                    "input rank %u, input shape [%s]; received Input(Refs): "
+                    "input rank %u, input shape [%s].",
+                    hyp_dims.size(), hyp_dims, ref_dims.size(), ref_dims));
+      PADDLE_ENFORCE_EQ(
+          hyp_length_dims[0] == ref_length_dims[0] &&
+              hyp_length_dims[0] == hyp_dims[0],
+          true,
+          platform::errors::InvalidArgument(
+              "Input(HypsLength), Input(RefsLength) and Input(Hyps) "
+              "should have identical first dimension. But received "
+              "Input(HypsLength): input rank %u, input shape [%s]; "
+              "received Input(RefsLength): input rank %u, input shape "
+              "[%s]; received Input(Hyps): input rank %u, input shape "
+              "[%s].",
+              hyp_length_dims.size(), hyp_length_dims, ref_length_dims.size(),
+              ref_length_dims, hyp_dims.size(), hyp_dims));
     } else {
-      PADDLE_ENFORCE(
-          hyp_dims.size() == 2 && hyp_dims[1] == 1,
-          "Input(Hyps) must be a 2-D LoDTensor with the 2nd dimension "
-          "equal to 1.");
-      PADDLE_ENFORCE(
-          ref_dims.size() == 2 && ref_dims[1] == 1,
-          "Input(Refs) must be a 2-D LoDTensor with the 2nd dimension "
-          "equal to 1.");
+      PADDLE_ENFORCE_EQ(
+          hyp_dims.size() == 2 && hyp_dims[1] == 1, true,
+          platform::errors::InvalidArgument(
+              "Input(Hyps) must be a 2-D LoDTensor with the 2nd dimension "
+              "equal to 1. But received: input rank %u, input shape [%s].",
+              hyp_dims.size(), hyp_dims));
+      PADDLE_ENFORCE_EQ(
+          ref_dims.size() == 2 && ref_dims[1] == 1, true,
+          platform::errors::InvalidArgument(
+              "Input(Refs) must be a 2-D LoDTensor with the 2nd dimension "
+              "equal to 1. But received: input rank %u, input shape [%s].",
+              ref_dims.size(), ref_dims));
     }
     ctx->SetOutputDim("Out", ctx->GetInputDim("Refs"));
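Where the original assertion tested a compound boolean, as in the EditDistance hunk above, it cannot be split into a single EQ/GT/LE comparison; the commit's convention is to compare the whole condition against true while still reporting the received dims. A minimal sketch restating that variant (taken from the pattern above, not new logic):

    // Compound conditions are compared against `true`; the message still
    // carries the rank and shape that were actually received.
    PADDLE_ENFORCE_EQ(
        hyp_dims.size() == 2 && hyp_dims[1] == 1, true,
        platform::errors::InvalidArgument(
            "Input(Hyps) must be a 2-D LoDTensor with the 2nd dimension "
            "equal to 1. But received: input rank %u, input shape [%s].",
            hyp_dims.size(), hyp_dims));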
@@ -24,17 +24,27 @@ class ExpandAsOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true);
-    PADDLE_ENFORCE_EQ(ctx->HasInput("target_tensor"), true);
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true);
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ExpandAs");
+    OP_INOUT_CHECK(ctx->HasInput("target_tensor"), "Input", "target_tensor",
+                   "ExpandAs");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ExpandAs");
     auto x_dims = ctx->GetInputDim("X");
     auto target_tensor_dims = ctx->GetInputDim("target_tensor");
-    PADDLE_ENFORCE_EQ(static_cast<size_t>(x_dims.size()),
-                      target_tensor_dims.size(),
-                      "The rank of input(target_tensor) must be equal "
-                      "to the rank of Input(X).");
-    PADDLE_ENFORCE_LE(x_dims.size(), 6,
-                      "The rank of Input(X) must not be greater than 6.");
+    PADDLE_ENFORCE_EQ(
+        static_cast<size_t>(x_dims.size()), target_tensor_dims.size(),
+        platform::errors::InvalidArgument(
+            "The rank of Input(target_tensor) must be equal "
+            "to the rank of Input(X). But received Input(X): input "
+            "rank %u, input shape [%s]; received Input(target_tensor): "
+            "input rank %u, input shape [%s].",
+            x_dims.size(), x_dims, target_tensor_dims.size(),
+            target_tensor_dims));
+    PADDLE_ENFORCE_LE(
+        x_dims.size(), 6,
+        platform::errors::InvalidArgument(
+            "The rank of Input(X) must not be greater than 6. But "
+            "received: input rank %u, input shape [%s].",
+            x_dims.size(), x_dims));
     std::vector<int64_t> out_shape(x_dims.size());
     ctx->SetOutputDim("Out", framework::make_ddim(out_shape));
   }
@@ -142,24 +142,27 @@ class LinearChainCRFOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Emission"),
-                   "Input(Emission) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("Transition"),
-                   "Input(Transition) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Alpha"),
-                   "Output(Alpha) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput("EmissionExps"),
-                   "Output(EmissionExps) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput("TransitionExps"),
-                   "Output(TransitionExps) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput("LogLikelihood"),
-                   "Output(LogLikelihood) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("Emission"), "Input", "Emission",
+                   "LinearChainCRF");
+    OP_INOUT_CHECK(ctx->HasInput("Transition"), "Input", "Transition",
+                   "LinearChainCRF");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "LinearChainCRF");
+    OP_INOUT_CHECK(ctx->HasOutput("Alpha"), "Output", "Alpha",
+                   "LinearChainCRF");
+    OP_INOUT_CHECK(ctx->HasOutput("EmissionExps"), "Output", "EmissionExps",
+                   "LinearChainCRF");
+    OP_INOUT_CHECK(ctx->HasOutput("TransitionExps"), "Output", "TransitionExps",
+                   "LinearChainCRF");
+    OP_INOUT_CHECK(ctx->HasOutput("LogLikelihood"), "Output", "LogLikelihood",
+                   "LinearChainCRF");
     auto transition_dims = ctx->GetInputDim("Transition");
-    PADDLE_ENFORCE_EQ(transition_dims.size(), 2,
-                      "The Input(Transition) should be a 2-D tensor.");
+    PADDLE_ENFORCE_EQ(transition_dims.size(), 2UL,
+                      platform::errors::InvalidArgument(
+                          "The Input(Transition) should be a 2-D tensor. But "
+                          "received: input rank %u, input shape [%s].",
+                          transition_dims.size(), transition_dims));
     bool check = true;
     if ((!ctx->IsRuntime()) &&
         (transition_dims[0] <= 0 || transition_dims[1] <= 0)) {
@@ -168,49 +171,88 @@ class LinearChainCRFOp : public framework::OperatorWithKernel {
     if (check) {
       PADDLE_ENFORCE_EQ(
           transition_dims[0] - 2, transition_dims[1],
-          "An invalid dimension for the Input(Transition), which should "
-          "be a 2-D tensor with shape [(D + 2) x D].");
+          platform::errors::InvalidArgument(
+              "An invalid dimension for the Input(Transition), which should "
+              "be a 2-D tensor with shape [(D + 2) x D]. But received: input "
+              "rank %u, input shape [%s].",
+              transition_dims.size(), transition_dims));
     }
     auto emission_dims = ctx->GetInputDim("Emission");
     PADDLE_ENFORCE_NE(emission_dims[0], 0,
                       "An empty mini-batch is not allowed.");
     if (ctx->HasInput("Length")) {
       PADDLE_ENFORCE_EQ(emission_dims.size(), 3,
-                        "The Input(Emission) should be a 3-D tensor.");
+                        platform::errors::InvalidArgument(
+                            "The Input(Emission) should be a 3-D tensor. But "
+                            "received: input rank %u, input shape [%s].",
+                            emission_dims.size(), emission_dims));
       auto label_dims = ctx->GetInputDim("Label");
       PADDLE_ENFORCE_EQ(
           (label_dims.size() == 3UL && label_dims[2] == 1) ||
              (label_dims.size() == 2UL),
           true,
-          "The Input(Label) should be a 3-D tensor with last "
-          "dimension fixed to 1 or a 2-D tensor in padding mode.");
+          platform::errors::InvalidArgument(
+              "The Input(Label) should be a 3-D tensor with last dimension "
+              "fixed to 1 or a 2-D tensor in padding mode. But received: "
+              "input rank %u, input shape [%s].",
+              label_dims.size(), label_dims));
       if (ctx->IsRuntime()) {
         PADDLE_ENFORCE_EQ(emission_dims[0], label_dims[0],
-                          "The batch size of Input(Emission) and Input(Label) "
-                          "should be the same.");
+                          platform::errors::InvalidArgument(
+                              "The batch size of Input(Emission) "
+                              "and Input(Label) should be the same. But "
+                              "received Input(Emission): rank %u, shape [%s]; "
+                              "received Input(Label): rank %u, shape [%s].",
+                              emission_dims.size(), emission_dims,
+                              label_dims.size(), label_dims));
         PADDLE_ENFORCE_EQ(emission_dims[1], label_dims[1],
-                          "The max length of Input(Emission) and Input(Label) "
-                          "should be the same.");
+                          platform::errors::InvalidArgument(
+                              "The max length of Input(Emission) "
+                              "and Input(Label) should be the same. But "
+                              "received Input(Emission): rank %u, shape [%s]; "
+                              "received Input(Label): rank %u, shape [%s].",
+                              emission_dims.size(), emission_dims,
+                              label_dims.size(), label_dims));
       }
     } else {
-      PADDLE_ENFORCE_EQ(emission_dims.size(), 2,
-                        "The Input(Emission) should be a 2-D tensor.");
+      PADDLE_ENFORCE_EQ(
+          emission_dims.size(), 2,
+          platform::errors::InvalidArgument(
+              "The Input(Emission) should be a 2-D tensor. But received: "
+              "input rank %u, input shape [%s].",
+              emission_dims.size(), emission_dims));
       if (ctx->IsRuntime()) {
         PADDLE_ENFORCE_EQ(emission_dims[1], transition_dims[1],
-                          "The 2nd dimension of the Input(Emission) and the "
-                          "Input(Transition) "
-                          "should be equal to the tag number.");
+                          platform::errors::InvalidArgument(
+                              "The 2nd dimension of the Input(Emission) and "
+                              "the Input(Transition) should be equal to the "
+                              "tag number. But received Input(Emission): rank "
+                              "%u, shape [%s]; received Input(Transition): "
+                              "rank %u, shape [%s].",
+                              emission_dims.size(), emission_dims,
+                              transition_dims.size(), transition_dims));
       }
       auto label_dims = ctx->GetInputDim("Label");
-      PADDLE_ENFORCE_EQ(label_dims.size(), 2,
-                        "The Input(Label) should be a 2-D tensor with the 2nd "
-                        "dimensions fixed to 1.");
+      PADDLE_ENFORCE_EQ(
+          label_dims.size(), 2,
+          platform::errors::InvalidArgument(
+              "The Input(Label) should be a 2-D tensor with the 2nd "
+              "dimensions fixed to 1. But received: input rank %u, "
+              "input shape [%s].",
+              label_dims.size(), label_dims));
       if (ctx->IsRuntime()) {
         PADDLE_ENFORCE_EQ(
             emission_dims[0], label_dims[0],
-            "The height of Input(Emission) and the height of Input(Label) "
-            "should be the same.");
+            platform::errors::InvalidArgument(
+                "The first dimension of Input(Emission) and Input(Label) "
+                "should be the same. But received Input(Emission): rank %u, "
+                "shape [%s]; received Input(Label): rank %u, shape [%s].",
+                emission_dims.size(), emission_dims, label_dims.size(),
+                label_dims));
       }
     }
     ctx->SetOutputDim("Alpha", emission_dims);
@@ -239,12 +281,13 @@ class LinearChainCRFGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("EmissionExps"),
-                   "Input(EmissionExps) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("TransitionExps"),
-                   "Input(TransitionExps) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("LogLikelihood")),
-                   "Input(LogLikelihood@GRAD) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("EmissionExps"), "Input", "EmissionExps",
+                   "LinearChainCRFGrad");
+    OP_INOUT_CHECK(ctx->HasInput("TransitionExps"), "Input", "TransitionExps",
+                   "LinearChainCRFGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("LogLikelihood")),
+                   "Input", framework::GradVarName("LogLikelihood"),
+                   "LinearChainCRFGrad");
     auto transition_exps_dims = ctx->GetInputDim("TransitionExps");
     auto emission_exps_dims = ctx->GetInputDim("EmissionExps");
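Gradient ops follow the same presence-check rewrite; the only wrinkle, visible in the LinearChainCRFGrad hunk above, is that the checked name is itself computed with framework::GradVarName. A minimal sketch (the op name "MyOpGrad" is illustrative):

    // The gradient input is looked up and reported under its mangled name
    // (e.g. "Out@GRAD"), so the not-found message names the actual variable.
    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
                   framework::GradVarName("Out"), "MyOpGrad");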
@@ -28,25 +28,35 @@ class SequenceConvOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceConvOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Filter"),
-                   "Input(Filter) of SequenceConvOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceConvOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceConv");
+    OP_INOUT_CHECK(ctx->HasInput("Filter"), "Input", "Filter", "SequenceConv");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceConv");
     int context_length = ctx->Attrs().Get<int>("contextLength");
     int context_start = ctx->Attrs().Get<int>("contextStart");
     auto in_dims = ctx->GetInputDim("X");
     auto filter_dims = ctx->GetInputDim("Filter");
-    PADDLE_ENFORCE(ctx->Attrs().Get<int>("contextStride") == 1,
-                   "Currently, SequenceConvOp only supports contextStride=1.");
-    PADDLE_ENFORCE(in_dims.size() == 2 && filter_dims.size() == 2,
-                   "Input(X, Filter) should be 2-D tensor.");
-    PADDLE_ENFORCE(filter_dims[0] == context_length * in_dims[1],
-                   "Filter's height should be context_length * "
-                   "input_hidden_size.");
+    PADDLE_ENFORCE_EQ(
+        ctx->Attrs().Get<int>("contextStride"), 1,
+        platform::errors::InvalidArgument(
+            "Currently, SequenceConvOp only supports contextStride=1. But "
+            "received contextStride = %u.",
+            ctx->Attrs().Get<int>("contextStride")));
+    PADDLE_ENFORCE_EQ(
+        in_dims.size() == 2 && filter_dims.size() == 2, true,
+        platform::errors::InvalidArgument(
+            "Input(X, Filter) should be 2-D tensor. But received Input(X): "
+            "input rank %u, input shape [%s]; received Input(Filter): "
+            "input rank %u, input shape [%s].",
+            in_dims.size(), in_dims, filter_dims.size(), filter_dims));
+    PADDLE_ENFORCE_EQ(
+        filter_dims[0], context_length * in_dims[1],
+        platform::errors::InvalidArgument(
+            "Filter's height should be context_length * "
+            "input_hidden_size. But received: filter's height = %d, "
+            "context_length * input_hidden_size = %d.",
+            filter_dims[0], context_length * in_dims[1]));
     if (ctx->Attrs().Get<bool>("paddingTrainable")) {
       PADDLE_ENFORCE(
@@ -63,12 +73,21 @@ class SequenceConvOp : public framework::OperatorWithKernel {
             "If context_start is 0 and context_length is 1, paddingTrainable "
             "should be false.");
       }
-      PADDLE_ENFORCE(padding_dim.size() == 2,
-                     "Input(PaddingData) should be 2-D tensor.");
-      PADDLE_ENFORCE(
-          padding_dim[0] == total_pad && padding_dim[1] == input_width,
-          "Input(PaddingData)'s shape is not consistent with 'context_start' "
-          "and 'context_length'.");
+      PADDLE_ENFORCE_EQ(
+          padding_dim.size(), 2,
+          platform::errors::InvalidArgument(
+              "Input(PaddingData) should be 2-D tensor. But received: "
+              "input rank %u, input shape [%s].",
+              padding_dim.size(), padding_dim));
+      PADDLE_ENFORCE_EQ(
+          padding_dim[0] == total_pad && padding_dim[1] == input_width, true,
+          platform::errors::InvalidArgument(
+              "Input(PaddingData)'s shape is not consistent with "
+              "'context_start' and 'context_length'. Received "
+              "Input(PaddingData): input rank %u, input shape [%s].",
+              padding_dim.size(), padding_dim));
     }
     in_dims[1] = filter_dims[1];
@@ -83,9 +102,9 @@ class SequenceConvGradOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Gradient of output(Out) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("X"), "The input(X) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "SequenceConvGrad");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceConvGrad");
     if (ctx->Attrs().Get<bool>("paddingTrainable") &&
         ctx->HasOutput(framework::GradVarName("PaddingData"))) {
@@ -41,9 +41,14 @@ class SequenceConvKernel : public framework::OpKernel<T> {
     PADDLE_ENFORCE_EQ(
         in->lod().empty(), false,
-        "Input(X) Tensor of SequenceConvOp does not contain LoD information.");
-    PADDLE_ENFORCE_EQ(in->lod().size(), 1UL,
-                      "Only support one level sequence now.");
+        platform::errors::InvalidArgument("Input(X) Tensor of SequenceConvOp "
+                                          "does not contain LoD information."));
+    PADDLE_ENFORCE_EQ(
+        in->lod().size(), 1UL,
+        platform::errors::InvalidArgument(
+            "Only support input sequence with lod level equal to 1 at "
+            "present. But received: lod level %u.",
+            in->lod().size()));
     const Tensor* padding_data = nullptr;
     if (padding_trainable) {
@@ -90,8 +95,12 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
     int context_stride = context.Attr<int>("contextStride");
     bool padding_trainable = context.Attr<bool>("paddingTrainable");
-    PADDLE_ENFORCE_EQ(in->lod().size(), 1UL,
-                      "Only support one level sequence now.");
+    PADDLE_ENFORCE_EQ(
+        in->lod().size(), 1UL,
+        platform::errors::InvalidArgument(
+            "Only support input sequence with lod level equal to 1 at "
+            "present. But received: lod level %u.",
+            in->lod().size()));
     auto lod_g_level_0 = in->lod()[0];
     int up_pad = std::max(0, -context_start);
@@ -22,12 +22,8 @@ class SequenceEnumerateOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(
-        ctx->HasInput("X"),
-        "Input(X) of SequenceEnumerate operator should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasOutput("Out"),
-        "Output(Out) of SequenceEnumerate operator should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceEnumerate");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceEnumerate");
     const auto x_dims = ctx->GetInputDim("X");
     const auto win_size = ctx->Attrs().Get<int>("win_size");
@@ -26,19 +26,20 @@ class SequenceExpandOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceExpandOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Y"),
-                   "Input(Y) of SequenceExpandOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceExpandOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceExpand");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "SequenceExpand");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceExpand");
     auto x_dims = ctx->GetInputDim("X");
     auto out_dims = x_dims;
     int ref_level = ctx->Attrs().Get<int>("ref_level");
-    PADDLE_ENFORCE_GE(x_dims.size(), 2,
-                      "Dimension number of Input(X) should be at least 2.");
+    PADDLE_ENFORCE_GE(
+        x_dims.size(), 2,
+        platform::errors::InvalidArgument(
+            "Dimension number of Input(X) should be at least 2. But "
+            "received: input rank %u, input shape [%s].",
+            x_dims.size(), x_dims));
     if (ctx->IsRuntime()) {
       framework::Variable* x_var =
@@ -50,31 +51,47 @@ class SequenceExpandOp : public framework::OperatorWithKernel {
       auto& y_lod = y_var->Get<LoDTensor>().lod();
       PADDLE_ENFORCE_LE(x_lod.size(), 1UL,
-                        "Level number of Input(X)'s lod should not be "
-                        "greater than 1.");
+                        platform::errors::InvalidArgument(
+                            "Level of Input(X)'s lod should not be "
+                            "greater than 1. But received: lod level %u.",
+                            x_lod.size()));
-      PADDLE_ENFORCE_GT(y_lod.size(), 0UL,
-                        "Level number of Input(Y)'s lod should be "
-                        "greater than 0.");
+      PADDLE_ENFORCE_GT(
+          y_lod.size(), 0UL,
+          platform::errors::InvalidArgument(
+              "Level of Input(Y)'s lod should be greater than 0. But "
+              "received: lod level %u.",
+              y_lod.size()));
-      PADDLE_ENFORCE(
+      PADDLE_ENFORCE_EQ(
           ref_level == -1 ||
               (ref_level >= 0 && ref_level < static_cast<int>(y_lod.size())),
-          "Invalid `ref_level`, which should be either equal to -1 "
-          "or in [0, %d)",
-          y_lod.size());
+          true, platform::errors::InvalidArgument(
+                    "Invalid `ref_level`, which should be either equal to -1 "
+                    "or in [0, %d), but received `ref_level` = %u.",
+                    y_lod.size(), ref_level));
       if (ref_level == -1) ref_level = y_lod.size() - 1;
       if (x_lod.size() > 0) {
-        PADDLE_ENFORCE(x_lod[0].size() == y_lod[ref_level].size(),
-                       "Level number of Input(X)'s lod could be 0. Otherwise "
-                       "size of Input(X)'s first level lod should be equal to "
-                       "size of Input(Y)'s referred level lod.");
+        PADDLE_ENFORCE_EQ(
+            x_lod[0].size(), y_lod[ref_level].size(),
+            platform::errors::InvalidArgument(
+                "Level number of Input(X)'s lod could be 0. Otherwise "
+                "size of Input(X)'s first level lod should be equal to "
+                "size of Input(Y)'s referred level lod. But received: "
+                "Input(X).lod[0].size() = %u, Input(Y).lod[%d].size() = "
+                "%u.",
+                x_lod[0].size(), ref_level, y_lod[ref_level].size()));
       } else {
-        PADDLE_ENFORCE_EQ(x_dims[0],
-                          static_cast<int64_t>(y_lod[ref_level].size()) - 1,
-                          "When Input(X)'s lod is null, the dims[0] of "
-                          "Input(X) should match the "
-                          "size of Input(Y)'s referred level lod.");
+        PADDLE_ENFORCE_EQ(
+            x_dims[0], static_cast<int64_t>(y_lod[ref_level].size()) - 1,
+            platform::errors::InvalidArgument(
+                "When Input(X)'s lod is null, the dims[0] of "
+                "Input(X) should match the "
+                "size of Input(Y)'s referred level lod. But received "
+                "Input(X): input rank %u, input shape [%s]; received "
+                "Input(Y).lod[%d].size() - 1 = %d.",
+                x_dims.size(), x_dims, ref_level,
+                static_cast<int64_t>(y_lod[ref_level].size()) - 1));
       }
       int64_t out_first_dim = 0;
@@ -194,9 +211,9 @@ class SequenceExpandOpGrad : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceExpandOpGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "SequenceExpandOpGrad");
     auto x_dims = ctx->GetInputDim("X");
     auto x_grad_name = framework::GradVarName("X");
@@ -24,25 +24,24 @@ class SequencePoolOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of SequencePoolOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of SequencePoolOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequencePool");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequencePool");
     if (!ctx->IsRuntime()) {
       // Check the lod_level for compile-time.
       auto in_lod_level = ctx->GetLoDLevel("X");
-      PADDLE_ENFORCE_GT(
-          in_lod_level, 0,
-          "The LoD level Input(X) of sequence_pool should be larger than 0.");
+      PADDLE_ENFORCE_GT(in_lod_level, 0,
+                        platform::errors::InvalidArgument(
+                            "The LoD level of Input(X) should be larger than "
+                            "0, but received: lod level %u.",
+                            in_lod_level));
       ctx->SetLoDLevel("Out", in_lod_level - 1);
     }
     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
     if (ctx->Attrs().Get<std::string>("pooltype") == "MAX") {
-      PADDLE_ENFORCE_EQ(
-          ctx->HasOutput("MaxIndex"), true,
-          "Output(MaxIndex) of SequencePoolOp should not be null.");
+      OP_INOUT_CHECK(ctx->HasOutput("MaxIndex"), "Output", "MaxIndex",
+                     "SequencePool");
       ctx->SetOutputDim("MaxIndex", ctx->GetInputDim("X"));
     }
   }
@@ -113,16 +112,26 @@ class SequencePoolGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
-                      "Gradient of Out should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "The input X should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "SequencePoolGrad");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequencePoolGrad");
     auto og_dims = ctx->GetInputDim(framework::GradVarName("Out"));
     auto x_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE_EQ(og_dims.size(), x_dims.size(),
-                      "The rank of output grad must equal to Input(X).");
+                      platform::errors::InvalidArgument(
+                          "The rank of output grad must equal to Input(X). But "
+                          "received: input rank %u, input shape [%s].",
+                          og_dims.size(), og_dims));
     for (int64_t i = 1; i < og_dims.size(); ++i) {
-      PADDLE_ENFORCE_EQ(og_dims[i], x_dims[i], "The dimension mismatch.");
+      PADDLE_ENFORCE_EQ(
+          og_dims[i], x_dims[i],
+          platform::errors::InvalidArgument(
+              "The dimension mismatch between Input(OUT@GRAD) and "
+              "Input(X). Received Input(OUT@GRAD): input rank %u, "
+              "input shape [%s]; received Input(X): input rank %u, "
+              "input shape [%s].",
+              og_dims.size(), og_dims, x_dims.size(), x_dims));
     }
     ctx->ShareDim("X", /*->*/ framework::GradVarName("X"));
@@ -453,6 +453,8 @@ def edit_distance(input,
         # [4]
     """
+    check_variable_and_dtype(input, 'input', ['int64'], 'edit_distance')
+    check_variable_and_dtype(label, 'label', ['int64'], 'edit_distance')
     helper = LayerHelper("edit_distance", **locals())
     # remove some tokens from input and labels
@@ -2151,6 +2151,9 @@ def linear_chain_crf(input, label, param_attr=None, length=None):
         print(transition)
     """
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
+                             'linear_chain_crf')
+    check_variable_and_dtype(label, 'label', ['int64'], 'linear_chain_crf')
     helper = LayerHelper('linear_chain_crf', **locals())
     size = input.shape[2] if length else input.shape[1]
     transition = helper.create_parameter(
@@ -2233,6 +2236,8 @@ def crf_decoding(input, param_attr, label=None, length=None):
           crf_decode = fluid.layers.crf_decoding(input=emission, length=length,
                     param_attr=fluid.ParamAttr(name="crfw_pad"))
     """
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
+                             'crf_decoding')
     helper = LayerHelper('crf_decoding', **locals())
     transition = helper.get_parameter(param_attr.name)
     viterbi_path = helper.create_variable_for_type_inference(
@@ -11294,7 +11299,11 @@ def expand_as(x, target_tensor, name=None):
         #(3,20)
     """
     check_variable_and_dtype(
         x, 'x', ['float32', 'float64', 'int32', 'int64', 'bool'], 'expand_as')
+    check_variable_and_dtype(target_tensor, 'target_tensor',
+                             ['float32', 'float64', 'int32', 'int64', 'bool'],
+                             'expand_as')
     helper = LayerHelper('expand_as', input=x, **locals())
     dtype = helper.input_dtype(input_param_name='x')
     out = helper.create_variable_for_type_inference(dtype)
@@ -144,6 +144,8 @@ def sequence_conv(input,
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
+                             'sequence_conv')
     helper = LayerHelper('sequence_conv', **locals())
     dtype = helper.input_dtype()
     filter_shape = [filter_size * input.shape[1], num_filters]
@@ -337,6 +339,7 @@ def sequence_pool(input, pool_type, is_test=False, pad_value=0.0):
     """
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
+    check_variable_and_dtype(input, 'input', ['float32'], 'sequence_pool')
     helper = LayerHelper('sequence_pool', **locals())
     dtype = helper.input_dtype()
     pool_out = helper.create_variable_for_type_inference(dtype)
@@ -674,7 +677,7 @@ def sequence_expand(x, y, ref_level=-1, name=None):
     Args:
         x (Variable): The input variable which is a Tensor or LoDTensor, with the \
             dims ``[M, K]``. The lod level is at most 1. The data type should be \
-            float32, float64, int8, int32 or int64.
+            float32, float64, int32 or int64.
         y (Variable): The input variable which is a LoDTensor, the lod level is \
             at least 1.
         ref_level (int): Lod level of ``y`` to be referred by ``x``. If set to -1, \
@@ -734,6 +737,8 @@ def sequence_expand(x, y, ref_level=-1, name=None):
     """
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             'sequence_expand')
     helper = LayerHelper('sequence_expand', input=x, **locals())
     dtype = helper.input_dtype()
     tmp = helper.create_variable_for_type_inference(dtype)
@@ -1222,7 +1227,7 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
     Args:
         input (Variable): The input variable which is a index sequence, \
             which should be a LodTensor with shape ``[d_1, 1]`` and 1-level lod info. \
-            The data type should be float32, float64, int8, int32 or int64.
+            The data type should be int32 or int64.
         win_size (int): The window size for enumerating all sub-sequences.
         pad_value (int, optional): The padding value, default 0.
         name(str, optional): For detailed information, please refer \
@@ -1245,6 +1250,8 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
     """
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
+    check_variable_and_dtype(input, 'input', ['int32', 'int64'],
+                             'sequence_enumerate')
     helper = LayerHelper('sequence_enumerate', **locals())
     out = helper.create_variable_for_type_inference(
         helper.input_dtype(), stop_gradient=True)
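On the Python side, each layer now validates its arguments up front with check_variable_and_dtype, so a wrong dtype fails at graph-construction time with the layer named in the error, instead of surfacing later inside the C++ op. A hedged sketch of what the new guard catches (fluid 1.8-style API; the variable names are illustrative and the failure is intentional):

    import paddle.fluid as fluid

    hyps = fluid.data(name='hyps', shape=[None, 8], dtype='float32')
    refs = fluid.data(name='refs', shape=[None, 8], dtype='int64')

    # edit_distance now requires int64 inputs; passing the float32 variable
    # should raise a TypeError from check_variable_and_dtype before the op
    # is even added to the program.
    dist, seq_num = fluid.layers.edit_distance(input=hyps, label=refs)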