Unverified · Commit a9520db2 · authored by Yibing Liu · committed by GitHub

Format error message for ops (#24482)

* Format error message for ops, test=develop

* Fix check in sequence_expand, test=develop
Parent 2644cb81
@@ -89,41 +89,57 @@ class CRFDecodingOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Emission"), true,
-                      "Input(Emission) should be not null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Transition"), true,
-                      "Input(Transition) should be not null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("ViterbiPath"), true,
-                      "Output(ViterbiPath) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("Emission"), "Input", "Emission",
+                   "CRFDecoding");
+    OP_INOUT_CHECK(ctx->HasInput("Transition"), "Input", "Transition",
+                   "CRFDecoding");
+    OP_INOUT_CHECK(ctx->HasOutput("ViterbiPath"), "Output", "ViterbiPath",
+                   "CRFDecoding");

     auto emission_dims = ctx->GetInputDim("Emission");
     bool has_length = ctx->HasInput("Length");

     if (has_length) {
       PADDLE_ENFORCE_EQ(emission_dims.size(), 3,
-                        "The Input(Emission) should be a 3-D tensor.");
+                        platform::errors::InvalidArgument(
+                            "The Input(Emission) should be a 3-D tensor. But "
+                            "received: input rank %u, input shape [%s]. ",
+                            emission_dims.size(), emission_dims));
     } else {
       PADDLE_ENFORCE_EQ(emission_dims.size(), 2,
-                        "The Input(Emission) should be a 2-D tensor.");
+                        platform::errors::InvalidArgument(
+                            "The Input(Emission) should be a 2-D tensor. But "
+                            "received: input rank %u, input shape [%s].",
+                            emission_dims.size(), emission_dims));
     }
-    PADDLE_ENFORCE_NE(emission_dims[0], 0,
-                      "An empty mini-batch is not allowed.");

     auto transition_dims = ctx->GetInputDim("Transition");
     PADDLE_ENFORCE_EQ(transition_dims.size(), 2UL,
-                      "The Input(Transition) should be a 2-D tensor.");
+                      platform::errors::InvalidArgument(
+                          "The Input(Transition) should be a 2-D tensor. But "
+                          "received: input rank %u, input shape [%s].",
+                          transition_dims.size(), transition_dims));
     PADDLE_ENFORCE_EQ(
         transition_dims[0] - 2, transition_dims[1],
-        "An invalid dimension for the Input(Transition), which should "
-        "be a 2-D tensor with shape [(D + 2) x D].");
+        platform::errors::InvalidArgument(
+            "An invalid dimension for the Input(Transition), which should "
+            "be a 2-D tensor with shape [(D + 2) x D]. But received: input "
+            "rank %u, "
+            "input shape [%s].",
+            transition_dims.size(), transition_dims));
     if (ctx->IsRuntime() || (emission_dims[emission_dims.size() - 1] > 0 &&
                              transition_dims[transition_dims.size() - 1] > 0)) {
-      PADDLE_ENFORCE_EQ(
-          emission_dims[emission_dims.size() - 1],
-          transition_dims[transition_dims.size() - 1],
-          "The last dimension of the Input(Emission) and the Input(Transition) "
-          "should be equal to the tag number.");
+      PADDLE_ENFORCE_EQ(emission_dims[emission_dims.size() - 1],
+                        transition_dims[transition_dims.size() - 1],
+                        platform::errors::InvalidArgument(
+                            "The last dimension of the Input(Emission) and the "
+                            "Input(Transition) "
+                            "should be equal to the tag number. But received "
+                            "Input(Emission): rank "
+                            "%u, shape [%s]; received Input(Transition): rank "
+                            "%u, shape [%s].",
+                            emission_dims.size(), emission_dims,
+                            transition_dims.size(), transition_dims));
     }
     if (ctx->HasInput("Label")) {
       auto label_dims = ctx->GetInputDim("Label");
@@ -132,20 +148,31 @@ class CRFDecodingOp : public framework::OperatorWithKernel {
           (label_dims.size() == 3UL && label_dims[2] == 1) ||
               label_dims.size() == 2UL,
           true,
-          "The Input(Label) should be a 3-D tensor with last dimension "
-          "fixed to 1 or a 2-D tensor in padding mode.");
+          platform::errors::InvalidArgument(
+              "The Input(Label) should be a 3-D tensor with last dimension "
+              "fixed to 1 or a 2-D tensor in padding mode. But received: "
+              "input "
+              "rank %u, input shape [%s].",
+              label_dims.size(), label_dims));
     } else {
-      PADDLE_ENFORCE_EQ((label_dims.size() == 2UL && label_dims[1] == 1) ||
-                            label_dims.size() == 1UL,
-                        true,
-                        "The Input(Label) should be a 2-D tensor with last "
-                        "dimension fixed to 1 or a 1-D tensor.");
+      PADDLE_ENFORCE_EQ(
+          (label_dims.size() == 2UL && label_dims[1] == 1) ||
+              label_dims.size() == 1UL,
+          true, platform::errors::InvalidArgument(
+                    "The Input(Label) should be a 2-D tensor with last "
+                    "dimension fixed to 1 or a 1-D tensor. But received: "
+                    "input rank %u, input shape [%s].",
+                    label_dims.size(), label_dims));
     }
     if (ctx->IsRuntime() || (emission_dims[0] > 0 && label_dims[0] > 0)) {
       PADDLE_ENFORCE_EQ(
           emission_dims[0], label_dims[0],
-          "The first dimension of Input(Emission) and Input(Label) "
-          "should be the same.");
+          platform::errors::InvalidArgument(
+              "The first dimension of Input(Emission) and Input(Label) "
+              "should be the same. But received Input(Emission): rank %u, "
+              "shape [%s]; received Input(Label): rank %u, shape [%s].",
+              emission_dims.size(), emission_dims, label_dims.size(),
+              label_dims));
     }
   }
...
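The pattern applied throughout this commit: pure existence checks on inputs and outputs collapse into the OP_INOUT_CHECK macro (which takes the check, the slot kind, the slot name, and the op type), while every shape or value check gains a typed platform::errors::InvalidArgument that echoes the offending rank via %u and the shape via %s. A minimal sketch of the convention for a hypothetical MyOp, using only the macros visible in this diff (it assumes the usual Paddle operator headers):

    // Sketch only: "MyOp" and its "X" slot are hypothetical.
    void InferShape(framework::InferShapeContext* ctx) const override {
      // Existence check: kind, slot name, and op type feed a uniform message.
      OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "MyOp");
      auto x_dims = ctx->GetInputDim("X");
      // Shape check: typed error that reports the actual rank and shape.
      PADDLE_ENFORCE_EQ(x_dims.size(), 2,
                        platform::errors::InvalidArgument(
                            "The Input(X) should be a 2-D tensor. But "
                            "received: input rank %u, input shape [%s].",
                            x_dims.size(), x_dims));
    }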
@@ -76,9 +76,16 @@ class CRFDecodingOpKernel : public framework::OpKernel<T> {
       }
     } else {
       PADDLE_ENFORCE_EQ(emission_weights->NumLevels(), 1UL,
-                        "The Input(Emission) should be a sequence.");
+                        platform::errors::InvalidArgument(
+                            "The Input(Emission) should be a sequence with lod "
+                            "level 1. But received: lod level %u.",
+                            emission_weights->NumLevels()));
       auto lod = emission_weights->lod();
-      PADDLE_ENFORCE_GT(lod.size(), 0, "Input(Emission) must be a sequence.");
+      PADDLE_ENFORCE_GT(
+          lod.size(), 0,
+          platform::errors::InvalidArgument(
+              "Input(Emission) must be a sequence. But received: lod level %u.",
+              lod.size()));
       const size_t level = 0;
       const size_t seq_num = lod[level].size() - 1;
@@ -92,7 +99,10 @@ class CRFDecodingOpKernel : public framework::OpKernel<T> {
     }
     if (label) {
       PADDLE_ENFORCE_EQ(label->NumLevels(), 1UL,
-                        "The Input(Label) should be a sequence.");
+                        platform::errors::InvalidArgument(
+                            "The Input(label) should be a sequence with lod "
+                            "level 1. But received: lod level %u.",
+                            label->NumLevels()));
       const int64_t* label_value = label->data<int64_t>();
       size_t numel = label->numel();
       for (size_t i = 0; i < numel; ++i) {
...
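The %u/%s placeholders in these messages are printf-style: the rank is passed as an unsigned value and the shape is rendered to text. A standalone C++ sketch of how such a message comes out (ShapeToString is a hypothetical stand-in for how a DDim gets printed; the wording mimics, but is not produced by, Paddle itself):

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for printing a shape as "d0, d1, ...".
    static std::string ShapeToString(const std::vector<int64_t>& dims) {
      std::string s;
      for (size_t i = 0; i < dims.size(); ++i) {
        if (i) s += ", ";
        s += std::to_string(dims[i]);
      }
      return s;
    }

    int main() {
      std::vector<int64_t> emission_dims = {10, 32, 7};  // rank 3
      if (emission_dims.size() != 2) {
        // Mirrors the "But received: input rank %u, input shape [%s]." suffix.
        std::printf("The Input(Emission) should be a 2-D tensor. But received: "
                    "input rank %u, input shape [%s].\n",
                    static_cast<unsigned>(emission_dims.size()),
                    ShapeToString(emission_dims).c_str());
      }
      return 0;
    }

This prints: The Input(Emission) should be a 2-D tensor. But received: input rank 3, input shape [10, 32, 7].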
@@ -22,11 +22,11 @@ class EditDistanceOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Hyps"), "Input(Hyps) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Refs"), "Input(Refs) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("SequenceNum"),
-                   "Output(SequenceNum) shouldn't be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Hyps"), "Input", "Hyps", "EditDistance");
+    OP_INOUT_CHECK(ctx->HasInput("Refs"), "Input", "Refs", "EditDistance");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "EditDistance");
+    OP_INOUT_CHECK(ctx->HasOutput("SequenceNum"), "Output", "SequenceNum",
+                   "EditDistance");
     auto hyp_dims = ctx->GetInputDim("Hyps");
     auto ref_dims = ctx->GetInputDim("Refs");
@@ -34,23 +34,41 @@ class EditDistanceOp : public framework::OperatorWithKernel {
       auto hyp_length_dims = ctx->GetInputDim("HypsLength");
       auto ref_length_dims = ctx->GetInputDim("RefsLength");
-      PADDLE_ENFORCE(hyp_dims.size() == 2 && ref_dims.size() == 2 &&
-                         hyp_dims[0] == ref_dims[0],
-                     "Input(Hyps) and Input(Refs) must be 2-D Tensors with "
-                     "identical first dimension");
-      PADDLE_ENFORCE(hyp_length_dims[0] == ref_length_dims[0] &&
-                         hyp_length_dims[0] == hyp_dims[0],
-                     "Input(HypsLength), Input(RefsLength) and Input(Hyps) "
-                     "should have identical first dimension");
+      PADDLE_ENFORCE_EQ(
+          hyp_dims.size() == 2 && ref_dims.size() == 2 &&
+              hyp_dims[0] == ref_dims[0],
+          true, platform::errors::InvalidArgument(
+                    "Input(Hyps) and Input(Refs) must be 2-D Tensors with "
+                    "identical first dimension. But received Input(Hyps): "
+                    "input rank %u, input shape [%s]; received Input(Refs): "
+                    "input rank %u, input shape [%s]",
+                    hyp_dims.size(), hyp_dims, ref_dims.size(), ref_dims));
+      PADDLE_ENFORCE_EQ(
+          hyp_length_dims[0] == ref_length_dims[0] &&
+              hyp_length_dims[0] == hyp_dims[0],
+          true,
+          platform::errors::InvalidArgument(
+              "Input(HypsLength), Input(RefsLength) and Input(Hyps) "
+              "should have identical first dimension. But received "
+              "Input(HypsLength): input rank %u, input shape [%s]; "
+              "received Input(RefsLength): input rank %u, input shape "
+              "[%s]; received Input(Hyps): input rank %u, input shape "
+              "[%s].",
+              hyp_length_dims.size(), hyp_length_dims, ref_length_dims.size(),
+              ref_length_dims, hyp_dims.size(), hyp_dims));
     } else {
-      PADDLE_ENFORCE(
-          hyp_dims.size() == 2 && hyp_dims[1] == 1,
-          "Input(Hyps) must be a 2-D LoDTensor with the 2nd dimension "
-          "equal to 1.");
-      PADDLE_ENFORCE(
-          ref_dims.size() == 2 && ref_dims[1] == 1,
-          "Input(Refs) must be a 2-D LoDTensor with the 2nd dimension "
-          "equal to 1.");
+      PADDLE_ENFORCE_EQ(
+          hyp_dims.size() == 2 && hyp_dims[1] == 1, true,
+          platform::errors::InvalidArgument(
+              "Input(Hyps) must be a 2-D LoDTensor with the 2nd dimension "
+              "equal to 1. But received: input rank %u, input shape [%s].",
+              hyp_dims.size(), hyp_dims));
+      PADDLE_ENFORCE_EQ(
+          ref_dims.size() == 2 && ref_dims[1] == 1, true,
+          platform::errors::InvalidArgument(
+              "Input(Refs) must be a 2-D LoDTensor with the 2nd dimension "
+              "equal to 1. But received: input rank %u, input shape [%s].",
+              ref_dims.size(), ref_dims));
     }
     ctx->SetOutputDim("Out", ctx->GetInputDim("Refs"));
...
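One recurring conversion in this hunk: the old bare PADDLE_ENFORCE(cond, msg) calls guarded compound conditions, and since there is no single pair of values for PADDLE_ENFORCE_EQ to compare, the commit checks the whole predicate against true instead. A reduced sketch of that shape (dim names reused from the hunk above):

    // Sketch only: the compound predicate is compared against `true` as a
    // whole, so the formatted message must carry the interesting operands.
    PADDLE_ENFORCE_EQ(
        hyp_dims.size() == 2 && hyp_dims[1] == 1, true,
        platform::errors::InvalidArgument(
            "Input(Hyps) must be a 2-D LoDTensor with the 2nd dimension "
            "equal to 1. But received: input rank %u, input shape [%s].",
            hyp_dims.size(), hyp_dims));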
@@ -24,17 +24,27 @@ class ExpandAsOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true);
-    PADDLE_ENFORCE_EQ(ctx->HasInput("target_tensor"), true);
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true);
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ExpandAs");
+    OP_INOUT_CHECK(ctx->HasInput("target_tensor"), "Input", "target_tensor",
+                   "ExpandAs");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ExpandAs");
     auto x_dims = ctx->GetInputDim("X");
     auto target_tensor_dims = ctx->GetInputDim("target_tensor");
-    PADDLE_ENFORCE_EQ(static_cast<size_t>(x_dims.size()),
-                      target_tensor_dims.size(),
-                      "The rank of input(target_tensor) must be equal "
-                      "to the rank of Input(X).");
-    PADDLE_ENFORCE_LE(x_dims.size(), 6,
-                      "The rank of Input(X) must not be greater than 6.");
+    PADDLE_ENFORCE_EQ(
+        static_cast<size_t>(x_dims.size()), target_tensor_dims.size(),
+        platform::errors::InvalidArgument(
+            "The rank of Input(target_tensor) must be equal "
+            "to the rank of Input(X). But received Input(X): input "
+            "rank %u, input shape [%s]; received Input(target_tensor): "
+            "input rank %u, input shape [%s].",
+            x_dims.size(), x_dims, target_tensor_dims.size(),
+            target_tensor_dims));
+    PADDLE_ENFORCE_LE(
+        x_dims.size(), 6,
+        platform::errors::InvalidArgument(
+            "The rank of Input(X) must not be greater than 6. But "
+            "received: input rank %u, input shape [%s].",
+            x_dims.size(), x_dims));
     std::vector<int64_t> out_shape(x_dims.size());
     ctx->SetOutputDim("Out", framework::make_ddim(out_shape));
   }
...
@@ -142,24 +142,27 @@ class LinearChainCRFOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Emission"),
-                   "Input(Emission) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("Transition"),
-                   "Input(Transition) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Alpha"),
-                   "Output(Alpha) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput("EmissionExps"),
-                   "Output(EmissionExps) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput("TransitionExps"),
-                   "Output(TransitionExps) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput("LogLikelihood"),
-                   "Output(LogLikelihood) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("Emission"), "Input", "Emission",
+                   "LinearChainCRF");
+    OP_INOUT_CHECK(ctx->HasInput("Transition"), "Input", "Transition",
+                   "LinearChainCRF");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "LinearChainCRF");
+    OP_INOUT_CHECK(ctx->HasOutput("Alpha"), "Output", "Alpha",
+                   "LinearChainCRF");
+    OP_INOUT_CHECK(ctx->HasOutput("EmissionExps"), "Output", "EmissionExps",
+                   "LinearChainCRF");
+    OP_INOUT_CHECK(ctx->HasOutput("TransitionExps"), "Output", "TransitionExps",
+                   "LinearChainCRF");
+    OP_INOUT_CHECK(ctx->HasOutput("LogLikelihood"), "Output", "LogLikelihood",
+                   "LinearChainCRF");

     auto transition_dims = ctx->GetInputDim("Transition");
-    PADDLE_ENFORCE_EQ(transition_dims.size(), 2,
-                      "The Input(Transition) should be a 2-D tensor.");
+    PADDLE_ENFORCE_EQ(transition_dims.size(), 2UL,
+                      platform::errors::InvalidArgument(
+                          "The Input(Transition) should be a 2-D tensor. But "
+                          "received: input rank %u, input shape [%s].",
+                          transition_dims.size(), transition_dims));
     bool check = true;
     if ((!ctx->IsRuntime()) &&
         (transition_dims[0] <= 0 || transition_dims[1] <= 0)) {
@@ -168,49 +171,88 @@ class LinearChainCRFOp : public framework::OperatorWithKernel {
     if (check) {
       PADDLE_ENFORCE_EQ(
           transition_dims[0] - 2, transition_dims[1],
-          "An invalid dimension for the Input(Transition), which should "
-          "be a 2-D tensor with shape [(D + 2) x D].");
+          platform::errors::InvalidArgument(
+              "An invalid dimension for the Input(Transition), which should "
+              "be a 2-D tensor with shape [(D + 2) x D]. But received: input "
+              "rank %u, "
+              "input shape [%s].",
+              transition_dims.size(), transition_dims));
     }
     auto emission_dims = ctx->GetInputDim("Emission");
-    PADDLE_ENFORCE_NE(emission_dims[0], 0,
-                      "An empty mini-batch is not allowed.");
     if (ctx->HasInput("Length")) {
       PADDLE_ENFORCE_EQ(emission_dims.size(), 3,
-                        "The Input(Emission) should be a 3-D tensor.");
+                        platform::errors::InvalidArgument(
+                            "The Input(Emission) should be a 3-D tensor. But "
+                            "received: input rank %u, input shape [%s].",
+                            emission_dims.size(), emission_dims));
       auto label_dims = ctx->GetInputDim("Label");
       PADDLE_ENFORCE_EQ(
           (label_dims.size() == 3UL && label_dims[2] == 1) ||
               (label_dims.size() == 2UL),
           true,
-          "The Input(Label) should be a 3-D tensor with last "
-          "dimension fixed to 1 or a 2-D tensor in padding mode.");
+          platform::errors::InvalidArgument(
+              "The Input(Label) should be a 3-D tensor with last dimension "
+              "fixed to 1 or a 2-D tensor in padding mode. But received: input "
+              "rank %u, input shape [%s].",
+              label_dims.size(), label_dims));
       if (ctx->IsRuntime()) {
         PADDLE_ENFORCE_EQ(emission_dims[0], label_dims[0],
-                          "The batch size of Input(Emission) and Input(Label) "
-                          "should be the same.");
+                          platform::errors::InvalidArgument(
+                              "The batch size of Input(Emission) "
+                              "and Input(Label) should be the same. But "
+                              "received Input(Emission): "
+                              "rank %u, shape [%s]; received Input(Label): "
+                              "rank %u, shape [%s].",
+                              emission_dims.size(), emission_dims,
+                              label_dims.size(), label_dims));
         PADDLE_ENFORCE_EQ(emission_dims[1], label_dims[1],
-                          "The max length of Input(Emission) and Input(Label) "
-                          "should be the same.");
+                          platform::errors::InvalidArgument(
+                              "The max length of Input(Emission) "
+                              "and Input(Label) should be the same. But "
+                              "received Input(Emission): "
+                              "rank %u, shape [%s]; received Input(Label): "
+                              "rank %u, shape [%s].",
+                              emission_dims.size(), emission_dims,
+                              label_dims.size(), label_dims));
       }
     } else {
-      PADDLE_ENFORCE_EQ(emission_dims.size(), 2,
-                        "The Input(Emission) should be a 2-D tensor.");
+      PADDLE_ENFORCE_EQ(
+          emission_dims.size(), 2,
+          platform::errors::InvalidArgument(
+              "The Input(Emission) should be a 2-D tensor. But received: "
+              "input rank %u, input shape [%s].",
+              emission_dims.size(), emission_dims));
      if (ctx->IsRuntime()) {
         PADDLE_ENFORCE_EQ(emission_dims[1], transition_dims[1],
-                          "The 2nd dimension of the Input(Emission) and the "
-                          "Input(Transition) "
-                          "should be equal to the tag number.");
+                          platform::errors::InvalidArgument(
+                              "The 2nd dimension of the Input(Emission) and "
+                              "the Input(Transition) "
+                              "should be equal to the tag number. But received "
+                              "Input(Emission): rank "
+                              "%u, shape [%s]; received Input(Transition): "
+                              "rank %u, shape [%s].",
+                              emission_dims.size(), emission_dims,
+                              transition_dims.size(), transition_dims));
       }
       auto label_dims = ctx->GetInputDim("Label");
-      PADDLE_ENFORCE_EQ(label_dims.size(), 2,
-                        "The Input(Label) should be a 2-D tensor with the 2nd "
-                        "dimensions fixed to 1.");
+      PADDLE_ENFORCE_EQ(
+          label_dims.size(), 2,
+          platform::errors::InvalidArgument(
+              "The Input(Label) should be a 2-D tensor with the 2nd "
+              "dimensions fixed to 1. But received: input rank %u, "
+              "input shape [%s].",
+              label_dims.size(), label_dims));
       if (ctx->IsRuntime()) {
         PADDLE_ENFORCE_EQ(
             emission_dims[0], label_dims[0],
-            "The height of Input(Emission) and the height of Input(Label) "
-            "should be the same.");
+            platform::errors::InvalidArgument(
+                "The first dimension of Input(Emission) and Input(Label) "
+                "should be the same. But received Input(Emission): rank %u, "
+                "shape "
+                "[%s]; received Input(Label): rank %u, shape [%s].",
+                emission_dims.size(), emission_dims, label_dims.size(),
+                label_dims));
       }
     }
     ctx->SetOutputDim("Alpha", emission_dims);
@@ -239,12 +281,13 @@ class LinearChainCRFGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("EmissionExps"),
-                   "Input(EmissionExps) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("TransitionExps"),
-                   "Input(TransitionExps) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("LogLikelihood")),
-                   "Input(LogLikelihood@GRAD) shoudl be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("EmissionExps"), "Input", "EmissionExps",
+                   "LinearChainCRFGrad");
+    OP_INOUT_CHECK(ctx->HasInput("TransitionExps"), "Input", "TransitionExps",
+                   "LinearChainCRFGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("LogLikelihood")),
+                   "Input", framework::GradVarName("LogLikelihood"),
+                   "LinearChainCRFGrad");
     auto transition_exps_dims = ctx->GetInputDim("TransitionExps");
     auto emission_exps_dims = ctx->GetInputDim("EmissionExps");
...
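Gradient ops follow the same convention; the only wrinkle is that gradient slots are addressed through framework::GradVarName, so the slot-name argument of OP_INOUT_CHECK is an expression rather than a string literal. A sketch for a hypothetical MyOpGrad (again assuming the usual Paddle operator headers):

    // Sketch only: "MyOpGrad" is hypothetical. GradVarName("Out") yields the
    // framework's canonical gradient slot name for "Out" (i.e. "Out@GRAD").
    void InferShape(framework::InferShapeContext* ctx) const override {
      OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
                     framework::GradVarName("Out"), "MyOpGrad");
    }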
@@ -28,25 +28,35 @@ class SequenceConvOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceConvOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Filter"),
-                   "Input(Filter) of SequenceConvOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceConvOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceConv");
+    OP_INOUT_CHECK(ctx->HasInput("Filter"), "Input", "Filter", "SequenceConv");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceConv");

     int context_length = ctx->Attrs().Get<int>("contextLength");
     int context_start = ctx->Attrs().Get<int>("contextStart");

     auto in_dims = ctx->GetInputDim("X");
     auto filter_dims = ctx->GetInputDim("Filter");
-    PADDLE_ENFORCE(ctx->Attrs().Get<int>("contextStride") == 1,
-                   "Currently, SequenceConvOp only supports contextStride=1.");
-    PADDLE_ENFORCE(in_dims.size() == 2 && filter_dims.size() == 2,
-                   "Input(X, Filter) should be 2-D tensor.");
-    PADDLE_ENFORCE(filter_dims[0] == context_length * in_dims[1],
-                   "Filter's height should be context_length * "
-                   "input_hidden_size .");
+    PADDLE_ENFORCE_EQ(
+        ctx->Attrs().Get<int>("contextStride"), 1,
+        platform::errors::InvalidArgument(
+            "Currently, SequenceConvOp only supports contextStride=1. But "
+            "received contextStride = %u.",
+            ctx->Attrs().Get<int>("contextStride")));
+    PADDLE_ENFORCE_EQ(
+        in_dims.size() == 2 && filter_dims.size() == 2, true,
+        platform::errors::InvalidArgument(
+            "Input(X, Filter) should be 2-D tensor. But received Input(X): "
+            "input rank %u, input shape [%s]; received Input(Filter): "
+            "input rank %u, input shape [%s].",
+            in_dims.size(), in_dims, filter_dims.size(), filter_dims));
+    PADDLE_ENFORCE_EQ(
+        filter_dims[0], context_length * in_dims[1],
+        platform::errors::InvalidArgument(
+            "Filter's height should be context_length * "
+            "input_hidden_size. But received: filter's height = %d, "
+            "context_length * input_hidden_size = %d.",
+            filter_dims[0], context_length * in_dims[1]));

     if (ctx->Attrs().Get<bool>("paddingTrainable")) {
       PADDLE_ENFORCE(
@@ -63,12 +73,21 @@ class SequenceConvOp : public framework::OperatorWithKernel {
             "If context_start is 0 and context_length is 1, paddingTrainable "
             "should be false.");
       }
-      PADDLE_ENFORCE(padding_dim.size() == 2,
-                     "Input(PaddingData) should be 2-D tensor.");
-      PADDLE_ENFORCE(
-          padding_dim[0] == total_pad && padding_dim[1] == input_width,
-          "Input(PaddingData)'s shape is not consistent with 'context_start' "
-          "and 'context_length'.");
+      PADDLE_ENFORCE_EQ(
+          padding_dim.size(), 2,
+          platform::errors::InvalidArgument(
+              "Input(PaddingData) should be 2-D tensor. But received: "
+              "input rank %u, input shape [%s].",
+              padding_dim.size(), padding_dim));
+      PADDLE_ENFORCE_EQ(
+          padding_dim[0] == total_pad && padding_dim[1] == input_width, true,
+          platform::errors::InvalidArgument("Input(PaddingData)'s shape is not "
+                                            "consistent with 'context_start' "
+                                            "and 'context_length'. Received "
+                                            "Input(PaddingData): input rank "
+                                            "%u, "
+                                            "input shape [%s].",
+                                            padding_dim.size(), padding_dim));
     }

     in_dims[1] = filter_dims[1];
@@ -83,9 +102,9 @@ class SequenceConvGradOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Gradient of output(Out) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("X"), "The input(X) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "SequenceConvGrad");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceConvGrad");

     if (ctx->Attrs().Get<bool>("paddingTrainable") &&
         ctx->HasOutput(framework::GradVarName("PaddingData"))) {
...
@@ -41,9 +41,14 @@ class SequenceConvKernel : public framework::OpKernel<T> {
     PADDLE_ENFORCE_EQ(
         in->lod().empty(), false,
-        "Input(X) Tensor of SequenceConvOp does not contain LoD information.");
-    PADDLE_ENFORCE_EQ(in->lod().size(), 1UL,
-                      "Only support one level sequence now.");
+        platform::errors::InvalidArgument("Input(X) Tensor of SequenceConvOp "
+                                          "does not contain LoD information."));
+    PADDLE_ENFORCE_EQ(
+        in->lod().size(), 1UL,
+        platform::errors::InvalidArgument(
+            "Only support input sequence with lod level equal to 1 at "
+            "present. But received: lod level %u.",
+            in->lod().size()));

     const Tensor* padding_data = nullptr;
     if (padding_trainable) {
@@ -90,8 +95,12 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
     int context_stride = context.Attr<int>("contextStride");
     bool padding_trainable = context.Attr<bool>("paddingTrainable");

-    PADDLE_ENFORCE_EQ(in->lod().size(), 1UL,
-                      "Only support one level sequence now.");
+    PADDLE_ENFORCE_EQ(
+        in->lod().size(), 1UL,
+        platform::errors::InvalidArgument(
+            "Only support input sequence with lod level equal to 1 at "
+            "present. But received: lod level %u.",
+            in->lod().size()));
     auto lod_g_level_0 = in->lod()[0];

     int up_pad = std::max(0, -context_start);
...
@@ -22,12 +22,8 @@ class SequenceEnumerateOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(
-        ctx->HasInput("X"),
-        "Input(X) of SequecceEnumerate operator should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasOutput("Out"),
-        "Output(X) of SequenceEnumerate operator should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceEnumerate");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceEnumerate");

     const auto x_dims = ctx->GetInputDim("X");
     const auto win_size = ctx->Attrs().Get<int>("win_size");
...
@@ -26,19 +26,20 @@ class SequenceExpandOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceExpandOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Y"),
-                   "Input(Y) of SequenceExpandOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceExpandOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceExpand");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "SequenceExpand");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceExpand");

     auto x_dims = ctx->GetInputDim("X");
     auto out_dims = x_dims;
     int ref_level = ctx->Attrs().Get<int>("ref_level");

-    PADDLE_ENFORCE_GE(x_dims.size(), 2,
-                      "Dimension number of Input(X) should be at least 2.");
+    PADDLE_ENFORCE_GE(
+        x_dims.size(), 2,
+        platform::errors::InvalidArgument(
+            "Dimension number of Input(X) should be at least 2. But "
+            "received: input rank %u, input shape [%s].",
+            x_dims.size(), x_dims));

     if (ctx->IsRuntime()) {
       framework::Variable* x_var =
@@ -50,31 +51,47 @@ class SequenceExpandOp : public framework::OperatorWithKernel {
       auto& y_lod = y_var->Get<LoDTensor>().lod();

       PADDLE_ENFORCE_LE(x_lod.size(), 1UL,
-                        "Level number of Input(X)'s lod should not be "
-                        "greater than 1.");
-      PADDLE_ENFORCE_GT(y_lod.size(), 0UL,
-                        "Level number of Input(Y)'s lod should be "
-                        "greater than 0.");
-      PADDLE_ENFORCE(
+                        platform::errors::InvalidArgument(
+                            "Level of Input(X)'s lod should not be "
+                            "greater than 1. But received: lod level %u.",
+                            x_lod.size()));
+      PADDLE_ENFORCE_GT(
+          y_lod.size(), 0UL,
+          platform::errors::InvalidArgument(
+              "Level of Input(Y)'s lod should be greater than 0. But "
+              "received: lod level %u.",
+              y_lod.size()));
+      PADDLE_ENFORCE_EQ(
           ref_level == -1 ||
               (ref_level >= 0 && ref_level < static_cast<int>(y_lod.size())),
-          "Invlid `ref_level`, which should be either equal to -1 "
-          "or in [0, %d)",
-          y_lod.size());
+          true, platform::errors::InvalidArgument(
+                    "Invlid `ref_level`, which should be either equal to -1 "
+                    "or in [0, %d), but received `ref_level` = %u.",
+                    y_lod.size(), ref_level));

       if (ref_level == -1) ref_level = y_lod.size() - 1;

       if (x_lod.size() > 0) {
-        PADDLE_ENFORCE(x_lod[0].size() == y_lod[ref_level].size(),
-                       "Level number of Input(X)'s lod could be 0. Otherwise "
-                       "size of Input(X)'s first level lod should be equal to "
-                       "size of Input(Y)'s referred level lod.");
+        PADDLE_ENFORCE_EQ(
+            x_lod[0].size(), y_lod[ref_level].size(),
+            platform::errors::InvalidArgument(
+                "Level number of Input(X)'s lod could be 0. Otherwise "
+                "size of Input(X)'s first level lod should be equal to "
+                "size of Input(Y)'s referred level lod. But received: "
+                "Input(X).lod[0].size() = %u, Input(Y).lod[%d].size() = "
+                "%u",
+                x_lod[0].size(), ref_level, y_lod[ref_level].size()));
       } else {
-        PADDLE_ENFORCE_EQ(x_dims[0],
-                          static_cast<int64_t>(y_lod[ref_level].size()) - 1,
-                          "When Input(X)'s lod is null, the dims[0] of "
-                          "Input(X) should match the "
-                          "size of Input(Y)'s referred level lod.");
+        PADDLE_ENFORCE_EQ(
+            x_dims[0], static_cast<int64_t>(y_lod[ref_level].size()) - 1,
+            platform::errors::InvalidArgument(
+                "When Input(X)'s lod is null, the dims[0] of "
+                "Input(X) should match the "
+                "size of Input(Y)'s referred level lod. But received "
+                "Input(X): input rank %u, input shape [%s]; received "
+                "Input(Y).lod[%d].size() - 1 = %d.",
+                x_dims.size(), x_dims, ref_level,
+                static_cast<int64_t>(y_lod[ref_level].size()) - 1));
       }

       int64_t out_first_dim = 0;
@@ -194,9 +211,9 @@ class SequenceExpandOpGrad : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceExpandOpGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "SequenceExpandOpGrad");

     auto x_dims = ctx->GetInputDim("X");
     auto x_grad_name = framework::GradVarName("X");
...
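The sequence_expand fix above encodes a small rule worth spelling out: the `ref_level` attribute must be -1 (shorthand for "use the last LoD level of Y") or a valid index into Y's LoD, and -1 is resolved only after the check passes. A standalone sketch of that resolution logic (function name hypothetical):

    #include <cassert>
    #include <cstdio>

    // Resolve the ref_level attribute against Y's number of LoD levels:
    // -1 selects the last level; otherwise it must lie in [0, y_lod_size).
    int ResolveRefLevel(int ref_level, int y_lod_size) {
      assert(ref_level == -1 || (ref_level >= 0 && ref_level < y_lod_size));
      return ref_level == -1 ? y_lod_size - 1 : ref_level;
    }

    int main() {
      std::printf("%d\n", ResolveRefLevel(-1, 2));  // 1: last level of Y
      std::printf("%d\n", ResolveRefLevel(0, 2));   // 0: explicit level
      return 0;
    }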
@@ -24,25 +24,24 @@ class SequencePoolOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of SequencePoolOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of SequencePoolOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequencePool");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequencePool");

     if (!ctx->IsRuntime()) {
       // Check the lod_level for compile-time.
       auto in_lod_level = ctx->GetLoDLevel("X");
-      PADDLE_ENFORCE_GT(
-          in_lod_level, 0,
-          "The LoD level Input(X) of sequence_pool should be larger than 0.");
+      PADDLE_ENFORCE_GT(in_lod_level, 0, platform::errors::InvalidArgument(
+                                             "The LoD level of Input(X) should "
+                                             "be larger than 0, but received: "
+                                             "lod level %u.",
+                                             in_lod_level));
       ctx->SetLoDLevel("Out", in_lod_level - 1);
     }

     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
     if (ctx->Attrs().Get<std::string>("pooltype") == "MAX") {
-      PADDLE_ENFORCE_EQ(
-          ctx->HasOutput("MaxIndex"), true,
-          "Output(MaxIndex) of SequencePoolOp should not be null.");
+      OP_INOUT_CHECK(ctx->HasOutput("MaxIndex"), "Output", "MaxIndex",
+                     "SequencePool");
       ctx->SetOutputDim("MaxIndex", ctx->GetInputDim("X"));
     }
   }
@@ -113,16 +112,26 @@ class SequencePoolGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
-                      "Gradient of Out should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "The input X should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "SequencePoolGrad");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequencePoolGrad");
     auto og_dims = ctx->GetInputDim(framework::GradVarName("Out"));
     auto x_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE_EQ(og_dims.size(), x_dims.size(),
-                      "The rank of output grad must equal to Input(X).");
+                      platform::errors::InvalidArgument(
+                          "The rank of output grad must equal to Input(X). But "
+                          "received: input rank %u, input shape [%s].",
+                          og_dims.size(), og_dims));
     for (int64_t i = 1; i < og_dims.size(); ++i) {
-      PADDLE_ENFORCE_EQ(og_dims[i], x_dims[i], "The dimension mismatch.");
+      PADDLE_ENFORCE_EQ(
+          og_dims[i], x_dims[i],
+          platform::errors::InvalidArgument(
+              "The dimension mismatch between Input(OUT@GRAD) and "
+              "Input(X). Received Input(OUT@GRAD): input rank %u, "
+              "input shape [%s]; received Input(X): input rank %u, "
+              "input shape [%s].",
+              og_dims.size(), og_dims, x_dims.size(), x_dims));
     }

     ctx->ShareDim("X", /*->*/ framework::GradVarName("X"));
...
@@ -459,6 +459,8 @@ def edit_distance(input,
             # [4]
     """
+    check_variable_and_dtype(input, 'input', ['int64'], 'edit_distance')
+    check_variable_and_dtype(label, 'label', ['int64'], 'edit_distance')
     helper = LayerHelper("edit_distance", **locals())

     # remove some tokens from input and labels
...
@@ -779,6 +779,9 @@ def linear_chain_crf(input, label, param_attr=None, length=None):
             print(transition)
     """
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
+                             'linear_chain_crf')
+    check_variable_and_dtype(label, 'label', ['int64'], 'linear_chain_crf')
     helper = LayerHelper('linear_chain_crf', **locals())
     size = input.shape[2] if length else input.shape[1]
     transition = helper.create_parameter(
@@ -861,6 +864,8 @@ def crf_decoding(input, param_attr, label=None, length=None):
           crf_decode = fluid.layers.crf_decoding(input=emission, length=length,
                     param_attr=fluid.ParamAttr(name="crfw_pad"))
     """
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
+                             'crf_decoding')
     helper = LayerHelper('crf_decoding', **locals())
     transition = helper.get_parameter(param_attr.name)
     viterbi_path = helper.create_variable_for_type_inference(
@@ -10064,7 +10069,11 @@ def expand_as(x, target_tensor, name=None):
         #(3,20)
     """
+    check_variable_and_dtype(
+        x, 'x', ['float32', 'float64', 'int32', 'int64', 'bool'], 'expand_as')
+    check_variable_and_dtype(target_tensor, 'target_tensor',
+                             ['float32', 'float64', 'int32', 'int64', 'bool'],
+                             'expand_as')
     helper = LayerHelper('expand_as', input=x, **locals())
     dtype = helper.input_dtype(input_param_name='x')
     out = helper.create_variable_for_type_inference(dtype)
...
@@ -145,6 +145,8 @@ def sequence_conv(input,
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
+                             'sequence_conv')
     helper = LayerHelper('sequence_conv', **locals())
     dtype = helper.input_dtype()
     filter_shape = [filter_size * input.shape[1], num_filters]
@@ -338,6 +340,7 @@ def sequence_pool(input, pool_type, is_test=False, pad_value=0.0):
     """
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
+    check_variable_and_dtype(input, 'input', ['float32'], 'sequence_pool')
     helper = LayerHelper('sequence_pool', **locals())
     dtype = helper.input_dtype()
     pool_out = helper.create_variable_for_type_inference(dtype)
@@ -672,7 +675,7 @@ def sequence_expand(x, y, ref_level=-1, name=None):
     Args:
         x (Variable): The input variable which is a Tensor or LoDTensor, with the \
             dims ``[M, K]``. The lod level is at most 1. The data type should be \
-            float32, float64, int8, int32 or int64.
+            float32, float64, int32 or int64.
         y (Variable): The input variable which is a LoDTensor, the lod level is \
             at least 1.
         ref_level (int): Lod level of ``y`` to be referred by ``x``. If set to -1, \
@@ -732,6 +735,8 @@ def sequence_expand(x, y, ref_level=-1, name=None):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             'sequence_expand')
     helper = LayerHelper('sequence_expand', input=x, **locals())
     dtype = helper.input_dtype()
     tmp = helper.create_variable_for_type_inference(dtype)
@@ -1220,7 +1225,7 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
     Args:
         input (Variable): The input variable which is a index sequence, \
            which should be a LodTensor with shape ``[d_1, 1]`` and 1-level lod info. \
-            The data type should be float32, float64, int8, int32 or int64.
+            The data type should be int32 or int64.
         win_size (int): The window size for enumerating all sub-sequences.
         pad_value (int, optional): The padding value, default 0.
         name(str, optional): For detailed information, please refer \
@@ -1243,6 +1248,8 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
     """
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
+    check_variable_and_dtype(input, 'input', ['int32', 'int64'],
+                             'sequence_enumerate')
     helper = LayerHelper('sequence_enumerate', **locals())
     out = helper.create_variable_for_type_inference(
         helper.input_dtype(), stop_gradient=True)
...