Unverified commit 24cf3932, authored by 0YuanZhang0 and committed by GitHub

Cherry-pick API/OP error message enhancement (#24383)

* test=develop

* test=develop
Parent 4db0e2df
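The change follows a single pattern throughout: on the C++ side, bare PADDLE_ENFORCE(cond, msg) checks are replaced with comparison macros (PADDLE_ENFORCE_EQ/GT/GE/LT/NE) whose messages are wrapped in platform::errors::InvalidArgument and report the values actually received; on the Python side, the fluid layers gain check_type/check_dtype/check_variable_and_dtype validation so that invalid arguments fail early with a TypeError. A minimal sketch of the user-visible effect of the Python checks, assuming a Paddle 1.x install with the fluid API (the array below is illustrative and mirrors the new unit tests at the end of this diff):

import numpy as np
import paddle.fluid as fluid

# With the added check_variable_and_dtype call, passing a plain numpy array
# (or a Variable with an unsupported dtype) to sequence_reverse is rejected
# up front with a TypeError rather than failing later inside the operator.
x_np = np.random.random((2, 4)).astype("float32")
try:
    fluid.layers.sequence_reverse(x=x_np)
except TypeError as e:
    print("rejected:", e)

Before this change the same call would typically fail later, during layer construction or inside the operator, with a less targeted message.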
@@ -22,18 +22,24 @@ class PrecisionRecallOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("MaxProbs"),
-                   "Input(MaxProbs) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Indices"),
-                   "Input(Indices) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Labels"),
-                   "Input(Labels) should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("BatchMetrics"),
-                   "Output(BatchMetrics) should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("AccumMetrics"),
-                   "Output(AccumMetrics) should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("AccumStatesInfo"),
-                   "Output(AccumStatesInfo) should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("MaxProbs"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(MaxProbs) should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasInput("Indices"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(Indices) should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Labels"), true,
+        platform::errors::InvalidArgument("Input(Labels) should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("BatchMetrics"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(BatchMetrics) should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("AccumMetrics"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(AccumMetrics) should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("AccumStatesInfo"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(AccumStatesInfo) should not be null."));

    int64_t cls_num =
        static_cast<int64_t>(ctx->Attrs().Get<int>("class_number"));
@@ -41,38 +47,48 @@ class PrecisionRecallOp : public framework::OperatorWithKernel {
    auto labels_dims = ctx->GetInputDim("Labels");

    if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(max_probs_dims[1], 1,
-                        "Each instance contains one max probability, so the "
-                        "shape of Input(MaxProbs) should be [batch_size, 1].");
-      PADDLE_ENFORCE_EQ(
-          ctx->GetInputDim("Indices"), max_probs_dims,
-          "The shape of Input(Indices) should be the same as max_probs_dims");
-      PADDLE_ENFORCE_EQ(
-          max_probs_dims[0], labels_dims[0],
-          "The 1st dimension of Input(MaxProbs) and "
-          "Input(Labels) both are batch_size and the shape should "
-          "be the same.");
-      PADDLE_ENFORCE_EQ(labels_dims[1], 1,
-                        "The 2nd dimension of Input(Labels) contains instance "
-                        "label and the shape should be equal to 1.");
+      PADDLE_ENFORCE_EQ(
+          max_probs_dims[1], 1,
+          platform::errors::InvalidArgument(
+              "Each instance contains one max probability, so the shape of "
+              "Input(MaxProbs) should be [batch_size, 1]. But received (%d)",
+              max_probs_dims[1]));
+      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Indices"), max_probs_dims,
+                        platform::errors::InvalidArgument(
+                            "The shape of Input(Indices) should be the same "
+                            "as max_probs_dims, but received (%d) != (%d)",
+                            ctx->GetInputDim("Indices"), max_probs_dims));
+      PADDLE_ENFORCE_EQ(max_probs_dims[0], labels_dims[0],
+                        platform::errors::InvalidArgument(
+                            "The 1st dimension of Input(MaxProbs) and "
+                            "Input(Labels) both are batch_size and the shape "
+                            "should be the same. But received (%d) != (%d)",
+                            max_probs_dims[0], labels_dims[0]));
+      PADDLE_ENFORCE_EQ(
+          labels_dims[1], 1,
+          platform::errors::InvalidArgument(
+              "The 2nd dimension of Input(Labels) contains instance label and "
+              "the shape should be equal to 1. But received (%d)",
+              labels_dims[1]));
    }
    if (ctx->HasInput("Weights")) {
      auto weights_dims = ctx->GetInputDim("Weights");

      if (ctx->IsRuntime()) {
-        PADDLE_ENFORCE_EQ(weights_dims,
-                          framework::make_ddim({max_probs_dims[0], 1}),
-                          "The shape of Input(Weights) should be "
-                          "[batch_size, 1].");
+        PADDLE_ENFORCE_EQ(
+            weights_dims, framework::make_ddim({max_probs_dims[0], 1}),
+            platform::errors::InvalidArgument(
+                "The shape of Input(Weights) should be [batch_size, 1]."));
      }
    }
    if (ctx->HasInput("StatesInfo")) {
      auto states_dims = ctx->GetInputDim("StatesInfo");

      if (ctx->IsRuntime()) {
-        PADDLE_ENFORCE_EQ(states_dims, framework::make_ddim({cls_num, 4}),
-                          "The shape of Input(StatesInfo) should be "
-                          "[class_number, 4].");
+        PADDLE_ENFORCE_EQ(
+            states_dims, framework::make_ddim({cls_num, 4}),
+            platform::errors::InvalidArgument(
+                "The shape of Input(StatesInfo) should be [class_number, 4]."));
      }
    }
...
@@ -58,11 +58,25 @@ class PrecisionRecallKernel : public framework::OpKernel<T> {
      size_t idx = ids_data[i];
      size_t label = labels_data[i];

-      PADDLE_ENFORCE(idx >= 0 && idx < cls_num,
-                     "Class index of each instance should be in "
-                     "[0, class_number).");
-      PADDLE_ENFORCE(label >= 0 && label < cls_num,
-                     "Label of each instance should be in [0, class_number).");
+      PADDLE_ENFORCE_GE(idx, 0, platform::errors::InvalidArgument(
+                                    "Class index of each instance should be "
+                                    "no less than 0, but received (%d)",
+                                    idx));
+      PADDLE_ENFORCE_LT(idx, cls_num,
+                        platform::errors::InvalidArgument(
+                            "Class index of each instance should be less than "
+                            "cls_num (%d), but received (%d)",
+                            cls_num, idx));
+      PADDLE_ENFORCE_GE(label, 0, platform::errors::InvalidArgument(
+                                      "Label of each instance should be "
+                                      "no less than 0, but received (%d)",
+                                      label));
+      PADDLE_ENFORCE_LT(label, cls_num,
+                        platform::errors::InvalidArgument(
+                            "Label of each instance should be less than "
+                            "cls_num (%d), but received (%d)",
+                            cls_num, label));
      T w = weights_data ? weights_data[i] : 1.0;

      if (idx == label) {
...
@@ -41,13 +41,20 @@ class SequenceConcatOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE(context->HasInputs("X"),
-                   "Input(X) of Sequence Concat Op should not be null.");
-    PADDLE_ENFORCE(context->HasOutput("Out"),
-                   "Output(Out) of Sequence Concat Op should not be null.");
-    PADDLE_ENFORCE_GT(context->Inputs("X").size(), 1,
-                      "The number of input sequences is at least two.");
+    PADDLE_ENFORCE_EQ(
+        context->HasInputs("X"), true,
+        platform::errors::InvalidArgument(
+            "Input(X) of Sequence Concat Op should not be null."));
+    PADDLE_ENFORCE_EQ(
+        context->HasOutput("Out"), true,
+        platform::errors::InvalidArgument(
+            "Output(Out) of Sequence Concat Op should not be null."));
+    PADDLE_ENFORCE_GT(context->Inputs("X").size(), 1,
+                      platform::errors::InvalidArgument(
+                          "The number of input sequences is at least two. But "
+                          "the number of input sequences is (%d)",
+                          context->Inputs("X").size()));
    auto x_dims = context->GetInputsDim("X");
    int64_t batch_size = 0;
    int64_t feature_size = 0;
@@ -62,7 +69,10 @@ class SequenceConcatOp : public framework::OperatorWithKernel {
      } else {
-        PADDLE_ENFORCE_EQ(
-            feature_size, framework::product(x_dim) / x_dim[0],
-            "Inputs of sequence concat must have same feature size");
+        PADDLE_ENFORCE_EQ(
+            feature_size, framework::product(x_dim) / x_dim[0],
+            platform::errors::InvalidArgument(
+                "Inputs of sequence concat must have same feature size, but "
+                "received (%d) != (%d)",
+                feature_size, framework::product(x_dim) / x_dim[0]));
      }
    }
    if (batch_size < 0) {
...
@@ -73,16 +73,23 @@ class SeqConcatKernel : public framework::OpKernel<T> {
    for (auto &x : xs) {
      if (lod_size == 0) {
-        PADDLE_ENFORCE_EQ(x.get().lod().empty(), false,
-                          "Input(X) Tensor of SequenceConcatOp does not "
-                          "contain LoD information.");
+        PADDLE_ENFORCE_EQ(x.get().lod().empty(), false,
+                          platform::errors::InvalidArgument(
+                              "Input(X) Tensor of SequenceConcatOp does not "
+                              "contain LoD information."));
        lod_size = x.get().lod()[0].size();
      } else {
-        PADDLE_ENFORCE_EQ(
-            lod_size, x.get().lod()[0].size(),
-            "The number of sequence must be same between each input");
+        PADDLE_ENFORCE_EQ(lod_size, x.get().lod()[0].size(),
+                          platform::errors::InvalidArgument(
+                              "The number of sequence must be same between "
+                              "each input. But received (%d) != (%d)",
+                              lod_size, x.get().lod()[0].size()));
      }
    }
-    PADDLE_ENFORCE_NE(lod_size, 0, "Each input must have sequence information");
+    PADDLE_ENFORCE_NE(lod_size, 0,
+                      platform::errors::InvalidArgument(
+                          "Each input must have sequence information. But "
+                          "received input lod size is (%d)",
+                          lod_size));

    std::vector<framework::Tensor> x_in_order;
    out.set_lod(detail::ConcatLoD(xs, &x_in_order));
@@ -100,7 +107,11 @@ class SeqConcatGradKernel : public framework::OpKernel<T> {
    auto xs = context.MultiInput<framework::LoDTensor>("X");
    auto dxs =
        context.MultiOutput<framework::LoDTensor>(framework::GradVarName("X"));
-    PADDLE_ENFORCE_EQ(xs.size(), dxs.size());
+    PADDLE_ENFORCE_EQ(xs.size(), dxs.size(),
+                      platform::errors::InvalidArgument(
+                          "The number of Input X and Output Grad X must be "
+                          "the same, but received (%d) != (%d)",
+                          xs.size(), dxs.size()));
    for (size_t i = 0; i < dxs.size(); ++i) {
      if (dxs[i] != nullptr) {
        dxs[i]->set_lod(xs[i]->lod());
...
@@ -26,24 +26,34 @@ class SequencePadOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of SequencePadOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("PadValue"), true,
-                      "Input(PadValue) of SequencePadOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of SequencePadOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Length"), true,
-                      "Output(Length) of SequencePadOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(X) of SequencePadOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("PadValue"), true,
+        platform::errors::InvalidArgument(
+            "Input(PadValue) of SequencePadOp should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(Out) of SequencePadOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Length"), true,
+        platform::errors::InvalidArgument(
+            "Output(Length) of SequencePadOp should not be null."));

    auto x_dims = ctx->GetInputDim("X");
-    PADDLE_ENFORCE_GE(x_dims.size(), 2,
-                      "The rank of Input(X) can't be less than 2.");
+    PADDLE_ENFORCE_GE(x_dims.size(), 2,
+                      platform::errors::InvalidArgument(
+                          "The rank of SequencePadOp Input(X) can't be less "
+                          "than 2. But received (%d)",
+                          x_dims.size()));
    auto time_step_dims = framework::slice_ddim(x_dims, 1, x_dims.size());
    auto pad_value_dims = ctx->GetInputDim("PadValue");
-    PADDLE_ENFORCE_EQ(pad_value_dims == framework::make_ddim({1}) ||
-                          pad_value_dims == time_step_dims,
-                      true,
-                      "The Input(PadValue) must be a scalar or a tensor whose "
-                      "shape equals to time steps in sequences");
+    PADDLE_ENFORCE_EQ(pad_value_dims == framework::make_ddim({1}) ||
+                          pad_value_dims == time_step_dims,
+                      true,
+                      platform::errors::InvalidArgument(
+                          "The Input(PadValue) must be a scalar or a tensor "
+                          "whose shape equals to time steps in sequences"));

    int out_dim_0 = -1;
@@ -54,31 +64,43 @@ class SequencePadOp : public framework::OperatorWithKernel {
          boost::get<framework::Variable*>(ctx->GetInputVarPtrs("X")[0]);
      const auto& x_lod = x_var->Get<LoDTensor>().lod();
-      PADDLE_ENFORCE_EQ(x_lod.empty(), false,
-                        "The Input(X) must hold lod info.");
+      PADDLE_ENFORCE_EQ(x_lod.empty(), false,
+                        platform::errors::InvalidArgument(
+                            "The Input(X) must hold lod info."));
      const auto& x_lod_0 = x_lod[0];
-      PADDLE_ENFORCE_GE(x_lod_0.size(), 2,
-                        "The Input(X)'s lod info is corrupted.");
-      PADDLE_ENFORCE_EQ(
-          x_dims[0], static_cast<int64_t>(x_lod_0.back()),
-          "The Input(X)'s lod info mismatches the actual tensor shape.");
+      PADDLE_ENFORCE_GE(x_lod_0.size(), 2,
+                        platform::errors::InvalidArgument(
+                            "The Input(X)'s lod info is corrupted."));
+      PADDLE_ENFORCE_EQ(
+          x_dims[0], static_cast<int64_t>(x_lod_0.back()),
+          platform::errors::InvalidArgument(
+              "The Input(X)'s lod info mismatches the actual tensor shape. The "
+              "Input(X)'s lod info is (%d), the actual tensor shape is (%d)",
+              x_dims[0], static_cast<int64_t>(x_lod_0.back())));

      int seq_num = x_lod_0.size() - 1;
      int max_seq_len = math::MaximumSequenceLength(x_lod_0);
      if (padded_length == -1) {
        padded_length = max_seq_len;
      }
-      PADDLE_ENFORCE_GE(padded_length, max_seq_len,
-                        "The Attr(padded_length) must be -1 or an int greater "
-                        "than the length of the longest original sequence.");
+      PADDLE_ENFORCE_GE(
+          padded_length, max_seq_len,
+          platform::errors::InvalidArgument(
+              "The Attr(padded_length) must be -1 or an int greater than the "
+              "length of the longest original sequence. But the padded_length "
+              "received is (%d), the length of the longest original sequence "
+              "is (%d)",
+              padded_length, max_seq_len));
      out_dim_0 = seq_num;
    } else {
      // compile time
      if (padded_length == -1) {
        padded_length = 1;
      }
-      PADDLE_ENFORCE_GT(
-          ctx->GetLoDLevel("X"), 0,
-          "The LoD level Input(X) of sequence_pad should be larger than 0.");
+      PADDLE_ENFORCE_GT(ctx->GetLoDLevel("X"), 0,
+                        platform::errors::InvalidArgument(
+                            "The LoD level Input(X) of sequence_pad should be "
+                            "larger than 0. But received (%d)",
+                            ctx->GetLoDLevel("X")));
    }

    std::vector<int> out_dims_vec{out_dim_0, padded_length};
@@ -185,10 +207,12 @@ class SequencePadGradOp : public framework::OperatorWithKernel {
  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of SequencePadGradOp should not be null.");
-    PADDLE_ENFORCE_EQ(
-        ctx->HasInput(framework::GradVarName("Out")), true,
-        "Input(Out@GRAD) of SequencePadGradOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(X) of SequencePadGradOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput(framework::GradVarName("Out")), true,
+        platform::errors::InvalidArgument(
+            "Input(Out@GRAD) of SequencePadGradOp should not be null."));

    if (ctx->HasOutput(framework::GradVarName("X"))) {
      ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
...
@@ -37,7 +37,8 @@ class SequencePadOpKernel : public framework::OpKernel<T> {
-    PADDLE_ENFORCE_EQ(
-        x->lod().empty(), false,
-        "Input(X) Tensor of SequencePadOp does not contain LoD information.");
+    PADDLE_ENFORCE_EQ(
+        x->lod().empty(), false,
+        platform::errors::InvalidArgument("Input(X) Tensor of SequencePadOp "
+                                          "does not contain LoD information."));

    const auto* pad_value = ctx.Input<LoDTensor>("PadValue");
...
@@ -23,13 +23,19 @@ class SequenceReshapeOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceReshapeOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceReshapeOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(X) of SequenceReshapeOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Out"), true,
+        platform::errors::InvalidArgument(
+            "Output(Out) of SequenceReshapeOp should not be null."));
    auto x_dims = ctx->GetInputDim("X");
    auto x_numel = product(x_dims);
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2U, "Rank of Input(X) should be 2.");
+    PADDLE_ENFORCE_EQ(
+        x_dims.size(), 2U,
+        platform::errors::InvalidArgument(
+            "Rank of Input(X) should be 2. But received (%d)", x_dims.size()));
    int new_dim = ctx->Attrs().Get<int>("new_dim");
    if (ctx->IsRuntime()) {
      ctx->SetOutputDim("Out",
...
@@ -90,11 +96,14 @@ class SequenceReshapeGradOp : public framework::OperatorWithKernel {
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(
-        ctx->HasInput(framework::GradVarName("Out")),
-        "Input(Out@GRAD) of SequenceReshapeGradOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceReshapeGradOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput(framework::GradVarName("Out")), true,
+        platform::errors::InvalidArgument(
+            "Input(Out@GRAD) of SequenceReshapeGradOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("X"), true,
+        platform::errors::InvalidArgument(
+            "Input(X) of SequenceReshapeGradOp should not be null."));

    ctx->ShareDim("X", /*->*/ framework::GradVarName("X"));
    ctx->ShareLoD("X", /*->*/ framework::GradVarName("X"));
...
@@ -33,13 +33,19 @@ class SequenceReshapeKernel : public framework::OpKernel<T> {
    auto& in_lod = in->lod();

-    PADDLE_ENFORCE_EQ(in_lod.empty(), false,
-                      "Input(X) Tensor of SequenceReshapeOp does not contain "
-                      "LoD information.");
-    PADDLE_ENFORCE_EQ(in_lod.size(), 1UL,
-                      "Only support one level sequence now.");
-    PADDLE_ENFORCE_EQ(
-        (uint64_t)in_dims[0], in_lod[0].back(),
-        "Inconsistent size between X.shape[0] and X.lod()[0].back().");
+    PADDLE_ENFORCE_EQ(in_lod.empty(), false,
+                      platform::errors::InvalidArgument(
+                          "Input(X) Tensor of SequenceReshapeOp does not "
+                          "contain LoD information."));
+    PADDLE_ENFORCE_EQ(in_lod.size(), 1UL,
+                      platform::errors::InvalidArgument(
+                          "Only support one level sequence now. But lod size "
+                          "of Input(X) is (%d)",
+                          in_lod.size()));
+    PADDLE_ENFORCE_EQ((uint64_t)in_dims[0], in_lod[0].back(),
+                      platform::errors::InvalidArgument(
+                          "The size of X.shape[0] and X.lod()[0].back() should "
+                          "be the same. But received (%d) != (%d)",
+                          (uint64_t)in_dims[0], in_lod[0].back()));

    auto in_lod_l0 = in_lod[0];
    int seq_num = in_lod_l0.size() - 1;
@@ -56,10 +62,11 @@ class SequenceReshapeKernel : public framework::OpKernel<T> {
        size_t offset = 0;
        offset = (seq_len * in_width) / out_width;
-        PADDLE_ENFORCE_EQ(offset * out_width, seq_len * in_width,
-                          "Please make sure (sequence_length * dimension) can "
-                          "be divided by new_dim with no remainder for each "
-                          "sequence. The %dth sequence is invalid.",
-                          i + 1);
+        PADDLE_ENFORCE_EQ(offset * out_width, seq_len * in_width,
+                          platform::errors::InvalidArgument(
+                              "Please make sure (sequence_length * dimension) "
+                              "can be divided by new_dim with no remainder for "
+                              "each sequence. The %dth sequence is invalid.",
+                              i + 1));
        out_lod[0][i + 1] = out_lod[0][i] + offset;
      }
    }
...
@@ -27,12 +27,19 @@ class SequenceReverseOp : public framework::OperatorWithKernel {
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must exist");
-    PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) must exist");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(X) of SequenceReverse must exist"));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Y"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(Y) of SequenceReverse must exist"));

    auto x_dim = ctx->GetInputDim("X");
-    PADDLE_ENFORCE_GE(x_dim.size(), 2,
-                      "Rank of Input(X) must be not less than 2.");
+    PADDLE_ENFORCE_GE(x_dim.size(), 2,
+                      platform::errors::InvalidArgument(
+                          "Rank of Input(X) of SequenceReverse must not be "
+                          "less than 2. The Input(X) tensor's rank is (%d)",
+                          x_dim.size()));

    ctx->SetOutputDim("Y", x_dim);
    ctx->ShareLoD("X", "Y");
@@ -108,10 +115,15 @@ class SequenceReverseOpKernel : public framework::OpKernel<T> {
    auto *y = ctx.Output<LoDTensor>("Y");

-    PADDLE_ENFORCE_EQ(x.lod().empty(), false,
-                      "Input(X) Tensor of SequenceReverseOp does not contain "
-                      "LoD information.");
-    PADDLE_ENFORCE_EQ(x.lod().size(), 1,
-                      "SequenceReverse Op only support one level lod.");
+    PADDLE_ENFORCE_EQ(x.lod().empty(), false,
+                      platform::errors::InvalidArgument(
+                          "Input(X) Tensor of SequenceReverseOp does not "
+                          "contain LoD information."));
+    PADDLE_ENFORCE_EQ(
+        x.lod().size(), 1,
+        platform::errors::InvalidArgument("SequenceReverse Op only support one "
+                                          "level lod. Input(X) lod is (%d)",
+                                          x.lod().size()));

    const size_t *lod;
    size_t lod_count = x.lod()[0].size();
@@ -131,8 +143,10 @@ class SequenceReverseOpKernel : public framework::OpKernel<T> {
    auto *x_data = x.data<T>();
    auto *y_data = y->mutable_data<T>(ctx.GetPlace());

-    PADDLE_ENFORCE_NE(x_data, y_data,
-                      "SequenceReverse Op does not support in-place operation");
+    PADDLE_ENFORCE_NE(
+        x_data, y_data,
+        platform::errors::InvalidArgument(
+            "SequenceReverse Op does not support in-place operation"));

    if (platform::is_cpu_place(ctx.GetPlace())) {
      for (size_t idx = 0; idx < lod_count - 1; idx++) {
...
@@ -26,22 +26,36 @@ class SequenceUnpadOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of SequenceUnpadOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Length"), true,
-                      "Input(Length) of SequenceUnpadOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of SequenceUnpadOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(X) of SequenceUnpadOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Length"), true,
+        platform::errors::InvalidArgument(
+            "Input(Length) of SequenceUnpadOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Out"), true,
+        platform::errors::InvalidArgument(
+            "Output(Out) of SequenceUnpadOp should not be null."));

    auto x_dims = ctx->GetInputDim("X");
-    PADDLE_ENFORCE_GE(x_dims.size(), 2,
-                      "The rank of Input(X) can't be less than 2.");
+    PADDLE_ENFORCE_GE(
+        x_dims.size(), 2,
+        platform::errors::InvalidArgument(
+            "The rank of Input(X) can't be less than 2. But received (%d)",
+            x_dims.size()));

    auto len_dims = ctx->GetInputDim("Length");
-    PADDLE_ENFORCE_EQ(len_dims.size(), 1,
-                      "The shape of Input(Length) should be [batch_size].");
-    PADDLE_ENFORCE_EQ(
-        len_dims[0], x_dims[0],
-        "Input(X) and Input(Length) should have the same first dimension.");
+    PADDLE_ENFORCE_EQ(
+        len_dims.size(), 1,
+        platform::errors::InvalidArgument("The shape of Input(Length) should "
+                                          "be [batch_size]. But received (%d)",
+                                          len_dims.size()));
+    PADDLE_ENFORCE_EQ(len_dims[0], x_dims[0],
+                      platform::errors::InvalidArgument(
+                          "Input(X) and Input(Length) should have the same "
+                          "first dimension. But the first dimension of "
+                          "Input(X) and Input(Length) is (%d) != (%d)",
+                          len_dims[0], x_dims[0]));

    int64_t out_dim_0 = -1;
    if (ctx->IsRuntime()) {
@@ -115,11 +129,14 @@ class SequenceUnpadGradOp : public framework::OperatorWithKernel {
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of SequenceUnpadGradOp should not be null.");
-    PADDLE_ENFORCE_EQ(
-        ctx->HasInput(framework::GradVarName("Out")), true,
-        "Input(Out@GRAD) of SequenceUnpadGradOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("X"), true,
+        platform::errors::InvalidArgument(
+            "Input(X) of SequenceUnpadGradOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput(framework::GradVarName("Out")), true,
+        platform::errors::InvalidArgument(
+            "Input(Out@GRAD) of SequenceUnpadGradOp should not be null."));

    if (ctx->HasOutput(framework::GradVarName("X"))) {
      ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
...
@@ -182,17 +182,21 @@ class TruncatedGaussianRandomOp : public framework::OperatorWithKernel {
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(
-        ctx->HasOutput("Out"),
-        "Output(Out) of TruncatedGaussianRandomOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Out"), true,
+        platform::errors::InvalidArgument(
+            "Output(Out) of TruncatedGaussianRandomOp should not be null."));

    auto shape = ctx->Attrs().Get<std::vector<int>>("shape");
    std::vector<int64_t> out_dim;
    out_dim.reserve(shape.size());
    for (auto dim : shape) {
      out_dim.push_back(static_cast<int64_t>(dim));
    }
-    PADDLE_ENFORCE(shape.size() > 0UL,
-                   "shape can be one int or array. shape must be set.");
+    PADDLE_ENFORCE_GT(shape.size(), 0UL,
+                      platform::errors::InvalidArgument(
+                          "shape can be one int or array. shape must be set, "
+                          "but received shape size is (%d)",
+                          shape.size()));

    ctx->SetOutputDim("Out", framework::make_ddim(out_dim));
  }
...
@@ -11450,6 +11450,9 @@ def gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32'):
    """
    helper = LayerHelper('gaussian_random', **locals())
+    check_type(shape, 'shape', (list, tuple), 'fluid.layers.gaussian_random')
+    check_dtype(dtype, 'dtype', ['float32', 'float64'],
+                'fluid.layers.gaussian_random')
    out = helper.create_variable_for_type_inference(dtype)
    c_dtype = convert_np_dtype_to_dtype_(dtype)
    helper.append_op(
@@ -11543,6 +11546,12 @@ def gaussian_random_batch_size_like(input,
    """
    helper = LayerHelper('gaussian_random_batch_size_like', **locals())
+    check_type(input, 'input', (Variable),
+               'fluid.layers.gaussian_random_batch_size_like')
+    check_type(shape, 'shape', (list, tuple),
+               'fluid.layers.gaussian_random_batch_size_like')
+    check_dtype(dtype, 'dtype', ['float16', 'float32', 'int'],
+                'fluid.layers.gaussian_random_batch_size_like')
    out = helper.create_variable_for_type_inference(dtype)
    c_dtype = convert_np_dtype_to_dtype_(dtype)
    helper.append_op(
...
@@ -17,6 +17,7 @@ from __future__ import print_function
from .layer_function_generator import templatedoc
from ..framework import Variable, in_dygraph_mode
from ..layer_helper import LayerHelper
+from ..data_feeder import check_variable_and_dtype, check_type, check_dtype

__all__ = [
    'sequence_conv',
@@ -405,6 +406,16 @@ def sequence_concat(input, name=None):
    assert not in_dygraph_mode(), (
        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper('sequence_concat', **locals())
+
+    check_type(input, 'input', list, 'fluid.layers.sequence_concat')
+    if isinstance(input, list):
+        for i, input_x in enumerate(input):
+            check_type(input_x, 'input[' + str(i) + ']', Variable,
+                       'fluid.layers.sequence_concat')
+            check_dtype(input_x.dtype, 'input[' + str(i) + ']',
+                        ['int64', 'float32', 'float64'],
+                        'fluid.layers.sequence_concat')
+
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(
        type='sequence_concat', inputs={'X': input}, outputs={'Out': [out]})
@@ -926,6 +937,11 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
    assert not in_dygraph_mode(), (
        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper('sequence_pad', input=x, **locals())
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             'fluid.layers.sequence_pad')
+    check_variable_and_dtype(pad_value, 'pad_value',
+                             ['float32', 'float64', 'int32', 'int64'],
+                             'fluid.layers.sequence_pad')
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
    length = helper.create_variable_for_type_inference(dtype)
@@ -1001,6 +1017,10 @@ def sequence_unpad(x, length, name=None):
    assert not in_dygraph_mode(), (
        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper('sequence_unpad', input=x, **locals())
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             'fluid.layers.sequence_unpad')
+    check_variable_and_dtype(length, 'length', ['int64'],
+                             'fluid.layers.sequence_unpad')
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
@@ -1062,6 +1082,9 @@ def sequence_reshape(input, new_dim):
    assert not in_dygraph_mode(), (
        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper('sequence_reshape', **locals())
+    check_variable_and_dtype(input, 'input',
+                             ['float32', 'float64', 'int32', 'int64'],
+                             'fluid.layers.sequence_reshape')
    out = helper.create_variable_for_type_inference(helper.input_dtype())
    helper.append_op(
        type='sequence_reshape',
@@ -1334,6 +1357,9 @@ def sequence_reverse(x, name=None):
    assert not in_dygraph_mode(), (
        "sequence layer is not supported in dygraph mode yet.")
    helper = LayerHelper("sequence_reverse", **locals())
+    check_variable_and_dtype(x, 'x',
+                             ['float32', 'float64', 'int8', 'int32', 'int64'],
+                             'fluid.layers.sequence_reverse')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
...
@@ -20,6 +20,8 @@ import sys
sys.path.append("../")
from op_test import OpTest
+from paddle import fluid
+

class TestSequenceConcat(OpTest):
    def setLoD(self):
@@ -76,5 +78,41 @@ class TestSequenceConcatCase5(TestSequenceConcat):
        self.out_lod = [20, 10]


+class TestSequenceConcatOpError(unittest.TestCase):
+    def test_errors(self):
+        def test_input_list():
+            # the input type must be list
+            x_data = fluid.layers.data(name='x', shape=[4], dtype='float32')
+            fluid.layers.sequence_concat(input=x_data)
+
+        self.assertRaises(TypeError, test_input_list)
+
+        def test_variable1():
+            # the input element type must be Variable
+            x1_data = np.array([[3, 5]]).astype('float32')
+            y1_data = fluid.layers.data(name='y1', shape=[4], dtype='float32')
+            fluid.layers.sequence_concat(input=[x1_data, y1_data])
+
+        def test_variable2():
+            x2_data = np.array([[3, 5]]).astype('float32')
+            y2_data = fluid.layers.data(name='y2', shape=[4], dtype='float32')
+            fluid.layers.sequence_concat(input=[y2_data, x2_data])
+
+        for i in range(2):
+            if i == 0:
+                self.assertRaises(TypeError, test_variable1)
+            else:
+                self.assertRaises(TypeError, test_variable2)
+
+        def test_dtype():
+            # dtype must be 'float32', 'float64', 'int64'
+            x3_data = fluid.layers.data(name="x3", shape=[3, 5], dtype='int32')
+            y3_data = fluid.layers.data(name="y3", shape=[3, 5], dtype='int16')
+            input_list = [x3_data, y3_data]
+            fluid.layers.sequence_concat(input=input_list)
+
+        self.assertRaises(TypeError, test_dtype)
+
+
if __name__ == '__main__':
    unittest.main()
@@ -18,6 +18,8 @@ import sys
sys.path.append("../")
from op_test import OpTest
+import paddle.fluid as fluid
+

class TestSequencePadOp(OpTest):
    def set_attr(self):
@@ -143,5 +145,34 @@ class TestSequencePadOp8(TestSequencePadOp):
        self.dtype = 'float64'


+class TestSequencePadOpError(unittest.TestCase):
+    def test_error(self):
+        def test_x_variable():
+            # the input x type must be Variable
+            x = np.random.random((2, 4)).astype("float32")
+            pad_value = fluid.layers.assign(input=np.array(
+                [0.0], dtype=np.float32))
+            fluid.layers.sequence_pad(x=x, pad_value=pad_value)
+
+        self.assertRaises(TypeError, test_x_variable)
+
+        def test_pad_value_variable():
+            x1 = fluid.layers.data(
+                name='x1', shape=[10, 5], dtype='float32', lod_level=1)
+            pad_value1 = np.array([0.0], dtype=np.float32)
+            fluid.layers.sequence_pad(x=x1, pad_value=pad_value1)
+
+        self.assertRaises(TypeError, test_pad_value_variable)
+
+        def test_dtype():
+            x2 = fluid.layers.data(
+                name='x2', shape=[10, 5], dtype='int16', lod_level=1)
+            pad_value2 = fluid.layers.assign(input=np.array(
+                [0.0], dtype=np.int32))
+            fluid.layers.sequence_pad(x=x2, pad_value=pad_value2)
+
+        self.assertRaises(TypeError, test_dtype)
+
+
if __name__ == '__main__':
    unittest.main()
@@ -21,6 +21,8 @@ import sys
sys.path.append("../")
from op_test import OpTest
+import paddle.fluid as fluid
+

class TestSequenceReshape(OpTest):
    def init_data(self):
@@ -83,5 +85,25 @@ class TestSequenceReshape_reduce_seq_len0_case1(TestSequenceReshape):
        self.x = np.random.uniform(0.1, 1, [12, 12]).astype('float64')


+class TestSequenceReshapeOpError(unittest.TestCase):
+    def test_error(self):
+        def test_variable():
+            # the input type must be Variable
+            x = np.random.random((2, 4)).astype("float32")
+            fluid.layers.sequence_reshape(input=x, new_dim=4)
+
+        self.assertRaises(TypeError, test_variable)
+
+        def test_dtype():
+            # dtype must be 'float32', 'float64', 'int32', 'int64'
+            x1 = fluid.layers.data(
+                name='x1',
+                shape=[2, 6],
+                append_batch_size=False,
+                dtype='float16',
+                lod_level=1)
+            fluid.layers.sequence_reshape(input=x1, new_dim=4)
+
+        self.assertRaises(TypeError, test_dtype)
+
+
if __name__ == '__main__':
    unittest.main()
@@ -79,5 +79,22 @@ class TestSequenceReverse3(TestSequenceReverseBase):
        self.lod = [0, 2, 10, 0]


+class TestSequenceReverseOpError(unittest.TestCase):
+    def test_error(self):
+        def test_variable():
+            # the input type must be Variable
+            x_data = np.random.random((2, 4)).astype("float32")
+            fluid.layers.sequence_reverse(x=x_data)
+
+        self.assertRaises(TypeError, test_variable)
+
+        def test_dtype():
+            # dtype must be 'float32', 'float64', 'int8', 'int32', 'int64'
+            x2_data = fluid.layers.data(name='x2', shape=[4], dtype='float16')
+            fluid.layers.sequence_reverse(x=x2_data)
+
+        self.assertRaises(TypeError, test_dtype)
+
+
if __name__ == '__main__':
    unittest.main()
@@ -19,6 +19,8 @@ import sys
sys.path.append("../")
from op_test import OpTest
+import paddle.fluid as fluid
+

class TestSequenceUnpadOp(OpTest):
    def init(self):
@@ -84,5 +86,36 @@ class TestSequenceUnpadOp4(TestSequenceUnpadOp):
        self.dtype = "float64"


+class TestSequenceUnpadOpError(unittest.TestCase):
+    def test_error(self):
+        def test_x_variable():
+            # the input x type must be Variable
+            x = np.random.random((10, 5)).astype("float64")
+            length = fluid.data(name='length1', shape=[10], dtype='int64')
+            fluid.layers.sequence_unpad(x=x, length=length)
+
+        self.assertRaises(TypeError, test_x_variable)
+
+        def test_length_variable():
+            x1 = fluid.data(name='x1', shape=[10, 5], dtype='float32')
+            len1 = np.random.random((10)).astype("int64")
+            fluid.layers.sequence_unpad(x=x1, length=len1)
+
+        self.assertRaises(TypeError, test_length_variable)
+
+        def test_x_dtype():
+            x2 = fluid.data(name='x2', shape=[10, 5], dtype='float16')
+            len2 = fluid.data(name='length2', shape=[10], dtype='int64')
+            fluid.layers.sequence_unpad(x=x2, length=len2)
+
+        self.assertRaises(TypeError, test_x_dtype)
+
+        def test_length_dtype():
+            x3 = fluid.data(name='x3', shape=[10, 5], dtype='float64')
+            len3 = fluid.data(name='length3', shape=[10], dtype='int32')
+            fluid.layers.sequence_unpad(x=x3, length=len3)
+
+        self.assertRaises(TypeError, test_length_dtype)
+
+
if __name__ == '__main__':
    unittest.main()