Unverified commit ebb36974, authored by Chen Weihang, committed by GitHub

[Cherry-pick] Some SL API/Op error msg polish (#24495)

* API/OP (Some SL API) error message enhancement  (#24441)

* polish some sl api error message, test=develop

* polish python input check of stride slice, test=develop

* fix unittest bugs, test=develop

* fix error info for transpose sequence_conv_pool max_sequence_len sequ… (#24437)

* fix error info for transpose sequence_conv_pool max_sequence_len sequence_erase, test=develop

* fix error info for transpose sequence_conv_pool max_sequence_len sequence_erase, test=develop

* update modify, test=develop

* update modify, test=develop

* fixed some modifications, test=develop
Co-authored-by: Xing Wu <wuxing03@baidu.com>
Parent 7d0e9034
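For reference, the pattern applied throughout this patch is: existence checks on operator inputs/outputs move from bare PADDLE_ENFORCE calls with ad-hoc message strings to the OP_INOUT_CHECK macro, and value checks move to explicit comparison macros (PADDLE_ENFORCE_EQ/LT/GT/NE) carrying a typed platform::errors message that reports the values actually received. A minimal before/after sketch of this style is shown below; the operator name "SomeOp" and the 2-D shape check are illustrative only, not taken from any file in this diff.

    // Old style: truthiness check plus a bare message string.
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of SomeOp should not be null.");

    // New style: dedicated macro for input/output existence checks.
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SomeOp");

    // New style for value checks: explicit comparison macro plus a typed,
    // formatted error that echoes the value actually received.
    PADDLE_ENFORCE_EQ(x_dims.size(), 2,
                      platform::errors::InvalidArgument(
                          "Input(X) of SomeOp should be 2-D, but received "
                          "a %d-D tensor.",
                          x_dims.size()));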
@@ -57,7 +57,8 @@ class MaxSeqenceLenOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 class MaxSeqenceLenInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE(context->HasInput("RankTable"));
+    OP_INOUT_CHECK(context->HasInput("RankTable"), "Input", "RankTable",
+                   "MaxSeqenceLen");
     context->SetOutputDim("Out", {1});
   }
 };
......
@@ -23,14 +23,15 @@ class SequenceEraseOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceErase operator should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceErase operator should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceErase");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceErase");
     auto x_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE(x_dims.size() == 2 && x_dims[1] == 1,
-                   "Input(X) of SequenceEraseOp should be a 2-D LoDTensor "
-                   "with the 2nd dimension equal to 1.");
+                   platform::errors::InvalidArgument(
+                       "Input(X) of SequenceEraseOp should be a 2-D LoDTensor "
+                       "with the 2nd dimension equal to 1,"
+                       "but received size %d with the 2nd dimension %d.",
+                       x_dims.size(), x_dims[1]));
     ctx->SetOutputDim("Out", x_dims);
     // The output LoDTensor's lod_level should be input X's lod_level.
     // For compile-time, we call SetLoDLevel to set output's lod_level.
......
@@ -64,8 +64,10 @@ class SequenceEraseOpCUDAKernel : public framework::OpKernel<T> {
     auto* out = ctx.Output<LoDTensor>("Out");
     auto lod = in->lod();
-    PADDLE_ENFORCE_EQ(lod[lod.size() - 1].back(), (size_t)in->numel(),
-                      "The actual size mismatches with the LoD information.");
+    PADDLE_ENFORCE_EQ(
+        lod[lod.size() - 1].back(), (size_t)in->numel(),
+        platform::errors::InvalidArgument(
+            "The actual size mismatches with the LoD information."));
     auto tokens = ctx.Attr<std::vector<int>>("tokens");
     auto in_len = in->numel();
     auto in_dat = in->data<T>();
......
@@ -30,9 +30,13 @@ class SequenceEraseKernel : public framework::OpKernel<T> {
     auto lod = in->lod();
     PADDLE_ENFORCE_EQ(
         lod.empty(), false,
-        "Input(X) Tensor of SequenceEraseOp does not contain LoD information.");
+        platform::errors::InvalidArgument("Input(X) Tensor of SequenceEraseOp "
+                                          "does not contain LoD information."));
     PADDLE_ENFORCE_EQ(lod[lod.size() - 1].back(), (size_t)in->numel(),
-                      "The actual size mismatches with the LoD information.");
+                      platform::errors::InvalidArgument(
+                          "The actual input size %d mismatches with the LoD "
+                          "information size %d.",
+                          lod[lod.size() - 1].back(), (size_t)in->numel()));
     auto tokens = ctx.Attr<std::vector<int>>("tokens");
     auto in_len = in->numel();
     auto in_dat = in->data<T>();
......
@@ -74,23 +74,25 @@ class SequenceScatterOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext* ctx) const override {
     // Enforce has inputs and outputs
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceScatterOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Ids"),
-                   "Input(Ids) of SequenceScatterOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Updates"),
-                   "Input(Updates) of SequenceScatterOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceScatterOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceScatter");
+    OP_INOUT_CHECK(ctx->HasInput("Ids"), "Input", "Ids", "SequenceScatter");
+    OP_INOUT_CHECK(ctx->HasInput("Updates"), "Input", "Updates",
+                   "SequenceScatter");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceScatter");
     // Set output dim the same as input
     auto ref_dims = ctx->GetInputDim("X");
     ctx->SetOutputDim("Out", ref_dims);
     // Enforce the Updates and Ids are the same shape
-    PADDLE_ENFORCE_EQ(ctx->GetInputDim("Updates")[0],
-                      ctx->GetInputDim("Ids")[0],
-                      "Updates and Ids should have same shape.");
+    auto updates_dim = ctx->GetInputDim("Updates");
+    auto ids_dim = ctx->GetInputDim("Ids");
+    PADDLE_ENFORCE_EQ(
+        updates_dim[0], ids_dim[0],
+        platform::errors::InvalidArgument(
+            "The shape of SequenceScatter operator's input Updates and Ids do "
+            "not match, receive Updates's shape is [%s], Ids's shape is [%s].",
+            updates_dim, ids_dim));
     // Enforce LoD of ids and updates be the same
     if (ctx->IsRuntime()) {
@@ -101,12 +103,21 @@ class SequenceScatterOp : public framework::OperatorWithKernel {
       auto& ids_lod = ids_var->Get<LoDTensor>().lod();
       auto& updates_lod = updates_var->Get<LoDTensor>().lod();
-      PADDLE_ENFORCE_EQ(ids_lod.size(), 1,
-                        "Currently only level 1 LoD could be"
-                        " processed by sequence scatter op.");
-      PADDLE_ENFORCE_EQ(updates_lod.size(), 1,
-                        "Currently only level 1 LoD "
-                        "could be processed by sequence scatter op.");
+      PADDLE_ENFORCE_EQ(
+          ids_lod.size(), 1,
+          platform::errors::InvalidArgument(
+              "The SequenceScatter operator's Input Ids holds wrong LoD "
+              "information. Currently SequenceScatter operator can only deal "
+              "with one level LoD for input Ids, but received LoD level is %d.",
+              ids_lod.size()));
+      PADDLE_ENFORCE_EQ(
+          updates_lod.size(), 1,
+          platform::errors::InvalidArgument(
+              "The SequenceScatter operator's Input Updates holds wrong LoD "
+              "information. Currently SequenceScatter operator can only deal "
+              "with one level LoD for input Updates, but received LoD level is "
+              "%d.",
+              ids_lod.size()));
     }
   }
......
@@ -35,8 +35,9 @@ class SequenceScatterOpKernel : public framework::OpKernel<T> {
     auto& ids_lod = ids->lod();
     PADDLE_ENFORCE_EQ(ids_lod.empty(), false,
-                      "Input(Ids) Tensor of SequenceScatterOp does not contain "
-                      "LoD information.");
+                      platform::errors::InvalidArgument(
+                          "Input(Ids) Tensor of SequenceScatter operator does "
+                          "not contain LoD information."));
     // Initialize out as same as x
     out->mutable_data<T>(ctx.GetPlace());
@@ -46,9 +47,12 @@ class SequenceScatterOpKernel : public framework::OpKernel<T> {
     auto out_dims = out->dims();
     for (int i = 0; i < x_dims.size(); ++i)
-      PADDLE_ENFORCE(x_dims[i] == out_dims[i],
-                     "Input and output shape of "
-                     "sequence scatter op must exactly be the same.");
+      PADDLE_ENFORCE_EQ(x_dims[i], out_dims[i],
+                        platform::errors::InvalidArgument(
+                            "Input(X) and output(Out) shape of SequenceScatter "
+                            "operator do not match. Received input(X)'s shape "
+                            "is [%s], output(Out)'s shape is [%s].",
+                            x_dims, out_dims));
     size_t slice_size = 1;
     for (int i = 1; i < x_dims.size(); ++i) slice_size *= x_dims[i];
@@ -56,8 +60,13 @@ class SequenceScatterOpKernel : public framework::OpKernel<T> {
     auto lod_vec = ids_lod[0];
     unsigned int seg = 0;
     for (int i = 0; i < ids->dims()[0]; ++i) {
-      PADDLE_ENFORCE_LT(seg, lod_vec.size() - 1,
-                        "Segment num must not exceed batch size.\n");
+      PADDLE_ENFORCE_LT(
+          seg, lod_vec.size() - 1,
+          platform::errors::OutOfRange("The segment index is out of bound in "
+                                       "SequenceScatter operator, it must be "
+                                       "less than batch size. The segment "
+                                       "index is %d, the batch size is %d.",
+                                       seg, lod_vec.size()));
       int lower_bound = lod_vec[seg];
       int upper_bound = lod_vec[seg + 1];
       if (i >= lower_bound && i < upper_bound) {
@@ -77,8 +86,11 @@ template <typename T>
 class SequenceScatterGradientOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
-                   "This kernel only runs on CPU.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_cpu_place(ctx.GetPlace()), true,
+        platform::errors::Unimplemented("Device dose not match. The "
+                                        "SequenceScatterGradientOpKernel can "
+                                        "only run on CPU device."));
     auto* dX = ctx.Output<Tensor>(framework::GradVarName("X"));
     auto* dUpdates = ctx.Output<LoDTensor>(framework::GradVarName("Updates"));
     auto* ids = ctx.Input<LoDTensor>("Ids");
@@ -94,9 +106,13 @@ class SequenceScatterGradientOpKernel : public framework::OpKernel<T> {
     auto dout_dims = dOut->dims();
     for (int i = 0; i < dx_dims.size(); ++i)
-      PADDLE_ENFORCE(dx_dims[i] == dout_dims[i],
-                     "Input and output shape of "
-                     "sequence scatter grad op must exactly be the same.");
+      PADDLE_ENFORCE_EQ(dx_dims[i], dout_dims[i],
+                        platform::errors::InvalidArgument(
+                            "Input(Out@GRAD) and output(X@GRAD) shape of "
+                            "SequenceScatterGradient operator do not match. "
+                            "Received input(Out@GRAD)'s shape is [%s], "
+                            "output(X@GRAD)'s shape is [%s].",
+                            dout_dims, dx_dims));
     size_t slice_size = 1;
     for (int i = 1; i < dx_dims.size(); ++i) slice_size *= dx_dims[i];
@@ -105,8 +121,13 @@ class SequenceScatterGradientOpKernel : public framework::OpKernel<T> {
     unsigned int seg = 0;
     for (int i = 0; i < ids->dims()[0]; ++i) {
-      PADDLE_ENFORCE_LT(seg, lod_vec.size() - 1,
-                        "Segment num must not exceed batch size.\n");
+      PADDLE_ENFORCE_LT(
+          seg, lod_vec.size() - 1,
+          platform::errors::OutOfRange(
+              "The segment index is out of bound in SequenceScatterGradient "
+              "operator, it must be less than batch size. The segment index is "
+              "%d, the batch size is %d.",
+              seg, lod_vec.size()));
       int lower_bound = lod_vec[seg];
      int upper_bound = lod_vec[seg + 1];
       if (i >= lower_bound && i < upper_bound) {
......
@@ -23,14 +23,10 @@ class SequenceSliceOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceSliceOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Offset"),
-                   "Input(Offset) of SequenceSliceOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Length"),
-                   "Input(Length) of SequenceSliceOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceSliceOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceSlice");
+    OP_INOUT_CHECK(ctx->HasInput("Offset"), "Input", "Offset", "SequenceSlice");
+    OP_INOUT_CHECK(ctx->HasInput("Length"), "Input", "Length", "SequenceSlice");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceSlice");
     auto input_dims = ctx->GetInputDim("X");
     auto offset_dim = ctx->GetInputDim("Offset");
@@ -38,10 +34,18 @@ class SequenceSliceOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         offset_dim.size(), 2UL,
-        "Only support one level sequence now, The rank of offset must be 2.");
+        platform::errors::InvalidArgument(
+            "Input Offset dimension error. SequenceSlice operator only support "
+            "one level sequence now, the dimension of input Offset must be 2, "
+            "but received dimension is %d.",
+            offset_dim.size()));
     PADDLE_ENFORCE_EQ(
         length_dim.size(), 2UL,
-        "Only support one level sequence now, The rank of Length must be 2.");
+        platform::errors::InvalidArgument(
+            "Input Length dimension error. SequenceSlice operator only support "
+            "one level sequence now, the dimension of input Length must be 2, "
+            "but received dimension is %d.",
+            offset_dim.size()));
     // Initialize the output's dims to maximum,
     // and re-set to real dims by the value of Offset and Length at kernel
@@ -62,10 +66,10 @@ class SequenceSliceGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "The gradient of Out should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName("X")),
-                   "The gradient of X should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "SequenceSliceGrad");
+    OP_INOUT_CHECK(ctx->HasOutputs(framework::GradVarName("X")), "Output",
+                   framework::GradVarName("X"), "SequenceSliceGrad");
     ctx->SetOutputsDim(framework::GradVarName("X"), ctx->GetInputsDim("X"));
   }
......
@@ -49,18 +49,32 @@ class SequenceSliceOpKernel : public framework::OpKernel<T> {
     auto* out = ctx.Output<LoDTensor>("Out");
     auto lod = in->lod();
-    PADDLE_ENFORCE_EQ(
-        lod.empty(), false,
-        "Input(X) Tensor of SequenceSliceOp does not contain LoD information.");
+    PADDLE_ENFORCE_EQ(lod.empty(), false,
+                      platform::errors::InvalidArgument(
+                          "Input(X) Tensor of SequenceSlice operator does not "
+                          "contain LoD information."));
+    PADDLE_ENFORCE_EQ(
+        lod.size(), 1UL,
+        platform::errors::InvalidArgument(
+            "LoD information error. SequenceSlice operator only support one "
+            "level sequence now, but received LoD level is %d.",
+            lod.size()));
     auto n = lod[0].size() - 1;
-    PADDLE_ENFORCE_EQ(lod.size(), 1UL, "Only support one level sequence now.");
     PADDLE_ENFORCE_EQ(
         n, static_cast<size_t>(length->dims()[0]),
-        "The size of input-sequence and length-array should be the same");
+        platform::errors::InvalidArgument(
+            "Input length shape error. The length of input LoD sequence and "
+            "input length-array's first dimension should be equal, but the LoD "
+            "sequence length is %d, the length-array's first dimension is %d.",
+            n, static_cast<size_t>(length->dims()[0])));
     PADDLE_ENFORCE_EQ(
         n, static_cast<size_t>(offset->dims()[0]),
-        "The size of input-sequence and offset-array should be the same");
+        platform::errors::InvalidArgument(
+            "Input offset shape error. The length of input LoD sequence and "
+            "input offset-array's first dimension should be equal, but the LoD "
+            "sequence length is %d, the offset-array's first dimension is %d.",
+            n, static_cast<size_t>(offset->dims()[0])));
     const int64_t* offset_data = offset->data<int64_t>();
     const int64_t* length_data = length->data<int64_t>();
@@ -79,11 +93,21 @@ class SequenceSliceOpKernel : public framework::OpKernel<T> {
     for (size_t i = 0; i < n; ++i) {
       PADDLE_ENFORCE_LE(0, offset_data[i],
-                        "The offset[%d] must be nonnegative.", i);
+                        platform::errors::InvalidArgument(
+                            "The input offset[%d]'s value is negative, its "
+                            "value is %d, expect it to be non-negative.",
+                            i, offset_data[i]));
       PADDLE_ENFORCE_LE(0, length_data[i],
-                        "The length[%d] must be nonnegative.", i);
-      PADDLE_ENFORCE_LE(lod[0][i] + offset_data[i] + length_data[i],
-                        lod[0][i + 1], "The target tensor's length overflow.");
+                        platform::errors::InvalidArgument(
+                            "The input length[%d]'s value is negative, its "
+                            "value is %d, expect it to be non-negative.",
+                            i, offset_data[i]));
+      PADDLE_ENFORCE_LE(
+          lod[0][i] + offset_data[i] + length_data[i], lod[0][i + 1],
+          platform::errors::OutOfRange(
+              "The slice end index of target tensor is out of range. expect it "
+              "less than or equal to %d, but the actual slice end index is %d.",
+              lod[0][i + 1], lod[0][i] + offset_data[i] + length_data[i]));
     }
     out->mutable_data<T>(ctx.GetPlace());
......
@@ -23,10 +23,8 @@ class SequenceSoftmaxOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceSoftmaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceSoftmaxOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceSoftmax");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceSoftmax");
     ctx->ShareDim("X", /*->*/ "Out");
     ctx->ShareLoD("X", /*->*/ "Out");
@@ -108,21 +106,22 @@ class SequenceSoftmaxGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Out"),
-                   "Input(Out) of SequenceSoftmaxGradOp should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasInput(framework::GradVarName("Out")),
-        "Input(Out@GRAD) of SequenceSoftmaxGradOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceSoftmaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
-                   "Output(X@GRAD) of SequenceSoftmaxOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Out"), "Input", "Out", "SequenceSoftmaxGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "SequenceSoftmaxGrad");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceSoftmaxGrad");
+    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
+                   "X@GRAD", "SequenceSoftmaxGrad");
+    auto out_dim = ctx->GetInputDim("Out");
+    auto out_grad_dim = ctx->GetInputDim(framework::GradVarName("Out"));
     PADDLE_ENFORCE_EQ(
-        ctx->GetInputDim("Out"),
-        ctx->GetInputDim(framework::GradVarName("Out")),
-        "Input(Out) and Input(Out@GRAD) of SequenceSoftmaxGradOp should be of "
-        "the same shape.");
+        out_dim, out_grad_dim,
+        platform::errors::InvalidArgument(
+            "The shape of Input(Out) and Input(Out@GRAD) of "
+            "SequenceSoftmaxGrad operator do not match. The Input(Out)'s shape "
+            "is [%s], the Input(Out@GRAD)'s shape is [%s].",
+            out_dim, out_grad_dim));
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
   }
......
@@ -95,20 +95,27 @@ class SequenceSoftmaxKernel : public framework::OpKernel<T> {
     auto lod = x->lod();
     auto dims = x->dims();
-    PADDLE_ENFORCE_EQ(lod.empty(), false,
-                      "Input(X) Tensor of SequenceSoftmaxOp does not contain "
-                      "LoD information.");
+    PADDLE_ENFORCE_EQ(
+        lod.empty(), false,
+        platform::errors::InvalidArgument(
+            "Input(X) Tensor of SequenceSoftmax operator does not contain "
+            "LoD information."));
     const size_t level = lod.size() - 1;
-    PADDLE_ENFORCE_GT(
-        lod.size(), 0U,
-        "The LoD level of Input X should be larger than 0 (lod.size() > 0).");
-    PADDLE_ENFORCE_EQ(dims[0], static_cast<int64_t>(lod[level].back()),
-                      "The first dimension of Input(X) should be equal to the "
-                      "sum of all sequences' lengths.");
-    PADDLE_ENFORCE_EQ(dims[0], x->numel(),
-                      "The width of each timestep in Input(X) of "
-                      "SequenceSoftmaxOp should be 1.");
+    PADDLE_ENFORCE_EQ(
+        dims[0], static_cast<int64_t>(lod[level].back()),
+        platform::errors::InvalidArgument(
+            "The first dimension of Input(X) should be equal to the sum of all "
+            "sequences' lengths. But the first dimension of Input(X) is %d, "
+            "the sum of all sequences' lengths is %d.",
+            dims[0], static_cast<int64_t>(lod[level].back())));
+    PADDLE_ENFORCE_EQ(
+        dims[0], x->numel(),
+        platform::errors::InvalidArgument(
+            "The width of each timestep in Input(X) of SequenceSoftmax "
+            "operator should be 1. But the first dimension of Input(X) is %d, "
+            "the number of elements is %d.",
+            dims[0], x->numel()));
     out->mutable_data<T>(ctx.GetPlace());
......
@@ -29,14 +29,16 @@ class StridedSliceOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true,
-                      "Input (Input) of slice op should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output (Out) of slice op should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "StridedSlice");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "StridedSlice");
     auto in_dims = ctx->GetInputDim("Input");
-    PADDLE_ENFORCE_LT(in_dims.size(), 7,
-                      "The rank of input should be less than 7.");
+    PADDLE_ENFORCE_LT(
+        in_dims.size(), 7,
+        platform::errors::InvalidArgument(
+            "The dimension of StridedSlice operator's input should be less "
+            "than 7, but received dimension is %d.",
+            in_dims.size()));
     auto starts = ctx->Attrs().Get<std::vector<int>>("starts");
     auto ends = ctx->Attrs().Get<std::vector<int>>("ends");
     auto strides = ctx->Attrs().Get<std::vector<int>>("strides");
@@ -50,20 +52,26 @@ class StridedSliceOp : public framework::OperatorWithKernel {
     if (ctx->HasInputs("StartsTensorList")) {
       auto StartsTensorList = ctx->Inputs("StartsTensorList");
-      PADDLE_ENFORCE_GT(StartsTensorList.size(), 0,
-                        "StartsTensorList size can't be zero");
+      PADDLE_ENFORCE_GT(
+          StartsTensorList.size(), 0,
+          platform::errors::InvalidArgument(
+              "StridedSlice operator's StartsTensorList is empty."));
       starts_size = StartsTensorList.size();
     }
     if (ctx->HasInputs("EndsTensorList")) {
       auto EndsTensorList = ctx->Inputs("EndsTensorList");
-      PADDLE_ENFORCE_GT(EndsTensorList.size(), 0,
-                        "EndsTensorList size can't be zero");
+      PADDLE_ENFORCE_GT(
+          EndsTensorList.size(), 0,
+          platform::errors::InvalidArgument(
+              "StridedSlice operator's EndsTensorList is empty."));
       ends_size = EndsTensorList.size();
     }
     if (ctx->HasInputs("StridesTensorList")) {
       auto StridesTensorList = ctx->Inputs("StridesTensorList");
-      PADDLE_ENFORCE_GT(StridesTensorList.size(), 0,
-                        "StridesTensorList size can't be zero");
+      PADDLE_ENFORCE_GT(
+          StridesTensorList.size(), 0,
+          platform::errors::InvalidArgument(
+              "StridedSlice operator's StridesTensorList is empty."));
       strides_size = StridesTensorList.size();
     }
@@ -73,18 +81,31 @@ class StridedSliceOp : public framework::OperatorWithKernel {
       tensor_input = true;
     }
     if (!ctx->HasInput("EndsTensor")) {
-      PADDLE_ENFORCE_EQ(ends_size, axes.size(),
-                        "The size of ends must be equal to the size of axes.");
+      PADDLE_ENFORCE_EQ(
+          ends_size, axes.size(),
+          platform::errors::InvalidArgument(
+              "The size of ends attribute in StridedSlice operator is not "
+              "equal to the size of axes attribute. The ends attribute's size "
+              "is %d, axes attribute's size is %d.",
+              ends_size, axes.size()));
     }
     if (!ctx->HasInput("StartsTensor")) {
       PADDLE_ENFORCE_EQ(
           starts_size, axes.size(),
-          "The size of starts must be equal to the size of axes.");
+          platform::errors::InvalidArgument(
+              "The size of starts attribute in StridedSlice operator is not "
+              "equal to the size of axes attribute. The starts attribute's "
+              "size is %d, axes attribute's size is %d.",
+              starts_size, axes.size()));
     }
     if (!ctx->HasInput("StridesTensor")) {
       PADDLE_ENFORCE_EQ(
           strides_size, axes.size(),
-          "The size of strides must be equal to the size of axes.");
+          platform::errors::InvalidArgument(
+              "The size of strides attribute in StridedSlice operator is not "
+              "equal to the size of axes attribute. The strides attribute's "
+              "size is %d, axes attribute's size is %d.",
+              strides_size, axes.size()));
     }
     // we need to analysis strided slice op is valid for
     // the parameter that we get from python front
@@ -101,7 +122,10 @@ class StridedSliceOp : public framework::OperatorWithKernel {
     for (size_t i = 0; i < decrease_axis.size(); ++i) {
       if (ctx->IsRuntime() && infer_flags[i] != -1) {
         PADDLE_ENFORCE_EQ(out_dims[decrease_axis[i]], 1,
-                          "decrease dim should be 1");
+                          platform::errors::InvalidArgument(
+                              "the size of decrease dimension should be 1, "
+                              "but received %d.",
+                              out_dims[decrease_axis[i]]));
       }
       out_dims[decrease_axis[i]] = 0;
     }
@@ -219,9 +243,11 @@ class StridedSliceOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true, "Input should not be null");
-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
-                      "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input",
+                   "StridedSliceGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "StridedSliceGrad");
     auto x_dims = ctx->GetInputDim("Input");
     auto x_grad_name = framework::GradVarName("Input");
     if (ctx->HasOutput(x_grad_name)) {
......
@@ -54,7 +54,9 @@ static void StridedSliceOutDims(
       continue;
     }
-    PADDLE_ENFORCE_NE(stride_index, 0, "stride must not to be zero");
+    PADDLE_ENFORCE_NE(stride_index, 0,
+                      platform::errors::InvalidArgument(
+                          "stride index in StridedSlice operator is 0."));
     int axis_size = in_dims[axes_index];
     if (axis_size < 0) {
       continue;
@@ -78,8 +80,9 @@ static void StridedSliceOutDims(
         ((stride_index < 0 && (start_index <= end_index)) ||
          (stride_index > 0 && (start_index >= end_index)));
     PADDLE_ENFORCE_EQ(zero_dim_condition, false,
-                      "starts and end must meet requirement in different "
-                      "stride conditiont");
+                      platform::errors::InvalidArgument(
+                          "The start index and end index are invalid for their "
+                          "corresponding stride."));
     int left = std::max(0, std::min(start_index, end_index));
     int right = std::min(axis_size, std::max(start_index, end_index));
     int step = std::abs(stride_index);
@@ -249,8 +252,11 @@ class StridedSliceKernel : public framework::OpKernel<T> {
     if (decrease_axis.size() > 0) {
       std::vector<int> new_out_shape;
       for (size_t i = 0; i < decrease_axis.size(); ++i) {
-        PADDLE_ENFORCE_EQ(out_dims[decrease_axis[i]], 1,
-                          "decrease dim should be 1");
+        PADDLE_ENFORCE_EQ(
+            out_dims[decrease_axis[i]], 1,
+            platform::errors::InvalidArgument(
+                "the size of decrease dimension should be 1, but received %d.",
+                out_dims[decrease_axis[i]]));
         out_dims_origin[decrease_axis[i]] = 0;
       }
......
@@ -31,31 +31,33 @@ class TransposeOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Transpose");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Transpose");
     auto x_dims = ctx->GetInputDim("X");
     std::vector<int> axis = ctx->Attrs().Get<std::vector<int>>("axis");
     size_t x_rank = x_dims.size();
     size_t axis_size = axis.size();
     PADDLE_ENFORCE_EQ(x_rank, axis_size,
-                      "ShapeError: The input tensor's dimension "
-                      "should be equal to the axis's size. "
-                      "But received input tensor's dimension is %d, "
-                      "axis's size is %d",
-                      x_rank, axis_size);
+                      platform::errors::InvalidArgument(
+                          "The input tensor's dimension "
+                          "should be equal to the axis's size. "
+                          "But received input tensor's dimension is %d, "
+                          "axis's size is %d",
+                          x_rank, axis_size));
     std::vector<int> count(axis_size, 0);
     for (size_t i = 0; i < axis_size; i++) {
-      PADDLE_ENFORCE(
-          axis[i] < static_cast<int>(axis_size) && ++count[axis[i]] == 1,
-          "ValueError: Each element of Attribute axis should "
-          "be a unique value range from 0 to (dims - 1), "
-          "where the dims is the axis's size, "
-          "unique value means this axis value can appear only once. "
-          "But received axis[%d] is %d, axis_size is %d, "
-          "count[axis[%d]] is %d",
-          i, axis[i], axis_size, i, count[axis[i]]);
+      PADDLE_ENFORCE_EQ(
+          axis[i] < static_cast<int>(axis_size) && ++count[axis[i]] == 1, true,
+          platform::errors::InvalidArgument(
+              "Each element of Attribute axis should "
+              "be a unique value range from 0 to (dims - 1), "
+              "where the dims is the axis's size, "
+              "unique value means this axis value can appear only once. "
+              "But received axis[%d] is %d, axis_size is %d, "
+              "count[axis[%d]] is %d",
+              i, axis[i], axis_size, i, count[axis[i]]));
     }
     framework::DDim out_dims(x_dims);
@@ -149,9 +151,9 @@ class TransposeOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "TransposeOpGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "TransposeOpGrad");
     auto x_dims = ctx->GetInputDim("X");
     ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
     if (ctx->HasOutput(framework::GradVarName("X"))) {
@@ -193,8 +195,7 @@ class Transpose2Op : public TransposeOp {
   void InferShape(framework::InferShapeContext *ctx) const override {
     TransposeOp::InferShape(ctx);
-    PADDLE_ENFORCE(ctx->HasOutput("XShape"),
-                   "Output(XShape) should not be null");
+    OP_INOUT_CHECK(ctx->HasOutput("XShape"), "Output", "XShape", "Transpose2");
     const auto &in_dims = ctx->GetInputDim("X");
     std::vector<int64_t> x_shape_dim(in_dims.size() + 1);
     x_shape_dim[0] = 0;
@@ -259,9 +260,10 @@ class Transpose2OpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("XShape"), "Input(XShape) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("XShape"), "Input", "XShape",
+                   "Transpose2OpGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "Transpose2OpGrad");
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       auto xshape_dim = ctx->GetInputDim("XShape");
       auto x_shape_dim =
......
@@ -53,7 +53,10 @@ inline void TransCompute(const int dim, const DeviceContext& dev_ctx,
       trans6(dev_ctx, in, out, axis);
       break;
     default:
-      PADDLE_THROW("Tensors with rank at most 6 are supported");
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "Tensors with rank at most 6 are supported"
+          ", but received input tensor's rank is %d,",
+          dim));
   }
 }
......
@@ -11929,18 +11929,31 @@ def strided_slice(input, axes, starts, ends, strides):
             sliced_2 = fluid.layers.strided_slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends, strides=strides_2)
             # sliced_2 is input[:, 0:3:1, 0:2:1, 2:4:2].
     """
-    if not isinstance(starts, (list, tuple, Variable)):
-        raise ValueError(
-            "Input starts must be an Variable, python list or tuple.")
-    if not isinstance(ends, (list, tuple, Variable)):
-        raise ValueError(
-            "Input ends must be an Variable, python list or tuple.")
-    if not isinstance(strides, (list, tuple, Variable)):
-        raise ValueError(
-            "Input strides must be an Variable, python list or tuple.")
     helper = LayerHelper('strided_slice', **locals())
+    check_variable_and_dtype(input, 'input',
+                             ['float32', 'float64', 'int32', 'int64'],
+                             'strided_slice')
+    check_type(axes, 'axes', (list, tuple), 'strided_slice')
+    check_type(starts, 'starts', (list, tuple, Variable), 'strided_slice')
+    check_type(ends, 'ends', (list, tuple, Variable), 'strided_slice')
+    check_type(strides, 'strides', (list, tuple, Variable), 'strided_slice')
+
+    def check_list_elements_dtype(list_input, input_name):
+        if isinstance(list_input, Variable):
+            check_dtype(list_input.dtype, input_name, ['int32'],
+                        'strided_slice')
+        else:
+            for i, var in enumerate(list_input):
+                var_name = input_name + '[' + str(i) + ']'
+                if isinstance(var, Variable):
+                    check_dtype(var.dtype, var_name, ['int32'], 'strided_slice')
+
+    check_list_elements_dtype(axes, 'axes')
+    check_list_elements_dtype(starts, 'starts')
+    check_list_elements_dtype(ends, 'ends')
+    check_list_elements_dtype(strides, 'strides')

    def get_new_list_tensor(old_list):
        new_list_tensor = []
        for dim in old_list:
......
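The Python-side change above replaces the hand-written isinstance checks in strided_slice with the shared check_variable_and_dtype / check_type / check_dtype validators. As a rough usage sketch (variable names and shapes are illustrative, assuming the fluid.layers API of this release), a call that satisfies the added checks looks like:

    import paddle.fluid as fluid

    # A float32 input and plain Python lists for axes/starts/ends/strides,
    # which is one combination accepted by the validators added above.
    x = fluid.layers.data(
        name='x', shape=[3, 4, 5], dtype='float32', append_batch_size=False)
    sliced = fluid.layers.strided_slice(
        x, axes=[1, 2], starts=[0, 0], ends=[3, 4], strides=[1, 1])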
@@ -239,6 +239,8 @@ def sequence_softmax(input, use_cudnn=False, name=None):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_softmax', **locals())
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
+                             'sequence_softmax')
     dtype = helper.input_dtype()
     softmax_out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
@@ -560,10 +562,10 @@ def sequence_slice(input, offset, length, name=None):
     Args:
         input(Variable): LoDTensor, The input Variable which consists of the complete
-                         sequences.The data type is float32 or float64.
-        offset(Variable): LoDTensor, The offset to slice each sequence.The data
+                         sequences.The data type can be float32, float64, int32 or int64
+        offset(Variable): LoDTensor, The offset to slice each sequence. The data
                          type is int32 or int64.
-        length(Variable): LoDTensor, The length of each subsequence.The data
+        length(Variable): LoDTensor, The length of each subsequence. The data
                          type is int32 or int64.
         name(str|None): The default value is None. Normally there is no need
                         for user to set this property. For more information,
@@ -588,6 +590,15 @@ def sequence_slice(input, offset, length, name=None):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper("sequence_slice", **locals())
+    check_variable_and_dtype(input, 'input',
+                             ['float32', 'float64', 'int32', 'int64'],
+                             'sequence_slice')
+    check_variable_and_dtype(offset, 'offset', ['int32', 'int64'],
+                             'sequence_slice')
+    check_variable_and_dtype(length, 'length', ['int32', 'int64'],
+                             'sequence_slice')
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
@@ -1137,7 +1148,7 @@ def sequence_scatter(input, index, updates, name=None):
     Args:
         input (Variable): A Tensor with shape of :math:`[N, k_1... k_n]`. Supported data types: float32, float64, int32, int64.
-        index (Variable): A LoDTensor contains index information. Its LoD level must be 1 and its data type must be int64.
+        index (Variable): A LoDTensor contains index information. Its LoD level must be 1 and its data type can be int32 or int64.
         updates (Variable): A LodTensor contains updates information. It has the same LoD level with the index and has the
                             same data type with the input. Supported data types: float32, float64, int32, int64.
         name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information,
@@ -1161,6 +1172,16 @@ def sequence_scatter(input, index, updates, name=None):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_scatter', **locals())
+    check_variable_and_dtype(input, 'input',
+                             ['float32', 'float64', 'int32', 'int64'],
+                             'sequence_scatter')
+    check_variable_and_dtype(index, 'index', ['int32', 'int64'],
+                             'sequence_scatter')
+    check_variable_and_dtype(updates, 'updates',
+                             ['float32', 'float64', 'int32', 'int64'],
+                             'sequence_scatter')
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
......
@@ -305,6 +305,8 @@ def sequence_conv_pool(input,
                                   act="tanh",
                                   pool_type="sqrt")
     """
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'input')
+
    conv_out = layers.sequence_conv(
        input=input,
        num_filters=num_filters,
......
@@ -444,11 +444,11 @@ class TestStridedSliceAPI(unittest.TestCase):
         minus_1 = fluid.layers.fill_constant([1], "int32", -1)
         minus_3 = fluid.layers.fill_constant([1], "int32", -3)
         starts = fluid.layers.data(
-            name='starts', shape=[3], append_batch_size=False)
+            name='starts', shape=[3], dtype='int32', append_batch_size=False)
         ends = fluid.layers.data(
-            name='ends', shape=[3], append_batch_size=False)
+            name='ends', shape=[3], dtype='int32', append_batch_size=False)
         strides = fluid.layers.data(
-            name='strides', shape=[3], append_batch_size=False)
+            name='strides', shape=[3], dtype='int32', append_batch_size=False)
         x = fluid.layers.data(
             name="x",
......