Unverified commit 479c47f3, authored by Xing Wu, committed by GitHub

fix error info for transpose sequence_conv_pool max_sequence_len sequ… (#24437)

* fix error info for transpose sequence_conv_pool max_sequence_len sequence_erase, test=develop

* fix error info for transpose sequence_conv_pool max_sequence_len sequence_erase, test=develop

* update modify, test=develop

* update modify, test=develop

* fixed some modifications, test=develop
Parent 1c00732d
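Note for readers: every hunk below applies the same two conversions. Bare PADDLE_ENFORCE(cond, "raw message") checks on operator inputs and outputs become OP_INOUT_CHECK, and the remaining checks wrap their messages in platform::errors::InvalidArgument with printf-style placeholders for the offending values. The real macros live in Paddle's enforce headers and also record error codes, file/line info, and stack traces; the following standalone sketch (all *_SKETCH names are hypothetical) only mirrors the calling convention so the diffs are easier to read:

#include <cstdio>
#include <stdexcept>
#include <string>

// Hypothetical stand-in for platform::errors::InvalidArgument: formats a
// printf-style message into a string. The real class also carries an
// error-type code used by Paddle's unified error reporting.
template <typename... Args>
std::string InvalidArgumentSketch(const char* fmt, Args... args) {
  char buf[512];
  std::snprintf(buf, sizeof(buf), fmt, args...);
  return std::string("InvalidArgumentError: ") + buf;
}

// Hypothetical stand-in for PADDLE_ENFORCE_EQ(a, b, error).
#define ENFORCE_EQ_SKETCH(a, b, err)                  \
  do {                                                \
    if ((a) != (b)) throw std::invalid_argument(err); \
  } while (0)

// Hypothetical stand-in for OP_INOUT_CHECK(expr, "Input"/"Output", name, op):
// an existence check with a standardized not-found message.
#define INOUT_CHECK_SKETCH(cond, kind, name, op)                            \
  ENFORCE_EQ_SKETCH((cond), true,                                           \
                    InvalidArgumentSketch("The %s(%s) of operator (%s) is " \
                                          "not found.", kind, name, op))

Read this way, OP_INOUT_CHECK(context->HasInput("RankTable"), "Input", "RankTable", "MaxSeqenceLen") in the first hunk fails with a message naming the missing input and the operator, where the old PADDLE_ENFORCE(context->HasInput("RankTable")) reported only the failed condition. (Paddle's exact wording may differ from this sketch.)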
@@ -57,7 +57,8 @@ class MaxSeqenceLenOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 class MaxSeqenceLenInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE(context->HasInput("RankTable"));
+    OP_INOUT_CHECK(context->HasInput("RankTable"), "Input", "RankTable",
+                   "MaxSeqenceLen");
     context->SetOutputDim("Out", {1});
   }
 };
...
@@ -23,14 +23,15 @@ class SequenceEraseOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceErase operator should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceErase operator should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceErase");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceErase");
     auto x_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE(x_dims.size() == 2 && x_dims[1] == 1,
-                   "Input(X) of SequenceEraseOp should be a 2-D LoDTensor "
-                   "with the 2nd dimension equal to 1.");
+                   platform::errors::InvalidArgument(
+                       "Input(X) of SequenceEraseOp should be a 2-D LoDTensor "
+                       "with the 2nd dimension equal to 1,"
+                       "but received size %d with the 2nd dimension %d.",
+                       x_dims.size(), x_dims[1]));
     ctx->SetOutputDim("Out", x_dims);
     // The output LoDTensor's lod_level should be input X's lod_level.
     // For compile-time, we call SetLoDLevel to set output's lod_level.
...
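As an illustration of what the added placeholders report, here is the shape constraint above rendered as a standalone check (illustrative names, not Paddle API):

#include <cstdint>
#include <cstdio>
#include <vector>

// Standalone rendering of the SequenceErase InferShape constraint:
// X must be a 2-D tensor whose second dimension is exactly 1.
bool CheckSequenceEraseDims(const std::vector<int64_t>& x_dims) {
  if (x_dims.size() == 2 && x_dims[1] == 1) return true;
  std::fprintf(stderr,
               "Input(X) of SequenceEraseOp should be a 2-D LoDTensor "
               "with the 2nd dimension equal to 1, but received size %zu "
               "with the 2nd dimension %lld.\n",
               x_dims.size(),
               x_dims.size() > 1 ? (long long)x_dims[1] : 0LL);
  return false;
}

int main() {
  // Prints: "... but received size 2 with the 2nd dimension 3."
  CheckSequenceEraseDims({4, 3});
  return 0;
}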
@@ -64,8 +64,10 @@ class SequenceEraseOpCUDAKernel : public framework::OpKernel<T> {
     auto* out = ctx.Output<LoDTensor>("Out");
     auto lod = in->lod();
-    PADDLE_ENFORCE_EQ(lod[lod.size() - 1].back(), (size_t)in->numel(),
-                      "The actual size mismatches with the LoD information.");
+    PADDLE_ENFORCE_EQ(
+        lod[lod.size() - 1].back(), (size_t)in->numel(),
+        platform::errors::InvalidArgument(
+            "The actual size mismatches with the LoD information."));
     auto tokens = ctx.Attr<std::vector<int>>("tokens");
     auto in_len = in->numel();
     auto in_dat = in->data<T>();
...
@@ -30,9 +30,13 @@ class SequenceEraseKernel : public framework::OpKernel<T> {
     auto lod = in->lod();
     PADDLE_ENFORCE_EQ(
         lod.empty(), false,
-        "Input(X) Tensor of SequenceEraseOp does not contain LoD information.");
+        platform::errors::InvalidArgument("Input(X) Tensor of SequenceEraseOp "
+                                          "does not contain LoD information."));
     PADDLE_ENFORCE_EQ(lod[lod.size() - 1].back(), (size_t)in->numel(),
-                      "The actual size mismatches with the LoD information.");
+                      platform::errors::InvalidArgument(
+                          "The actual input size %d mismatches with the LoD "
+                          "information size %d.",
+                          lod[lod.size() - 1].back(), (size_t)in->numel()));
     auto tokens = ctx.Attr<std::vector<int>>("tokens");
     auto in_len = in->numel();
     auto in_dat = in->data<T>();
...
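Both SequenceErase kernels (CUDA above, CPU here) enforce the same LoD invariant: the last LoD level is a vector of offsets whose final entry must equal the tensor's element count. A minimal worked example with plain containers, no Paddle types:

#include <cstddef>
#include <vector>

// A LoD (level of detail) is a list of offset vectors. For a batch of
// three sequences of lengths 3, 1, 2 packed into one 6-element tensor,
// the single-level LoD is {0, 3, 4, 6}: sequence i spans
// [lod[i], lod[i+1]). The invariant checked above is that the final
// offset equals the tensor's element count.
bool LodMatchesNumel(const std::vector<std::vector<size_t>>& lod,
                     size_t numel) {
  return !lod.empty() && !lod.back().empty() && lod.back().back() == numel;
}

int main() {
  std::vector<std::vector<size_t>> lod = {{0, 3, 4, 6}};
  bool ok = LodMatchesNumel(lod, 6);   // true: offsets cover all 6 elements
  bool bad = LodMatchesNumel(lod, 5);  // false: would trigger the error above
  return (ok && !bad) ? 0 : 1;
}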
@@ -31,31 +31,33 @@ class TransposeOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Transpose");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Transpose");
     auto x_dims = ctx->GetInputDim("X");
     std::vector<int> axis = ctx->Attrs().Get<std::vector<int>>("axis");
     size_t x_rank = x_dims.size();
     size_t axis_size = axis.size();
 
     PADDLE_ENFORCE_EQ(x_rank, axis_size,
-                      "ShapeError: The input tensor's dimension "
-                      "should be equal to the axis's size. "
-                      "But received input tensor's dimension is %d, "
-                      "axis's size is %d",
-                      x_rank, axis_size);
+                      platform::errors::InvalidArgument(
+                          "The input tensor's dimension "
+                          "should be equal to the axis's size. "
+                          "But received input tensor's dimension is %d, "
+                          "axis's size is %d",
+                          x_rank, axis_size));
 
     std::vector<int> count(axis_size, 0);
     for (size_t i = 0; i < axis_size; i++) {
-      PADDLE_ENFORCE(
-          axis[i] < static_cast<int>(axis_size) && ++count[axis[i]] == 1,
-          "ValueError: Each element of Attribute axis should "
-          "be a unique value range from 0 to (dims - 1), "
-          "where the dims is the axis's size, "
-          "unique value means this axis value can appear only once. "
-          "But received axis[%d] is %d, axis_size is %d, "
-          "count[axis[%d]] is %d",
-          i, axis[i], axis_size, i, count[axis[i]]);
+      PADDLE_ENFORCE_EQ(
+          axis[i] < static_cast<int>(axis_size) && ++count[axis[i]] == 1, true,
+          platform::errors::InvalidArgument(
+              "Each element of Attribute axis should "
+              "be a unique value range from 0 to (dims - 1), "
+              "where the dims is the axis's size, "
+              "unique value means this axis value can appear only once. "
+              "But received axis[%d] is %d, axis_size is %d, "
+              "count[axis[%d]] is %d",
+              i, axis[i], axis_size, i, count[axis[i]]));
     }
 
     framework::DDim out_dims(x_dims);
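The count-array loop above is a compact permutation check: every axis value must lie in [0, rank) and occur exactly once. The same logic as a standalone function (the original relies on the ++count side effect inside the enforce; this sketch unrolls it and also guards negative values, which the original does not check explicitly):

#include <vector>

// Returns true iff `axis` is a valid permutation of 0..axis.size()-1,
// mirroring the loop in TransposeOp::InferShape: every element in range,
// every element hit exactly once.
bool IsValidPermutation(const std::vector<int>& axis) {
  const int n = static_cast<int>(axis.size());
  std::vector<int> count(n, 0);
  for (int v : axis) {
    if (v < 0 || v >= n || ++count[v] != 1) return false;
  }
  return true;
}

int main() {
  bool ok = IsValidPermutation({2, 0, 1});   // true
  bool dup = IsValidPermutation({0, 0, 2});  // false: 0 appears twice
  bool oob = IsValidPermutation({0, 1, 3});  // false: 3 out of range
  return (ok && !dup && !oob) ? 0 : 1;
}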
@@ -149,9 +151,9 @@ class TransposeOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "TransposeOpGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "TransposeOpGrad");
     auto x_dims = ctx->GetInputDim("X");
     ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
     if (ctx->HasOutput(framework::GradVarName("X"))) {
@@ -193,8 +195,7 @@ class Transpose2Op : public TransposeOp {
   void InferShape(framework::InferShapeContext *ctx) const override {
     TransposeOp::InferShape(ctx);
-    PADDLE_ENFORCE(ctx->HasOutput("XShape"),
-                   "Output(XShape) should not be null");
+    OP_INOUT_CHECK(ctx->HasOutput("XShape"), "Output", "XShape", "Transpose2");
     const auto &in_dims = ctx->GetInputDim("X");
     std::vector<int64_t> x_shape_dim(in_dims.size() + 1);
     x_shape_dim[0] = 0;
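Context for the XShape output checked above: Transpose2 stores the input's shape in a dummy XShape output whose dims are the input dims prefixed with a 0 (hence the x_shape_dim[0] = 0 line), so the grad op in the next hunk can recover the input shape from XShape alone. A small sketch of that round trip, assuming this leading-zero convention:

#include <cstdint>
#include <vector>

// Encode: input dims [d0, d1, ...] -> XShape dims [0, d0, d1, ...].
std::vector<int64_t> EncodeXShape(const std::vector<int64_t>& in_dims) {
  std::vector<int64_t> x_shape(in_dims.size() + 1);
  x_shape[0] = 0;  // sentinel; XShape carries shape info, not real data
  for (size_t i = 0; i < in_dims.size(); ++i) x_shape[i + 1] = in_dims[i];
  return x_shape;
}

// Decode (what the grad op's slice of XShape does): drop the sentinel.
std::vector<int64_t> DecodeXShape(const std::vector<int64_t>& x_shape) {
  return {x_shape.begin() + 1, x_shape.end()};
}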
@@ -259,9 +260,10 @@ class Transpose2OpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("XShape"), "Input(XShape) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("XShape"), "Input", "XShape",
+                   "Transpose2OpGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "Transpose2OpGrad");
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       auto xshape_dim = ctx->GetInputDim("XShape");
       auto x_shape_dim =
...
@@ -53,7 +53,10 @@ inline void TransCompute(const int dim, const DeviceContext& dev_ctx,
       trans6(dev_ctx, in, out, axis);
       break;
     default:
-      PADDLE_THROW("Tensors with rank at most 6 are supported");
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "Tensors with rank at most 6 are supported"
+          ", but received input tensor's rank is %d,",
+          dim));
   }
 }
...
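The default branch above exists because TransCompute dispatches to per-rank helpers (trans1 through trans6 in the surrounding switch), each templated on a compile-time rank, so ranks above 6 simply have no instantiation to call. The dispatch shape reduced to a standalone sketch with stub helpers:

#include <stdexcept>
#include <string>

// Stub per-rank helpers standing in for trans1..trans6, which in Paddle
// wrap transposes templated on a compile-time rank.
template <int Rank>
void TransRank() { /* rank-specific transpose would run here */ }

void TransComputeSketch(int dim) {
  switch (dim) {
    case 1: TransRank<1>(); break;
    case 2: TransRank<2>(); break;
    case 3: TransRank<3>(); break;
    case 4: TransRank<4>(); break;
    case 5: TransRank<5>(); break;
    case 6: TransRank<6>(); break;
    default:
      // Mirrors the PADDLE_THROW above: no instantiation exists past
      // rank 6, so reject and report the received rank.
      throw std::invalid_argument(
          "Tensors with rank at most 6 are supported, but received "
          "input tensor's rank is " + std::to_string(dim) + ".");
  }
}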
@@ -305,6 +305,8 @@ def sequence_conv_pool(input,
                            act="tanh",
                            pool_type="sqrt")
     """
+
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'input')
     conv_out = layers.sequence_conv(
         input=input,
         num_filters=num_filters,
...