Unverified commit 057e28bc, authored by smallv0221 and committed by GitHub

API(lstm_unit, lstmp, sequence_mask, sequence_enumerate, sequence_conv) error message enhancement (#27572)

* API(Compute) error message enhancement on lines 44, 50, 53.

* lstm_unit error message enhancement.
lstmp error message enhancement.
sequence_conv error message enhancement.
sequence_enumerate error message enhancement.
sequence_mask error message enhancement.

* Update lstm_unit_op.cc

* Update lstm_unit_op.h

* error message enhancement.

* Update sequence_conv_op.cc

* Update lstm_unit_op.cc

* Update sequence_conv_op.cc

* Update sequence_enumerate_op.cc

* Update sequence_enumerate_op.cu

* Update sequence_enumerate_op.h

* Update sequence_pool_op.h

* error message enhancement.

* error message enhancement.
Parent 606611d3
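
In summary, the commit migrates bare `PADDLE_ENFORCE(cond, msg)` calls to the typed comparison macros (`PADDLE_ENFORCE_EQ`, `PADDLE_ENFORCE_GE`, ...) and to `OP_INOUT_CHECK`, wrapping every message in a `platform::errors` class. A minimal sketch of the before/after idiom, using names taken from the diff below (a fragment of an InferShape body inside the Paddle source tree, assuming `platform/enforce.h` is available):

```cpp
// Sketch of the pattern applied throughout this commit (fragment of an
// InferShape body; assumes Paddle's platform/enforce.h is available).

// Before: bare condition plus an unformatted message string.
//   PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of LSTM should not be null.");

// After: a dedicated input/output check that records the operator name...
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "lstm_unit");

// ...and comparison macros paired with a typed error class that formats
// the offending values into the message.
auto x_dims = ctx->GetInputDim("X");
PADDLE_ENFORCE_EQ(
    x_dims.size(), 2,
    platform::errors::InvalidArgument(
        "Input(X)'s rank must be 2. Received %d instead.", x_dims.size()));
```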
@@ -23,23 +23,31 @@ class LstmUnitOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of LSTM should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("C_prev"),
-                   "Input(C_prev) of LSTM should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("C"),
-                   "Output(C) of LSTM should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("H"),
-                   "Output(H) of LSTM should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "lstm_unit");
+    OP_INOUT_CHECK(ctx->HasInput("C_prev"), "Input", "C_prev", "lstm_unit");
+    OP_INOUT_CHECK(ctx->HasOutput("C"), "Output", "C", "lstm_unit");
+    OP_INOUT_CHECK(ctx->HasOutput("H"), "Output", "H", "lstm_unit");
 
     auto x_dims = ctx->GetInputDim("X");
     auto c_prev_dims = ctx->GetInputDim("C_prev");
 
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank must be 2.");
+    PADDLE_ENFORCE_EQ(
+        x_dims.size(), 2,
+        platform::errors::InvalidArgument(
+            "Input(X)'s rank must be 2. Received %d instead.", x_dims.size()));
     if (ctx->IsRuntime()) {
       PADDLE_ENFORCE_EQ(x_dims[0], c_prev_dims[0],
-                        "Batch size of inputs and states must be equal");
+                        platform::errors::InvalidArgument(
+                            "Batch size of inputs and states must be equal, "
+                            "but received %d (inputs) "
+                            "vs %d (states).",
+                            x_dims[0], c_prev_dims[0]));
       PADDLE_ENFORCE_EQ(x_dims[1], c_prev_dims[1] * 4,
-                        "Dimension of FC should equal to prev state * 4");
+                        platform::errors::InvalidArgument(
+                            "Dimension of FC should equal to prev state * 4, "
+                            "but received %d (dimension of FC) "
+                            "vs %d (prev state * 4).",
+                            x_dims[1], c_prev_dims[1] * 4));
     }
 
     int b_size = c_prev_dims[0];  // batch size
@@ -85,10 +93,10 @@ class LstmUnitGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("C")),
-                   "Input(C@GRAD) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("H")),
-                   "Input(H@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("C")), "Input",
+                   framework::GradVarName("C"), "lstm_unit");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("H")), "Input",
+                   framework::GradVarName("H"), "lstm_unit");
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
     ctx->SetOutputDim(framework::GradVarName("C_prev"),
                       ctx->GetInputDim("C_prev"));
......
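
Note how the gradient-op checks derive the variable name with `framework::GradVarName` instead of hard-coding it: `GradVarName("C")` evaluates to `"C@GRAD"` (the name the old message spelled out by hand), so the existence check and the reported name cannot drift apart. A fragment of the idiom as used above:

```cpp
// Gradient-input check (fragment; assumes Paddle's framework headers).
// framework::GradVarName("C") yields "C@GRAD", so the same expression
// drives both the existence check and the name in the error message.
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("C")), "Input",
               framework::GradVarName("C"), "lstm_unit");
```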
@@ -93,8 +93,9 @@ template <typename T>
 class LstmUnitOpCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                   "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
     auto* x_tensor = ctx.Input<framework::Tensor>("X");
     auto* c_prev_tensor = ctx.Input<framework::Tensor>("C_prev");

@@ -124,8 +125,9 @@ template <typename T>
 class LstmUnitGradOpCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                   "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
     auto x_tensor = ctx.Input<Tensor>("X");
     auto c_prev_tensor = ctx.Input<Tensor>("C_prev");
......
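
The kernel-side checks above follow a second pattern: boolean predicates have no dedicated relational macro, so they are rewritten as `PADDLE_ENFORCE_EQ` against `true`, and the error class becomes `PreconditionNotMet` because a wrong device place is an environment problem rather than a bad argument. The fragment, as it appears in both CUDA kernels:

```cpp
// Device-place precondition (fragment; `ctx` is the kernel's
// ExecutionContext). The boolean condition is compared against `true`
// because the bare PADDLE_ENFORCE form is being retired.
PADDLE_ENFORCE_EQ(
    platform::is_gpu_place(ctx.GetPlace()), true,
    paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
```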
@@ -39,8 +39,9 @@ template <typename DeviceContext, typename T>
 class LstmUnitKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CPUPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_cpu_place(ctx.GetPlace()), true,
+        paddle::platform::errors::PreconditionNotMet("It must use CPUPlace."));
     auto* x_tensor = ctx.Input<framework::Tensor>("X");
     auto* c_prev_tensor = ctx.Input<framework::Tensor>("C_prev");

@@ -82,8 +83,9 @@ template <typename DeviceContext, typename T>
 class LstmUnitGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CPUPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_cpu_place(ctx.GetPlace()), true,
+        paddle::platform::errors::PreconditionNotMet("It must use CPUPlace."));
     auto x_tensor = ctx.Input<Tensor>("X");
     auto c_prev_tensor = ctx.Input<Tensor>("C_prev");
......
@@ -91,7 +91,8 @@ class LSTMPKernel : public framework::OpKernel<T> {
     else if (act_type == math::detail::ActivationType::kReLU)
       ReluFunctor<T>()(d, x, y);
     else
-      PADDLE_THROW("unsupported activation type");
+      PADDLE_THROW(
+          platform::errors::InvalidArgument("unsupported activation type"));
   }
 
   void Compute(const framework::ExecutionContext& ctx) const override {

@@ -263,7 +264,8 @@ class LSTMPGradKernel : public framework::OpKernel<T> {
     else if (act_type == math::detail::ActivationType::kReLU)
       ReluGradFunctor<T>()(d, x, y, dy, dx);
     else
-      PADDLE_THROW("unsupported activation type");
+      PADDLE_THROW(
+          platform::errors::InvalidArgument("unsupported activation type"));
   }
 
   void Compute(const framework::ExecutionContext& ctx) const override {
......
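
Unconditional failures are handled the same way: `PADDLE_THROW` keeps its role in unsupported branches, but its message now also travels inside an error class, as in the activation dispatch above:

```cpp
// Unsupported-case branch (fragment of the activation dispatch above).
// PADDLE_THROW still aborts unconditionally; only the message gains a
// structured error class.
PADDLE_THROW(
    platform::errors::InvalidArgument("unsupported activation type"));
```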
@@ -59,20 +59,22 @@ class SequenceConvOp : public framework::OperatorWithKernel {
             filter_dims[0], context_length * in_dims[1]));
 
     if (ctx->Attrs().Get<bool>("paddingTrainable")) {
-      PADDLE_ENFORCE(
-          ctx->HasInput("PaddingData"),
-          "Input(PaddingData) of SequenceConvOp should not be null.");
+      OP_INOUT_CHECK(ctx->HasInput("PaddingData"), "Input", "PaddingData",
+                     "sequence_conv");
       framework::DDim padding_dim = ctx->GetInputDim("PaddingData");
       int up_pad = std::max(0, -context_start);
       int down_pad = std::max(0, context_start + context_length - 1);
       int total_pad = up_pad + down_pad;
       int input_width = static_cast<int>(in_dims[1]);
+      bool start_equals_zero = context_start == 0;
+      bool length_equals_one = context_length == 1;
+      bool start_length = start_equals_zero && length_equals_one;
 
-      if (context_start == 0 && context_length == 1) {
-        PADDLE_THROW(
-            "If context_start is 0 and context_length is 1, paddingTrainable "
-            "should be false.");
-      }
+      PADDLE_ENFORCE_EQ(
+          start_length, false,
+          platform::errors::InvalidArgument(
+              "If context_start is 0 and context_length is 1, paddingTrainable "
+              "should be false."));
       PADDLE_ENFORCE_EQ(
           padding_dim.size(), 2,
           platform::errors::InvalidArgument(
......
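
sequence_conv also shows a structural rewrite: a throw-inside-if becomes a declarative check by naming the forbidden combination as a boolean and asserting it false, which routes the failure through the same enforce machinery as every other check. A sketch of that inversion:

```cpp
// 'Must not hold' condition asserted false (sketch mirroring the
// sequence_conv change above; context_start and context_length are op
// attributes read earlier in InferShape).
bool start_length = (context_start == 0) && (context_length == 1);
PADDLE_ENFORCE_EQ(
    start_length, false,
    platform::errors::InvalidArgument(
        "If context_start is 0 and context_length is 1, paddingTrainable "
        "should be false."));
```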
@@ -43,8 +43,11 @@ class SequenceEnumerateOpMaker : public framework::OpProtoAndCheckerMaker {
              "Output LoDTensor of SequenceEnumerate operator.");
     AddAttr<int>("win_size", "(int) The enumerate sequence window size.")
         .AddCustomChecker([](const int& win_size) {
-          PADDLE_ENFORCE(win_size >= 2,
-                         "The window size should be not less than 2.");
+          PADDLE_ENFORCE_GE(win_size, 2,
+                            platform::errors::InvalidArgument(
+                                "The window size should not be less than 2. "
+                                "Received window size is %d.",
+                                win_size));
         });
     AddAttr<int>("pad_value", "(int) The enumerate sequence padding value.")
         .SetDefault(0);
......
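
Attribute validation gets the same treatment inside `AddCustomChecker` lambdas: the relational macro (`PADDLE_ENFORCE_GE` here) replaces the bare form, and the rejected value is echoed back to the user. A sketch of a checker in this style (`AddAttr` belongs to Paddle's OpProtoAndCheckerMaker, as in the diff above):

```cpp
// Custom attribute checker in the enhanced style (sketch; runs when the
// attribute is set, rejecting window sizes below 2 and echoing the value).
AddAttr<int>("win_size", "(int) The enumerate sequence window size.")
    .AddCustomChecker([](const int& win_size) {
      PADDLE_ENFORCE_GE(win_size, 2,
                        platform::errors::InvalidArgument(
                            "The window size should not be less than 2. "
                            "Received window size is %d.", win_size));
    });
```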
@@ -58,7 +58,10 @@ class SequenceEnumerateOpCUDAKernel : public framework::OpKernel<T> {
     PADDLE_ENFORCE_EQ(
         static_cast<uint64_t>(in_dims[0]), in_lod[0].back(),
-        "The actual input data's size mismatched with LoD information.");
+        platform::errors::InvalidArgument(
+            "The actual input data's size mismatched with LoD information. "
+            "Received input data size is %d (actual) vs %d (LoD information).",
+            static_cast<uint64_t>(in_dims[0]), in_lod[0].back()));
     /* Generate enumerate sequence set */
     auto stream = context.cuda_device_context().stream();
......
@@ -29,21 +29,31 @@ class SequenceEnumerateKernel : public framework::OpKernel<T> {
     int win_size = context.Attr<int>("win_size");
     auto pad_value = static_cast<T>(context.Attr<int>("pad_value"));
-    PADDLE_ENFORCE_EQ(in->lod().empty(), false,
-                      "Input(X) Tensor of SequenceEnumerateOp does not contain "
-                      "LoD information.");
+    PADDLE_ENFORCE_EQ(
+        in->lod().empty(), false,
+        platform::errors::InvalidArgument(
+            "Input(X) Tensor of SequenceEnumerateOp does not contain "
+            "LoD information."));
     auto in_dims = in->dims();
     auto lod0 = in->lod()[0];
     PADDLE_ENFORCE_EQ(
         static_cast<uint64_t>(in_dims[0]), lod0.back(),
-        "The actual input data's size mismatched with LoD information.");
+        platform::errors::InvalidArgument(
+            "The actual input data's size mismatched with LoD information. "
+            "Received input data size is %d (actual) vs %d (LoD information).",
+            static_cast<uint64_t>(in_dims[0]), lod0.back()));
     PADDLE_ENFORCE_EQ(
         in_dims.size(), 2UL,
-        "Input(X) of SequenceEnumerate operator's rank should be 2.");
+        platform::errors::InvalidArgument(
+            "Input(X) of SequenceEnumerate operator's rank should be 2. "
+            "Received %d instead.",
+            in_dims.size()));
     PADDLE_ENFORCE_EQ(in_dims[1], 1,
-                      "Input(X) of SequenceEnumerate operator's 2nd "
-                      "dimension should be 1.");
+                      platform::errors::InvalidArgument(
+                          "Input(X) of SequenceEnumerate operator's 2nd "
+                          "dimension should be 1. Received %d instead.",
+                          in_dims[1]));
     // Generate enumerate sequence set
     auto in_data = in->data<T>();
......
@@ -69,8 +69,10 @@ class SequenceMaskOpMaker : public framework::OpProtoAndCheckerMaker {
              "= max(Input(X)).")
         .SetDefault(-1)
         .AddCustomChecker([](const int& v) {
-          PADDLE_ENFORCE(v < 0 || v >= 1,
-                         "Attr(maxlen) must be less than 0 or larger than 1");
+          PADDLE_ENFORCE_EQ(
+              v < 0 || v >= 1, true,
+              platform::errors::InvalidArgument(
+                  "Attr(maxlen) must be less than 0 or at least 1."));
         });
     AddAttr<int>("out_dtype", "Output data type");
     AddComment(R"DOC(
......
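
Compound predicates such as maxlen's "negative, or at least 1" rule have no single relational macro, so the whole expression is evaluated and compared with `true`, mirroring the device-place checks earlier in the commit:

```cpp
// Compound predicate via PADDLE_ENFORCE_EQ(cond, true, ...) (fragment of
// the sequence_mask checker above; `v` is the attribute value passed to
// the AddCustomChecker lambda).
PADDLE_ENFORCE_EQ(
    v < 0 || v >= 1, true,
    platform::errors::InvalidArgument(
        "Attr(maxlen) must be less than 0 or at least 1."));
```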
@@ -42,14 +42,22 @@ class SequencePoolKernel : public framework::OpKernel<T> {
                           "Input(X) Tensor of SequencePoolOp "
                           "does not contain LoD information."));
     PADDLE_ENFORCE_LE(lod_level, 2UL,
-                      "The lod level of input shall be no more than 2.");
+                      platform::errors::InvalidArgument(
+                          "The lod level of input shall be no more than 2. "
+                          "Received lod level is %d.",
+                          lod_level));
     PADDLE_ENFORCE_GE(
         dims[0],
         /*batch size = */ static_cast<int64_t>(lod[lod_level - 1].size() - 1),
-        "The first dimension of Input(X) must be large than batch size.");
+        platform::errors::InvalidArgument(
+            "The first dimension of Input(X) must be no less than batch "
+            "size. But received first dimension of Input(X) is %d, while "
+            "batch size is %d.",
+            dims[0], static_cast<int64_t>(lod[lod_level - 1].size() - 1)));
     if (lod_level > 1UL) {
       PADDLE_ENFORCE_EQ(lod[0][lod[0].size() - 1], lod[1].size() - 1,
-                        "The input lod information is illegal.");
+                        platform::errors::InvalidArgument(
+                            "The input lod information is illegal."));
       framework::LoD out_lod;
       out_lod.push_back(lod[0]);
       out->set_lod(out_lod);
......