Commit a35a2ee9 authored by S smallv0221

lstm_unit error message enhancement.

lstmp error message enhancement.
sequence_conv error message enhancement.
sequence_enumerate error message enhancement.
sequence_mask error message enhancement.
Parent 2725f51d
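For reference, the migration pattern applied throughout this commit, as a minimal sketch condensed from the hunks below (snippets are illustrative excerpts, not a complete op definition):

```cpp
// Before: PADDLE_ENFORCE with a bare message string.
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of LSTM should not be null.");

// After: input/output presence checks go through OP_INOUT_CHECK, which
// produces a uniform "not found" message tagged with the op name.
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "lstm_unit");

// After: value checks use the comparison macros plus a typed error from
// platform::errors (InvalidArgument, PreconditionNotMet, ...).
PADDLE_ENFORCE_EQ(x_dims.size(), 2,
                  platform::errors::InvalidArgument(
                      "Input(X)'s rank must be 2."));

// After: unconditional failures wrap the message the same way.
PADDLE_THROW(
    platform::errors::InvalidArgument("unsupported activation type"));
```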
@@ -23,23 +23,23 @@ class LstmUnitOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of LSTM should not be null.");
PADDLE_ENFORCE(ctx->HasInput("C_prev"),
"Input(C_prev) of LSTM should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("C"),
"Output(C) of LSTM should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("H"),
"Output(H) of LSTM should not be null.");
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "lstm_unit");
OP_INOUT_CHECK(ctx->HasInput("C_prev"), "Input", "C_prev", "lstm_unit");
OP_INOUT_CHECK(ctx->HasInput("C"), "Output", "C", "lstm_unit");
OP_INOUT_CHECK(ctx->HasInput("H"), "Output", "H", "lstm_unit");
auto x_dims = ctx->GetInputDim("X");
auto c_prev_dims = ctx->GetInputDim("C_prev");
PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank must be 2.");
PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::InvalidArgument(
"Input(X)'s rank must be 2."));
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(x_dims[0], c_prev_dims[0],
"Batch size of inputs and states must be equal");
platform::errors::InvalidArgument(
"Batch size of inputs and states must be equal"));
PADDLE_ENFORCE_EQ(x_dims[1], c_prev_dims[1] * 4,
"Dimension of FC should equal to prev state * 4");
platform::errors::InvalidArgument(
"Dimension of FC should equal to prev state * 4"));
}
int b_size = c_prev_dims[0]; // batch size
@@ -85,10 +85,10 @@ class LstmUnitGradOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("C")),
"Input(C@GRAD) should not be null");
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("H")),
"Input(H@GRAD) should not be null");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("C")), "Input",
framework::GradVarName("C"), "lstm_unit");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("H")), "Input",
framework::GradVarName("H"), "lstm_unit");
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
ctx->SetOutputDim(framework::GradVarName("C_prev"),
ctx->GetInputDim("C_prev"));
......
@@ -93,8 +93,9 @@ template <typename T>
class LstmUnitOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use CUDAPlace.");
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
auto* x_tensor = ctx.Input<framework::Tensor>("X");
auto* c_prev_tensor = ctx.Input<framework::Tensor>("C_prev");
@@ -124,8 +125,9 @@ template <typename T>
class LstmUnitGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use CUDAPlace.");
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
auto x_tensor = ctx.Input<Tensor>("X");
auto c_prev_tensor = ctx.Input<Tensor>("C_prev");
......
@@ -39,8 +39,9 @@ template <typename DeviceContext, typename T>
class LstmUnitKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
"It must use CPUPlace.");
PADDLE_ENFORCE(
platform::is_cpu_place(ctx.GetPlace()),
paddle::platform::errors::PreconditionNotMet("It must use CPUPlace."));
auto* x_tensor = ctx.Input<framework::Tensor>("X");
auto* c_prev_tensor = ctx.Input<framework::Tensor>("C_prev");
@@ -82,8 +83,9 @@ template <typename DeviceContext, typename T>
class LstmUnitGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
"It must use CPUPlace.");
PADDLE_ENFORCE(
platform::is_cpu_place(ctx.GetPlace()),
paddle::platform::errors::PreconditionNotMet("It must use CPUPlace."));
auto x_tensor = ctx.Input<Tensor>("X");
auto c_prev_tensor = ctx.Input<Tensor>("C_prev");
......
@@ -91,7 +91,8 @@ class LSTMPKernel : public framework::OpKernel<T> {
else if (act_type == math::detail::ActivationType::kReLU)
ReluFunctor<T>()(d, x, y);
else
PADDLE_THROW("unsupported activation type");
PADDLE_THROW(
platform::errors::InvalidArgument("unsupported activation type"));
}
void Compute(const framework::ExecutionContext& ctx) const override {
@@ -263,7 +264,8 @@ class LSTMPGradKernel : public framework::OpKernel<T> {
else if (act_type == math::detail::ActivationType::kReLU)
ReluGradFunctor<T>()(d, x, y, dy, dx);
else
PADDLE_THROW("unsupported activation type");
PADDLE_THROW(
platform::errors::InvalidArgument("unsupported activation type"));
}
void Compute(const framework::ExecutionContext& ctx) const override {
......
@@ -61,7 +61,8 @@ class SequenceConvOp : public framework::OperatorWithKernel {
if (ctx->Attrs().Get<bool>("paddingTrainable")) {
PADDLE_ENFORCE(
ctx->HasInput("PaddingData"),
"Input(PaddingData) of SequenceConvOp should not be null.");
platform::errors::InvalidArgument(
"Input(PaddingData) of SequenceConvOp should not be null."));
framework::DDim padding_dim = ctx->GetInputDim("PaddingData");
int up_pad = std::max(0, -context_start);
int down_pad = std::max(0, context_start + context_length - 1);
@@ -69,9 +70,9 @@ class SequenceConvOp : public framework::OperatorWithKernel {
int input_width = static_cast<int>(in_dims[1]);
if (context_start == 0 && context_length == 1) {
PADDLE_THROW(
PADDLE_THROW(platform::errors::InvalidArgument(
"If context_start is 0 and context_length is 1, paddingTrainable "
"should be false.");
"should be false."));
}
PADDLE_ENFORCE_EQ(
padding_dim.size(), 2,
......
@@ -44,7 +44,8 @@ class SequenceEnumerateOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<int>("win_size", "(int) The enumerate sequence window size.")
.AddCustomChecker([](const int& win_size) {
PADDLE_ENFORCE(win_size >= 2,
"The window size should be not less than 2.");
platform::errors::InvalidArgument(
"The window size should be not less than 2."));
});
AddAttr<int>("pad_value", "(int) The enumerate sequence padding value.")
.SetDefault(0);
......
@@ -58,7 +58,8 @@ class SequenceEnumerateOpCUDAKernel : public framework::OpKernel<T> {
PADDLE_ENFORCE_EQ(
static_cast<uint64_t>(in_dims[0]), in_lod[0].back(),
"The actual input data's size mismatched with LoD information.");
platform::errors::InvalidArgument(
"The actual input data's size mismatched with LoD information."));
/* Generate enumerate sequence set */
auto stream = context.cuda_device_context().stream();
......
@@ -29,21 +29,26 @@ class SequenceEnumerateKernel : public framework::OpKernel<T> {
int win_size = context.Attr<int>("win_size");
auto pad_value = static_cast<T>(context.Attr<int>("pad_value"));
PADDLE_ENFORCE_EQ(in->lod().empty(), false,
"Input(X) Tensor of SequenceEnumerateOp does not contain "
"LoD information.");
PADDLE_ENFORCE_EQ(
in->lod().empty(), false,
platform::errors::InvalidArgument(
"Input(X) Tensor of SequenceEnumerateOp does not contain "
"LoD information."));
auto in_dims = in->dims();
auto lod0 = in->lod()[0];
PADDLE_ENFORCE_EQ(
static_cast<uint64_t>(in_dims[0]), lod0.back(),
"The actual input data's size mismatched with LoD information.");
platform::errors::InvalidArgument(
"The actual input data's size mismatched with LoD information."));
PADDLE_ENFORCE_EQ(
in_dims.size(), 2UL,
"Input(X) of SequenceEnumerate operator's rank should be 2.");
platform::errors::InvalidArgument(
"Input(X) of SequenceEnumerate operator's rank should be 2."));
PADDLE_ENFORCE_EQ(in_dims[1], 1,
"Input(X) of SequenceEnumerate operator's 2nd "
"dimension should be 1.");
platform::errors::InvalidArgument(
"Input(X) of SequenceEnumerate operator's 2nd "
"dimension should be 1."));
// Generate enumerate sequence set
auto in_data = in->data<T>();
......
@@ -69,8 +69,10 @@ class SequenceMaskOpMaker : public framework::OpProtoAndCheckerMaker {
"= max(Input(X)).")
.SetDefault(-1)
.AddCustomChecker([](const int& v) {
PADDLE_ENFORCE(v < 0 || v >= 1,
"Attr(maxlen) must be less than 0 or larger than 1");
PADDLE_ENFORCE(
v < 0 || v >= 1,
platform::errors::InvalidArgument(
"Attr(maxlen) must be less than 0 or larger than 1"));
});
AddAttr<int>("out_dtype", "Output data type");
AddComment(R"DOC(
......