Unverified · Commit 6c1acf34 · Authored by xiemoyuan, committed by GitHub

Optimize the error message for OP (#27617)

* Optimize the error message for OPs.

* Optimize the error message for OPs in details.
Parent baddedfd
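Every hunk below applies the same pattern: a bare-string PADDLE_THROW or PADDLE_ENFORCE is replaced by a typed check (PADDLE_THROW with an error class, or a comparison macro such as PADDLE_ENFORCE_GT/EQ/NE/LT) whose message is wrapped in a platform::errors::* class, and where a value is available the message now reports it. The class names the failure kind: Unimplemented for a missing kernel, InvalidArgument for bad inputs. A minimal before/after sketch of the pattern, drawn from the hunks below (it compiles only inside the Paddle source tree; rank, ref_lod, and i stand in for whatever locals are being validated):

    // Before: untyped message, no error class, no received value.
    PADDLE_THROW("Only support tensor with rank being between 1 and 6.");

    // After: typed error class, plus the value that triggered the failure.
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Only support tensor with rank being between 1 and 6. But received "
        "tensor X's rank = %d.",
        rank));

    // Boolean PADDLE_ENFORCE(a > b, msg) becomes the typed comparison macro,
    // which also prints both operands when the check fails.
    PADDLE_ENFORCE_GT(ref_lod[i], ref_lod[i - 1],
                      platform::errors::InvalidArgument(
                          "Reference string %d is empty.", i));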
@@ -117,7 +117,8 @@ void BeamSearchDecodeFunctor::apply() const {
 template <>
 void BeamSearchDecodeFunctor::apply<bool>() const {
-  PADDLE_THROW("beam search decode op does not support bool!");
+  PADDLE_THROW(platform::errors::InvalidArgument(
+      "beam search decode op does not support bool!"));
 }
 class BeamSearchDecodeOp : public framework::OperatorBase {
...
@@ -146,7 +146,7 @@ class ChunkEvalKernel : public framework::OpKernel<T> {
       tag_end = -1;
       tag_single = -1;
     } else {
-      PADDLE_THROW("Unknown chunk scheme.");
+      PADDLE_THROW(platform::errors::InvalidArgument("Unknown chunk scheme."));
     }
     other_chunk_type = num_chunk_types = context.Attr<int>("num_chunk_types");
     excluded_chunk_types.insert(
...
@@ -274,8 +274,8 @@ template <typename T>
 class NotImpleKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_THROW(
-        "CPU is not support for this kernel now. Will be add in the future");
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "CPU is not support for this kernel now. Will be add in the future"));
   }
 };
...
@@ -111,8 +111,9 @@ class EditDistanceGPUKernel : public framework::OpKernel<T> {
     if (normalized) {
       for (size_t i = 1; i < ref_lod.size(); ++i) {
-        PADDLE_ENFORCE(ref_lod[i] > ref_lod[i - 1],
-                       "Reference string %d is empty.", i);
+        PADDLE_ENFORCE_GT(ref_lod[i], ref_lod[i - 1],
+                          platform::errors::InvalidArgument(
+                              "Reference string %d is empty.", i));
       }
     }
...
@@ -58,8 +58,9 @@ class EditDistanceKernel : public framework::OpKernel<T> {
     if (normalized) {
       for (size_t i = 1; i < ref_lod.size(); ++i) {
-        PADDLE_ENFORCE(ref_lod[i] > ref_lod[i - 1],
-                       "Reference string %d is empty.", i);
+        PADDLE_ENFORCE_GT(ref_lod[i], ref_lod[i - 1],
+                          platform::errors::InvalidArgument(
+                              "Reference string %d is empty.", i));
       }
     }
     auto num_strs = hyp_lod.size() - 1;
@@ -106,10 +107,11 @@ class EditDistanceKernel : public framework::OpKernel<T> {
       }
       if (normalized) {
-        PADDLE_ENFORCE(n > 0,
-                       "The reference string (#%d) cannot be empty "
-                       "when Attr(normalized) is enabled.",
-                       n);
+        PADDLE_ENFORCE_GT(n, 0UL,
+                          platform::errors::InvalidArgument(
+                              "The reference string (#%d) cannot be empty "
+                              "when Attr(normalized) is enabled.",
+                              n));
         distance = distance / n;
       }
       out[num] = distance;
...
@@ -89,8 +89,9 @@ class ExpandAsGradOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true);
-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true);
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ExpandAs");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "ExpandAs");
     auto x_dims = ctx->GetInputDim("X");
     auto x_grad_name = framework::GradVarName("X");
...
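OP_INOUT_CHECK above is the helper Paddle uses for input/output presence checks; its call shape is OP_INOUT_CHECK(condition, "Input" or "Output", variable_name, op_type). On failure it raises a uniform "not found" style error naming the missing variable and the operator, which is why it replaces the uninformative PADDLE_ENFORCE_EQ(ctx->HasInput(...), true) form here.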
@@ -61,7 +61,10 @@ class ExpandAsKernel : public framework::OpKernel<T> {
     switch (rank) {
       REP_EXPAND_AS_TEMPLATE(MAX_RANK_SUPPORTED)
       default:
-        PADDLE_THROW("Only support tensor with rank being between 1 and 6.");
+        PADDLE_THROW(platform::errors::InvalidArgument(
+            "Only support tensor with rank being between 1 and 6. But received "
+            "tensor X's rank = %d.",
+            rank));
     }
   }
@@ -77,13 +80,19 @@ class ExpandAsKernel : public framework::OpKernel<T> {
     auto x_dims = in0->dims();
     auto y_dims = target_tensor->dims();
     for (int i = 0; i < y_dims.size(); ++i) {
-      PADDLE_ENFORCE_NE(x_dims[i], 0, "X(input) should not have 0 dim");
+      PADDLE_ENFORCE_NE(
+          x_dims[i], 0UL,
+          platform::errors::InvalidArgument(
+              "X(input) should not have 0 dim. But received x_dims[%d] = 0.",
+              i));
       bcast_dims[i] = y_dims[i] / x_dims[i];
       bcast_dims_remainder += y_dims[i] % x_dims[i];
     }
-    PADDLE_ENFORCE_EQ(bcast_dims_remainder, 0,
-                      "X(input) could not be broadcast together with remapped "
-                      "shape(expand tensor's shape)");
+    PADDLE_ENFORCE_EQ(
+        bcast_dims_remainder, 0UL,
+        platform::errors::InvalidArgument(
+            "X(input) could not be broadcast together with remapped "
+            "shape(expand tensor's shape)"));
     framework::DDim out_dims(in_dims);
     for (size_t i = 0; i < bcast_dims.size(); ++i) {
       out_dims[i] *= bcast_dims[i];
@@ -137,7 +146,10 @@ class ExpandAsGradKernel : public framework::OpKernel<T> {
       switch (dims) {
         REP_EXPAND_AS_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
         default:
-          PADDLE_THROW("Only support tensor with rank being between 1 and 6.");
+          PADDLE_THROW(platform::errors::InvalidArgument(
+              "Only support tensor with rank being between 1 and 6. But "
+              "received tensor's rank = %d.",
+              dims));
       }
     }
   }
@@ -149,12 +161,6 @@ class ExpandAsGradKernel : public framework::OpKernel<T> {
                       const std::vector<int>& reduce_dims_vec) const {
     size_t reshape_size = reshape_dims_vec.size();
     size_t reduce_size = reduce_dims_vec.size();
-    PADDLE_ENFORCE_EQ(reshape_size, reshape_dims_vec.size(),
-                      "Inconsistent size between template Dims and "
-                      "reshape dimensions.");
-    PADDLE_ENFORCE_EQ(reduce_size, reduce_dims_vec.size(),
-                      "Inconsistent size between template Dims and "
-                      "reduce dimensions.");
     auto* in0 = context.Input<Tensor>(framework::GradVarName("Out"));
     auto* out0 = context.Output<Tensor>(framework::GradVarName("X"));
     out0->mutable_data<T>(context.GetPlace());
...
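The two checks deleted above compared reshape_size and reduce_size against the very expressions they had just been assigned from, so they could never fail; presumably that is why this commit drops them outright instead of converting them to the new error style.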
@@ -27,9 +27,10 @@ static inline T NormalizeL1(T* x, size_t len) {
   // (This comment is from the old LinearChainCRFLayer.)
   // Right now, we just bet that sum won't be zero. If this really happens, we
   // will figure out what should be done then.
-  PADDLE_ENFORCE(sum,
-                 "The unnormalized probabilities of all possible unfinished "
-                 "sequences must be greater than 0.");
+  PADDLE_ENFORCE_GT(
+      sum, 0., platform::errors::InvalidArgument(
+                   "The unnormalized probabilities of all possible unfinished "
+                   "sequences must be greater than 0."));
   T s = 1. / sum;
   for (size_t i = 0; i < len; ++i) x[i] *= s;
   return sum;
@@ -84,13 +85,19 @@ class LinearChainCRFOpKernel : public framework::OpKernel<T> {
       const Tensor* label_length = ctx.Input<framework::Tensor>("Length");
       length_data = label_length->data<int64_t>();
       seq_num = label_length->numel();
-      PADDLE_ENFORCE_EQ(seq_num, emission_dims[0],
-                        "the size of Input(length) must be equal to "
-                        "emission_dims[0].");
+      PADDLE_ENFORCE_EQ(
+          seq_num, emission_dims[0],
+          platform::errors::InvalidArgument(
+              "the size of Input(length) must be equal to "
+              "emission_dims[0]. But input_size = %d, emission_dims[0] = %d.",
+              seq_num, emission_dims[0]));
       auto label_dims = label->dims();
-      PADDLE_ENFORCE_EQ(seq_num, label_dims[0],
-                        "the size of Input(length) must be equal to "
-                        "label_dims[0].");
+      PADDLE_ENFORCE_EQ(
+          seq_num, label_dims[0],
+          platform::errors::InvalidArgument(
+              "the size of Input(length) must be equal to "
+              "label_dims[0]. But input_size = %d, label_dims[0] = %d.",
+              seq_num, label_dims[0]));
       batch_size = emission_dims[0] * emission_dims[1];
       tag_num = emission_dims[2];
@@ -102,7 +109,9 @@ class LinearChainCRFOpKernel : public framework::OpKernel<T> {
       math::set_constant(ctx.device_context(), alpha, 0.0);
     } else {
       in_lod = ctx.Input<LoDTensor>("Label")->lod();
-      PADDLE_ENFORCE_NE(in_lod.size(), 0, "Input(Label) must be a sequence.");
+      PADDLE_ENFORCE_NE(in_lod.size(), 0,
+                        platform::errors::InvalidArgument(
+                            "Input(Label) must be a sequence."));
       seq_num = in_lod[0].size() - 1;
       batch_size = emission_dims[0];
       tag_num = emission_dims[1];
@@ -204,7 +213,8 @@ class LinearChainCRFOpKernel : public framework::OpKernel<T> {
   const int64_t* lbl = label.data<int64_t>();
   PADDLE_ENFORCE_LT(
       static_cast<size_t>(*std::max_element(lbl, lbl + seq_length)), tag_num,
-      "An invalid tag label that execesses the largest tag number.");
+      platform::errors::InvalidArgument(
+          "An invalid tag label that execesses the largest tag number."));
   // Calculate the nominator part, which depends on the label sequence.
   ll += w[lbl[0]] /*start transition*/ + x[lbl[0]] +
@@ -254,7 +264,9 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel<T> {
           {emission_dims[0] * emission_dims[1], emission_dims[2]});
     } else {
       in_lod = ctx.Input<LoDTensor>("Label")->lod();
-      PADDLE_ENFORCE_NE(in_lod.size(), 0, "Input(Label) must be a sequence.");
+      PADDLE_ENFORCE_NE(in_lod.size(), 0,
+                        platform::errors::InvalidArgument(
+                            "Input(Label) must be a sequence."));
       seq_num = static_cast<int64_t>(in_lod[0].size() - 1);
     }
...