From ed292695c577c85e730931daf948b8e4dd0236dc Mon Sep 17 00:00:00 2001 From: kinghuin Date: Wed, 9 Sep 2020 10:10:33 +0800 Subject: [PATCH] optimize the error message for math dir optimize the error message for math dir --- paddle/fluid/operators/math/sequence2batch.cc | 21 ++++- paddle/fluid/operators/math/sequence2batch.cu | 21 ++++- paddle/fluid/operators/math/sequence2batch.h | 36 +++++-- .../fluid/operators/math/sequence_padding.cc | 17 +++- .../fluid/operators/math/sequence_padding.cu | 20 ++-- .../fluid/operators/math/sequence_padding.h | 25 +++-- .../fluid/operators/math/sequence_pooling.cc | 93 ++++++++++++++----- .../fluid/operators/math/sequence_pooling.cu | 10 +- .../operators/math/sequence_pooling_test.cc | 16 +++- paddle/fluid/operators/math/tree2col.cc | 6 +- 10 files changed, 203 insertions(+), 62 deletions(-) diff --git a/paddle/fluid/operators/math/sequence2batch.cc b/paddle/fluid/operators/math/sequence2batch.cc index e4ffeedb5a..300a369201 100644 --- a/paddle/fluid/operators/math/sequence2batch.cc +++ b/paddle/fluid/operators/math/sequence2batch.cc @@ -29,11 +29,24 @@ class CopyMatrixRowsFunctor { auto src_dims = src.dims(); auto dst_dims = dst->dims(); PADDLE_ENFORCE_EQ(src_dims.size(), 2UL, - "The src must be matrix with rank 2."); + platform::errors::InvalidArgument( + "The source tensor must be a matrix with rank 2, but " + "got the source tensor rank is %lu. " + "Please check the rank of the source tensor", + src_dims.size())); PADDLE_ENFORCE_EQ(dst_dims.size(), 2UL, - "The dst must be matrix with rank 2."); - PADDLE_ENFORCE_EQ(src_dims[1], dst_dims[1], - "The width of src and dst must be same."); + platform::errors::InvalidArgument( + "The destination tensor must be a matrix with rank 2, " + "but got the destination tensor rank is %lu. 
" Please check the rank of the destination tensor", + dst_dims.size())); + PADDLE_ENFORCE_EQ( + src_dims[1], dst_dims[1], + platform::errors::InvalidArgument( + "The width of the source tensor and the destination tensor must be " + "same. But got %lu != %lu. Please check the width of the source " + "tensor", + src_dims[1], dst_dims[1])); auto height = dst_dims[0]; auto width = dst_dims[1]; auto* src_data = src.data(); diff --git a/paddle/fluid/operators/math/sequence2batch.cu b/paddle/fluid/operators/math/sequence2batch.cu index 9ab13659c1..cd1ca57268 100644 --- a/paddle/fluid/operators/math/sequence2batch.cu +++ b/paddle/fluid/operators/math/sequence2batch.cu @@ -46,11 +46,24 @@ class CopyMatrixRowsFunctor { auto src_dims = src.dims(); auto dst_dims = dst->dims(); PADDLE_ENFORCE_EQ(src_dims.size(), 2, - "The src must be matrix with rank 2."); + platform::errors::InvalidArgument( + "The source tensor must be a matrix with rank 2, but " + "got the source tensor rank is %lu. " + "Please check the rank of the source tensor", + src_dims.size())); PADDLE_ENFORCE_EQ(dst_dims.size(), 2, - "The dst must be matrix with rank 2."); + platform::errors::InvalidArgument( + "The destination tensor must be a matrix with rank 2, " + "but got the destination tensor rank is %lu. " + "Please check the rank of the destination tensor", + dst_dims.size())); + PADDLE_ENFORCE_EQ( + src_dims[1], dst_dims[1], + platform::errors::InvalidArgument( + "The width of the source tensor and the destination tensor must be " + "same. 
But got %lu != %lu. Please check the width of the source " + "tensor", + src_dims[1], dst_dims[1])); auto height = dst_dims[0]; auto width = dst_dims[1]; auto* src_data = src.data(); diff --git a/paddle/fluid/operators/math/sequence2batch.h b/paddle/fluid/operators/math/sequence2batch.h index 9d9f7ef00b..6aa513e4d1 100644 --- a/paddle/fluid/operators/math/sequence2batch.h +++ b/paddle/fluid/operators/math/sequence2batch.h @@ -64,19 +64,30 @@ class LoDTensor2BatchFunctor { bool is_reverse = false) const { if (!is_cal_batch_lod) { auto lods = batch->lod(); - PADDLE_ENFORCE_GT(lods.size(), 2UL, - "The LoD of LoDTensor should inlcude at least 2-level " - "sequence information."); + PADDLE_ENFORCE_GT( + lods.size(), 2UL, + platform::errors::InvalidArgument( + "The LoD of LoDTensor should include at least 2-level " + "sequence information, but got the LoD level is %lu. Please " + "check the input value.", + lods.size())); PADDLE_ENFORCE_EQ( lods[1].size(), static_cast(lod_tensor.dims()[0]), - "The LoD information should be consistent with the dims."); + platform::errors::InvalidArgument( + "The LoD information should be consistent with the dims, but got " + "%lu != %lu. Please check the input value.", + lods[1].size(), static_cast(lod_tensor.dims()[0]))); CopyMatrixRowsFunctor to_batch; to_batch(context, lod_tensor, lods[1], batch, true); return; } auto lods = lod_tensor.lod(); - PADDLE_ENFORCE_EQ(lods.size(), 1UL, "Only support one level sequence now."); + PADDLE_ENFORCE_EQ(lods.size(), 1UL, + platform::errors::InvalidArgument( + "Only support one level sequence now, but got the " + "LoD level is %lu. 
" Please check the input value.", + lods.size())); const auto& lod = lods[0]; @@ -161,12 +172,19 @@ class Batch2LoDTensorFunctor { const framework::LoDTensor& batch, framework::LoDTensor* lod_tensor) const { auto in_lod = batch.lod(); - PADDLE_ENFORCE_GT(in_lod.size(), 2UL, - "The LoD of LoDTensor should inlcude at least 2-level " - "sequence information."); + PADDLE_ENFORCE_GT( + in_lod.size(), 2UL, + platform::errors::InvalidArgument( + "The LoD of LoDTensor should include at least 2-level " + "sequence information, but got the LoD level is %lu. Please check " + "the input value.", + in_lod.size())); PADDLE_ENFORCE_EQ( in_lod[1].size(), static_cast(lod_tensor->dims()[0]), - "The LoD information should be consistent with the dims."); + platform::errors::InvalidArgument( + "The LoD information should be consistent with the dims, but got " + "%lu != %lu. Please check the input value.", + in_lod[1].size(), static_cast(lod_tensor->dims()[0]))); CopyMatrixRowsFunctor to_seq; to_seq(context, batch, in_lod[1], lod_tensor, false); } diff --git a/paddle/fluid/operators/math/sequence_padding.cc b/paddle/fluid/operators/math/sequence_padding.cc index 4630689dec..076df01764 100644 --- a/paddle/fluid/operators/math/sequence_padding.cc +++ b/paddle/fluid/operators/math/sequence_padding.cc @@ -35,7 +35,11 @@ void CopyValidData(framework::Tensor* dst_tensor, int valid_seq_len = seq_offsets[seq_idx + 1] - seq_offsets[seq_idx]; PADDLE_ENFORCE_GE( pad_seq_len, valid_seq_len, - "The padded sequence length can not be less than its original length."); + platform::errors::InvalidArgument( + "The padded sequence length can not " + "be less than its original length. Expected %ld >= %ld, but got " + "%ld < %ld. Please check input value.", + pad_seq_len, valid_seq_len, pad_seq_len, valid_seq_len)); int seq_data_offset = seq_offsets[seq_idx] * step_width; int pad_data_offset = layout == kBatchLengthWidth ?
seq_idx * pad_seq_len * step_width @@ -95,9 +99,14 @@ class PaddingLoDTensorFunctor { CheckDims(seq_tensor_dims, pad_tensor_dims, seq_offsets, pad_seq_len, step_width, layout); - PADDLE_ENFORCE(pad_value.numel() == 1 || pad_value.numel() == step_width, - "The numel of 'pad_value' can only be 1 or be equal to the " - "'step_width'."); + + PADDLE_ENFORCE_EQ( + pad_value.numel() == 1 || pad_value.numel() == step_width, true, + platform::errors::InvalidArgument( + "The numel of 'pad_value' can only be 1 or be equal to the " + "'step_width', but got %ld != 1 and %ld. Please check the input " + "value.", + pad_value.numel(), step_width)); // fill padding value T* pad_data = pad_tensor->data(); diff --git a/paddle/fluid/operators/math/sequence_padding.cu b/paddle/fluid/operators/math/sequence_padding.cu index 1b43306790..19c3af0341 100644 --- a/paddle/fluid/operators/math/sequence_padding.cu +++ b/paddle/fluid/operators/math/sequence_padding.cu @@ -66,17 +66,25 @@ class PaddingLoDTensorFunctor { if (pad_seq_len == -1) { pad_seq_len = max_seq_len; } - PADDLE_ENFORCE_GE(pad_seq_len, max_seq_len, - "The pad_seq_len must be equal to or greater than the " - "original max sequence length."); + PADDLE_ENFORCE_GE( + pad_seq_len, max_seq_len, + platform::errors::InvalidArgument( + "The pad_seq_len must be equal to or greater than the " + "original max sequence length. Expected %ld >= %ld, but got %ld < " + "%ld. 
Please check the input value.", + pad_seq_len, max_seq_len, pad_seq_len, max_seq_len)); int step_width = seq_tensor.numel() / seq_tensor_dims[0]; int seq_num = seq_offsets.size() - 1; CheckDims(seq_tensor_dims, pad_tensor_dims, seq_offsets, pad_seq_len, step_width, layout); - PADDLE_ENFORCE(pad_value.numel() == 1 || pad_value.numel() == step_width, - "The numel of 'pad_value' can only be 1 or be equal to the " - "'step_width'."); + PADDLE_ENFORCE_EQ( + pad_value.numel() == 1 || pad_value.numel() == step_width, true, + platform::errors::InvalidArgument( + "The numel of 'pad_value' can only be 1 or be equal to " + "the 'step_width', but got %ld != 1 and %ld. Please check the " + "input value.", + pad_value.numel(), step_width)); const int kBlockSize = 512; diff --git a/paddle/fluid/operators/math/sequence_padding.h b/paddle/fluid/operators/math/sequence_padding.h index 5580ee5374..956a4ff6a2 100644 --- a/paddle/fluid/operators/math/sequence_padding.h +++ b/paddle/fluid/operators/math/sequence_padding.h @@ -52,14 +52,25 @@ inline static void CheckDims(const framework::DDim& seq_tensor_dims, const framework::Vector& seq_offset, int64_t padded_seq_len, int64_t step_width, const PadLayout& layout) { - PADDLE_ENFORCE_EQ(static_cast(seq_tensor_dims[0]), seq_offset.back(), - "Value of 1st dimension of the sequence tensor should be " - "equal to sum of lengths of all sequences."); + PADDLE_ENFORCE_EQ( + static_cast(seq_tensor_dims[0]), seq_offset.back(), + platform::errors::InvalidArgument( + "Value of 1st dimension of the sequence tensor should be " + "equal to sum of lengths of all sequences. Expected %ld == %ld, but " + "got %ld != %ld. 
Please check the input value.", + static_cast(seq_tensor_dims[0]), seq_offset.back(), + static_cast(seq_tensor_dims[0]), seq_offset.back())); - PADDLE_ENFORCE(seq_tensor_dims.size() + 1 == pad_tensor_dims.size() || - seq_tensor_dims.size() == pad_tensor_dims.size(), - "pad_tensor's rank should be 1 greater than seq_tensor's " - "rank, or be equal with it."); + PADDLE_ENFORCE_EQ( + seq_tensor_dims.size() + 1 == pad_tensor_dims.size() || + seq_tensor_dims.size() == pad_tensor_dims.size(), + true, platform::errors::InvalidArgument( + "pad_tensor's rank should be 1 greater than seq_tensor's " + "rank, or be equal with it. The pad_tensor's rank is %ld, " + "expected the seq_tensor's rank is %ld or %ld, but got %ld. " + "Please check the input value.", + pad_tensor_dims.size(), pad_tensor_dims.size(), + pad_tensor_dims.size() - 1, seq_tensor_dims.size())); } /* diff --git a/paddle/fluid/operators/math/sequence_pooling.cc b/paddle/fluid/operators/math/sequence_pooling.cc index cc3fbd5876..2eee4d0a6c 100644 --- a/paddle/fluid/operators/math/sequence_pooling.cc +++ b/paddle/fluid/operators/math/sequence_pooling.cc @@ -42,15 +42,29 @@ class MaxSeqPoolFunctor { auto out_dims = output->dims(); auto idx_dims = index->dims(); PADDLE_ENFORCE_GT(in_dims.size(), 1, - "The rank of input shall be greater than 1."); + platform::errors::InvalidArgument( + "The rank of input shall be greater than 1, but got " + "the rank is %ld. Please check the input value", + in_dims.size())); PADDLE_ENFORCE_GT(out_dims.size(), 1, - "The rank of output shall be greater than 1."); + platform::errors::InvalidArgument( + "The rank of output shall be greater than 1, but got " + "the rank is %ld. 
Please check the input value", + out_dims.size())); for (int64_t i = 1; i < in_dims.size(); ++i) { - PADDLE_ENFORCE_EQ(in_dims[i], out_dims[i], - "The dimension of input and output shall be same."); + PADDLE_ENFORCE_EQ( + in_dims[i], out_dims[i], + platform::errors::InvalidArgument( + "The dimension of input and output shall be same. Expected %ld " + "== %ld, but got %ld != %ld. Please check the input value.", + in_dims[i], out_dims[i], in_dims[i], out_dims[i])); } - PADDLE_ENFORCE_EQ(idx_dims, out_dims, - "The dimension of index and output shall be same."); + PADDLE_ENFORCE_EQ( + idx_dims, out_dims, + platform::errors::InvalidArgument( + "The dimension of index and output shall be same. Expected %ld == " + "%ld, but got %ld != %ld. Please check the input value.", + idx_dims, out_dims, idx_dims, out_dims)); auto lod_level = input.lod().size(); auto starts = input.lod()[lod_level - 1]; @@ -94,12 +108,22 @@ class MaxSeqPoolFunctor { auto in_dims = input.dims(); auto out_dims = output->dims(); PADDLE_ENFORCE_GT(in_dims.size(), 1, - "The rank of input shall be greater than 1."); + platform::errors::InvalidArgument( + "The rank of input shall be greater than 1, but got " + "%ld <= 1. Please check the input value.", + in_dims.size())); PADDLE_ENFORCE_GT(out_dims.size(), 1, - "The rank of output shall be greater than 1."); + platform::errors::InvalidArgument( + "The rank of output shall be greater than 1, but got " + "%ld <= 1. Please check the input value.", + out_dims.size())); for (int64_t i = 1; i < in_dims.size(); ++i) { - PADDLE_ENFORCE_EQ(in_dims[i], out_dims[i], - "The dimension of input and output shall be same."); + PADDLE_ENFORCE_EQ( + in_dims[i], out_dims[i], + platform::errors::InvalidArgument( + "The dimension of input and output shall be same. Expected %ld " + "== %ld, but got %ld != %ld. 
Please check the input value.", + in_dims[i], out_dims[i], in_dims[i], out_dims[i])); } auto lod_level = input.lod().size(); @@ -139,16 +163,29 @@ class MaxSeqPoolGradFunctor { auto ig_dims = in_grad->dims(); auto idx_dims = index.dims(); PADDLE_ENFORCE_GT(og_dims.size(), 1, - "The rank of output@Grad shall be greater than 1."); + platform::errors::InvalidArgument( + "The rank of output@Grad shall be greater than 1, " + "but got %ld <= 1. Please check the input value.", + og_dims.size())); PADDLE_ENFORCE_GT(ig_dims.size(), 1, - "The rank of input@Grad shall be greater than 1."); + platform::errors::InvalidArgument( + "The rank of input@Grad shall be greater than 1, but " + "got %ld <= 1. Please check the input value.", + ig_dims.size())); for (int64_t i = 1; i < og_dims.size(); ++i) { - PADDLE_ENFORCE_EQ( - og_dims[i], ig_dims[i], - "The dimension of input@Grad and output@Grad shall be same."); + PADDLE_ENFORCE_EQ(og_dims[i], ig_dims[i], + platform::errors::InvalidArgument( + "The dimension of input@Grad and output@Grad shall " + "be same. Expected %ld == %ld, but got %ld != %ld. " + "Please check the input value.", + og_dims[i], ig_dims[i], og_dims[i], ig_dims[i])); } - PADDLE_ENFORCE_EQ(idx_dims, og_dims, - "The dimension of index and output@Grad shall be same."); + PADDLE_ENFORCE_EQ( + idx_dims, og_dims, + platform::errors::InvalidArgument( + "The dimension of index and output@Grad shall be same. Expected " + "%ld == %ld, but got %ld != %ld. 
Please check the input value.", + idx_dims, og_dims, idx_dims, og_dims)); const T* og_data = out_grad.data(); const int* max_index = index.data(); @@ -244,9 +281,12 @@ class SumSeqPoolGradFunctor { auto lod = in_grad->lod()[lod_level - 1]; int64_t out_w = out_grad.numel() / out_grad.dims()[0]; int64_t in_w = in_grad->numel() / in_grad->dims()[0]; - PADDLE_ENFORCE_EQ( - in_w, out_w, - "The feature size of input@Grad and output@Grad shall be same."); + PADDLE_ENFORCE_EQ(in_w, out_w, + platform::errors::InvalidArgument( + "The feature size of input@Grad and output@Grad " + "shall be same. Expected %ld == %ld, but got %ld != " + "%ld. Please check the input value.", + in_w, out_w, in_w, out_w)); const T* out_g_data = out_grad.data(); T* in_g_data = in_grad->mutable_data(context.GetPlace()); auto blas = math::GetBlas(context); @@ -298,7 +338,8 @@ class SequencePoolFunctor { auto place = context.GetPlace(); PADDLE_ENFORCE_EQ( platform::is_cpu_place(place), true, - "Sequence_pool should run on CPU Device when pooltype is SUM"); + platform::errors::InvalidArgument( + "Sequence_pool should run on CPU Device when pooltype is SUM")); const T* src = input.data(); T* dst = output->mutable_data(place); jit::seq_pool_attr_t attr( @@ -342,7 +383,10 @@ class SequencePoolFunctor { out_e.device(place) = in_e.sum(Eigen::array({{0}})) / std::sqrt(static_cast(h)); } else { - PADDLE_THROW("unsupported pooling pooltype"); + PADDLE_THROW(platform::errors::InvalidArgument( + "unsupported pooling pooltype: %s. Only support \"AVERAGE\" and " + "\"SQRT\"", + pooltype)); } } } @@ -400,7 +444,10 @@ class SequencePoolGradFunctor { } else if (pooltype == "FIRST") { in_g_e.chip(0, 0).device(place) = out_g_e_v; } else { - PADDLE_THROW("unsupported pooling pooltype"); + PADDLE_THROW(platform::errors::InvalidArgument( + "unsupported pooling pooltype: %s. 
Only support \"AVERAGE\", " + "\"SQRT\", \"LAST\" and \"FIRST\"", + pooltype)); } } } diff --git a/paddle/fluid/operators/math/sequence_pooling.cu b/paddle/fluid/operators/math/sequence_pooling.cu index 422b06c70e..cba8dd935e 100644 --- a/paddle/fluid/operators/math/sequence_pooling.cu +++ b/paddle/fluid/operators/math/sequence_pooling.cu @@ -205,7 +205,10 @@ class SequencePoolFunctor { lod.CUDAData(context.GetPlace()), lod.size(), item_dim, output->mutable_data(context.GetPlace()), nullptr); } else { - PADDLE_THROW("unsupported pooling pooltype"); + PADDLE_THROW(platform::errors::InvalidArgument( + "unsupported pooling pooltype: %s. Only support \"MAX\", " + "\"AVERAGE\", \"SUM\", \"SQRT\", \"LAST\" and \"FIRST\"", + pooltype)); } } }; @@ -370,7 +373,10 @@ class SequencePoolGradFunctor { in_grad->mutable_data(context.GetPlace()), nullptr); } else { - PADDLE_THROW("unsupported pooling pooltype"); + PADDLE_THROW(platform::errors::InvalidArgument( + "unsupported pooling pooltype: %s. Only support \"MAX\", " + "\"AVERAGE\", \"SUM\", \"SQRT\", \"LAST\" and \"FIRST\"", + pooltype)); } } }; diff --git a/paddle/fluid/operators/math/sequence_pooling_test.cc b/paddle/fluid/operators/math/sequence_pooling_test.cc index efab1a375b..4b5f484e52 100644 --- a/paddle/fluid/operators/math/sequence_pooling_test.cc +++ b/paddle/fluid/operators/math/sequence_pooling_test.cc @@ -50,9 +50,21 @@ void TestSequencePoolingSum(const DeviceContext &context, in_grad.mutable_data(in_dims, place); // check tensor contruction result - PADDLE_ENFORCE_EQ(in_grad.dims().size(), out_grad.dims().size()); + PADDLE_ENFORCE_EQ( + in_grad.dims().size(), out_grad.dims().size(), + paddle::platform::errors::InvalidArgument( + "The dimension of input and output shall be same. Expected %ld == " + "%ld, but got %ld != %ld. 
Please check the input value.", + in_grad.dims().size(), out_grad.dims().size(), in_grad.dims().size(), + out_grad.dims().size())); for (int64_t i = 1; i < out_grad.dims().size(); ++i) { - PADDLE_ENFORCE_EQ(in_grad.dims()[i], out_grad.dims()[i]); + PADDLE_ENFORCE_EQ( + in_grad.dims()[i], out_grad.dims()[i], + paddle::platform::errors::InvalidArgument( + "The dimension of input and output shall be same. Expected %ld == " + "%ld, but got %ld != %ld. Please check the input value.", + in_grad.dims()[i], out_grad.dims()[i], in_grad.dims()[i], + out_grad.dims()[i])); } // call functor diff --git a/paddle/fluid/operators/math/tree2col.cc b/paddle/fluid/operators/math/tree2col.cc index cafcf63193..0344226ea6 100644 --- a/paddle/fluid/operators/math/tree2col.cc +++ b/paddle/fluid/operators/math/tree2col.cc @@ -55,7 +55,11 @@ void Tree2ColUtil::construct_tree(const paddle::Tensor &EdgeSet, std::vector> *tr, size_t *node_count) { auto edge_set_dims = EdgeSet.dims(); - PADDLE_ENFORCE_EQ(edge_set_dims[1], 2); + PADDLE_ENFORCE_EQ(edge_set_dims[1], 2, + platform::errors::InvalidArgument( + "The second dimension of the EdgeSet shall be 2, but " + "got %ld != 2. Please check the input value.", + edge_set_dims[1])); int64_t edge_count = EdgeSet.numel(); const int *edge_data = EdgeSet.data(); -- GitLab