From 13804ed80cfd01e47d52876f8694cc0a8a3ad311 Mon Sep 17 00:00:00 2001
From: WeiXin <2279280558@qq.com>
Date: Tue, 8 Sep 2020 19:56:39 +0800
Subject: [PATCH] Error msg/polish tensor error msg (#26976)

* polish one line error message in tensor.cc

* polish error messages in tensor.cc,tensor.h tensor_impl.h

* polish error messages in tensor.cc tensor.h tensor_impl.h

* polish error messages in tensor.cc,tensor.h tensor_impl.h

* polish error messages in tensor.cc tensor.h tensor_impl.h tensor_test.cc

* polish error messages in tensor.cc tensor.h tensor_impl.h
---
 paddle/fluid/framework/tensor.cc      | 54 ++++++++++++++++++---------
 paddle/fluid/framework/tensor.h       |  8 +++-
 paddle/fluid/framework/tensor_impl.h  | 19 +++++++---
 paddle/fluid/framework/tensor_test.cc |  4 +-
 4 files changed, 57 insertions(+), 28 deletions(-)

diff --git a/paddle/fluid/framework/tensor.cc b/paddle/fluid/framework/tensor.cc
index 544c014eaf..0b22bab267 100644
--- a/paddle/fluid/framework/tensor.cc
+++ b/paddle/fluid/framework/tensor.cc
@@ -19,13 +19,17 @@ namespace paddle {
 namespace framework {
 extern size_t SizeOfType(proto::VarType::Type type);
 void Tensor::check_memory_size() const {
-  PADDLE_ENFORCE_NOT_NULL(
-      holder_, "Tensor holds no memory. Call Tensor::mutable_data first.");
+  PADDLE_ENFORCE_NOT_NULL(holder_, platform::errors::PreconditionNotMet(
+                                       "Tensor holds no memory. "
+                                       "Call Tensor::mutable_data firstly."));
   PADDLE_ENFORCE_LE(
       numel() * SizeOfType(type()), memory_size(),
-      "Tensor's dims_ is out of bound. Call Tensor::mutable_data "
-      "first to re-allocate memory.\n"
-      "or maybe the required data-type mismatches the data already stored.");
+      platform::errors::PreconditionNotMet(
+          "Tensor's dimension is out of bound. "
+          "Tensor's dimension must be less than or equal to the size of its "
+          "memory. "
+          "But received Tensor's dimension is %d, memory's size is %d.",
+          numel() * SizeOfType(type()), memory_size()));
 }
 
 Tensor::Tensor(const proto::VarType::Type& dtype) : type_(dtype), offset_(0) {}
@@ -37,15 +41,21 @@ size_t Tensor::memory_size() const {
 void* Tensor::mutable_data(const platform::Place& place,
                            proto::VarType::Type type,
                            size_t requested_size) {
   type_ = type;
-  PADDLE_ENFORCE_GE(numel(), 0,
-                    "When calling this method, the Tensor's numel must be "
-                    "equal or larger than zero. "
-                    "Please check Tensor::dims, or Tensor::Resize has been "
-                    "called first. The Tensor's shape is [", dims(), "] now");
+  PADDLE_ENFORCE_GE(
+      numel(), 0,
+      platform::errors::PreconditionNotMet(
+          "The Tensor's element number must be greater than or equal to zero. "
+          "The Tensor's shape is [%s] now.", dims()));
   size_t size = numel() * SizeOfType(type);
   if (requested_size) {
-    PADDLE_ENFORCE_GE(requested_size, size);
+    PADDLE_ENFORCE_GE(
+        requested_size, size,
+        platform::errors::InvalidArgument(
+            "The requested memory size is less than the memory size of Tensor. "
" + "But received requested memory size is d%, " + "memory size of Tensor is %d.", + requested_size, size)); size = requested_size; } /* some versions of boost::variant don't have operator!= */ @@ -62,8 +72,8 @@ void* Tensor::mutable_data(const platform::Place& place, void* Tensor::mutable_data(const platform::Place& place, size_t requested_size) { - PADDLE_ENFORCE_NOT_NULL( - this->holder_, "Cannot invoke mutable data if current hold nothing."); + PADDLE_ENFORCE_NOT_NULL(this->holder_, platform::errors::PreconditionNotMet( + "The tensor is not initialized.")); return mutable_data(place, type_, requested_size); } @@ -75,12 +85,20 @@ Tensor& Tensor::ShareDataWith(const Tensor& src) { Tensor Tensor::Slice(int64_t begin_idx, int64_t end_idx) const { check_memory_size(); - PADDLE_ENFORCE_GE(begin_idx, 0, - "The start row index must be greater than 0."); - PADDLE_ENFORCE_LE(end_idx, dims_[0], "The end row index is out of bound."); + PADDLE_ENFORCE_GE( + begin_idx, 0, + platform::errors::OutOfRange("The start row index must be greater than 0." + "But received the start index is d%.", + begin_idx)); + PADDLE_ENFORCE_LE( + end_idx, dims_[0], + platform::errors::OutOfRange("The end row index is out of bound.")); PADDLE_ENFORCE_LT( begin_idx, end_idx, - "The start row index must be lesser than the end row index."); + platform::errors::InvalidArgument( + "The start row index must be less than the end row index." + "But received the start index = %d, the end index = %d.", + begin_idx, end_idx)); if (dims_[0] == 1) { return *this; diff --git a/paddle/fluid/framework/tensor.h b/paddle/fluid/framework/tensor.h index d9fddc4c77..f2ccff2c13 100644 --- a/paddle/fluid/framework/tensor.h +++ b/paddle/fluid/framework/tensor.h @@ -131,13 +131,17 @@ class Tensor { const platform::Place& place() const { PADDLE_ENFORCE_NOT_NULL( - holder_, "Tensor not initialized yet when Tensor::place() is called."); + holder_, + platform::errors::PreconditionNotMet( + "Tensor not initialized yet when Tensor::place() is called.")); return holder_->place(); } proto::VarType::Type type() const { PADDLE_ENFORCE_NOT_NULL( - holder_, "Tensor not initialized yet when Tensor::type() is called."); + holder_, + platform::errors::PreconditionNotMet( + "Tensor not initialized yet when Tensor::type() is called.")); return type_; } diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h index f5171b0a8d..986551b935 100644 --- a/paddle/fluid/framework/tensor_impl.h +++ b/paddle/fluid/framework/tensor_impl.h @@ -43,9 +43,13 @@ inline T* Tensor::data() { check_memory_size(); bool valid = std::is_same::value || type_ == DataTypeTrait::DataType(); - PADDLE_ENFORCE( - valid, "Tensor holds the wrong type, it holds %s, but desires to be %s", - DataTypeToString(type_), DataTypeToString(DataTypeTrait::DataType())); + PADDLE_ENFORCE_EQ( + valid, true, + platform::errors::InvalidArgument( + "Tensor holds the wrong type, it holds %s, but desires to be %s", + DataTypeToString(type_), + DataTypeToString(DataTypeTrait::DataType()))); + return reinterpret_cast(reinterpret_cast(holder_->ptr()) + offset_); } @@ -69,9 +73,12 @@ inline T* Tensor::mutable_data(const platform::Place& place, inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) { int rank = src.dims().size(); PADDLE_ENFORCE_GE( - rank, 2, - "'ReshapeToMatrix()' is only used for flatten high rank " - "tensors to matrixs. 
+      rank, 2, platform::errors::InvalidArgument(
+                   "'ReshapeToMatrix()' is only used for flattening high rank "
+                   "tensors to matrices. The number of dimensions of the "
+                   "Tensor must be greater than or equal to 2. "
+                   "But received the number of dimensions is %d.",
+                   rank));
   if (rank == 2) {
     return src;
   }
diff --git a/paddle/fluid/framework/tensor_test.cc b/paddle/fluid/framework/tensor_test.cc
index 84f98d339a..cc972dd93d 100644
--- a/paddle/fluid/framework/tensor_test.cc
+++ b/paddle/fluid/framework/tensor_test.cc
@@ -41,7 +41,7 @@ TEST(Tensor, DataAssert) {
     std::string ex_msg = err.what();
     EXPECT_TRUE(ex_msg.find("holder_ should not be null") != std::string::npos);
     EXPECT_TRUE(ex_msg.find("Tensor holds no memory. Call "
-                            "Tensor::mutable_data first.") !=
+                            "Tensor::mutable_data firstly.") !=
                 std::string::npos);
   }
   ASSERT_TRUE(caught);
@@ -157,7 +157,7 @@ TEST(Tensor, ShareDataWith) {
      EXPECT_TRUE(ex_msg.find("holder_ should not be null") !=
                  std::string::npos);
      EXPECT_TRUE(ex_msg.find("Tensor holds no memory. Call "
-                             "Tensor::mutable_data first.") !=
+                             "Tensor::mutable_data firstly.") !=
                  std::string::npos);
    }
    ASSERT_TRUE(caught);
-- 
GitLab
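
Note on the pattern: the patch replaces bare message strings with typed error objects (platform::errors::PreconditionNotMet, InvalidArgument, OutOfRange) passed to PADDLE_ENFORCE_* checks. The sketch below is a minimal, self-contained approximation of that behavior for readers who do not have the Paddle sources at hand. ErrorSummary, InvalidArgument, EnforceNotMet, and ENFORCE_GE here are simplified stand-ins written for illustration only; the real definitions live in paddle/fluid/platform/errors.h and paddle/fluid/platform/enforce.h and differ in detail.

// Minimal stand-in for the PADDLE_ENFORCE_* + platform::errors pattern.
// All names below are hypothetical simplifications, not the Paddle API.
#include <cstdio>
#include <stdexcept>
#include <string>

// A typed error message, in the spirit of platform::errors::InvalidArgument.
struct ErrorSummary {
  std::string type;
  std::string message;
};

template <typename... Args>
ErrorSummary InvalidArgument(const char* fmt, Args... args) {
  char buf[512];
  std::snprintf(buf, sizeof(buf), fmt, args...);
  return {"InvalidArgumentError", std::string(buf)};
}

// Exception carrying the error class and the formatted message.
struct EnforceNotMet : public std::runtime_error {
  explicit EnforceNotMet(const ErrorSummary& e)
      : std::runtime_error(e.type + ": " + e.message) {}
};

// Stand-in for PADDLE_ENFORCE_GE(a, b, error): throw when a < b.
#define ENFORCE_GE(a, b, error)                    \
  do {                                             \
    if (!((a) >= (b))) throw EnforceNotMet(error); \
  } while (0)

int main() {
  int requested_size = 16;
  int size = 64;
  try {
    // Same message style as the patch: state the rule, then the received values.
    ENFORCE_GE(requested_size, size,
               InvalidArgument(
                   "The requested memory size is less than the memory size of "
                   "Tensor. But received requested memory size is %d, "
                   "memory size of Tensor is %d.",
                   requested_size, size));
  } catch (const EnforceNotMet& err) {
    std::printf("%s\n", err.what());
  }
  return 0;
}

Compared with the bare strings being removed, the typed error object records an error class alongside the formatted offending values, so the exception text that callers (and the expectations in tensor_test.cc) inspect carries both the rule that was violated and the values that violated it.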