From 364cc53618257a1db348c73bff2ed11c7e41b6a6 Mon Sep 17 00:00:00 2001
From: Chen Weihang
Date: Fri, 24 Jul 2020 15:12:38 +0800
Subject: [PATCH] Polish paddle fluid framework error message - part2 (#25667)

* polish framework error meg part2

* polish details
---
 paddle/fluid/framework/data_transform.cc      | 17 ++++--
 paddle/fluid/framework/data_type.cc           | 16 ++++--
 paddle/fluid/framework/data_type.h            |  4 +-
 paddle/fluid/framework/data_type_transform.cc |  7 ++-
 paddle/fluid/framework/ddim.cc                |  8 ++-
 paddle/fluid/framework/ddim.h                 | 57 +++++++++++++------
 paddle/fluid/framework/dlpack_tensor.cc       | 22 ++++---
 paddle/fluid/framework/downpour_worker.cc     | 12 ++--
 paddle/fluid/framework/eigen.h                | 20 +++++--
 paddle/fluid/framework/executor_gc_helper.cc  |  5 +-
 paddle/fluid/framework/garbage_collector.cc   |  6 +-
 11 files changed, 118 insertions(+), 56 deletions(-)

diff --git a/paddle/fluid/framework/data_transform.cc b/paddle/fluid/framework/data_transform.cc
index 76c53e82315..f54311eebfa 100644
--- a/paddle/fluid/framework/data_transform.cc
+++ b/paddle/fluid/framework/data_transform.cc
@@ -45,9 +45,10 @@ void TransformData(const OpKernelType &expected_kernel_type,
   if (NeedTransformLayout(lout, lin)) {
 #ifdef PADDLE_WITH_MKLDNN
     if (lin == DataLayout::kMKLDNN || lout == DataLayout::kMKLDNN) {
-      PADDLE_ENFORCE(
-          !(lin == DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN),
-          "No layout transform needed between two MKLDNN OPKernels");
+      PADDLE_ENFORCE_EQ(
+          !(lin == DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN), true,
+          platform::errors::PreconditionNotMet(
+              "No layout transform needed between two MKLDNN OPKernels."));
 
       if (lin != DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN) {
         // Case1 - transform from Non-MKLDNN OPKernel to MKLDNN OPKernel
@@ -96,7 +97,10 @@ void TransformData(const OpKernelType &expected_kernel_type,
     PassTensorData(&out, &in);
   }
 
-  PADDLE_ENFORCE(transformed, "No transform is applied, please check!");
+  PADDLE_ENFORCE_EQ(
+      transformed, true,
+      platform::errors::PreconditionNotMet(
+          "No transform is applied for the data needs to be transformed."));
   // get output data
   output_tensor->ShareDataWith(in);
 }
@@ -116,7 +120,10 @@ void SetTensorToVariable(const Variable &in_var, const Tensor &tensor,
     trans_selected_rows->set_rows(in_selected_rows.rows());
     trans_selected_rows->mutable_value()->ShareDataWith(tensor);
   } else {
-    PADDLE_THROW("unknown var type");
+    PADDLE_THROW(platform::errors::Unavailable(
+        "Unsupported variable type, only supports LoDTensor or SelectedRows, "
+        "but the input variable type is %s.",
+        ToTypeName(in_var.Type())));
   }
 }
 
diff --git a/paddle/fluid/framework/data_type.cc b/paddle/fluid/framework/data_type.cc
index a0248cf3c75..f479d92483c 100644
--- a/paddle/fluid/framework/data_type.cc
+++ b/paddle/fluid/framework/data_type.cc
@@ -65,7 +65,8 @@ proto::VarType::Type ToDataType(std::type_index type) {
   if (it != gDataTypeMap().cpp_to_proto_.end()) {
     return it->second;
   }
-  PADDLE_THROW("Not support %s as tensor type", type.name());
+  PADDLE_THROW(platform::errors::Unimplemented(
+      "Not support %s as tensor data type.", platform::demangle(type.name())));
 }
 
 std::type_index ToTypeIndex(proto::VarType::Type type) {
@@ -73,8 +74,9 @@ std::type_index ToTypeIndex(proto::VarType::Type type) {
   if (it != gDataTypeMap().proto_to_cpp_.end()) {
     return it->second;
   }
-  PADDLE_THROW("Not support proto::VarType::Type(%d) as tensor type",
-               static_cast<int>(type));
+  PADDLE_THROW(platform::errors::Unimplemented(
+      "Not support proto::VarType::Type(%d) as tensor type.",
+      static_cast<int>(type)));
 }
 
 std::string DataTypeToString(const proto::VarType::Type type) {
@@ -82,8 +84,9 @@ std::string DataTypeToString(const proto::VarType::Type type) {
   if (it != gDataTypeMap().proto_to_str_.end()) {
     return it->second;
   }
-  PADDLE_THROW("Not support proto::VarType::Type(%d) as tensor type",
-               static_cast<int>(type));
+  PADDLE_THROW(platform::errors::Unimplemented(
+      "Not support proto::VarType::Type(%d) as tensor type.",
+      static_cast<int>(type)));
 }
 
 size_t SizeOfType(proto::VarType::Type type) {
@@ -91,7 +94,8 @@ size_t SizeOfType(proto::VarType::Type type) {
   if (it != gDataTypeMap().proto_to_size_.end()) {
     return it->second;
  }
-  PADDLE_THROW("Not support %s as tensor type", DataTypeToString(type));
+  PADDLE_THROW(platform::errors::Unimplemented("Not support %s as tensor type.",
+                                               DataTypeToString(type)));
 }
 
 }  // namespace framework
diff --git a/paddle/fluid/framework/data_type.h b/paddle/fluid/framework/data_type.h
index e3b45d05d85..2c4a7b4d027 100644
--- a/paddle/fluid/framework/data_type.h
+++ b/paddle/fluid/framework/data_type.h
@@ -78,7 +78,9 @@ inline void VisitDataType(proto::VarType::Type type, Visitor visitor) {
 
   _ForEachDataType_(VisitDataTypeCallback);
 #undef VisitDataTypeCallback
-  PADDLE_THROW("Not supported %d", type);
+  PADDLE_THROW(platform::errors::Unimplemented(
+      "Not supported proto::VarType::Type(%d) as data type.",
+      static_cast<int>(type)));
 }
 
 template
diff --git a/paddle/fluid/framework/data_type_transform.cc b/paddle/fluid/framework/data_type_transform.cc
index d79f8cacb5f..44542f05d9d 100644
--- a/paddle/fluid/framework/data_type_transform.cc
+++ b/paddle/fluid/framework/data_type_transform.cc
@@ -56,7 +56,8 @@ struct CastDataType {
       context->Wait();
 #endif
     } else {
-      PADDLE_THROW("Unsupported place!");
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "Place type is not supported when casting data type."));
     }
   }
 };
@@ -98,7 +99,9 @@ void TransDataType(const OpKernelType& kernel_type_for_var,
       framework::VisitDataType(dst_type, CastDataType(in, out, ctx));
       break;
     default:
-      PADDLE_THROW("Not support type %d", src_type);
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "Data type (%s) is not supported when casting data type.",
+          DataTypeToString(src_type)));
   }
 }
 
diff --git a/paddle/fluid/framework/ddim.cc b/paddle/fluid/framework/ddim.cc
index 799deec1b69..fe7d2430662 100644
--- a/paddle/fluid/framework/ddim.cc
+++ b/paddle/fluid/framework/ddim.cc
@@ -81,9 +81,11 @@ bool contain_unknown_dim(const DDim& ddim) {
 }
 
 DDim slice_ddim(const DDim& dim, int begin, int end) {
-  PADDLE_ENFORCE(begin >= 0 && end <= dim.size(),
-                 "[begin(%d), end(%d)) must be inside [0, %d) in ddim slice.",
-                 begin, end, dim.size());
+  PADDLE_ENFORCE_EQ(
+      (begin >= 0 && end <= dim.size()), true,
+      platform::errors::InvalidArgument(
+          "[begin(%d), end(%d)) must be inside [0, %d) in ddim slice.", begin,
+          end, dim.size()));
   // Constructor of DDim would check whether end - begin is valid
   return DDim(dim.Get() + begin, end - begin);
 }
diff --git a/paddle/fluid/framework/ddim.h b/paddle/fluid/framework/ddim.h
index cbc8b0fb7cc..29c4732f991 100644
--- a/paddle/fluid/framework/ddim.h
+++ b/paddle/fluid/framework/ddim.h
@@ -29,20 +29,23 @@ namespace framework {
     return (callback);                       \
   }
 
-#define PADDLE_VISIT_DDIM(rank, callback)    \
-  switch (rank) {                            \
-    PADDLE_VISIT_DDIM_BASE(0, callback);     \
-    PADDLE_VISIT_DDIM_BASE(1, callback);     \
-    PADDLE_VISIT_DDIM_BASE(2, callback);     \
-    PADDLE_VISIT_DDIM_BASE(3, callback);     \
-    PADDLE_VISIT_DDIM_BASE(4, callback);     \
-    PADDLE_VISIT_DDIM_BASE(5, callback);     \
-    PADDLE_VISIT_DDIM_BASE(6, callback);     \
-    PADDLE_VISIT_DDIM_BASE(7, callback);     \
-    PADDLE_VISIT_DDIM_BASE(8, callback);     \
-    PADDLE_VISIT_DDIM_BASE(9, callback);     \
-    default:                                 \
-      PADDLE_THROW("Invalid rank %d", rank); \
+#define PADDLE_VISIT_DDIM(rank, callback)                                    \
+  switch (rank) {                                                            \
+    PADDLE_VISIT_DDIM_BASE(0, callback);                                     \
+    PADDLE_VISIT_DDIM_BASE(1, callback);                                     \
+    PADDLE_VISIT_DDIM_BASE(2, callback);                                     \
+    PADDLE_VISIT_DDIM_BASE(3, callback);                                     \
+    PADDLE_VISIT_DDIM_BASE(4, callback);                                     \
+    PADDLE_VISIT_DDIM_BASE(5, callback);                                     \
+    PADDLE_VISIT_DDIM_BASE(6, callback);                                     \
+    PADDLE_VISIT_DDIM_BASE(7, callback);                                     \
+    PADDLE_VISIT_DDIM_BASE(8, callback);                                     \
+    PADDLE_VISIT_DDIM_BASE(9, callback);                                     \
+    default:                                                                 \
+      PADDLE_THROW(platform::errors::Unimplemented(                          \
+          "Invalid dimension to be accessed. Now only supports access to "   \
+          "dimension 0 to 9, but received dimension is %d.",                 \
+          rank));                                                            \
   }
 
 template
@@ -92,13 +95,31 @@ class DDim {
 
   inline int64_t operator[](int idx) const { return dim_[idx]; }
 
-  inline int64_t& at(int idx) {
-    PADDLE_ENFORCE(idx >= 0 && idx < rank_, "Invalid idx %d", idx);
+  int64_t& at(int idx) {
+    PADDLE_ENFORCE_GE(idx, 0,
+                      platform::errors::InvalidArgument(
+                          "Invalid DDim index to be accessed. The valid index "
+                          "is between 0 and %d, but received index is %d.",
+                          rank_, idx));
+    PADDLE_ENFORCE_LT(idx, rank_,
+                      platform::errors::InvalidArgument(
+                          "Invalid DDim index to be accessed. The valid index "
+                          "is between 0 and %d, but received index is %d.",
+                          rank_, idx));
     return dim_[idx];
   }
 
-  inline int64_t at(int idx) const {
-    PADDLE_ENFORCE(idx >= 0 && idx < rank_, "Invalid idx %d", idx);
+  int64_t at(int idx) const {
+    PADDLE_ENFORCE_GE(idx, 0,
+                      platform::errors::InvalidArgument(
+                          "Invalid DDim index to be accessed. The valid index "
+                          "is between 0 and %d, but received index is %d.",
+                          rank_, idx));
+    PADDLE_ENFORCE_LT(idx, rank_,
+                      platform::errors::InvalidArgument(
+                          "Invalid DDim index to be accessed. The valid index "
+                          "is between 0 and %d, but received index is %d.",
+                          rank_, idx));
     return dim_[idx];
   }
 
diff --git a/paddle/fluid/framework/dlpack_tensor.cc b/paddle/fluid/framework/dlpack_tensor.cc
index 74e344cfebe..f2421248e33 100644
--- a/paddle/fluid/framework/dlpack_tensor.cc
+++ b/paddle/fluid/framework/dlpack_tensor.cc
@@ -30,7 +30,10 @@ static ::DLDataType GetDLDataTypeCode() {
   } else if (std::is_integral<T>::value) {
     dtype.code = kDLInt;
   } else {
-    PADDLE_THROW("Unsupported data type %s", typeid(T).name());
+    PADDLE_THROW(platform::errors::Unavailable(
+        "Unsupported data type (%s), only supports float16, float, unsigned "
+        "int and int.",
+        platform::demangle(typeid(T).name())));
   }
   dtype.bits = 8 * sizeof(T);
   dtype.lanes = 1;
@@ -52,8 +55,9 @@ static DLDataType GetDLDataTypeFromTypeIndex(proto::VarType::Type type) {
   static auto type_to_dtype_map = CreateDLDataTypeMap();
   static auto type_to_dtype_map_end_it = type_to_dtype_map.end();
   auto it = type_to_dtype_map.find(static_cast<int>(type));
-  PADDLE_ENFORCE(it != type_to_dtype_map_end_it, "Unsupported data type %d",
-                 type);
+  PADDLE_ENFORCE_NE(it, type_to_dtype_map_end_it,
+                    platform::errors::InvalidArgument(
+                        "Unsupported data type (%s).", DataTypeToString(type)));
   return it->second;
 #undef REG_DL_DATA_TYPE
 }
@@ -73,7 +77,8 @@ struct DLContextVisitor : public boost::static_visitor<::DLContext> {
     ctx.device_id = place.device;
     return ctx;
 #else
-    PADDLE_THROW("platform::CUDAPlace is not supported in CPU only version");
+    PADDLE_THROW(platform::errors::Unavailable(
+        "platform::CUDAPlace is not supported in CPU only version."));
 #endif
   }
 
@@ -84,8 +89,8 @@ struct DLContextVisitor : public boost::static_visitor<::DLContext> {
     ctx.device_id = 0;
     return ctx;
 #else
-    PADDLE_THROW(
-        "platform::CUDAPinnedPlace is not supported in CPU only version");
+    PADDLE_THROW(platform::errors::Unavailable(
+        "platform::CUDAPinnedPlace is not supported in CPU only version."));
 #endif
   }
 };
@@ -136,7 +141,10 @@ DLPackTensor::DLPackTensor(const Tensor &tensor, LaneType lanes) {
   // refer to cupy and cudf, the compact tensor first dim's strides need to be 1
   // and second dim's strides need to be length of rows of cudf
   // cudf now only support dim=2
-  PADDLE_ENFORCE_LE(t_.ndim, 2, "cudf now only support dim=2.");
+  PADDLE_ENFORCE_LE(t_.ndim, 2, platform::errors::InvalidArgument(
+                                    "cudf now only supports dimension is 2, "
+                                    "but received dimension is %d.",
+                                    t_.ndim));
 
   if (t_.ndim > 1)
     t_.strides = new int64_t[2]{1, t_.shape[1]};
diff --git a/paddle/fluid/framework/downpour_worker.cc b/paddle/fluid/framework/downpour_worker.cc
index cbdfa00652a..3f70835c9d3 100644
--- a/paddle/fluid/framework/downpour_worker.cc
+++ b/paddle/fluid/framework/downpour_worker.cc
@@ -556,9 +556,11 @@ void DownpourWorker::TrainFilesWithProfiler() {
         continue;
       }
       PADDLE_ENFORCE_EQ(framework::TensorContainsInf(*tensor), false,
-                        "Tensor %s contains Inf", var_name);
+                        platform::errors::InvalidArgument(
+                            "Tensor %s contains Inf.", var_name));
       PADDLE_ENFORCE_EQ(framework::TensorContainsNAN(*tensor), false,
-                        "Tensor %s contains NAN", var_name);
+                        platform::errors::InvalidArgument(
+                            "Tensor %s contains NAN.", var_name));
     }
 
     if (need_to_push_sparse_) {
@@ -829,9 +831,11 @@ void DownpourWorker::TrainFiles() {
         continue;
       }
       PADDLE_ENFORCE_EQ(framework::TensorContainsInf(*tensor), false,
-                        "Tensor %s contains Inf", var_name);
+                        platform::errors::InvalidArgument(
+                            "Tensor %s contains Inf.", var_name));
       PADDLE_ENFORCE_EQ(framework::TensorContainsNAN(*tensor), false,
-                        "Tensor %s contains NAN", var_name);
+                        platform::errors::InvalidArgument(
+                            "Tensor %s contains NAN.", var_name));
     }
 
     if (need_to_push_sparse_) {
diff --git a/paddle/fluid/framework/eigen.h b/paddle/fluid/framework/eigen.h
index 21adcb9948b..0e3edfb95cb 100644
--- a/paddle/fluid/framework/eigen.h
+++ b/paddle/fluid/framework/eigen.h
@@ -26,7 +26,11 @@ struct EigenDim {
   using Type = Eigen::DSizes<Eigen::DenseIndex, D>;
 
   static Type From(const DDim& dims) {
-    PADDLE_ENFORCE(arity(dims) == D, "D must match arity(DDim)");
+    PADDLE_ENFORCE_EQ(arity(dims), D,
+                      platform::errors::InvalidArgument(
+                          "Input dimension size should be equal to %d, but "
+                          "received dimension size is %d.",
+                          arity(dims), D));
     Type ret;
     for (int64_t d = 0; d < arity(dims); d++) {
       ret[d] = dims[d];
@@ -69,8 +73,11 @@ struct EigenMatrix : public EigenTensor<T, 2, MajorType, IndexType> {
   static typename EigenMatrix::Type Reshape(Tensor& tensor,  // NOLINT
                                             int num_col_dims) {
     int rank = tensor.dims_.size();
-    PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank,
-                   "`num_col_dims` must be between (0, rank_of_tensor).");
+    PADDLE_ENFORCE_EQ((num_col_dims > 0 && num_col_dims < rank), true,
+                      platform::errors::InvalidArgument(
+                          "Input dimension number(num_col_dims) must be "
+                          "between 0 and %d, but received number is %d.",
+                          rank, num_col_dims));
     return EigenMatrix::From(tensor,
                              flatten_to_2d(tensor.dims(), num_col_dims));
   }
@@ -78,8 +85,11 @@ struct EigenMatrix : public EigenTensor<T, 2, MajorType, IndexType> {
   static typename EigenMatrix::ConstType Reshape(const Tensor& tensor,
                                                  int num_col_dims) {
     int rank = tensor.dims_.size();
-    PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank,
-                   "`num_col_dims` must be between (0, rank_of_tensor).");
+    PADDLE_ENFORCE_EQ((num_col_dims > 0 && num_col_dims < rank), true,
+                      platform::errors::InvalidArgument(
+                          "Input dimension number(num_col_dims) must be "
+                          "between 0 and %d, but received number is %d.",
+                          rank, num_col_dims));
     return EigenMatrix::From(tensor,
                              flatten_to_2d(tensor.dims(), num_col_dims));
   }
diff --git a/paddle/fluid/framework/executor_gc_helper.cc b/paddle/fluid/framework/executor_gc_helper.cc
index 1712d66cf4c..706248229bc 100644
--- a/paddle/fluid/framework/executor_gc_helper.cc
+++ b/paddle/fluid/framework/executor_gc_helper.cc
@@ -175,8 +175,9 @@ void DeleteUnusedTensors(
         garbages.emplace_back(t.MoveMemoryHolder());
       }
     } else {
-      PADDLE_THROW("Type %s of %s is not supported eager deletion",
-                   framework::ToTypeName(var->Type()), var_name);
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "Type %s of variable %s is not supported eager deletion.",
+          framework::ToTypeName(var->Type()), var_name));
     }
   }
 
diff --git a/paddle/fluid/framework/garbage_collector.cc b/paddle/fluid/framework/garbage_collector.cc
index 08c3e6d7f59..ac892443de3 100644
--- a/paddle/fluid/framework/garbage_collector.cc
+++ b/paddle/fluid/framework/garbage_collector.cc
@@ -79,15 +79,15 @@ StreamGarbageCollector::StreamGarbageCollector(const platform::CUDAPlace &place,
                                                size_t max_memory_size)
     : GarbageCollector(place, max_memory_size) {
   platform::CUDADeviceGuard guard(place.device);
-  PADDLE_ENFORCE(cudaStreamCreate(&stream_));
+  PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamCreate(&stream_));
   callback_manager_.reset(new platform::StreamCallbackManager(stream_));
 }
 
 StreamGarbageCollector::~StreamGarbageCollector() {
   auto place = BOOST_GET_CONST(platform::CUDAPlace, this->dev_ctx_->GetPlace());
   platform::CUDADeviceGuard guard(place.device);
-  PADDLE_ENFORCE(cudaStreamSynchronize(stream_));
-  PADDLE_ENFORCE(cudaStreamDestroy(stream_));
+  PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream_));
+  PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamDestroy(stream_));
 }
 
 cudaStream_t StreamGarbageCollector::stream() const { return stream_; }
-- 
GitLab
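
For readers skimming the patch, here is a minimal sketch of the error-reporting convention it applies: bare PADDLE_ENFORCE/PADDLE_THROW calls with printf-style messages are replaced by typed PADDLE_ENFORCE_* comparison macros (or PADDLE_THROW) carrying a platform::errors::* category and a full-sentence message. The sketch is illustrative only and is not a hunk from this patch; CheckSliceRange is a hypothetical helper, and the snippet assumes it is built inside the Paddle source tree, where paddle/fluid/platform/enforce.h provides these macros and error types.

// Hypothetical example of the style introduced by this patch (not part of
// the diff above). Assumes compilation inside the Paddle source tree.
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace framework {

// Validate a [begin, end) slice range against a dimension count.
void CheckSliceRange(int begin, int end, int size) {
  // Old style (replaced throughout this patch):
  //   PADDLE_ENFORCE(begin >= 0 && end <= size,
  //                  "[begin(%d), end(%d)) must be inside [0, %d).",
  //                  begin, end, size);
  // New style: typed comparison macro plus an error category.
  PADDLE_ENFORCE_GE(begin, 0,
                    platform::errors::InvalidArgument(
                        "Invalid slice range. Expected begin >= 0, but "
                        "received begin is %d.", begin));
  PADDLE_ENFORCE_LE(end, size,
                    platform::errors::InvalidArgument(
                        "Invalid slice range. Expected end <= %d, but "
                        "received end is %d.", size, end));
}

}  // namespace framework
}  // namespace paddle

The error category used in each call site (InvalidArgument, Unimplemented, PreconditionNotMet, Unavailable, and so on) is carried in the thrown error, which is what lets callers and tooling distinguish user mistakes from unimplemented code paths.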