Unverified commit 364cc536, authored by Chen Weihang, committed by GitHub

Polish paddle fluid framework error message - part2 (#25667)

* polish framework error msg part2

* polish details
Parent 4598f116
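The diff below applies one pattern throughout: bare PADDLE_ENFORCE / PADDLE_THROW calls with printf-style strings are replaced by comparison macros (PADDLE_ENFORCE_EQ, _NE, _GE, _LT, ...) or by PADDLE_THROW with a typed platform::errors category and a full-sentence message. A minimal before/after sketch of the convention follows; the variable `rank` and the message text are illustrative only, not taken from this diff:

// Old style: a bool condition plus a bare printf-style message.
PADDLE_ENFORCE(rank > 0, "Invalid rank %d", rank);

// New style: a comparison macro plus a typed error class, so the failure
// reports an error category and a complete sentence with the received value.
// (`rank` is a hypothetical local variable used only for illustration)
PADDLE_ENFORCE_GT(rank, 0,
                  platform::errors::InvalidArgument(
                      "The rank of the input must be greater than 0, "
                      "but received rank is %d.",
                      rank));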
@@ -45,9 +45,10 @@ void TransformData(const OpKernelType &expected_kernel_type,
   if (NeedTransformLayout(lout, lin)) {
 #ifdef PADDLE_WITH_MKLDNN
     if (lin == DataLayout::kMKLDNN || lout == DataLayout::kMKLDNN) {
-      PADDLE_ENFORCE(
-          !(lin == DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN),
-          "No layout transform needed between two MKLDNN OPKernels");
+      PADDLE_ENFORCE_EQ(
+          !(lin == DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN), true,
+          platform::errors::PreconditionNotMet(
+              "No layout transform needed between two MKLDNN OPKernels."));
       if (lin != DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN) {
         // Case1 - transform from Non-MKLDNN OPKernel to MKLDNN OPKernel
@@ -96,7 +97,10 @@ void TransformData(const OpKernelType &expected_kernel_type,
     PassTensorData(&out, &in);
   }
-  PADDLE_ENFORCE(transformed, "No transform is applied, please check!");
+  PADDLE_ENFORCE_EQ(
+      transformed, true,
+      platform::errors::PreconditionNotMet(
+          "No transform is applied for the data needs to be transformed."));
   // get output data
   output_tensor->ShareDataWith(in);
 }
@@ -116,7 +120,10 @@ void SetTensorToVariable(const Variable &in_var, const Tensor &tensor,
     trans_selected_rows->set_rows(in_selected_rows.rows());
     trans_selected_rows->mutable_value()->ShareDataWith(tensor);
   } else {
-    PADDLE_THROW("unknown var type");
+    PADDLE_THROW(platform::errors::Unavailable(
+        "Unsupported variable type, only supports LoDTensor or SelectedRows, "
+        "but the input variable type is %s.",
+        ToTypeName(in_var.Type())));
   }
 }
......
@@ -65,7 +65,8 @@ proto::VarType::Type ToDataType(std::type_index type) {
   if (it != gDataTypeMap().cpp_to_proto_.end()) {
     return it->second;
   }
-  PADDLE_THROW("Not support %s as tensor type", type.name());
+  PADDLE_THROW(platform::errors::Unimplemented(
+      "Not support %s as tensor data type.", platform::demangle(type.name())));
 }
 std::type_index ToTypeIndex(proto::VarType::Type type) {
@@ -73,8 +74,9 @@ std::type_index ToTypeIndex(proto::VarType::Type type) {
   if (it != gDataTypeMap().proto_to_cpp_.end()) {
     return it->second;
   }
-  PADDLE_THROW("Not support proto::VarType::Type(%d) as tensor type",
-               static_cast<int>(type));
+  PADDLE_THROW(platform::errors::Unimplemented(
+      "Not support proto::VarType::Type(%d) as tensor type.",
+      static_cast<int>(type)));
 }
 std::string DataTypeToString(const proto::VarType::Type type) {
@@ -82,8 +84,9 @@ std::string DataTypeToString(const proto::VarType::Type type) {
   if (it != gDataTypeMap().proto_to_str_.end()) {
     return it->second;
   }
-  PADDLE_THROW("Not support proto::VarType::Type(%d) as tensor type",
-               static_cast<int>(type));
+  PADDLE_THROW(platform::errors::Unimplemented(
+      "Not support proto::VarType::Type(%d) as tensor type.",
+      static_cast<int>(type)));
 }
 size_t SizeOfType(proto::VarType::Type type) {
@@ -91,7 +94,8 @@ size_t SizeOfType(proto::VarType::Type type) {
   if (it != gDataTypeMap().proto_to_size_.end()) {
     return it->second;
   }
-  PADDLE_THROW("Not support %s as tensor type", DataTypeToString(type));
+  PADDLE_THROW(platform::errors::Unimplemented("Not support %s as tensor type.",
+                                               DataTypeToString(type)));
 }
 }  // namespace framework
......
@@ -78,7 +78,9 @@ inline void VisitDataType(proto::VarType::Type type, Visitor visitor) {
   _ForEachDataType_(VisitDataTypeCallback);
 #undef VisitDataTypeCallback
-  PADDLE_THROW("Not supported %d", type);
+  PADDLE_THROW(platform::errors::Unimplemented(
+      "Not supported proto::VarType::Type(%d) as data type.",
+      static_cast<int>(type)));
 }
 template <typename Visitor>
......
@@ -56,7 +56,8 @@ struct CastDataType {
       context->Wait();
 #endif
     } else {
-      PADDLE_THROW("Unsupported place!");
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "Place type is not supported when casting data type."));
     }
   }
 };
@@ -98,7 +99,9 @@ void TransDataType(const OpKernelType& kernel_type_for_var,
       framework::VisitDataType(dst_type, CastDataType<bool>(in, out, ctx));
       break;
     default:
-      PADDLE_THROW("Not support type %d", src_type);
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "Data type (%s) is not supported when casting data type.",
+          DataTypeToString(src_type)));
   }
 }
......
@@ -81,9 +81,11 @@ bool contain_unknown_dim(const DDim& ddim) {
 }
 DDim slice_ddim(const DDim& dim, int begin, int end) {
-  PADDLE_ENFORCE(begin >= 0 && end <= dim.size(),
-                 "[begin(%d), end(%d)) must be inside [0, %d) in ddim slice.",
-                 begin, end, dim.size());
+  PADDLE_ENFORCE_EQ(
+      (begin >= 0 && end <= dim.size()), true,
+      platform::errors::InvalidArgument(
+          "[begin(%d), end(%d)) must be inside [0, %d) in ddim slice.", begin,
+          end, dim.size()));
   // Constructor of DDim would check whether end - begin is valid
   return DDim(dim.Get() + begin, end - begin);
 }
......
@@ -29,20 +29,23 @@ namespace framework {
     return (callback);                          \
   }
 #define PADDLE_VISIT_DDIM(rank, callback)       \
   switch (rank) {                               \
     PADDLE_VISIT_DDIM_BASE(0, callback);        \
     PADDLE_VISIT_DDIM_BASE(1, callback);        \
     PADDLE_VISIT_DDIM_BASE(2, callback);        \
     PADDLE_VISIT_DDIM_BASE(3, callback);        \
     PADDLE_VISIT_DDIM_BASE(4, callback);        \
     PADDLE_VISIT_DDIM_BASE(5, callback);        \
     PADDLE_VISIT_DDIM_BASE(6, callback);        \
     PADDLE_VISIT_DDIM_BASE(7, callback);        \
     PADDLE_VISIT_DDIM_BASE(8, callback);        \
     PADDLE_VISIT_DDIM_BASE(9, callback);        \
     default:                                    \
-      PADDLE_THROW("Invalid rank %d", rank);    \
+      PADDLE_THROW(platform::errors::Unimplemented(                        \
+          "Invalid dimension to be accessed. Now only supports access to " \
+          "dimension 0 to 9, but received dimension is %d.",               \
+          rank));                                                          \
   }
 template <typename T1, typename T2>
@@ -92,13 +95,31 @@ class DDim {
   inline int64_t operator[](int idx) const { return dim_[idx]; }
-  inline int64_t& at(int idx) {
-    PADDLE_ENFORCE(idx >= 0 && idx < rank_, "Invalid idx %d", idx);
+  int64_t& at(int idx) {
+    PADDLE_ENFORCE_GE(idx, 0,
+                      platform::errors::InvalidArgument(
+                          "Invalid DDim index to be accessed. The valid index "
+                          "is between 0 and %d, but received index is %d.",
+                          rank_, idx));
+    PADDLE_ENFORCE_LT(idx, rank_,
+                      platform::errors::InvalidArgument(
+                          "Invalid DDim index to be accessed. The valid index "
+                          "is between 0 and %d, but received index is %d.",
+                          rank_, idx));
     return dim_[idx];
   }
-  inline int64_t at(int idx) const {
-    PADDLE_ENFORCE(idx >= 0 && idx < rank_, "Invalid idx %d", idx);
+  int64_t at(int idx) const {
+    PADDLE_ENFORCE_GE(idx, 0,
+                      platform::errors::InvalidArgument(
+                          "Invalid DDim index to be accessed. The valid index "
+                          "is between 0 and %d, but received index is %d.",
+                          rank_, idx));
+    PADDLE_ENFORCE_LT(idx, rank_,
+                      platform::errors::InvalidArgument(
+                          "Invalid DDim index to be accessed. The valid index "
+                          "is between 0 and %d, but received index is %d.",
+                          rank_, idx));
     return dim_[idx];
   }
......
@@ -30,7 +30,10 @@ static ::DLDataType GetDLDataTypeCode() {
   } else if (std::is_integral<T>::value) {
     dtype.code = kDLInt;
   } else {
-    PADDLE_THROW("Unsupported data type %s", typeid(T).name());
+    PADDLE_THROW(platform::errors::Unavailable(
+        "Unsupported data type (%s), only supports float16, float, unsigned "
+        "int and int.",
+        platform::demangle(typeid(T).name())));
   }
   dtype.bits = 8 * sizeof(T);
   dtype.lanes = 1;
@@ -52,8 +55,9 @@ static DLDataType GetDLDataTypeFromTypeIndex(proto::VarType::Type type) {
   static auto type_to_dtype_map = CreateDLDataTypeMap();
   static auto type_to_dtype_map_end_it = type_to_dtype_map.end();
   auto it = type_to_dtype_map.find(static_cast<int>(type));
-  PADDLE_ENFORCE(it != type_to_dtype_map_end_it, "Unsupported data type %d",
-                 type);
+  PADDLE_ENFORCE_NE(it, type_to_dtype_map_end_it,
+                    platform::errors::InvalidArgument(
+                        "Unsupported data type (%s).", DataTypeToString(type)));
   return it->second;
 #undef REG_DL_DATA_TYPE
 }
@@ -73,7 +77,8 @@ struct DLContextVisitor : public boost::static_visitor<::DLContext> {
     ctx.device_id = place.device;
     return ctx;
 #else
-    PADDLE_THROW("platform::CUDAPlace is not supported in CPU only version");
+    PADDLE_THROW(platform::errors::Unavailable(
+        "platform::CUDAPlace is not supported in CPU only version."));
 #endif
   }
@@ -84,8 +89,8 @@ struct DLContextVisitor : public boost::static_visitor<::DLContext> {
     ctx.device_id = 0;
     return ctx;
 #else
-    PADDLE_THROW(
-        "platform::CUDAPinnedPlace is not supported in CPU only version");
+    PADDLE_THROW(platform::errors::Unavailable(
+        "platform::CUDAPinnedPlace is not supported in CPU only version."));
 #endif
   }
 };
@@ -136,7 +141,10 @@ DLPackTensor::DLPackTensor(const Tensor &tensor, LaneType lanes) {
   // refer to cupy and cudf, the compact tensor first dim's strides need to be 1
   // and second dim's strides need to be length of rows of cudf
   // cudf now only support dim=2
-  PADDLE_ENFORCE_LE(t_.ndim, 2, "cudf now only support dim=2.");
+  PADDLE_ENFORCE_LE(t_.ndim, 2, platform::errors::InvalidArgument(
+                                    "cudf now only supports dimension is 2, "
+                                    "but received dimension is %d.",
+                                    t_.ndim));
   if (t_.ndim > 1)
     t_.strides = new int64_t[2]{1, t_.shape[1]};
......
@@ -556,9 +556,11 @@ void DownpourWorker::TrainFilesWithProfiler() {
         continue;
       }
       PADDLE_ENFORCE_EQ(framework::TensorContainsInf(*tensor), false,
-                        "Tensor %s contains Inf", var_name);
+                        platform::errors::InvalidArgument(
+                            "Tensor %s contains Inf.", var_name));
       PADDLE_ENFORCE_EQ(framework::TensorContainsNAN(*tensor), false,
-                        "Tensor %s contains NAN", var_name);
+                        platform::errors::InvalidArgument(
+                            "Tensor %s contains NAN.", var_name));
     }
     if (need_to_push_sparse_) {
@@ -829,9 +831,11 @@ void DownpourWorker::TrainFiles() {
        continue;
      }
      PADDLE_ENFORCE_EQ(framework::TensorContainsInf(*tensor), false,
-                        "Tensor %s contains Inf", var_name);
+                        platform::errors::InvalidArgument(
+                            "Tensor %s contains Inf.", var_name));
      PADDLE_ENFORCE_EQ(framework::TensorContainsNAN(*tensor), false,
-                        "Tensor %s contains NAN", var_name);
+                        platform::errors::InvalidArgument(
+                            "Tensor %s contains NAN.", var_name));
    }
    if (need_to_push_sparse_) {
......
@@ -26,7 +26,11 @@ struct EigenDim {
   using Type = Eigen::DSizes<Eigen::DenseIndex, D>;
   static Type From(const DDim& dims) {
-    PADDLE_ENFORCE(arity(dims) == D, "D must match arity(DDim)");
+    PADDLE_ENFORCE_EQ(arity(dims), D,
+                      platform::errors::InvalidArgument(
+                          "Input dimension size should be equal to %d, but "
+                          "received dimension size is %d.",
+                          arity(dims), D));
     Type ret;
     for (int64_t d = 0; d < arity(dims); d++) {
       ret[d] = dims[d];
@@ -69,8 +73,11 @@ struct EigenMatrix : public EigenTensor<T, 2, MajorType, IndexType> {
   static typename EigenMatrix::Type Reshape(Tensor& tensor,  // NOLINT
                                             int num_col_dims) {
     int rank = tensor.dims_.size();
-    PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank,
-                   "`num_col_dims` must be between (0, rank_of_tensor).");
+    PADDLE_ENFORCE_EQ((num_col_dims > 0 && num_col_dims < rank), true,
+                      platform::errors::InvalidArgument(
+                          "Input dimension number(num_col_dims) must be "
+                          "between 0 and %d, but received number is %d.",
+                          rank, num_col_dims));
     return EigenMatrix::From(tensor,
                              flatten_to_2d(tensor.dims(), num_col_dims));
   }
@@ -78,8 +85,11 @@ struct EigenMatrix : public EigenTensor<T, 2, MajorType, IndexType> {
   static typename EigenMatrix::ConstType Reshape(const Tensor& tensor,
                                                  int num_col_dims) {
     int rank = tensor.dims_.size();
-    PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank,
-                   "`num_col_dims` must be between (0, rank_of_tensor).");
+    PADDLE_ENFORCE_EQ((num_col_dims > 0 && num_col_dims < rank), true,
+                      platform::errors::InvalidArgument(
+                          "Input dimension number(num_col_dims) must be "
+                          "between 0 and %d, but received number is %d.",
+                          rank, num_col_dims));
     return EigenMatrix::From(tensor,
                              flatten_to_2d(tensor.dims(), num_col_dims));
   }
......
@@ -175,8 +175,9 @@ void DeleteUnusedTensors(
         garbages.emplace_back(t.MoveMemoryHolder());
       }
     } else {
-      PADDLE_THROW("Type %s of %s is not supported eager deletion",
-                   framework::ToTypeName(var->Type()), var_name);
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "Type %s of variable %s is not supported eager deletion.",
+          framework::ToTypeName(var->Type()), var_name));
     }
   }
......
@@ -79,15 +79,15 @@ StreamGarbageCollector::StreamGarbageCollector(const platform::CUDAPlace &place,
                                                size_t max_memory_size)
     : GarbageCollector(place, max_memory_size) {
   platform::CUDADeviceGuard guard(place.device);
-  PADDLE_ENFORCE(cudaStreamCreate(&stream_));
+  PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamCreate(&stream_));
   callback_manager_.reset(new platform::StreamCallbackManager(stream_));
 }
 StreamGarbageCollector::~StreamGarbageCollector() {
   auto place = BOOST_GET_CONST(platform::CUDAPlace, this->dev_ctx_->GetPlace());
   platform::CUDADeviceGuard guard(place.device);
-  PADDLE_ENFORCE(cudaStreamSynchronize(stream_));
-  PADDLE_ENFORCE(cudaStreamDestroy(stream_));
+  PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream_));
+  PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamDestroy(stream_));
 }
 cudaStream_t StreamGarbageCollector::stream() const { return stream_; }
......