Unverified commit 364cc536, authored by Chen Weihang, committed by GitHub

Polish paddle fluid framework error message - part2 (#25667)

* polish framework error msg part2

* polish details
Parent 4598f116
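All of the hunks below apply the same convention: boolean-style `PADDLE_ENFORCE(cond, "msg")` and bare string `PADDLE_THROW("msg")` calls are replaced with comparison macros (`PADDLE_ENFORCE_EQ/GE/LT/LE/NE`) or a `PADDLE_THROW` carrying a typed error from `platform::errors`, with a message that states what is supported and what was actually received. A minimal sketch of that before/after pattern, assuming the usual `paddle/fluid/platform/enforce.h` header; the `CheckRank` helper and its arguments are illustrative only, not part of this PR:

```cpp
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace framework {

void CheckRank(int rank, int max_rank) {
  // Old style: PADDLE_ENFORCE(rank <= max_rank, "Invalid rank %d", rank);
  // New style: a comparison macro plus a typed error carrying full context.
  PADDLE_ENFORCE_LE(
      rank, max_rank,
      platform::errors::InvalidArgument(
          "The rank of the input should be at most %d, but received %d.",
          max_rank, rank));
}

}  // namespace framework
}  // namespace paddle
```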
......@@ -45,9 +45,10 @@ void TransformData(const OpKernelType &expected_kernel_type,
if (NeedTransformLayout(lout, lin)) {
#ifdef PADDLE_WITH_MKLDNN
if (lin == DataLayout::kMKLDNN || lout == DataLayout::kMKLDNN) {
PADDLE_ENFORCE(
!(lin == DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN),
"No layout transform needed between two MKLDNN OPKernels");
PADDLE_ENFORCE_EQ(
!(lin == DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN), true,
platform::errors::PreconditionNotMet(
"No layout transform needed between two MKLDNN OPKernels."));
if (lin != DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN) {
// Case1 - transform from Non-MKLDNN OPKernel to MKLDNN OPKernel
......@@ -96,7 +97,10 @@ void TransformData(const OpKernelType &expected_kernel_type,
PassTensorData(&out, &in);
}
PADDLE_ENFORCE(transformed, "No transform is applied, please check!");
PADDLE_ENFORCE_EQ(
transformed, true,
platform::errors::PreconditionNotMet(
"No transform is applied for the data needs to be transformed."));
// get output data
output_tensor->ShareDataWith(in);
}
......@@ -116,7 +120,10 @@ void SetTensorToVariable(const Variable &in_var, const Tensor &tensor,
trans_selected_rows->set_rows(in_selected_rows.rows());
trans_selected_rows->mutable_value()->ShareDataWith(tensor);
} else {
PADDLE_THROW("unknown var type");
PADDLE_THROW(platform::errors::Unavailable(
"Unsupported variable type, only supports LoDTensor or SelectedRows, "
"but the input variable type is %s.",
ToTypeName(in_var.Type())));
}
}
......
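The `SetTensorToVariable` hunk above replaces a bare "unknown var type" throw with a typed error that names both the supported types and the type actually received, obtained through `framework::ToTypeName`. A short sketch of the same reporting style; the `ReportUnsupportedVar` helper and its include list are illustrative assumptions, not part of this PR:

```cpp
#include "paddle/fluid/framework/var_type_traits.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace framework {

// Illustrative only: mirrors the SetTensorToVariable() error report above.
void ReportUnsupportedVar(const Variable &var) {
  PADDLE_THROW(platform::errors::Unavailable(
      "Unsupported variable type, only supports LoDTensor or SelectedRows, "
      "but the input variable type is %s.",
      ToTypeName(var.Type())));
}

}  // namespace framework
}  // namespace paddle
```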
......@@ -65,7 +65,8 @@ proto::VarType::Type ToDataType(std::type_index type) {
if (it != gDataTypeMap().cpp_to_proto_.end()) {
return it->second;
}
PADDLE_THROW("Not support %s as tensor type", type.name());
PADDLE_THROW(platform::errors::Unimplemented(
"Not support %s as tensor data type.", platform::demangle(type.name())));
}
std::type_index ToTypeIndex(proto::VarType::Type type) {
......@@ -73,8 +74,9 @@ std::type_index ToTypeIndex(proto::VarType::Type type) {
if (it != gDataTypeMap().proto_to_cpp_.end()) {
return it->second;
}
PADDLE_THROW("Not support proto::VarType::Type(%d) as tensor type",
static_cast<int>(type));
PADDLE_THROW(platform::errors::Unimplemented(
"Not support proto::VarType::Type(%d) as tensor type.",
static_cast<int>(type)));
}
std::string DataTypeToString(const proto::VarType::Type type) {
......@@ -82,8 +84,9 @@ std::string DataTypeToString(const proto::VarType::Type type) {
if (it != gDataTypeMap().proto_to_str_.end()) {
return it->second;
}
PADDLE_THROW("Not support proto::VarType::Type(%d) as tensor type",
static_cast<int>(type));
PADDLE_THROW(platform::errors::Unimplemented(
"Not support proto::VarType::Type(%d) as tensor type.",
static_cast<int>(type)));
}
size_t SizeOfType(proto::VarType::Type type) {
......@@ -91,7 +94,8 @@ size_t SizeOfType(proto::VarType::Type type) {
if (it != gDataTypeMap().proto_to_size_.end()) {
return it->second;
}
PADDLE_THROW("Not support %s as tensor type", DataTypeToString(type));
PADDLE_THROW(platform::errors::Unimplemented("Not support %s as tensor type.",
DataTypeToString(type)));
}
} // namespace framework
......
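In the data_type.cc hunks above, the raw `type.name()` is additionally passed through `platform::demangle` so the message prints a readable C++ type name instead of a mangled symbol, and enum values are rendered with `DataTypeToString`. A hedged sketch of that usage; the `ThrowUnsupportedType` helper is illustrative, and `platform::demangle` is assumed to be visible through the same `enforce.h` include used by data_type.cc:

```cpp
#include <typeindex>
#include "paddle/fluid/platform/enforce.h"

// Illustrative only: reports an unsupported C++ type with a demangled name,
// mirroring the ToDataType() change above.
void ThrowUnsupportedType(std::type_index type) {
  PADDLE_THROW(paddle::platform::errors::Unimplemented(
      "Not support %s as tensor data type.",
      paddle::platform::demangle(type.name())));
}
```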
......@@ -78,7 +78,9 @@ inline void VisitDataType(proto::VarType::Type type, Visitor visitor) {
_ForEachDataType_(VisitDataTypeCallback);
#undef VisitDataTypeCallback
PADDLE_THROW("Not supported %d", type);
PADDLE_THROW(platform::errors::Unimplemented(
"Not supported proto::VarType::Type(%d) as data type.",
static_cast<int>(type)));
}
template <typename Visitor>
......
......@@ -56,7 +56,8 @@ struct CastDataType {
context->Wait();
#endif
} else {
PADDLE_THROW("Unsupported place!");
PADDLE_THROW(platform::errors::Unimplemented(
"Place type is not supported when casting data type."));
}
}
};
......@@ -98,7 +99,9 @@ void TransDataType(const OpKernelType& kernel_type_for_var,
framework::VisitDataType(dst_type, CastDataType<bool>(in, out, ctx));
break;
default:
PADDLE_THROW("Not support type %d", src_type);
PADDLE_THROW(platform::errors::Unimplemented(
"Data type (%s) is not supported when casting data type.",
DataTypeToString(src_type)));
}
}
......
......@@ -81,9 +81,11 @@ bool contain_unknown_dim(const DDim& ddim) {
}
DDim slice_ddim(const DDim& dim, int begin, int end) {
PADDLE_ENFORCE(begin >= 0 && end <= dim.size(),
"[begin(%d), end(%d)) must be inside [0, %d) in ddim slice.",
begin, end, dim.size());
PADDLE_ENFORCE_EQ(
(begin >= 0 && end <= dim.size()), true,
platform::errors::InvalidArgument(
"[begin(%d), end(%d)) must be inside [0, %d) in ddim slice.", begin,
end, dim.size()));
// Constructor of DDim would check whether end - begin is valid
return DDim(dim.Get() + begin, end - begin);
}
......
......@@ -29,20 +29,23 @@ namespace framework {
return (callback); \
}
#define PADDLE_VISIT_DDIM(rank, callback) \
switch (rank) { \
PADDLE_VISIT_DDIM_BASE(0, callback); \
PADDLE_VISIT_DDIM_BASE(1, callback); \
PADDLE_VISIT_DDIM_BASE(2, callback); \
PADDLE_VISIT_DDIM_BASE(3, callback); \
PADDLE_VISIT_DDIM_BASE(4, callback); \
PADDLE_VISIT_DDIM_BASE(5, callback); \
PADDLE_VISIT_DDIM_BASE(6, callback); \
PADDLE_VISIT_DDIM_BASE(7, callback); \
PADDLE_VISIT_DDIM_BASE(8, callback); \
PADDLE_VISIT_DDIM_BASE(9, callback); \
default: \
PADDLE_THROW("Invalid rank %d", rank); \
#define PADDLE_VISIT_DDIM(rank, callback) \
switch (rank) { \
PADDLE_VISIT_DDIM_BASE(0, callback); \
PADDLE_VISIT_DDIM_BASE(1, callback); \
PADDLE_VISIT_DDIM_BASE(2, callback); \
PADDLE_VISIT_DDIM_BASE(3, callback); \
PADDLE_VISIT_DDIM_BASE(4, callback); \
PADDLE_VISIT_DDIM_BASE(5, callback); \
PADDLE_VISIT_DDIM_BASE(6, callback); \
PADDLE_VISIT_DDIM_BASE(7, callback); \
PADDLE_VISIT_DDIM_BASE(8, callback); \
PADDLE_VISIT_DDIM_BASE(9, callback); \
default: \
PADDLE_THROW(platform::errors::Unimplemented( \
"Invalid dimension to be accessed. Now only supports access to " \
"dimension 0 to 9, but received dimension is %d.", \
rank)); \
}
template <typename T1, typename T2>
......@@ -92,13 +95,31 @@ class DDim {
inline int64_t operator[](int idx) const { return dim_[idx]; }
inline int64_t& at(int idx) {
PADDLE_ENFORCE(idx >= 0 && idx < rank_, "Invalid idx %d", idx);
int64_t& at(int idx) {
PADDLE_ENFORCE_GE(idx, 0,
platform::errors::InvalidArgument(
"Invalid DDim index to be accessed. The valid index "
"is between 0 and %d, but received index is %d.",
rank_, idx));
PADDLE_ENFORCE_LT(idx, rank_,
platform::errors::InvalidArgument(
"Invalid DDim index to be accessed. The valid index "
"is between 0 and %d, but received index is %d.",
rank_, idx));
return dim_[idx];
}
inline int64_t at(int idx) const {
PADDLE_ENFORCE(idx >= 0 && idx < rank_, "Invalid idx %d", idx);
int64_t at(int idx) const {
PADDLE_ENFORCE_GE(idx, 0,
platform::errors::InvalidArgument(
"Invalid DDim index to be accessed. The valid index "
"is between 0 and %d, but received index is %d.",
rank_, idx));
PADDLE_ENFORCE_LT(idx, rank_,
platform::errors::InvalidArgument(
"Invalid DDim index to be accessed. The valid index "
"is between 0 and %d, but received index is %d.",
rank_, idx));
return dim_[idx];
}
......
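The `DDim::at()` change above also shows how a combined range check (`idx >= 0 && idx < rank_`) is split into `PADDLE_ENFORCE_GE` plus `PADDLE_ENFORCE_LT`, so the failing bound and both operand values appear in the report. A minimal sketch of the same split on a generic index check; the `AtChecked` function is hypothetical:

```cpp
#include <cstdint>
#include "paddle/fluid/platform/enforce.h"

// Illustrative bounds check following the DDim::at() pattern above.
int64_t AtChecked(const int64_t *data, int idx, int size) {
  PADDLE_ENFORCE_GE(idx, 0,
                    paddle::platform::errors::InvalidArgument(
                        "The index must be non-negative, but received %d.",
                        idx));
  PADDLE_ENFORCE_LT(idx, size,
                    paddle::platform::errors::InvalidArgument(
                        "The index must be less than %d, but received %d.",
                        size, idx));
  return data[idx];
}
```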
......@@ -30,7 +30,10 @@ static ::DLDataType GetDLDataTypeCode() {
} else if (std::is_integral<T>::value) {
dtype.code = kDLInt;
} else {
PADDLE_THROW("Unsupported data type %s", typeid(T).name());
PADDLE_THROW(platform::errors::Unavailable(
"Unsupported data type (%s), only supports float16, float, unsigned "
"int and int.",
platform::demangle(typeid(T).name())));
}
dtype.bits = 8 * sizeof(T);
dtype.lanes = 1;
......@@ -52,8 +55,9 @@ static DLDataType GetDLDataTypeFromTypeIndex(proto::VarType::Type type) {
static auto type_to_dtype_map = CreateDLDataTypeMap();
static auto type_to_dtype_map_end_it = type_to_dtype_map.end();
auto it = type_to_dtype_map.find(static_cast<int>(type));
PADDLE_ENFORCE(it != type_to_dtype_map_end_it, "Unsupported data type %d",
type);
PADDLE_ENFORCE_NE(it, type_to_dtype_map_end_it,
platform::errors::InvalidArgument(
"Unsupported data type (%s).", DataTypeToString(type)));
return it->second;
#undef REG_DL_DATA_TYPE
}
......@@ -73,7 +77,8 @@ struct DLContextVisitor : public boost::static_visitor<::DLContext> {
ctx.device_id = place.device;
return ctx;
#else
PADDLE_THROW("platform::CUDAPlace is not supported in CPU only version");
PADDLE_THROW(platform::errors::Unavailable(
"platform::CUDAPlace is not supported in CPU only version."));
#endif
}
......@@ -84,8 +89,8 @@ struct DLContextVisitor : public boost::static_visitor<::DLContext> {
ctx.device_id = 0;
return ctx;
#else
PADDLE_THROW(
"platform::CUDAPinnedPlace is not supported in CPU only version");
PADDLE_THROW(platform::errors::Unavailable(
"platform::CUDAPinnedPlace is not supported in CPU only version."));
#endif
}
};
......@@ -136,7 +141,10 @@ DLPackTensor::DLPackTensor(const Tensor &tensor, LaneType lanes) {
// refer to cupy and cudf, the compact tensor first dim's strides need to be 1
// and second dim's strides need to be length of rows of cudf
// cudf now only support dim=2
PADDLE_ENFORCE_LE(t_.ndim, 2, "cudf now only support dim=2.");
PADDLE_ENFORCE_LE(t_.ndim, 2, platform::errors::InvalidArgument(
"cudf now only supports dimension is 2, "
"but received dimension is %d.",
t_.ndim));
if (t_.ndim > 1)
t_.strides = new int64_t[2]{1, t_.shape[1]};
......
......@@ -556,9 +556,11 @@ void DownpourWorker::TrainFilesWithProfiler() {
continue;
}
PADDLE_ENFORCE_EQ(framework::TensorContainsInf(*tensor), false,
"Tensor %s contains Inf", var_name);
platform::errors::InvalidArgument(
"Tensor %s contains Inf.", var_name));
PADDLE_ENFORCE_EQ(framework::TensorContainsNAN(*tensor), false,
"Tensor %s contains NAN", var_name);
platform::errors::InvalidArgument(
"Tensor %s contains NAN.", var_name));
}
if (need_to_push_sparse_) {
......@@ -829,9 +831,11 @@ void DownpourWorker::TrainFiles() {
continue;
}
PADDLE_ENFORCE_EQ(framework::TensorContainsInf(*tensor), false,
"Tensor %s contains Inf", var_name);
platform::errors::InvalidArgument(
"Tensor %s contains Inf.", var_name));
PADDLE_ENFORCE_EQ(framework::TensorContainsNAN(*tensor), false,
"Tensor %s contains NAN", var_name);
platform::errors::InvalidArgument(
"Tensor %s contains NAN.", var_name));
}
if (need_to_push_sparse_) {
......
......@@ -26,7 +26,11 @@ struct EigenDim {
using Type = Eigen::DSizes<Eigen::DenseIndex, D>;
static Type From(const DDim& dims) {
PADDLE_ENFORCE(arity(dims) == D, "D must match arity(DDim)");
PADDLE_ENFORCE_EQ(arity(dims), D,
platform::errors::InvalidArgument(
"Input dimension size should be equal to %d, but "
"received dimension size is %d.",
arity(dims), D));
Type ret;
for (int64_t d = 0; d < arity(dims); d++) {
ret[d] = dims[d];
......@@ -69,8 +73,11 @@ struct EigenMatrix : public EigenTensor<T, 2, MajorType, IndexType> {
static typename EigenMatrix::Type Reshape(Tensor& tensor, // NOLINT
int num_col_dims) {
int rank = tensor.dims_.size();
PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank,
"`num_col_dims` must be between (0, rank_of_tensor).");
PADDLE_ENFORCE_EQ((num_col_dims > 0 && num_col_dims < rank), true,
platform::errors::InvalidArgument(
"Input dimension number(num_col_dims) must be "
"between 0 and %d, but received number is %d.",
rank, num_col_dims));
return EigenMatrix::From(tensor,
flatten_to_2d(tensor.dims(), num_col_dims));
}
......@@ -78,8 +85,11 @@ struct EigenMatrix : public EigenTensor<T, 2, MajorType, IndexType> {
static typename EigenMatrix::ConstType Reshape(const Tensor& tensor,
int num_col_dims) {
int rank = tensor.dims_.size();
PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank,
"`num_col_dims` must be between (0, rank_of_tensor).");
PADDLE_ENFORCE_EQ((num_col_dims > 0 && num_col_dims < rank), true,
platform::errors::InvalidArgument(
"Input dimension number(num_col_dims) must be "
"between 0 and %d, but received number is %d.",
rank, num_col_dims));
return EigenMatrix::From(tensor,
flatten_to_2d(tensor.dims(), num_col_dims));
}
......
......@@ -175,8 +175,9 @@ void DeleteUnusedTensors(
garbages.emplace_back(t.MoveMemoryHolder());
}
} else {
PADDLE_THROW("Type %s of %s is not supported eager deletion",
framework::ToTypeName(var->Type()), var_name);
PADDLE_THROW(platform::errors::Unimplemented(
"Type %s of variable %s is not supported eager deletion.",
framework::ToTypeName(var->Type()), var_name));
}
}
......
......@@ -79,15 +79,15 @@ StreamGarbageCollector::StreamGarbageCollector(const platform::CUDAPlace &place,
size_t max_memory_size)
: GarbageCollector(place, max_memory_size) {
platform::CUDADeviceGuard guard(place.device);
PADDLE_ENFORCE(cudaStreamCreate(&stream_));
PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamCreate(&stream_));
callback_manager_.reset(new platform::StreamCallbackManager(stream_));
}
StreamGarbageCollector::~StreamGarbageCollector() {
auto place = BOOST_GET_CONST(platform::CUDAPlace, this->dev_ctx_->GetPlace());
platform::CUDADeviceGuard guard(place.device);
PADDLE_ENFORCE(cudaStreamSynchronize(stream_));
PADDLE_ENFORCE(cudaStreamDestroy(stream_));
PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream_));
PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamDestroy(stream_));
}
cudaStream_t StreamGarbageCollector::stream() const { return stream_; }
......
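The garbage-collector hunk switches bare `PADDLE_ENFORCE(cuda...)` calls to `PADDLE_ENFORCE_CUDA_SUCCESS`, the macro dedicated to checking a CUDA runtime/driver return code. A hedged sketch of that usage outside the collector; the `SyncStream` wrapper is illustrative and assumes a CUDA build:

```cpp
#ifdef PADDLE_WITH_CUDA
#include <cuda_runtime.h>
#include "paddle/fluid/platform/enforce.h"

// Illustrative only: any cudaError_t-returning call can be wrapped the same
// way as cudaStreamCreate/Synchronize/Destroy in the hunk above.
void SyncStream(cudaStream_t stream) {
  PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream));
}
#endif
```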