未验证 提交 c34c80d3 编写于 作者: C Chen Weihang 提交者: GitHub

Polish framework error message part3 (#25701)

* polish framework error message part3

* polish details

* fix error message print error
上级 e52df3b1
......@@ -96,14 +96,14 @@ class GradOpDescMakerBase {
if (!drop_empty_grad) {
return ret_val;
}
PADDLE_ENFORCE_LE(var_names.size(), 1UL,
"BUG from operator developer:"
" for input argument with a list of variables, "
" drop_empty_grad is not allowed because it makes"
" the correspondence bewteen a variable and its gradient"
" ambiguous."
" Op type %s",
fwd_op_.Type());
PADDLE_ENFORCE_LE(
var_names.size(), 1UL,
platform::errors::Unavailable(
"BUG from operator developer:"
" for input argument with a list of variables, "
" drop_empty_grad is not allowed because it makes"
" the correspondence bewteen a variable and its gradient"
" ambiguous."));
std::vector<std::string> dropped_ret_val;
dropped_ret_val.reserve(ret_val.size());
......@@ -157,7 +157,8 @@ class GradOpDescMakerBase {
// Looks up the attribute named `name` in the forward op's attribute map.
// Returns a const reference to the stored attribute value.
// Raises platform::errors::NotFound if the attribute does not exist.
const Attribute& GetAttr(const std::string& name) const {
  auto& map = fwd_op_.GetAttrMap();
  auto it = map.find(name);
  // Use the typed PADDLE_ENFORCE_NE with a structured NotFound error;
  // the deprecated string-only PADDLE_ENFORCE duplicate is removed.
  PADDLE_ENFORCE_NE(it, map.end(),
                    platform::errors::NotFound(
                        "Cannot find attribute (%s).", name));
  return it->second;
}
......
......@@ -53,7 +53,9 @@ void HogwildWorker::CreateThreadScope(const ProgramDesc &program) {
auto &block = program.Block(0);
PADDLE_ENFORCE_NOT_NULL(
root_scope_, "root_scope should be set before creating thread scope");
root_scope_,
platform::errors::NotFound(
"Root scope should be set before creating thread scope."));
thread_scope_ = &root_scope_->NewScope();
......
......@@ -37,7 +37,10 @@ inline std::string LibraryTypeToString(const LibraryType& library_type) {
case LibraryType::kCUDNN:
return "CUDNN";
default:
PADDLE_THROW("unknown LibraryType %d", static_cast<int>(library_type));
PADDLE_THROW(platform::errors::Unimplemented(
"Unknown LibraryType code (%d), only supports library type include "
"PLAIN(0), MKLDNN(1), CUDNN(2).",
static_cast<int>(library_type)));
}
}
......@@ -59,7 +62,10 @@ inline LibraryType StringToLibraryType(const char* ctype) {
} else if (s == std::string("CUDA")) {
return LibraryType::kPlain;
} else {
PADDLE_THROW("Unknown LibraryType %s", s.c_str());
PADDLE_THROW(platform::errors::Unimplemented(
"Unknown LibraryType string (%s), only support library type string "
"include PLAIN, MKLDNN, CUDNN, CPU and CUDA.",
s.c_str()));
}
}
......
......@@ -35,7 +35,10 @@ T *DynLoad(void *handle, std::string name) {
#else
auto errorno = GetLastError();
#endif // !_WIN32
PADDLE_ENFORCE_NOT_NULL(func, errorno);
PADDLE_ENFORCE_NOT_NULL(
func,
platform::errors::NotFound(
"Failed to load dynamic operator library, error code(%s).", errorno));
return func;
}
......@@ -63,9 +66,9 @@ void LoadOpLib(const std::string &dso_name) {
type == "conditional_block" || type == "conditional_block_grad") {
continue;
}
if (info_map.Has(n.first)) {
PADDLE_THROW("Op %s has been registered.");
}
PADDLE_ENFORCE_NE(info_map.Has(n.first), true,
platform::errors::AlreadyExists(
"Operator (%s) has been registered.", type));
OpInfo info;
info.creator_ = n.second.creator_;
......@@ -88,7 +91,8 @@ void LoadOpLib(const std::string &dso_name) {
for (auto &str : strs) {
proto::OpDesc proto_desc;
PADDLE_ENFORCE_EQ(proto_desc.ParseFromString(str), true,
"Failed to parse OpDesc from string");
platform::errors::InvalidArgument(
"Failed to parse OpDesc from string."));
ret.emplace_back(new OpDesc(proto_desc, nullptr));
}
return ret;
......
......@@ -19,9 +19,11 @@ namespace framework {
void LoDRankTable::Reset(const LoD& lod, size_t level) {
this->coarse_lod_.clear();
this->items_.clear();
PADDLE_ENFORCE(level < lod.size(),
"Cannot rank lod since the level %d is less than lod size %d",
level, lod.size());
PADDLE_ENFORCE_LT(
level, lod.size(),
platform::errors::InvalidArgument(
"Cannot reset LoD since the level %d is less than lod size %d.",
level, lod.size()));
coarse_lod_.reserve(level);
for (size_t i = 0; i < level; ++i) {
coarse_lod_.push_back(lod[i]);
......
......@@ -65,9 +65,23 @@ std::string LoDToString(const LoD &lod) {
LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin,
size_t elem_end) {
PADDLE_ENFORCE_LT(level, in.size());
PADDLE_ENFORCE_LT(elem_begin, elem_end);
PADDLE_ENFORCE_LT(elem_end, in[level].size());
PADDLE_ENFORCE_LT(level, in.size(),
platform::errors::InvalidArgument(
"The input LoDTensor's lod level should be less than "
"the LoD size, but received level is %d, LoD is %s.",
level, in));
PADDLE_ENFORCE_LT(
elem_begin, elem_end,
platform::errors::InvalidArgument(
"The index to start slicing should be less than the index to end "
"slicing, but received start index is %d, end index is %d.",
elem_begin, elem_end));
PADDLE_ENFORCE_LT(
elem_end, in[level].size(),
platform::errors::InvalidArgument(
"The index to end slicing should be less than the input LoD size, "
"but received end index is %d, LoD size is %d.",
elem_end, in[level].size()));
LoD res;
res.resize(in.size() - level);
......@@ -185,8 +199,17 @@ LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx,
LoD sub_lod;
for (size_t level_idx = start_level; level_idx < lod.size(); ++level_idx) {
PADDLE_ENFORCE_LE(start_idx, end_idx);
PADDLE_ENFORCE_LT(end_idx, lod[level_idx].size());
PADDLE_ENFORCE_LE(start_idx, end_idx,
platform::errors::InvalidArgument(
"The start index should be less than the end index, "
"but received start index is %d, end index is %d.",
start_idx, end_idx));
PADDLE_ENFORCE_LT(
end_idx, lod[level_idx].size(),
platform::errors::InvalidArgument(
"The end index should be less than the LoD level size, but "
"received end index is %d, LoD level size is %d.",
end_idx, lod[level_idx].size()));
std::vector<size_t> level_lens;
for (size_t i = start_idx; i < end_idx; ++i) {
level_lens.push_back(lod[level_idx][i + 1] - lod[level_idx][i]);
......@@ -202,7 +225,10 @@ LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx,
void AppendLoD(LoD *lod, const LoD &lod_length) {
PADDLE_ENFORCE(
lod->empty() || lod->size() == lod_length.size(),
"The lod_length should has the same size with the appended lod.");
platform::errors::InvalidArgument(
"The input LoD length should be equal to the appended LoD size, but "
"received input LoD length is %d, actual LoD size is %d.",
lod_length, lod->size()));
if (lod->empty()) {
for (size_t i = 0; i < lod_length.size(); ++i) {
lod->emplace_back(1, 0); // size = 1, value = 0;
......@@ -254,11 +280,11 @@ void DeserializeFromStream(std::istream &is, LoDTensor *tensor,
is.read(reinterpret_cast<char *>(&version), sizeof(version));
PADDLE_ENFORCE_EQ(framework::IsTensorVersionSupported(version), true,
platform::errors::InvalidArgument(
"tensor version %u is not supported.", version));
"Tensor version %u is not supported.", version));
PADDLE_ENFORCE_EQ(
version, 0U,
platform::errors::InvalidArgument(
"tensor version %u is not supported, Only version 0 is supported",
"Tensor version %u is not supported, only version 0 is supported.",
version));
}
{
......@@ -280,11 +306,11 @@ void DeserializeFromStream(std::istream &is, LoDTensor *tensor,
is.read(reinterpret_cast<char *>(&version), sizeof(version));
PADDLE_ENFORCE_EQ(framework::IsTensorVersionSupported(version), true,
platform::errors::InvalidArgument(
"tensor version %u is not supported.", version));
"Tensor version %u is not supported.", version));
PADDLE_ENFORCE_EQ(
version, 0U,
platform::errors::InvalidArgument(
"tensor version %u is not supported, Only version 0 is supported",
"Tensor version %u is not supported, only version 0 is supported.",
version));
}
{
......@@ -310,7 +336,7 @@ std::vector<LoDTensor> LoDTensor::SplitLoDTensor(
const std::vector<platform::Place> places) const {
PADDLE_ENFORCE_GT(places.size(), 0,
platform::errors::InvalidArgument(
"place number cannot be empty when splitting"));
"Place number cannot be empty when splitting."));
check_memory_size();
size_t batch_size =
lod().empty() ? static_cast<size_t>(dims()[0]) : lod()[0].size() - 1;
......@@ -342,7 +368,9 @@ std::vector<LoDTensor> LoDTensor::SplitLoDTensor(
auto end = std::min<size_t>((i + 1) * step_width, batch_size);
PADDLE_ENFORCE_LT(begin, end,
platform::errors::InvalidArgument(
"begin must be less than end, this may be a bug"));
"The begin index must be less than the end index, "
"but received begin index is %d, end index is %d.",
begin, end));
LoDTensor dst;
if (lod().empty()) {
......@@ -376,7 +404,9 @@ std::vector<LoDTensor> LoDTensor::SplitLoDTensor(
void LoDTensor::MergeLoDTensor(
const std::vector<const LoDTensor *> &lod_tensors,
platform::Place dst_place) {
PADDLE_ENFORCE(!lod_tensors.empty());
PADDLE_ENFORCE_EQ(lod_tensors.empty(), false,
platform::errors::InvalidArgument(
"The LoDTensors to be merged are empty."));
framework::DDim new_dim = lod_tensors[0]->dims();
proto::VarType::Type new_type = proto::VarType::FP32;
......@@ -395,15 +425,35 @@ void LoDTensor::MergeLoDTensor(
for (size_t i = 1; i < lod_tensors.size(); ++i) {
auto *t = lod_tensors[i];
if (t->numel() && t->IsInitialized()) {
PADDLE_ENFORCE_EQ(new_type, t->type());
PADDLE_ENFORCE_EQ(new_layout, t->layout());
PADDLE_ENFORCE_EQ(framework::product(new_dim) / new_dim[0],
framework::product(t->dims()) / t->dims()[0]);
PADDLE_ENFORCE_EQ(
new_type, t->type(),
platform::errors::InvalidArgument(
"LoDTensor data type does not match, expected type is %s, actual "
"type is %s.",
DataTypeToString(new_type), DataTypeToString(t->type())));
PADDLE_ENFORCE_EQ(
new_layout, t->layout(),
platform::errors::InvalidArgument(
"LoDTensor layout does not match, expected layout is %s, "
"actual layout is %s.",
DataLayoutToString(new_layout), DataLayoutToString(t->layout())));
PADDLE_ENFORCE_EQ(
framework::product(new_dim) / new_dim[0],
framework::product(t->dims()) / t->dims()[0],
platform::errors::InvalidArgument(
"LoDTensor dimension does not match, all dimensions except the "
"first dimension need to be equal,"
"but expected dimension is %s, actual dimension is %s.",
new_dim, t->dims()));
new_dim[0] += t->dims()[0];
}
auto &lod = t->lod();
PADDLE_ENFORCE_EQ(new_lod.size(), lod.size());
PADDLE_ENFORCE_EQ(new_lod.size(), lod.size(),
platform::errors::InvalidArgument(
"The LoD information of LoDTensor does not match, "
"expected LoD is %s, actual LoD is %s.",
new_lod, lod));
for (size_t j = 0; j < lod.size(); ++j) {
auto &sub_lod = new_lod[j];
size_t offset = sub_lod.back();
......
......@@ -117,8 +117,19 @@ class LoDTensor : public Tensor {
* Get the start offset and end offset of an element from LoD.
*/
// Returns the [start, end) offset pair of element `elem` at LoD level
// `level`, read from the underlying lod_ table.
// Raises platform::errors::InvalidArgument if `level` is out of range of
// the LoD, or `elem` is out of range of that level.
std::pair<size_t, size_t> lod_element(size_t level, size_t elem) const {
  // Validate the level before indexing lod_. The redundant message-less
  // PADDLE_ENFORCE_LT duplicates are removed in favor of the structured
  // error forms.
  PADDLE_ENFORCE_LT(
      level, NumLevels(),
      platform::errors::InvalidArgument(
          "The input level of LoD is invalid, it should be less than LoD "
          "size. The input level is %zu, the LoD size is %zu.",
          level, NumLevels()));
  // Validate the element index within the chosen level. Note: a space was
  // added between the two sentences of the message (was "level.""The").
  PADDLE_ENFORCE_LT(elem, NumElements(level),
                    platform::errors::InvalidArgument(
                        "The input element of LoD is invalid, it should be "
                        "less than the number of elements in its level. "
                        "The input element is %zu, the number of elements in "
                        "its level is %zu.",
                        elem, NumElements(level)));
  // lod_[level][elem] is the start offset; the next entry is the end.
  return std::make_pair((lod_)[level][elem], (lod_)[level][elem + 1]);
}
......@@ -131,7 +142,12 @@ class LoDTensor : public Tensor {
* Number of elements in a level.
*/
// Returns the number of elements at LoD level `level` (default: level 0).
// An offset table of size k describes k-1 elements, hence the -1 below.
// Raises platform::errors::InvalidArgument if `level` is out of range.
size_t NumElements(size_t level = 0) const {
  // Validate the level before indexing lod_. The redundant message-less
  // PADDLE_ENFORCE_LT duplicate is removed in favor of the structured
  // error form.
  PADDLE_ENFORCE_LT(
      level, NumLevels(),
      platform::errors::InvalidArgument(
          "The input level of LoD is invalid, it should be less than LoD "
          "size. The input level is %zu, the LoD size is %zu.",
          level, NumLevels()));
  // the last offset is the end of last element
  return (lod_)[level].size() - 1;
}
......@@ -172,7 +188,13 @@ LoDTensor LodExpand(const LoDTensor& source, const LoD& lod, size_t level,
tensor.Resize(dims);
tensor.mutable_data<T>(place);
PADDLE_ENFORCE_EQ(num_instances, lod_level.size() - 1);
PADDLE_ENFORCE_EQ(
num_instances, lod_level.size() - 1,
platform::errors::InvalidArgument(
"The input LoDTensor instance number should be equal to the LoD "
"level size minus 1."
"The input instance number is %zu, LoD level size is %zu.",
num_instances, lod_level.size()));
for (size_t ins = 0; ins < num_instances; ins++) {
for (size_t elem = lod_level[ins]; elem < lod_level[ins + 1]; elem++) {
auto slice = tensor.Slice(elem, elem + 1);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册