diff --git a/paddle/fluid/framework/grad_op_desc_maker.h b/paddle/fluid/framework/grad_op_desc_maker.h
index 7a3ba0863cf20d69a37d515dd17089c9f46cca26..27575878f2eedb6f3e30e2370a5717c313d58ff9 100644
--- a/paddle/fluid/framework/grad_op_desc_maker.h
+++ b/paddle/fluid/framework/grad_op_desc_maker.h
@@ -96,14 +96,14 @@ class GradOpDescMakerBase {
     if (!drop_empty_grad) {
       return ret_val;
     }
-    PADDLE_ENFORCE_LE(var_names.size(), 1UL,
-                      "BUG from operator developer:"
-                      " for input argument with a list of variables, "
-                      " drop_empty_grad is not allowed because it makes"
-                      " the correspondence bewteen a variable and its gradient"
-                      " ambiguous."
-                      " Op type %s",
-                      fwd_op_.Type());
+    PADDLE_ENFORCE_LE(
+        var_names.size(), 1UL,
+        platform::errors::Unavailable(
+            "BUG from operator developer:"
+            " for input argument with a list of variables, "
+            " drop_empty_grad is not allowed because it makes"
+            " the correspondence between a variable and its gradient"
+            " ambiguous."));
 
     std::vector<std::string> dropped_ret_val;
     dropped_ret_val.reserve(ret_val.size());
@@ -157,7 +157,8 @@ class GradOpDescMakerBase {
   const Attribute& GetAttr(const std::string& name) const {
     auto& map = fwd_op_.GetAttrMap();
     auto it = map.find(name);
-    PADDLE_ENFORCE(it != map.end(), "Cannot find attribute %s", name);
+    PADDLE_ENFORCE_NE(it, map.end(), platform::errors::NotFound(
+                                         "Cannot find attribute (%s).", name));
     return it->second;
   }
 
diff --git a/paddle/fluid/framework/hogwild_worker.cc b/paddle/fluid/framework/hogwild_worker.cc
index c51f091c54a98924a239f0e1ae717278863f7d6d..1117d676a5ece5b97a50b6290781f3bbc853cf7a 100644
--- a/paddle/fluid/framework/hogwild_worker.cc
+++ b/paddle/fluid/framework/hogwild_worker.cc
@@ -53,7 +53,9 @@ void HogwildWorker::CreateThreadScope(const ProgramDesc &program) {
   auto &block = program.Block(0);
 
   PADDLE_ENFORCE_NOT_NULL(
-      root_scope_, "root_scope should be set before creating thread scope");
+      root_scope_,
+      platform::errors::NotFound(
+          "Root scope should be set before creating thread scope."));
 
   thread_scope_ = &root_scope_->NewScope();
 
diff --git a/paddle/fluid/framework/library_type.h b/paddle/fluid/framework/library_type.h
index 904cc013012b9c3ea8054816446844f6d2cda26b..d46f8a574c0d956dc0a90bc2741d2cb80313ab7f 100644
--- a/paddle/fluid/framework/library_type.h
+++ b/paddle/fluid/framework/library_type.h
@@ -37,7 +37,10 @@ inline std::string LibraryTypeToString(const LibraryType& library_type) {
     case LibraryType::kCUDNN:
       return "CUDNN";
     default:
-      PADDLE_THROW("unknown LibraryType %d", static_cast<int>(library_type));
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "Unknown LibraryType code (%d), only library types PLAIN(0), "
+          "MKLDNN(1) and CUDNN(2) are supported.",
+          static_cast<int>(library_type)));
   }
 }
 
@@ -59,7 +62,10 @@ inline LibraryType StringToLibraryType(const char* ctype) {
   } else if (s == std::string("CUDA")) {
     return LibraryType::kPlain;
   } else {
-    PADDLE_THROW("Unknown LibraryType %s", s.c_str());
+    PADDLE_THROW(platform::errors::Unimplemented(
+        "Unknown LibraryType string (%s), only library type strings PLAIN, "
+        "MKLDNN, CUDNN, CPU and CUDA are supported.",
+        s.c_str()));
   }
 }
 
diff --git a/paddle/fluid/framework/load_op_lib.h b/paddle/fluid/framework/load_op_lib.h
index dd96137f02010ca2cf1e71597362d5f03e9fa008..16cffe119d63e0cb8bd6ff76f4ac5792127f480d 100644
--- a/paddle/fluid/framework/load_op_lib.h
+++ b/paddle/fluid/framework/load_op_lib.h
@@ -35,7 +35,10 @@ T *DynLoad(void *handle, std::string name) {
 #else
   auto errorno = GetLastError();
 #endif  // !_WIN32
-  PADDLE_ENFORCE_NOT_NULL(func, errorno);
+  PADDLE_ENFORCE_NOT_NULL(
+      func,
+      platform::errors::NotFound(
+          "Failed to load dynamic operator library, error code is %s.", errorno));
   return func;
 }
 
@@ -63,9 +66,9 @@ void LoadOpLib(const std::string &dso_name) {
         type == "conditional_block" || type == "conditional_block_grad") {
       continue;
     }
-    if (info_map.Has(n.first)) {
-      PADDLE_THROW("Op %s has been registered.");
-    }
+    PADDLE_ENFORCE_NE(info_map.Has(n.first), true,
+                      platform::errors::AlreadyExists(
+                          "Operator (%s) has been registered.", type));
 
     OpInfo info;
     info.creator_ = n.second.creator_;
@@ -88,7 +91,8 @@ void LoadOpLib(const std::string &dso_name) {
     for (auto &str : strs) {
       proto::OpDesc proto_desc;
       PADDLE_ENFORCE_EQ(proto_desc.ParseFromString(str), true,
-                        "Failed to parse OpDesc from string");
+                        platform::errors::InvalidArgument(
+                            "Failed to parse OpDesc from string."));
       ret.emplace_back(new OpDesc(proto_desc, nullptr));
     }
     return ret;
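Every hunk above applies the same conversion: a bare `PADDLE_ENFORCE(cond, fmt, ...)` or `PADDLE_THROW(fmt, ...)` becomes a comparison macro (`PADDLE_ENFORCE_EQ/NE/LT/LE/NOT_NULL`) paired with a typed `platform::errors::*` builder, so a failure reports an error class plus both operands instead of an untyped string. The sketch below is a minimal standalone imitation of that pattern, not Paddle's actual macro implementation; the `errors` namespace and `ENFORCE_EQ` here are stand-ins.

```cpp
// Minimal sketch of the enforce pattern used in the hunks above -- a
// stand-in, NOT Paddle's implementation of PADDLE_ENFORCE_* or
// platform::errors.
#include <cstdio>
#include <sstream>
#include <stdexcept>
#include <string>

namespace errors {
// Each factory tags a message with an error class, mirroring
// platform::errors::NotFound / InvalidArgument / Unimplemented above.
inline std::string InvalidArgument(const std::string& msg) {
  return "InvalidArgument: " + msg;
}
inline std::string NotFound(const std::string& msg) {
  return "NotFound: " + msg;
}
}  // namespace errors

// A comparison macro sees both operands, so the failure report can print
// expected vs. actual -- the information a bare ENFORCE(a == b, ...) loses.
#define ENFORCE_EQ(a, b, err_msg)                              \
  do {                                                         \
    if (!((a) == (b))) {                                       \
      std::ostringstream oss;                                  \
      oss << (err_msg) << " (" << (a) << " vs " << (b) << ")"; \
      throw std::runtime_error(oss.str());                     \
    }                                                          \
  } while (0)

int main() {
  int version = 1;
  try {
    // Mirrors the tensor version check in DeserializeFromStream below.
    ENFORCE_EQ(version, 0,
               errors::InvalidArgument("Tensor version is not supported."));
  } catch (const std::exception& e) {
    std::fprintf(stderr, "%s\n", e.what());
    // Prints: InvalidArgument: Tensor version is not supported. (1 vs 0)
  }
}
```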
diff --git a/paddle/fluid/framework/lod_rank_table.cc b/paddle/fluid/framework/lod_rank_table.cc
index 6bc795b642bf79b7556869c5ebe9b0323d3cc5fc..70df4f50ec910bfaa78924f834fa2c165ac1048d 100644
--- a/paddle/fluid/framework/lod_rank_table.cc
+++ b/paddle/fluid/framework/lod_rank_table.cc
@@ -19,9 +19,11 @@ namespace framework {
 void LoDRankTable::Reset(const LoD& lod, size_t level) {
   this->coarse_lod_.clear();
   this->items_.clear();
-  PADDLE_ENFORCE(level < lod.size(),
-                 "Cannot rank lod since the level %d is less than lod size %d",
-                 level, lod.size());
+  PADDLE_ENFORCE_LT(
+      level, lod.size(),
+      platform::errors::InvalidArgument(
+          "Cannot reset LoD since the level %d is not less than the LoD size %d.",
+          level, lod.size()));
   coarse_lod_.reserve(level);
   for (size_t i = 0; i < level; ++i) {
     coarse_lod_.push_back(lod[i]);
diff --git a/paddle/fluid/framework/lod_tensor.cc b/paddle/fluid/framework/lod_tensor.cc
index 2d1cba3b0f795cb1b65286adbf51d9bd2ddeb1f9..40615d772e555bb9e2ac44a6339de9f3be3c9562 100644
--- a/paddle/fluid/framework/lod_tensor.cc
+++ b/paddle/fluid/framework/lod_tensor.cc
@@ -65,9 +65,23 @@ std::string LoDToString(const LoD &lod) {
 
 LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin,
                  size_t elem_end) {
-  PADDLE_ENFORCE_LT(level, in.size());
-  PADDLE_ENFORCE_LT(elem_begin, elem_end);
-  PADDLE_ENFORCE_LT(elem_end, in[level].size());
+  PADDLE_ENFORCE_LT(level, in.size(),
+                    platform::errors::InvalidArgument(
+                        "The input LoDTensor's lod level should be less than "
+                        "the LoD size, but received level is %d, LoD is %s.",
+                        level, in));
+  PADDLE_ENFORCE_LT(
+      elem_begin, elem_end,
+      platform::errors::InvalidArgument(
+          "The index to start slicing should be less than the index to end "
+          "slicing, but received start index is %d, end index is %d.",
+          elem_begin, elem_end));
+  PADDLE_ENFORCE_LT(
+      elem_end, in[level].size(),
+      platform::errors::InvalidArgument(
+          "The index to end slicing should be less than the input LoD size, "
+          "but received end index is %d, LoD size is %d.",
+          elem_end, in[level].size()));
 
   LoD res;
   res.resize(in.size() - level);
@@ -185,8 +199,17 @@ LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx,
   LoD sub_lod;
 
   for (size_t level_idx = start_level; level_idx < lod.size(); ++level_idx) {
-    PADDLE_ENFORCE_LE(start_idx, end_idx);
-    PADDLE_ENFORCE_LT(end_idx, lod[level_idx].size());
+    PADDLE_ENFORCE_LE(start_idx, end_idx,
+                      platform::errors::InvalidArgument(
+                          "The start index should be less than or equal to the "
+                          "end index, but received start index is %d, end index is %d.",
+                          start_idx, end_idx));
+    PADDLE_ENFORCE_LT(
+        end_idx, lod[level_idx].size(),
+        platform::errors::InvalidArgument(
+            "The end index should be less than the LoD level size, but "
+            "received end index is %d, LoD level size is %d.",
+            end_idx, lod[level_idx].size()));
     std::vector<size_t> level_lens;
     for (size_t i = start_idx; i < end_idx; ++i) {
       level_lens.push_back(lod[level_idx][i + 1] - lod[level_idx][i]);
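The three checks added to `SliceInLevel` spell out the LoD invariants they guard: `level` must index an existing level, the element range must be non-empty, and the range must stay inside that level's offset vector. A standalone illustration, using plain nested vectors as a simplified stand-in for `framework::LoD`:

```cpp
// Simplified LoD: each level is a monotonically increasing offset vector.
// This mirrors the three PADDLE_ENFORCE_LT preconditions of SliceInLevel;
// it is an illustration, not Paddle's LoD type.
#include <cassert>
#include <cstddef>
#include <vector>

using LoD = std::vector<std::vector<std::size_t>>;

int main() {
  // Level 0 groups 4 sub-sequences into 2 sequences; level 1 maps the 4
  // sub-sequences onto 9 underlying rows.
  LoD lod = {{0, 2, 4}, {0, 3, 5, 7, 9}};
  std::size_t level = 0, elem_begin = 0, elem_end = 1;

  assert(level < lod.size());            // level indexes an existing LoD level
  assert(elem_begin < elem_end);         // the slice is non-empty
  assert(elem_end < lod[level].size());  // the slice stays inside the level
  return 0;
}
```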
@@ -202,7 +225,10 @@ LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx,
 void AppendLoD(LoD *lod, const LoD &lod_length) {
   PADDLE_ENFORCE(
       lod->empty() || lod->size() == lod_length.size(),
-      "The lod_length should has the same size with the appended lod.");
+      platform::errors::InvalidArgument(
+          "The input LoD length should be equal to the appended LoD size, but "
+          "received input LoD length is %d, actual LoD size is %d.",
+          lod_length.size(), lod->size()));
   if (lod->empty()) {
     for (size_t i = 0; i < lod_length.size(); ++i) {
       lod->emplace_back(1, 0);  // size = 1, value = 0;
@@ -254,11 +280,11 @@ void DeserializeFromStream(std::istream &is, LoDTensor *tensor,
     is.read(reinterpret_cast<char *>(&version), sizeof(version));
     PADDLE_ENFORCE_EQ(framework::IsTensorVersionSupported(version), true,
                       platform::errors::InvalidArgument(
-                          "tensor version %u is not supported.", version));
+                          "Tensor version %u is not supported.", version));
     PADDLE_ENFORCE_EQ(
         version, 0U,
         platform::errors::InvalidArgument(
-            "tensor version %u is not supported, Only version 0 is supported",
+            "Tensor version %u is not supported, only version 0 is supported.",
             version));
   }
   {
@@ -280,11 +306,11 @@ void DeserializeFromStream(std::istream &is, LoDTensor *tensor,
     is.read(reinterpret_cast<char *>(&version), sizeof(version));
     PADDLE_ENFORCE_EQ(framework::IsTensorVersionSupported(version), true,
                       platform::errors::InvalidArgument(
-                          "tensor version %u is not supported.", version));
+                          "Tensor version %u is not supported.", version));
     PADDLE_ENFORCE_EQ(
         version, 0U,
         platform::errors::InvalidArgument(
-            "tensor version %u is not supported, Only version 0 is supported",
+            "Tensor version %u is not supported, only version 0 is supported.",
             version));
   }
   {
@@ -310,7 +336,7 @@ std::vector<LoDTensor> LoDTensor::SplitLoDTensor(
     const std::vector<platform::Place> places) const {
   PADDLE_ENFORCE_GT(places.size(), 0,
                     platform::errors::InvalidArgument(
-                        "place number cannot be empty when splitting"));
+                        "The number of places should be greater than 0 "
+                        "when splitting."));
   check_memory_size();
   size_t batch_size =
       lod().empty() ? static_cast<size_t>(dims()[0]) : lod()[0].size() - 1;
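The `begin < end` enforce in `SplitLoDTensor` guards the per-place sharding arithmetic: when the batch size does not divide evenly across places, trailing shards can come out empty. A sketch of that arithmetic, assuming the usual round-up division for the step width (the surrounding loop is not part of this diff, and the batch size and place count below are invented for the example):

```cpp
// Sharding sketch for SplitLoDTensor's begin/end check. Round-up division
// for step_width is an assumed detail; the real computation is outside
// this diff.
#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  std::size_t batch_size = 10, num_places = 6;
  std::size_t step_width = (batch_size + num_places - 1) / num_places;  // 2
  for (std::size_t i = 0; i < num_places; ++i) {
    std::size_t begin = i * step_width;
    std::size_t end = std::min((i + 1) * step_width, batch_size);
    if (begin >= end) break;  // the empty trailing shard the enforce rejects
    std::printf("place %zu gets rows [%zu, %zu)\n", i, begin, end);
  }
  return 0;
}
```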
@@ -342,7 +368,9 @@ std::vector<LoDTensor> LoDTensor::SplitLoDTensor(
     auto end = std::min((i + 1) * step_width, batch_size);
     PADDLE_ENFORCE_LT(begin, end,
                       platform::errors::InvalidArgument(
-                          "begin must be less than end, this may be a bug"));
+                          "The begin index must be less than the end index, "
+                          "but received begin index is %d, end index is %d.",
+                          begin, end));
 
     LoDTensor dst;
     if (lod().empty()) {
@@ -376,7 +404,9 @@ std::vector<LoDTensor> LoDTensor::SplitLoDTensor(
 void LoDTensor::MergeLoDTensor(
     const std::vector<const LoDTensor *> &lod_tensors,
     platform::Place dst_place) {
-  PADDLE_ENFORCE(!lod_tensors.empty());
+  PADDLE_ENFORCE_EQ(lod_tensors.empty(), false,
+                    platform::errors::InvalidArgument(
+                        "The LoDTensors to be merged are empty."));
 
   framework::DDim new_dim = lod_tensors[0]->dims();
   proto::VarType::Type new_type = proto::VarType::FP32;
@@ -395,15 +425,35 @@ void LoDTensor::MergeLoDTensor(
   for (size_t i = 1; i < lod_tensors.size(); ++i) {
     auto *t = lod_tensors[i];
     if (t->numel() && t->IsInitialized()) {
-      PADDLE_ENFORCE_EQ(new_type, t->type());
-      PADDLE_ENFORCE_EQ(new_layout, t->layout());
-      PADDLE_ENFORCE_EQ(framework::product(new_dim) / new_dim[0],
-                        framework::product(t->dims()) / t->dims()[0]);
+      PADDLE_ENFORCE_EQ(
+          new_type, t->type(),
+          platform::errors::InvalidArgument(
+              "LoDTensor data type does not match, expected type is %s, actual "
+              "type is %s.",
+              DataTypeToString(new_type), DataTypeToString(t->type())));
+      PADDLE_ENFORCE_EQ(
+          new_layout, t->layout(),
+          platform::errors::InvalidArgument(
+              "LoDTensor layout does not match, expected layout is %s, "
+              "actual layout is %s.",
+              DataLayoutToString(new_layout), DataLayoutToString(t->layout())));
+      PADDLE_ENFORCE_EQ(
+          framework::product(new_dim) / new_dim[0],
+          framework::product(t->dims()) / t->dims()[0],
+          platform::errors::InvalidArgument(
+              "LoDTensor dimension does not match, all dimensions except the "
+              "first dimension need to be equal, "
+              "but expected dimension is %s, actual dimension is %s.",
+              new_dim, t->dims()));
       new_dim[0] += t->dims()[0];
     }
 
     auto &lod = t->lod();
-    PADDLE_ENFORCE_EQ(new_lod.size(), lod.size());
+    PADDLE_ENFORCE_EQ(new_lod.size(), lod.size(),
+                      platform::errors::InvalidArgument(
+                          "The LoD information of LoDTensor does not match, "
+                          "expected LoD is %s, actual LoD is %s.",
+                          new_lod, lod));
     for (size_t j = 0; j < lod.size(); ++j) {
       auto &sub_lod = new_lod[j];
       size_t offset = sub_lod.back();
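The dimension enforce in `MergeLoDTensor` compares `product(dims) / dims[0]`: tensors are concatenated along the first (batch) dimension, so all remaining dimensions must agree. A small standalone check mirroring that rule, with plain vectors standing in for `framework::DDim`:

```cpp
// Mirrors MergeLoDTensor's shape compatibility rule; std::vector stands in
// for framework::DDim.
#include <cassert>
#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

// product(dims) / dims[0], i.e. the number of elements per batch row.
std::size_t TailProduct(const std::vector<std::size_t>& dims) {
  return std::accumulate(dims.begin() + 1, dims.end(), std::size_t{1},
                         std::multiplies<std::size_t>());
}

int main() {
  std::vector<std::size_t> a = {4, 3, 2};  // 4 rows of shape 3x2
  std::vector<std::size_t> b = {6, 3, 2};  // 6 rows of shape 3x2
  assert(TailProduct(a) == TailProduct(b));  // mergeable along dim 0
  std::size_t merged_rows = a[0] + b[0];     // new_dim[0] += t->dims()[0]
  assert(merged_rows == 10);
  return 0;
}
```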
+ "The input element is %zu, the number of elements in " + "its level is %zu.", + elem, NumElements(level))); return std::make_pair((lod_)[level][elem], (lod_)[level][elem + 1]); } @@ -131,7 +142,12 @@ class LoDTensor : public Tensor { * Number of elements in a level. */ size_t NumElements(size_t level = 0) const { - PADDLE_ENFORCE_LT(level, NumLevels()); + PADDLE_ENFORCE_LT( + level, NumLevels(), + platform::errors::InvalidArgument( + "The input level of LoD is invalid, it should be less than LoD " + "size. The input level is %zu, the LoD size is %zu.", + level, NumLevels())); // the last offset is the end of last element return (lod_)[level].size() - 1; } @@ -172,7 +188,13 @@ LoDTensor LodExpand(const LoDTensor& source, const LoD& lod, size_t level, tensor.Resize(dims); tensor.mutable_data(place); - PADDLE_ENFORCE_EQ(num_instances, lod_level.size() - 1); + PADDLE_ENFORCE_EQ( + num_instances, lod_level.size() - 1, + platform::errors::InvalidArgument( + "The input LoDTensor instance number should be equal to the LoD " + "level size minus 1." + "The input instance number is %zu, LoD level size is %zu.", + num_instances, lod_level.size())); for (size_t ins = 0; ins < num_instances; ins++) { for (size_t elem = lod_level[ins]; elem < lod_level[ins + 1]; elem++) { auto slice = tensor.Slice(elem, elem + 1);