未验证 提交 7e5f74b8 编写于 作者: Chen Weihang 提交者: GitHub

Add examples for error message writing specification - InvalidArgument (#21132)

* add examples for error msg spec, test=develop

* change ENFORCE to ENFORCE_**, test=develop

* fix error, test=develop
上级 a5fc291f
...@@ -86,8 +86,10 @@ void AsyncExecutor::RunFromFile(const ProgramDesc& main_program, ...@@ -86,8 +86,10 @@ void AsyncExecutor::RunFromFile(const ProgramDesc& main_program,
DataFeedDesc data_feed_desc; DataFeedDesc data_feed_desc;
bool success = data_feed_desc.ParseFromString(data_feed_desc_str); bool success = data_feed_desc.ParseFromString(data_feed_desc_str);
PADDLE_ENFORCE(success, "Fail to parse DataFeedDesc from string:\n%s", PADDLE_ENFORCE_EQ(success, true,
data_feed_desc_str.c_str()); platform::errors::InvalidArgument(
"Fail to parse DataFeedDesc from string: %s.",
data_feed_desc_str.c_str()));
actual_thread_num_ = thread_num; actual_thread_num_ = thread_num;
int file_cnt = filelist.size(); int file_cnt = filelist.size();
......
...@@ -105,7 +105,8 @@ bool DataFeed::SetFileList(const std::vector<std::string>& files) { ...@@ -105,7 +105,8 @@ bool DataFeed::SetFileList(const std::vector<std::string>& files) {
} }
void DataFeed::SetBatchSize(int batch_size) { void DataFeed::SetBatchSize(int batch_size) {
PADDLE_ENFORCE(batch_size > 0, "Illegal batch size: %d.", batch_size); PADDLE_ENFORCE_GT(batch_size, 0, platform::errors::InvalidArgument(
"Batch size is illegal.", batch_size));
default_batch_size_ = batch_size; default_batch_size_ = batch_size;
} }
...@@ -556,13 +557,14 @@ bool MultiSlotDataFeed::ParseOneInstanceFromPipe( ...@@ -556,13 +557,14 @@ bool MultiSlotDataFeed::ParseOneInstanceFromPipe(
for (size_t i = 0; i < use_slots_index_.size(); ++i) { for (size_t i = 0; i < use_slots_index_.size(); ++i) {
int idx = use_slots_index_[i]; int idx = use_slots_index_[i];
int num = strtol(&str[pos], &endptr, 10); int num = strtol(&str[pos], &endptr, 10);
PADDLE_ENFORCE( PADDLE_ENFORCE_NE(
num, num, 0,
"The number of ids can not be zero, you need padding " platform::errors::InvalidArgument(
"it in data generator; or if there is something wrong with " "The number of ids can not be zero, you need padding "
"the data, please check if the data contains unresolvable " "it in data generator; or if there is something wrong with "
"characters.\nplease check this error line: %s", "the data, please check if the data contains unresolvable "
str); "characters.\nplease check this error line: %s",
str));
if (idx != -1) { if (idx != -1) {
(*instance)[idx].Init(all_slots_type_[i]); (*instance)[idx].Init(all_slots_type_[i]);
if ((*instance)[idx].GetType()[0] == 'f') { // float if ((*instance)[idx].GetType()[0] == 'f') { // float
......
...@@ -135,9 +135,13 @@ void innerTransDataLayoutFromMKLDNN(DataLayout in_layout, DataLayout out_layout, ...@@ -135,9 +135,13 @@ void innerTransDataLayoutFromMKLDNN(DataLayout in_layout, DataLayout out_layout,
platform::Place place) { platform::Place place) {
#ifdef PADDLE_WITH_MKLDNN #ifdef PADDLE_WITH_MKLDNN
PADDLE_ENFORCE_NE(in.format(), MKLDNNMemoryFormat::format_undef, PADDLE_ENFORCE_NE(in.format(), MKLDNNMemoryFormat::format_undef,
"Input tensor should have specified memory format"); platform::errors::InvalidArgument(
"Input tensor format is invalid. Input tensor should "
"have specified memory format."));
PADDLE_ENFORCE_NE(in.format(), MKLDNNMemoryFormat::any, PADDLE_ENFORCE_NE(in.format(), MKLDNNMemoryFormat::any,
"Input tensor should have specified memory format"); platform::errors::InvalidArgument(
"Input tensor format is invalid. Input tensor should "
"have specified memory format."));
// Set default as NCHW in case not specified // Set default as NCHW in case not specified
out_layout = out_layout =
......
...@@ -50,7 +50,8 @@ EagerDeletionOpHandle::EagerDeletionOpHandle( ...@@ -50,7 +50,8 @@ EagerDeletionOpHandle::EagerDeletionOpHandle(
} }
} }
#endif #endif
PADDLE_ENFORCE(!vars.empty(), "Var names cannot be empty"); PADDLE_ENFORCE_NE(vars.empty(), true, platform::errors::InvalidArgument(
"Variable names are empty."));
for (auto *var : var_infos_) { for (auto *var : var_infos_) {
PADDLE_ENFORCE_NOT_NULL(var); PADDLE_ENFORCE_NOT_NULL(var);
} }
......
...@@ -54,8 +54,9 @@ struct OpInfo { ...@@ -54,8 +54,9 @@ struct OpInfo {
// Returns the registered OpProto for this operator.
// Raises InvalidArgument if the proto has not been registered (null) or has
// been registered but never initialized.
const proto::OpProto& Proto() const {
  // Migrated to platform::errors::* to match the error-message specification
  // used by the other checks in this change set (the legacy bare-string form
  // was left behind here).
  PADDLE_ENFORCE_NOT_NULL(
      proto_, platform::errors::InvalidArgument(
                  "Operator's Proto has not been registered."));
  PADDLE_ENFORCE_EQ(proto_->IsInitialized(), true,
                    platform::errors::InvalidArgument(
                        "Operator's Proto in op info is not initialized."));
  return *proto_;
}
......
...@@ -1147,17 +1147,21 @@ void OperatorWithKernel::ParseInputDataType( ...@@ -1147,17 +1147,21 @@ void OperatorWithKernel::ParseInputDataType(
t = &(var->Get<SelectedRows>().value()); t = &(var->Get<SelectedRows>().value());
} }
if (t != nullptr) { if (t != nullptr) {
PADDLE_ENFORCE_EQ(t->IsInitialized(), true, PADDLE_ENFORCE_EQ(
"The Tensor in the %s Op's Input Variable %s(%s) is " t->IsInitialized(), true,
"not initialized.", platform::errors::InvalidArgument(
Type(), name, ctx.Inputs(name).at(i)); "The Tensor in the %s Op's Input Variable %s(%s) is "
"not initialized.",
Type(), name, ctx.Inputs(name).at(i)));
proto::VarType::Type tmp = t->type(); proto::VarType::Type tmp = t->type();
PADDLE_ENFORCE(tmp == *data_type || *data_type == dafault_data_type, PADDLE_ENFORCE(
"The DataType of %s Op's duplicable Variable %s must be " tmp == *data_type || *data_type == dafault_data_type,
"consistent. The current variable type is (%s), but the " platform::errors::InvalidArgument(
"previous variable type is (%s).", "The DataType of %s Op's duplicable Variable %s must be "
Type(), name, DataTypeToString(tmp), "consistent. The current variable type is (%s), but the "
DataTypeToString(*data_type)); "previous variable type is (%s).",
Type(), name, DataTypeToString(tmp),
DataTypeToString(*data_type)));
*data_type = tmp; *data_type = tmp;
} }
} }
......
...@@ -25,9 +25,12 @@ inline const T* Tensor::data() const { ...@@ -25,9 +25,12 @@ inline const T* Tensor::data() const {
check_memory_size(); check_memory_size();
bool valid = bool valid =
std::is_same<T, void>::value || type_ == DataTypeTrait<T>::DataType(); std::is_same<T, void>::value || type_ == DataTypeTrait<T>::DataType();
PADDLE_ENFORCE( PADDLE_ENFORCE_EQ(
valid, "Tensor holds the wrong type, it holds %s, but desires to be %s", valid, true,
DataTypeToString(type_), DataTypeToString(DataTypeTrait<T>::DataType())); platform::errors::InvalidArgument(
"Tensor holds the wrong type, it holds %s, but desires to be %s.",
DataTypeToString(type_),
DataTypeToString(DataTypeTrait<T>::DataType())));
return reinterpret_cast<const T*>( return reinterpret_cast<const T*>(
reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_); reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
......
...@@ -173,15 +173,16 @@ class ReshapeOp : public framework::OperatorWithKernel { ...@@ -173,15 +173,16 @@ class ReshapeOp : public framework::OperatorWithKernel {
// capacity = -24, in_size = -8, output_shape[0] = 0 // capacity = -24, in_size = -8, output_shape[0] = 0
// the following check will fail. // the following check will fail.
output_shape[unk_dim_idx] = -in_size / capacity; output_shape[unk_dim_idx] = -in_size / capacity;
PADDLE_ENFORCE_EQ(output_shape[unk_dim_idx] * capacity, -in_size, PADDLE_ENFORCE_EQ(
"ShapeError: The 'shape' in ReshapeOp is invalid. " output_shape[unk_dim_idx] * capacity, -in_size,
"The input tensor X'size must be divisible by known " platform::errors::InvalidArgument(
"capacity of 'shape'. " "The 'shape' attribute in ReshapeOp is invalid. "
"But received X's shape = [%s], X's size = %d, " "The input tensor X'size must be divisible by known "
"'shape' is [%s], known " "capacity of 'shape'. "
"capacity of 'shape' is %d.", "But received X's shape = [%s], X's size = %d, "
in_dims, in_size, framework::make_ddim(shape), "'shape' is [%s], known "
capacity); "capacity of 'shape' is %d.",
in_dims, in_size, framework::make_ddim(shape), capacity));
} else { } else {
output_shape[unk_dim_idx] = -1; output_shape[unk_dim_idx] = -1;
} }
......
...@@ -38,9 +38,9 @@ class SequencePoolKernel : public framework::OpKernel<T> { ...@@ -38,9 +38,9 @@ class SequencePoolKernel : public framework::OpKernel<T> {
auto lod = in->lod(); auto lod = in->lod();
auto lod_level = lod.size(); auto lod_level = lod.size();
// InferShape by lod // InferShape by lod
PADDLE_ENFORCE_GT( PADDLE_ENFORCE_GT(lod_level, 0, platform::errors::InvalidArgument(
lod_level, 0, "Input(X) Tensor of SequencePoolOp "
"Input(X) Tensor of SequencePoolOp does not contain LoD information."); "does not contain LoD information."));
PADDLE_ENFORCE_LE(lod_level, 2UL, PADDLE_ENFORCE_LE(lod_level, 2UL,
"The lod level of input shall be no more than 2."); "The lod level of input shall be no more than 2.");
PADDLE_ENFORCE_GE( PADDLE_ENFORCE_GE(
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请注册