From 48b9a56f1cd3e0f4bb477bc2019b5b1f9d1824bf Mon Sep 17 00:00:00 2001
From: Chen Weihang
Date: Fri, 31 Jul 2020 11:23:09 +0800
Subject: [PATCH] Polish framework error message - part 4 (#25807)

* polish framework error message part 4

* fix type error

* fix message error

* polish by review comments
---
 paddle/fluid/framework/mixed_vector.h          | 34 +++++++++++--------
 paddle/fluid/framework/naive_executor.cc       | 15 +++++---
 .../no_need_buffer_vars_inference.cc           |  5 +--
 .../framework/no_need_buffer_vars_inference.h  | 20 ++++++++---
 paddle/fluid/framework/op_compatible_info.cc   | 16 +++++----
 paddle/fluid/framework/operator.h              | 21 ++++++++----
 paddle/fluid/framework/operator_test.cc        | 20 +++++------
 7 files changed, 84 insertions(+), 47 deletions(-)

diff --git a/paddle/fluid/framework/mixed_vector.h b/paddle/fluid/framework/mixed_vector.h
index 185ebbcd3c..280996d34d 100644
--- a/paddle/fluid/framework/mixed_vector.h
+++ b/paddle/fluid/framework/mixed_vector.h
@@ -155,8 +155,10 @@ class Vector {
 
   // get cuda ptr. immutable
   const T *CUDAData(platform::Place place) const {
-    PADDLE_ENFORCE(platform::is_gpu_place(place),
-                   "CUDA Data must on CUDA place");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(place), true,
+        platform::errors::Unavailable(
+            "Place mismatch, CUDA Data must be on CUDA place."));
     ImmutableCUDA(place);
     return reinterpret_cast<T *>(gpu_->ptr());
   }
@@ -234,7 +236,8 @@ class Vector {
         UnsetFlag(kDirty);
         SetFlag(kDataInCUDA);
       } else if (IsInCUDA() && !(place == gpu_->place())) {
-        PADDLE_THROW("This situation should not happen");
+        PADDLE_THROW(
+            platform::errors::Unavailable("Unexpected data place mismatch."));
         // Still dirty
       } else {
         // Dirty && DataInCUDA && Device is same
@@ -246,7 +249,8 @@ class Vector {
         CopyCPUDataToCUDA(place);
         SetFlag(kDataInCUDA);
       } else if (!(place == gpu_->place())) {
-        PADDLE_THROW("This situation should not happen.");
+        PADDLE_THROW(
+            platform::errors::Unavailable("Unexpected data place mismatch."));
       } else {
         // Not Dirty && DataInCUDA && Device is same
         // Do nothing.
@@ -501,27 +505,29 @@ class CPUVector : public std::vector<T, std::allocator<T>> {
   }
 
   const T *CUDAData(platform::Place place) const {
-    PADDLE_THROW(
-        "Vector::CUDAData() method is not supported in CPU-only version");
+    PADDLE_THROW(platform::errors::Unavailable(
+        "Vector::CUDAData() method is not supported in CPU-only version."));
   }
 
   T *CUDAMutableData(platform::Place place) {
-    PADDLE_THROW(
+    PADDLE_THROW(platform::errors::Unavailable(
         "Vector::CUDAMutableData() method is not supported in CPU-only "
-        "version");
+        "version."));
   }
 
   const T *Data(platform::Place place) const {
-    PADDLE_ENFORCE(
-        platform::is_cpu_place(place),
-        "Vector::Data() method is not supported when not in CPUPlace");
+    PADDLE_ENFORCE_EQ(
+        platform::is_cpu_place(place), true,
+        platform::errors::Unavailable(
+            "Vector::Data() method is not supported when not in CPUPlace."));
     return this->data();
   }
 
   T *MutableData(platform::Place place) {
-    PADDLE_ENFORCE(
-        platform::is_cpu_place(place),
-        "Vector::MutableData() method is not supported when not in CPUPlace");
+    PADDLE_ENFORCE_EQ(
+        platform::is_cpu_place(place), true,
+        platform::errors::Unavailable("Vector::MutableData() method is not "
+                                      "supported when not in CPUPlace."));
     return this->data();
   }
 
diff --git a/paddle/fluid/framework/naive_executor.cc b/paddle/fluid/framework/naive_executor.cc
index 6544c1aa7b..be405a2cfb 100644
--- a/paddle/fluid/framework/naive_executor.cc
+++ b/paddle/fluid/framework/naive_executor.cc
@@ -54,12 +54,16 @@ void NaiveExecutor::Run() {
 
 void NaiveExecutor::CreateVariables(const ProgramDesc &desc, int block_id,
                                     bool persistable, Scope *scope) {
-  PADDLE_ENFORCE_NOT_NULL(scope);
+  PADDLE_ENFORCE_NOT_NULL(scope,
+                          platform::errors::InvalidArgument(
+                              "The Scope to hold variables is nullptr."));
 
   auto &global_block = desc.Block(block_id);
 
   const auto *anc = scope;
-  PADDLE_ENFORCE(anc->parent() != anc);
+  PADDLE_ENFORCE_NE(
+      anc->parent(), anc,
+      platform::errors::InvalidArgument("Input scope should be child scope."));
   while (anc->parent()) {
     anc = anc->parent();
   }
@@ -104,9 +108,12 @@ void NaiveExecutor::CreateOps(const ProgramDesc &desc, int block_id,
 }
 
 LoDTensor *NaiveExecutor::FindTensor(const std::string &name) {
-  PADDLE_ENFORCE(scope_, "Need to init scope first");
+  PADDLE_ENFORCE_NOT_NULL(scope_,
+                          platform::errors::PreconditionNotMet(
+                              "Need to init scope in NaiveExecutor firstly."));
   auto *var = scope_->FindVar(name);
-  PADDLE_ENFORCE(var, "No variable [%s] in the scope");
+  PADDLE_ENFORCE_NOT_NULL(var, platform::errors::NotFound(
+                                   "No variable [%s] in current scope.", name));
   auto *tensor = const_cast<LoDTensor *>(&var->Get<LoDTensor>());
   return tensor;
 }
diff --git a/paddle/fluid/framework/no_need_buffer_vars_inference.cc b/paddle/fluid/framework/no_need_buffer_vars_inference.cc
index 07b84a151f..25f64838c6 100644
--- a/paddle/fluid/framework/no_need_buffer_vars_inference.cc
+++ b/paddle/fluid/framework/no_need_buffer_vars_inference.cc
@@ -23,8 +23,9 @@ namespace framework {
 const Attribute &InferNoNeedBufferVarsContext::GetAttr(
     const std::string &name) const {
   auto iter = attrs_.find(name);
-  PADDLE_ENFORCE_EQ(iter != attrs_.end(), true, "Cannot find attribute %s",
-                    name);
+  PADDLE_ENFORCE_NE(
+      iter, attrs_.end(),
+      platform::errors::NotFound("Cannot find attribute (%s).", name));
   return iter->second;
 }
 
diff --git a/paddle/fluid/framework/no_need_buffer_vars_inference.h b/paddle/fluid/framework/no_need_buffer_vars_inference.h
index ace2b23715..5d30f34090 100644
--- a/paddle/fluid/framework/no_need_buffer_vars_inference.h
+++ b/paddle/fluid/framework/no_need_buffer_vars_inference.h
@@ -101,7 +101,10 @@ class InferNoNeedBufferVarsFN {
   inline const std::unordered_set<std::string> &operator()(
       const VariableNameMap &inputs, const VariableNameMap &outputs,
       const AttributeMap &attrs) const {
-    PADDLE_ENFORCE_NOT_NULL(inferer_);
+    PADDLE_ENFORCE_NOT_NULL(
+        inferer_,
+        platform::errors::PreconditionNotMet(
+            "The `inferer_` of InferNoNeedBufferVarsFN is not initialized."));
     StaticGraphInferNoNeedBufferVarsContext ctx(inputs, outputs, attrs);
     return (*inferer_)(ctx);
   }
@@ -110,7 +113,10 @@ class InferNoNeedBufferVarsFN {
       const imperative::NameVarMap<VarType> &inputs,
       const imperative::NameVarMap<VarType> &outputs,
       const AttributeMap &attrs) const {
-    PADDLE_ENFORCE_NOT_NULL(inferer_);
+    PADDLE_ENFORCE_NOT_NULL(
+        inferer_,
+        platform::errors::PreconditionNotMet(
+            "The `inferer_` of InferNoNeedBufferVarsFN is not initialized."));
     DyGraphInferNoNeedBufferVarsContext ctx(inputs, outputs, attrs);
     return (*inferer_)(ctx);
   }
@@ -120,8 +126,14 @@ class InferNoNeedBufferVarsFN {
   inline bool operator!() const { return inferer_ == nullptr; }
 
   inline void Reset(const std::shared_ptr<NoNeedBufferVarsInference> &inferer) {
-    PADDLE_ENFORCE_NOT_NULL(inferer);
-    PADDLE_ENFORCE_EQ(inferer_, nullptr);
+    PADDLE_ENFORCE_NOT_NULL(
+        inferer, platform::errors::InvalidArgument("The input inferer of "
+                                                   "InferNoNeedBufferVarsFN::"
+                                                   "Reset is nullptr."));
+    PADDLE_ENFORCE_EQ(
+        inferer_, nullptr,
+        platform::errors::AlreadyExists(
+            "The `inferer_` of InferNoNeedBufferVarsFN has been initialized."));
     inferer_ = inferer;
   }
 
diff --git a/paddle/fluid/framework/op_compatible_info.cc b/paddle/fluid/framework/op_compatible_info.cc
index 934f682811..826e14dedb 100644
--- a/paddle/fluid/framework/op_compatible_info.cc
+++ b/paddle/fluid/framework/op_compatible_info.cc
@@ -24,9 +24,10 @@ namespace framework {
 
 inline std::vector<int> ConvertStr2Int(const std::string& str_text) {
   auto vec_text = string::split_string<std::string>(str_text, ".");
-  PADDLE_ENFORCE((vec_text.size() == 2 || vec_text.size() == 3),
-                 "Input[%s] is not a right version format [1.6 or 1.6.0]",
-                 str_text);
+  PADDLE_ENFORCE(
+      (vec_text.size() == 2 || vec_text.size() == 3),
+      platform::errors::InvalidArgument(
+          "Input[%s] is not a right version format [1.6 or 1.6.0].", str_text));
 
   std::vector<int> vec_res;
   vec_res.reserve(3);
@@ -49,10 +50,11 @@ inline bool CompareVersion(const std::string& str_first,
   auto vec_second_version = ConvertStr2Int(str_second);
 
   // first version id
-  PADDLE_ENFORCE_EQ(
-      vec_first_version.size(), vec_second_version.size(),
-      "version information size not equal, first is [%d] second is [%d]",
-      vec_first_version.size(), vec_second_version.size());
+  PADDLE_ENFORCE_EQ(vec_first_version.size(), vec_second_version.size(),
+                    platform::errors::InvalidArgument(
+                        "Version information size is not equal, the first is "
+                        "[%d], the second is [%d].",
+                        vec_first_version.size(), vec_second_version.size()));
 
   for (size_t i = 0; i < vec_first_version.size() - 1; ++i) {
     if (vec_first_version[i] != vec_second_version[i]) {
diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index 8cff646186..709f132813 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -155,8 +155,9 @@ class OperatorBase {
   bool HasAttr(const std::string& name) const { return attrs_.count(name); }
   template <typename T>
   inline const T& Attr(const std::string& name) const {
-    PADDLE_ENFORCE(attrs_.find(name) != attrs_.end(),
-                   "%s should be in AttributeMap", name);
+    PADDLE_ENFORCE_NE(
+        attrs_.find(name), attrs_.end(),
+        platform::errors::NotFound("(%s) is not found in AttributeMap.", name));
     return BOOST_GET_CONST(T, attrs_.at(name));
   }
   const AttributeMap& Attrs() const { return attrs_; }
@@ -165,7 +166,9 @@ class OperatorBase {
   const VariableNameMap& Outputs() const { return outputs_; }
 
   const OpInfo& Info() const {
-    PADDLE_ENFORCE_NOT_NULL(info_, "OpInfo of %s is not found", type_);
+    PADDLE_ENFORCE_NOT_NULL(
+        info_, platform::errors::NotFound(
+                   "OpInfo of operator (%s) is not found.", type_));
     return *info_;
   }
 
@@ -369,7 +372,9 @@ class ExecutionContext {
 
 #ifdef PADDLE_WITH_CUDA
   const inline platform::CUDADeviceContext& cuda_device_context() const {
-    PADDLE_ENFORCE_EQ(platform::is_gpu_place(device_context_.GetPlace()), true);
+    PADDLE_ENFORCE_EQ(platform::is_gpu_place(device_context_.GetPlace()), true,
+                      platform::errors::PreconditionNotMet(
+                          "Current device context place is not GPUPlace."));
     return *reinterpret_cast<const platform::CUDADeviceContext*>(
         &device_context_);
   }
@@ -384,8 +389,12 @@ class ExecutionContext {
     auto shared_allocation = std::shared_ptr<memory::allocation::Allocation>(
         allocation_ptr, deleter);
 
-    PADDLE_ENFORCE_GE(allocation_ptr->size(),
-                      framework::product(dim) * sizeof(T));
+    PADDLE_ENFORCE_GE(
+        allocation_ptr->size(), framework::product(dim) * sizeof(T),
+        platform::errors::PreconditionNotMet(
+            "The data memory size(%d) is less than the tensor needed memory "
+            "size(%d).",
+            allocation_ptr->size(), framework::product(dim) * sizeof(T)));
 
     paddle::framework::Tensor temp_tensor(
         framework::ToDataType(std::type_index(typeid(T))));
diff --git a/paddle/fluid/framework/operator_test.cc b/paddle/fluid/framework/operator_test.cc
index b3ad316c96..c4ce627ff1 100644
--- a/paddle/fluid/framework/operator_test.cc
+++ b/paddle/fluid/framework/operator_test.cc
@@ -16,6 +16,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_info.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/platform/errors.h"
 #include "paddle/fluid/platform/init.h"
 
 DECLARE_bool(enable_unused_var_check);
@@ -546,12 +547,13 @@ class GetLoDLevelTest : public OperatorWithKernel {
 
  protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInputs("X"), true,
-                      "Input(X) should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) should not be null.");
-    PADDLE_ENFORCE_GT(ctx->GetLoDLevel("X"), 0,
-                      "The LoD level Input(X) should be larger than 0.");
+    OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "GetLoDLevelTest");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "GetLoDLevelTest");
+
+    auto lod_level = ctx->GetLoDLevel("X");
+    PADDLE_ENFORCE_GT(lod_level, 0,
+                      paddle::platform::errors::InvalidArgument(
+                          "The LoD level Input(X) should be larger than 0."));
   }
 };
 
@@ -561,10 +563,8 @@ class SetLoDLevelTest : public OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInputs("X"), true,
-                      "Input(X) should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "SetLoDLevelTest");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SetLoDLevelTest");
     ctx->SetLoDLevel("Out", 1);
   }
 };
-- 
GitLab
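
Note: every conversion in this patch follows the same recipe: a typed
PADDLE_ENFORCE_* or PADDLE_ENFORCE_NOT_NULL macro plus a platform::errors::*
payload whose message is a full sentence ending in a period, reporting the
received value where possible. A minimal sketch of that recipe follows; the
CheckInputRank() helper and its arguments are hypothetical, for illustration
only, and are not part of this patch.

    #include "paddle/fluid/platform/enforce.h"
    #include "paddle/fluid/platform/errors.h"

    namespace paddle {
    namespace framework {

    // Hypothetical helper showing the error-message style used in this patch.
    void CheckInputRank(const int* rank_ptr) {
      // Pointer checks: PADDLE_ENFORCE_NOT_NULL with a typed error payload
      // instead of a bare message string.
      PADDLE_ENFORCE_NOT_NULL(
          rank_ptr,
          platform::errors::InvalidArgument(
              "The input rank pointer of CheckInputRank is nullptr."));
      // Comparisons: PADDLE_ENFORCE_{EQ,NE,GT,GE,...} instead of the
      // deprecated PADDLE_ENFORCE(cond, msg); the message reports the
      // received value via printf-style formatting.
      PADDLE_ENFORCE_GT(*rank_ptr, 0,
                        platform::errors::InvalidArgument(
                            "The rank of the input should be greater than 0, "
                            "but received %d.", *rank_ptr));
    }

    }  // namespace framework
    }  // namespace paddle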