Unverified commit 48b9a56f, authored by Chen Weihang, committed by GitHub

Polish framework error message - part 4 (#25807)

* polish framework error message part 4

* fix type error

* fix message error

* polish by review comments
Parent e52dae6e
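Every hunk in this commit follows the same pattern: an untyped `PADDLE_ENFORCE(cond, msg)` (or a check with no message at all) becomes a typed macro (`PADDLE_ENFORCE_EQ/NE/GT/GE/NOT_NULL`) plus a structured `platform::errors::*` object that names an error class such as `InvalidArgument`, `NotFound`, `PreconditionNotMet`, `AlreadyExists`, or `Unavailable`. A minimal sketch of the pattern, assuming Paddle's enforce/errors headers are included; `src_size` and `dst_size` are hypothetical variables:

    // Before: untyped check with a bare printf-style message.
    // PADDLE_ENFORCE(src_size == dst_size, "size mismatch %d vs %d", src_size, dst_size);

    // After: typed comparison macro plus a structured error object.
    PADDLE_ENFORCE_EQ(src_size, dst_size,
                      platform::errors::InvalidArgument(
                          "Size mismatch, the first is [%d], the second is [%d].",
                          src_size, dst_size));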
@@ -155,8 +155,10 @@ class Vector {
   // get cuda ptr. immutable
   const T *CUDAData(platform::Place place) const {
-    PADDLE_ENFORCE(platform::is_gpu_place(place),
-                   "CUDA Data must on CUDA place");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(place), true,
+        platform::errors::Unavailable(
+            "Place mismatch, CUDA Data must be on CUDA place."));
     ImmutableCUDA(place);
     return reinterpret_cast<T *>(gpu_->ptr());
   }
@@ -234,7 +236,8 @@ class Vector {
       UnsetFlag(kDirty);
       SetFlag(kDataInCUDA);
     } else if (IsInCUDA() && !(place == gpu_->place())) {
-      PADDLE_THROW("This situation should not happen");
+      PADDLE_THROW(
+          platform::errors::Unavailable("Unexpected data place mismatch."));
       // Still dirty
     } else {
       // Dirty && DataInCUDA && Device is same
@@ -246,7 +249,8 @@ class Vector {
       CopyCPUDataToCUDA(place);
       SetFlag(kDataInCUDA);
     } else if (!(place == gpu_->place())) {
-      PADDLE_THROW("This situation should not happen.");
+      PADDLE_THROW(
+          platform::errors::Unavailable("Unexpected data place mismatch."));
     } else {
       // Not Dirty && DataInCUDA && Device is same
       // Do nothing.
@@ -501,27 +505,29 @@ class CPUVector : public std::vector<T, std::allocator<T>> {
   }

   const T *CUDAData(platform::Place place) const {
-    PADDLE_THROW(
-        "Vector::CUDAData() method is not supported in CPU-only version");
+    PADDLE_THROW(platform::errors::Unavailable(
+        "Vector::CUDAData() method is not supported in CPU-only version."));
   }

   T *CUDAMutableData(platform::Place place) {
-    PADDLE_THROW(
-        "Vector::CUDAMutableData() method is not supported in CPU-only "
-        "version");
+    PADDLE_THROW(platform::errors::Unavailable(
+        "Vector::CUDAMutableData() method is not supported in CPU-only "
+        "version."));
   }

   const T *Data(platform::Place place) const {
-    PADDLE_ENFORCE(
-        platform::is_cpu_place(place),
-        "Vector::Data() method is not supported when not in CPUPlace");
+    PADDLE_ENFORCE_EQ(
+        platform::is_cpu_place(place), true,
+        platform::errors::Unavailable(
+            "Vector::Data() method is not supported when not in CPUPlace."));
     return this->data();
   }

   T *MutableData(platform::Place place) {
-    PADDLE_ENFORCE(
-        platform::is_cpu_place(place),
-        "Vector::MutableData() method is not supported when not in CPUPlace");
+    PADDLE_ENFORCE_EQ(
+        platform::is_cpu_place(place), true,
+        platform::errors::Unavailable("Vector::MutableData() method is not "
+                                      "supported when not in CPUPlace."));
     return this->data();
   }
......
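The CPU-only stubs above now raise `errors::Unavailable` instead of an untyped exception. A hedged usage sketch under the CPU-only build (assumes `CPUVector` is reachable as `paddle::framework::CPUVector` and that `CPUPlace` converts implicitly to `platform::Place`):

    paddle::framework::CPUVector<int> vec;
    vec.push_back(1);
    paddle::platform::CPUPlace cpu_place;
    const int *p = vec.Data(cpu_place);  // OK: the place is a CPUPlace.
    // vec.CUDAData(cpu_place);          // would throw errors::Unavailable here.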
@@ -54,12 +54,16 @@ void NaiveExecutor::Run() {
 void NaiveExecutor::CreateVariables(const ProgramDesc &desc, int block_id,
                                     bool persistable, Scope *scope) {
-  PADDLE_ENFORCE_NOT_NULL(scope);
+  PADDLE_ENFORCE_NOT_NULL(scope,
+                          platform::errors::InvalidArgument(
+                              "The Scope to hold variables is nullptr."));
   auto &global_block = desc.Block(block_id);

   const auto *anc = scope;
-  PADDLE_ENFORCE(anc->parent() != anc);
+  PADDLE_ENFORCE_NE(
+      anc->parent(), anc,
+      platform::errors::InvalidArgument("Input scope should be child scope."));
   while (anc->parent()) {
     anc = anc->parent();
   }
@@ -104,9 +108,12 @@ void NaiveExecutor::CreateOps(const ProgramDesc &desc, int block_id,
 }

 LoDTensor *NaiveExecutor::FindTensor(const std::string &name) {
-  PADDLE_ENFORCE(scope_, "Need to init scope first");
+  PADDLE_ENFORCE_NOT_NULL(scope_,
+                          platform::errors::PreconditionNotMet(
+                              "Need to init scope in NaiveExecutor firstly."));
   auto *var = scope_->FindVar(name);
-  PADDLE_ENFORCE(var, "No variable [%s] in the scope");
+  PADDLE_ENFORCE_NOT_NULL(var, platform::errors::NotFound(
+                                   "No variable [%s] in current scope.", name));
   auto *tensor = const_cast<LoDTensor *>(&var->Get<LoDTensor>());
   return tensor;
 }
......
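Note how the error classes are picked in the two `NaiveExecutor` hunks: a null pointer passed in by the caller is `InvalidArgument`, a member that was never initialized is `PreconditionNotMet`, and a failed lookup is `NotFound`. A sketch of the same three-way split, assuming the Paddle enforce macros; `Runner`, `Input`, `config_`, and `FindItem` are hypothetical:

    void Runner::Run(const Input *input, const std::string &key) {
      // Bad argument from the caller.
      PADDLE_ENFORCE_NOT_NULL(input, platform::errors::InvalidArgument(
                                         "The input of Runner::Run is nullptr."));
      // Internal state that should have been set up earlier.
      PADDLE_ENFORCE_NOT_NULL(config_, platform::errors::PreconditionNotMet(
                                           "Need to init config before Run."));
      // A lookup that came back empty.
      auto *item = FindItem(key);
      PADDLE_ENFORCE_NOT_NULL(item, platform::errors::NotFound(
                                        "No item [%s] in current table.", key));
    }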
@@ -23,8 +23,9 @@ namespace framework {
 const Attribute &InferNoNeedBufferVarsContext::GetAttr(
     const std::string &name) const {
   auto iter = attrs_.find(name);
-  PADDLE_ENFORCE_EQ(iter != attrs_.end(), true, "Cannot find attribute %s",
-                    name);
+  PADDLE_ENFORCE_NE(
+      iter, attrs_.end(),
+      platform::errors::NotFound("Cannot find attribute (%s).", name));
   return iter->second;
 }
......
@@ -101,7 +101,10 @@ class InferNoNeedBufferVarsFN {
   inline const std::unordered_set<std::string> &operator()(
       const VariableNameMap &inputs, const VariableNameMap &outputs,
       const AttributeMap &attrs) const {
-    PADDLE_ENFORCE_NOT_NULL(inferer_);
+    PADDLE_ENFORCE_NOT_NULL(
+        inferer_,
+        platform::errors::PreconditionNotMet(
+            "The `inferer_` of InferNoNeedBufferVarsFN is not initialized."));
     StaticGraphInferNoNeedBufferVarsContext ctx(inputs, outputs, attrs);
     return (*inferer_)(ctx);
   }
@@ -110,7 +113,10 @@ class InferNoNeedBufferVarsFN {
       const imperative::NameVarMap<imperative::VariableWrapper> &inputs,
       const imperative::NameVarMap<imperative::VariableWrapper> &outputs,
       const AttributeMap &attrs) const {
-    PADDLE_ENFORCE_NOT_NULL(inferer_);
+    PADDLE_ENFORCE_NOT_NULL(
+        inferer_,
+        platform::errors::PreconditionNotMet(
+            "The `inferer_` of InferNoNeedBufferVarsFN is not initialized."));
     DyGraphInferNoNeedBufferVarsContext ctx(inputs, outputs, attrs);
     return (*inferer_)(ctx);
   }
@@ -120,8 +126,14 @@ class InferNoNeedBufferVarsFN {
   inline bool operator!() const { return inferer_ == nullptr; }

   inline void Reset(const std::shared_ptr<NoNeedBufferVarsInference> &inferer) {
-    PADDLE_ENFORCE_NOT_NULL(inferer);
-    PADDLE_ENFORCE_EQ(inferer_, nullptr);
+    PADDLE_ENFORCE_NOT_NULL(
+        inferer, platform::errors::InvalidArgument("The input inferer of "
+                                                   "InferNoNeedBufferVarsFN::"
+                                                   "Reset is nullptr."));
+    PADDLE_ENFORCE_EQ(
+        inferer_, nullptr,
+        platform::errors::AlreadyExists(
+            "The `inferer_` of InferNoNeedBufferVarsFN has been initialized."));
     inferer_ = inferer;
   }
......
@@ -24,9 +24,10 @@ namespace framework {
 inline std::vector<int> ConvertStr2Int(const std::string& str_text) {
   auto vec_text = string::split_string<std::string>(str_text, ".");
-  PADDLE_ENFORCE((vec_text.size() == 2 || vec_text.size() == 3),
-                 "Input[%s] is not a right version format [1.6 or 1.6.0]",
-                 str_text);
+  PADDLE_ENFORCE(
+      (vec_text.size() == 2 || vec_text.size() == 3),
+      platform::errors::InvalidArgument(
+          "Input[%s] is not a right version format [1.6 or 1.6.0].", str_text));

   std::vector<int> vec_res;
   vec_res.reserve(3);
@@ -49,10 +50,11 @@ inline bool CompareVersion(const std::string& str_first,
   auto vec_second_version = ConvertStr2Int(str_second);

   // first version id
-  PADDLE_ENFORCE_EQ(
-      vec_first_version.size(), vec_second_version.size(),
-      "version information size not equal, first is [%d] second is [%d]",
-      vec_first_version.size(), vec_second_version.size());
+  PADDLE_ENFORCE_EQ(vec_first_version.size(), vec_second_version.size(),
+                    platform::errors::InvalidArgument(
+                        "Version information size is not equal, the first is "
+                        "[%d], the second is [%d].",
+                        vec_first_version.size(), vec_second_version.size()));

   for (size_t i = 0; i < vec_first_version.size() - 1; ++i) {
     if (vec_first_version[i] != vec_second_version[i]) {
......
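The `ConvertStr2Int`/`CompareVersion` pair above parses dotted version strings and compares them component-wise. A self-contained sketch of the same idea in standard C++ (no Paddle dependencies; the function names are hypothetical stand-ins, not Paddle APIs):

    #include <sstream>
    #include <string>
    #include <vector>

    // Split "1.6.0" into {1, 6, 0}; mirrors ConvertStr2Int above.
    std::vector<int> ParseVersion(const std::string& text) {
      std::vector<int> parts;
      std::stringstream ss(text);
      std::string item;
      while (std::getline(ss, item, '.')) parts.push_back(std::stoi(item));
      return parts;
    }

    // True iff `first` >= `second`, comparing components left to right.
    bool VersionAtLeast(const std::string& first, const std::string& second) {
      auto a = ParseVersion(first), b = ParseVersion(second);
      a.resize(3, 0);  // pad "1.6" to behave like "1.6.0"
      b.resize(3, 0);
      return a >= b;   // std::vector compares lexicographically
    }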
@@ -155,8 +155,9 @@ class OperatorBase {
   bool HasAttr(const std::string& name) const { return attrs_.count(name); }
   template <typename T>
   inline const T& Attr(const std::string& name) const {
-    PADDLE_ENFORCE(attrs_.find(name) != attrs_.end(),
-                   "%s should be in AttributeMap", name);
+    PADDLE_ENFORCE_NE(
+        attrs_.find(name), attrs_.end(),
+        platform::errors::NotFound("(%s) is not found in AttributeMap.", name));
     return BOOST_GET_CONST(T, attrs_.at(name));
   }
   const AttributeMap& Attrs() const { return attrs_; }
@@ -165,7 +166,9 @@ class OperatorBase {
   const VariableNameMap& Outputs() const { return outputs_; }

   const OpInfo& Info() const {
-    PADDLE_ENFORCE_NOT_NULL(info_, "OpInfo of %s is not found", type_);
+    PADDLE_ENFORCE_NOT_NULL(
+        info_, platform::errors::NotFound(
+                   "OpInfo of operator (%s) is not found.", type_));
     return *info_;
   }
@@ -369,7 +372,9 @@ class ExecutionContext {
 #ifdef PADDLE_WITH_CUDA
   const inline platform::CUDADeviceContext& cuda_device_context() const {
-    PADDLE_ENFORCE_EQ(platform::is_gpu_place(device_context_.GetPlace()), true);
+    PADDLE_ENFORCE_EQ(platform::is_gpu_place(device_context_.GetPlace()), true,
+                      platform::errors::PreconditionNotMet(
+                          "Current device context place is not GPUPlace."));
     return *reinterpret_cast<const platform::CUDADeviceContext*>(
         &device_context_);
   }
@@ -384,8 +389,12 @@ class ExecutionContext {
     auto shared_allocation = std::shared_ptr<memory::allocation::Allocation>(
         allocation_ptr, deleter);

-    PADDLE_ENFORCE_GE(allocation_ptr->size(),
-                      framework::product(dim) * sizeof(T));
+    PADDLE_ENFORCE_GE(
+        allocation_ptr->size(), framework::product(dim) * sizeof(T),
+        platform::errors::PreconditionNotMet(
+            "The data memory size(%d) is less than the tensor needed memory "
+            "size(%d).",
+            allocation_ptr->size(), framework::product(dim) * sizeof(T)));

     paddle::framework::Tensor temp_tensor(
         framework::ToDataType(std::type_index(typeid(T))));
......
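The `PADDLE_ENFORCE_GE` above guards reusing an external allocation as tensor storage: the buffer must hold at least `product(dim) * sizeof(T)` bytes, and the polished message now reports both sizes. A standalone sketch of the same arithmetic in plain C++ (no Paddle dependencies; all names are hypothetical):

    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <numeric>
    #include <stdexcept>
    #include <vector>

    template <typename T>
    void CheckAllocationFits(std::size_t allocation_bytes,
                             const std::vector<int64_t>& dim) {
      // Needed bytes = number of elements (product of all dims) * element size.
      const std::size_t needed =
          std::accumulate(dim.begin(), dim.end(), static_cast<int64_t>(1),
                          std::multiplies<int64_t>()) *
          sizeof(T);
      if (allocation_bytes < needed) {
        throw std::length_error("data memory size is less than the tensor needs");
      }
    }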
@@ -16,6 +16,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_info.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/platform/errors.h"
 #include "paddle/fluid/platform/init.h"

 DECLARE_bool(enable_unused_var_check);
@@ -546,12 +547,13 @@ class GetLoDLevelTest : public OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInputs("X"), true,
-                      "Input(X) should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) should not be null.");
-    PADDLE_ENFORCE_GT(ctx->GetLoDLevel("X"), 0,
-                      "The LoD level Input(X) should be larger than 0.");
+    OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "GetLoDLevelTest");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "GetLoDLevelTest");
+
+    auto lod_level = ctx->GetLoDLevel("X");
+    PADDLE_ENFORCE_GT(lod_level, 0,
+                      paddle::platform::errors::InvalidArgument(
+                          "The LoD level Input(X) should be larger than 0."));
   }
 };
@@ -561,10 +563,8 @@ class SetLoDLevelTest : public OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInputs("X"), true,
-                      "Input(X) should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "SetLoDLevelTest");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SetLoDLevelTest");
     ctx->SetLoDLevel("Out", 1);
   }
 };
......
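In the two test operators above, paired `HasInputs`/`HasOutput` enforces are collapsed into `OP_INOUT_CHECK`, whose argument order (as used in this diff) is: condition, slot kind, slot name, operator type. A hedged sketch of an `InferShape` written the same way; "MyTestOp" and the extra `Y` input are hypothetical:

    void InferShape(framework::InferShapeContext* ctx) const override {
      OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "MyTestOp");
      OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "MyTestOp");
      OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "MyTestOp");
    }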