Unverified commit 48b9a56f, authored by Chen Weihang, committed by GitHub

Polish framework error message - part 4 (#25807)

* polish framework error message part 4

* fix type error

* fix message error

* polish by review comments
Parent e52dae6e
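
Every change below follows the same pattern: a bare PADDLE_ENFORCE(cond, "msg") or PADDLE_THROW("msg") is rewritten as a typed check (PADDLE_ENFORCE_EQ / _NE / _GT / _GE / _NOT_NULL) carrying a categorized error built from platform::errors (NotFound, InvalidArgument, PreconditionNotMet, Unavailable, AlreadyExists). A minimal sketch of the before/after shape, with an illustrative condition and message rather than a line from this diff:

    // Before: boolean enforce with an unstructured message string.
    //   PADDLE_ENFORCE(attrs.find(name) != attrs.end(),
    //                  "missing attribute %s", name);
    // After: typed comparison plus a categorized platform::errors type, so a
    // failure reports the compared values and a machine-readable error class.
    PADDLE_ENFORCE_NE(
        attrs.find(name), attrs.end(),
        platform::errors::NotFound("Attribute (%s) is not found.", name));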
@@ -155,8 +155,10 @@ class Vector {
   // get cuda ptr. immutable
   const T *CUDAData(platform::Place place) const {
-    PADDLE_ENFORCE(platform::is_gpu_place(place),
-                   "CUDA Data must on CUDA place");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(place), true,
+        platform::errors::Unavailable(
+            "Place mismatch, CUDA Data must be on CUDA place."));
     ImmutableCUDA(place);
     return reinterpret_cast<T *>(gpu_->ptr());
   }
@@ -234,7 +236,8 @@ class Vector {
       UnsetFlag(kDirty);
       SetFlag(kDataInCUDA);
     } else if (IsInCUDA() && !(place == gpu_->place())) {
-      PADDLE_THROW("This situation should not happen");
+      PADDLE_THROW(
+          platform::errors::Unavailable("Unexpected data place mismatch."));
       // Still dirty
     } else {
       // Dirty && DataInCUDA && Device is same
@@ -246,7 +249,8 @@ class Vector {
       CopyCPUDataToCUDA(place);
       SetFlag(kDataInCUDA);
     } else if (!(place == gpu_->place())) {
-      PADDLE_THROW("This situation should not happen.");
+      PADDLE_THROW(
+          platform::errors::Unavailable("Unexpected data place mismatch."));
     } else {
       // Not Dirty && DataInCUDA && Device is same
       // Do nothing.
@@ -501,27 +505,29 @@ class CPUVector : public std::vector<T, std::allocator<T>> {
   }

   const T *CUDAData(platform::Place place) const {
-    PADDLE_THROW(
-        "Vector::CUDAData() method is not supported in CPU-only version");
+    PADDLE_THROW(platform::errors::Unavailable(
+        "Vector::CUDAData() method is not supported in CPU-only version."));
   }

   T *CUDAMutableData(platform::Place place) {
-    PADDLE_THROW(
+    PADDLE_THROW(platform::errors::Unavailable(
         "Vector::CUDAMutableData() method is not supported in CPU-only "
-        "version");
+        "version."));
   }

   const T *Data(platform::Place place) const {
-    PADDLE_ENFORCE(
-        platform::is_cpu_place(place),
-        "Vector::Data() method is not supported when not in CPUPlace");
+    PADDLE_ENFORCE_EQ(
+        platform::is_cpu_place(place), true,
+        platform::errors::Unavailable(
+            "Vector::Data() method is not supported when not in CPUPlace."));
     return this->data();
   }

   T *MutableData(platform::Place place) {
-    PADDLE_ENFORCE(
-        platform::is_cpu_place(place),
-        "Vector::MutableData() method is not supported when not in CPUPlace");
+    PADDLE_ENFORCE_EQ(
+        platform::is_cpu_place(place), true,
+        platform::errors::Unavailable("Vector::MutableData() method is not "
+                                      "supported when not in CPUPlace."));
     return this->data();
   }
......
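For context, Vector<T> keeps a synchronized CPU copy and an optional CUDA copy, and the checks above guard the place argument of its accessors. A hedged usage sketch (the variable names and the CUDAPlace device index are illustrative):

    // Requesting the device pointer with a GPU place is the supported path;
    // any other place now fails with errors::Unavailable instead of a bare
    // enforce message.
    framework::Vector<int64_t> ids({1, 2, 3});
    const int64_t *dev_ptr = ids.CUDAData(platform::CUDAPlace(0));
    // ids.CUDAData(platform::CPUPlace());  // raises Unavailable:
    //   "Place mismatch, CUDA Data must be on CUDA place."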
@@ -54,12 +54,16 @@ void NaiveExecutor::Run() {
 void NaiveExecutor::CreateVariables(const ProgramDesc &desc, int block_id,
                                     bool persistable, Scope *scope) {
-  PADDLE_ENFORCE_NOT_NULL(scope);
+  PADDLE_ENFORCE_NOT_NULL(scope,
+                          platform::errors::InvalidArgument(
+                              "The Scope to hold variables is nullptr."));
   auto &global_block = desc.Block(block_id);

   const auto *anc = scope;
-  PADDLE_ENFORCE(anc->parent() != anc);
+  PADDLE_ENFORCE_NE(
+      anc->parent(), anc,
+      platform::errors::InvalidArgument("Input scope should be child scope."));
   while (anc->parent()) {
     anc = anc->parent();
   }
@@ -104,9 +108,12 @@ void NaiveExecutor::CreateOps(const ProgramDesc &desc, int block_id,
 }

 LoDTensor *NaiveExecutor::FindTensor(const std::string &name) {
-  PADDLE_ENFORCE(scope_, "Need to init scope first");
+  PADDLE_ENFORCE_NOT_NULL(scope_,
+                          platform::errors::PreconditionNotMet(
+                              "Need to init scope in NaiveExecutor firstly."));
   auto *var = scope_->FindVar(name);
-  PADDLE_ENFORCE(var, "No variable [%s] in the scope");
+  PADDLE_ENFORCE_NOT_NULL(var, platform::errors::NotFound(
+                                   "No variable [%s] in current scope.", name));
   auto *tensor = const_cast<LoDTensor *>(&var->Get<LoDTensor>());
   return tensor;
 }
......
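A hedged sketch of how the FindTensor preconditions above surface to a caller (the executor setup and variable name are illustrative):

    // scope_ must be set before lookups: a null scope now raises
    // PreconditionNotMet, and an unknown variable name raises NotFound with
    // the name interpolated (the old message never passed `name` to "%s").
    framework::NaiveExecutor exe(platform::CPUPlace());
    // exe.FindTensor("ids") before preparing a scope:
    //   PreconditionNotMet: "Need to init scope in NaiveExecutor firstly."
    // exe.FindTensor("ids") with a scope that lacks "ids":
    //   NotFound: "No variable [ids] in current scope."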
@@ -23,8 +23,9 @@ namespace framework {
 const Attribute &InferNoNeedBufferVarsContext::GetAttr(
     const std::string &name) const {
   auto iter = attrs_.find(name);
-  PADDLE_ENFORCE_EQ(iter != attrs_.end(), true, "Cannot find attribute %s",
-                    name);
+  PADDLE_ENFORCE_NE(
+      iter, attrs_.end(),
+      platform::errors::NotFound("Cannot find attribute (%s).", name));
   return iter->second;
 }
......
@@ -101,7 +101,10 @@ class InferNoNeedBufferVarsFN {
   inline const std::unordered_set<std::string> &operator()(
       const VariableNameMap &inputs, const VariableNameMap &outputs,
       const AttributeMap &attrs) const {
-    PADDLE_ENFORCE_NOT_NULL(inferer_);
+    PADDLE_ENFORCE_NOT_NULL(
+        inferer_,
+        platform::errors::PreconditionNotMet(
+            "The `inferer_` of InferNoNeedBufferVarsFN is not initialized."));
     StaticGraphInferNoNeedBufferVarsContext ctx(inputs, outputs, attrs);
     return (*inferer_)(ctx);
   }
@@ -110,7 +113,10 @@ class InferNoNeedBufferVarsFN {
       const imperative::NameVarMap<imperative::VariableWrapper> &inputs,
       const imperative::NameVarMap<imperative::VariableWrapper> &outputs,
       const AttributeMap &attrs) const {
-    PADDLE_ENFORCE_NOT_NULL(inferer_);
+    PADDLE_ENFORCE_NOT_NULL(
+        inferer_,
+        platform::errors::PreconditionNotMet(
+            "The `inferer_` of InferNoNeedBufferVarsFN is not initialized."));
     DyGraphInferNoNeedBufferVarsContext ctx(inputs, outputs, attrs);
     return (*inferer_)(ctx);
   }
@@ -120,8 +126,14 @@ class InferNoNeedBufferVarsFN {
   inline bool operator!() const { return inferer_ == nullptr; }

   inline void Reset(const std::shared_ptr<NoNeedBufferVarsInference> &inferer) {
-    PADDLE_ENFORCE_NOT_NULL(inferer);
-    PADDLE_ENFORCE_EQ(inferer_, nullptr);
+    PADDLE_ENFORCE_NOT_NULL(
+        inferer, platform::errors::InvalidArgument("The input inferer of "
+                                                   "InferNoNeedBufferVarsFN::"
+                                                   "Reset is nullptr."));
+    PADDLE_ENFORCE_EQ(
+        inferer_, nullptr,
+        platform::errors::AlreadyExists(
+            "The `inferer_` of InferNoNeedBufferVarsFN has been initialized."));
     inferer_ = inferer;
   }
......
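A hedged sketch of the Reset contract these checks enforce (MyInferer stands in for a concrete NoNeedBufferVarsInference subclass):

    framework::InferNoNeedBufferVarsFN fn;
    // Calling fn(inputs, outputs, attrs) here would raise PreconditionNotMet,
    // because inferer_ is still null.
    fn.Reset(std::make_shared<MyInferer>());  // first initialization: ok
    // fn.Reset(nullptr);        // InvalidArgument: input inferer is nullptr
    // fn.Reset(other_inferer);  // AlreadyExists: inferer_ already initialized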
@@ -24,9 +24,10 @@ namespace framework {
 inline std::vector<int> ConvertStr2Int(const std::string& str_text) {
   auto vec_text = string::split_string<std::string>(str_text, ".");
-  PADDLE_ENFORCE((vec_text.size() == 2 || vec_text.size() == 3),
-                 "Input[%s] is not a right version format [1.6 or 1.6.0]",
-                 str_text);
+  PADDLE_ENFORCE(
+      (vec_text.size() == 2 || vec_text.size() == 3),
+      platform::errors::InvalidArgument(
+          "Input[%s] is not a right version format [1.6 or 1.6.0].", str_text));

   std::vector<int> vec_res;
   vec_res.reserve(3);
@@ -49,10 +50,11 @@ inline bool CompareVersion(const std::string& str_first,
   auto vec_second_version = ConvertStr2Int(str_second);

   // first version id
-  PADDLE_ENFORCE_EQ(
-      vec_first_version.size(), vec_second_version.size(),
-      "version information size not equal, first is [%d] second is [%d]",
-      vec_first_version.size(), vec_second_version.size());
+  PADDLE_ENFORCE_EQ(vec_first_version.size(), vec_second_version.size(),
+                    platform::errors::InvalidArgument(
+                        "Version information size is not equal, the first is "
+                        "[%d], the second is [%d].",
+                        vec_first_version.size(), vec_second_version.size()));

   for (size_t i = 0; i < vec_first_version.size() - 1; ++i) {
     if (vec_first_version[i] != vec_second_version[i]) {
......
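A hedged sketch of the version helpers' expected behavior (assuming ConvertStr2Int pads a two-part version with a trailing zero):

    auto v = framework::ConvertStr2Int("1.6.0");  // -> {1, 6, 0}
    // framework::ConvertStr2Int("1.6.0.1");      // InvalidArgument:
    //   "Input[1.6.0.1] is not a right version format [1.6 or 1.6.0]."
    // CompareVersion only walks the components once both parsed sizes match;
    // a size mismatch raises InvalidArgument with both sizes interpolated.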
@@ -155,8 +155,9 @@ class OperatorBase {
   bool HasAttr(const std::string& name) const { return attrs_.count(name); }
   template <typename T>
   inline const T& Attr(const std::string& name) const {
-    PADDLE_ENFORCE(attrs_.find(name) != attrs_.end(),
-                   "%s should be in AttributeMap", name);
+    PADDLE_ENFORCE_NE(
+        attrs_.find(name), attrs_.end(),
+        platform::errors::NotFound("(%s) is not found in AttributeMap.", name));
     return BOOST_GET_CONST(T, attrs_.at(name));
   }
   const AttributeMap& Attrs() const { return attrs_; }
@@ -165,7 +166,9 @@ class OperatorBase {
   const VariableNameMap& Outputs() const { return outputs_; }

   const OpInfo& Info() const {
-    PADDLE_ENFORCE_NOT_NULL(info_, "OpInfo of %s is not found", type_);
+    PADDLE_ENFORCE_NOT_NULL(
+        info_, platform::errors::NotFound(
+                   "OpInfo of operator (%s) is not found.", type_));
     return *info_;
   }
@@ -369,7 +372,9 @@ class ExecutionContext {
 #ifdef PADDLE_WITH_CUDA
   const inline platform::CUDADeviceContext& cuda_device_context() const {
-    PADDLE_ENFORCE_EQ(platform::is_gpu_place(device_context_.GetPlace()), true);
+    PADDLE_ENFORCE_EQ(platform::is_gpu_place(device_context_.GetPlace()), true,
+                      platform::errors::PreconditionNotMet(
+                          "Current device context place is not GPUPlace."));
     return *reinterpret_cast<const platform::CUDADeviceContext*>(
         &device_context_);
   }
@@ -384,8 +389,12 @@ class ExecutionContext {
     auto shared_allocation = std::shared_ptr<memory::allocation::Allocation>(
         allocation_ptr, deleter);

-    PADDLE_ENFORCE_GE(allocation_ptr->size(),
-                      framework::product(dim) * sizeof(T));
+    PADDLE_ENFORCE_GE(
+        allocation_ptr->size(), framework::product(dim) * sizeof(T),
+        platform::errors::PreconditionNotMet(
+            "The data memory size(%d) is less than the tensor needed memory "
+            "size(%d).",
+            allocation_ptr->size(), framework::product(dim) * sizeof(T)));

     paddle::framework::Tensor temp_tensor(
         framework::ToDataType(std::type_index(typeid(T))));
......
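The temporary-tensor guard above compares the backing allocation against the bytes the tensor view needs, product(dim) * sizeof(T). A worked example of the arithmetic (the numbers are illustrative):

    // dim = {2, 3}, T = float  =>  2 * 3 * sizeof(float) = 24 bytes needed.
    // An allocation_ptr->size() of 16 now fails with PreconditionNotMet:
    //   "The data memory size(16) is less than the tensor needed memory
    //    size(24)."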
@@ -16,6 +16,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_info.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/platform/errors.h"
 #include "paddle/fluid/platform/init.h"

 DECLARE_bool(enable_unused_var_check);
@@ -546,12 +547,13 @@ class GetLoDLevelTest : public OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInputs("X"), true,
-                      "Input(X) should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) should not be null.");
-    PADDLE_ENFORCE_GT(ctx->GetLoDLevel("X"), 0,
-                      "The LoD level Input(X) should be larger than 0.");
+    OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "GetLoDLevelTest");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "GetLoDLevelTest");
+
+    auto lod_level = ctx->GetLoDLevel("X");
+    PADDLE_ENFORCE_GT(lod_level, 0,
+                      paddle::platform::errors::InvalidArgument(
+                          "The LoD level Input(X) should be larger than 0."));
   }
 };
@@ -561,10 +563,8 @@ class SetLoDLevelTest : public OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInputs("X"), true,
-                      "Input(X) should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "SetLoDLevelTest");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SetLoDLevelTest");
     ctx->SetLoDLevel("Out", 1);
   }
 };
......
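The test updates also adopt OP_INOUT_CHECK, which bundles the has-input/has-output enforce and its NotFound-style message into one macro taking the check, the argument kind, the argument name, and the operator type. A hedged sketch of an InferShape using it (the operator name MyOp is illustrative):

    void InferShape(framework::InferShapeContext* ctx) const override {
      OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "MyOp");
      OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "MyOp");
      // Shape propagation itself is unchanged by this PR.
      ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
    }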