Unverified commit dae62556, authored by W Wilber, committed by GitHub

Enhance infer error info message (#26731)

Parent c89f269c
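Editor's note: the change applied throughout this commit is the same in every file. Bare PADDLE_ENFORCE / PADDLE_THROW calls that carried only a message string are replaced by the typed comparison macros (PADDLE_ENFORCE_EQ / _NE / _GT / _GE / _NOT_NULL) plus an explicit error category from platform::errors. A minimal before/after sketch, assembled from lines that appear in the hunks below and assuming the macros and error types declared in paddle/fluid/platform/enforce.h:

    // Before: condition plus a bare message string.
    PADDLE_ENFORCE(argument->scope_valid(), "The scope field should be valid");

    // After: explicit comparison macro plus a typed error that formats the message.
    PADDLE_ENFORCE_EQ(argument->scope_valid(), true,
                      platform::errors::PreconditionNotMet(
                          "The scope field should be valid"));

    // Null checks and throws follow the same pattern.
    PADDLE_ENFORCE_NOT_NULL(scope_ptr, platform::errors::PreconditionNotMet(
                                           "The scope ptr should not be nullptr."));
    PADDLE_THROW(platform::errors::Unavailable(
        "Not compile with CUDA, should not reach here."));

Beyond the explicit message, the comparison forms can also attach the two operands to the failure report, which the plain boolean PADDLE_ENFORCE form could not.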
@@ -27,8 +27,9 @@ Analyzer::Analyzer() {}
 void Analyzer::Run(Argument *argument) { RunAnalysis(argument); }
 void Analyzer::RunAnalysis(Argument *argument) {
-  PADDLE_ENFORCE(argument->analysis_passes_valid(),
-                 "analsis_passes is not valid in the argument.");
+  PADDLE_ENFORCE_EQ(argument->analysis_passes_valid(), true,
+                    platform::errors::InvalidArgument(
+                        "analsis_passes is not valid in the argument."));
   const bool disable_logs = argument->disable_logs();
   for (auto &pass : argument->analysis_passes()) {
     if (!disable_logs) {
@@ -38,7 +39,8 @@ void Analyzer::RunAnalysis(Argument *argument) {
       continue;
     auto *ptr = PassRegistry::Global().Retreive(pass);
-    PADDLE_ENFORCE_NOT_NULL(ptr, "no analysis pass called %s", pass);
+    PADDLE_ENFORCE_NOT_NULL(ptr, platform::errors::PreconditionNotMet(
+                                     "no analysis pass called %s", pass));
     ptr->Run(argument);
   }
 }
...
@@ -75,9 +75,14 @@ void TestWord2vecPrediction(const std::string& model_path) {
   std::vector<PaddleTensor> outputs;
   CHECK(predictor->Run(slots, &outputs));
-  PADDLE_ENFORCE_EQ(outputs.size(), 1UL);
+  PADDLE_ENFORCE_EQ(outputs.size(), 1UL,
+                    platform::errors::PreconditionNotMet(
+                        "Output size should be 1, but got %d", outputs.size()));
   // Check the output buffer size and result of each tid.
-  PADDLE_ENFORCE_EQ(outputs.front().data.length(), 33168UL);
+  PADDLE_ENFORCE_EQ(outputs.front().data.length(), 33168UL,
+                    platform::errors::PreconditionNotMet(
+                        "Output's data length should be 33168 but got %d",
+                        outputs.front().data.length()));
   float result[5] = {0.00129761, 0.00151112, 0.000423564, 0.00108815,
                      0.000932706};
   const size_t num_elements = outputs.front().data.length() / sizeof(float);
...
@@ -76,53 +76,62 @@ struct Argument {
     }
   }

-#define DECL_ARGUMENT_FIELD(field__, Field, type__)                 \
- public:                                                            \
-  type__& field__() {                                               \
-    PADDLE_ENFORCE(Has(#field__), "There is no such field");        \
-    return field__##_;                                              \
-  }                                                                 \
-  void Set##Field(const type__& x) {                                \
-    field__##_ = x;                                                 \
-    valid_fields_.insert(#field__);                                 \
-  }                                                                 \
-  DECL_ARGUMENT_FIELD_VALID(field__);                               \
-  type__* field__##_ptr() { return &field__##_; }                   \
-                                                                    \
- private:                                                           \
+#define DECL_ARGUMENT_FIELD(field__, Field, type__)                      \
+ public:                                                                 \
+  type__& field__() {                                                    \
+    PADDLE_ENFORCE_EQ(                                                   \
+        Has(#field__), true,                                             \
+        platform::errors::PreconditionNotMet("There is no such field")); \
+    return field__##_;                                                   \
+  }                                                                      \
+  void Set##Field(const type__& x) {                                     \
+    field__##_ = x;                                                      \
+    valid_fields_.insert(#field__);                                      \
+  }                                                                      \
+  DECL_ARGUMENT_FIELD_VALID(field__);                                    \
+  type__* field__##_ptr() { return &field__##_; }                        \
+                                                                         \
+ private:                                                                \
   type__ field__##_;

 #define DECL_ARGUMENT_FIELD_VALID(field__) \
   bool field__##_valid() { return Has(#field__); }

 #define DECL_ARGUMENT_UNIQUE_FIELD(field__, Field, type__)                    \
  public:                                                                      \
   type__& field__() {                                                         \
-    PADDLE_ENFORCE_NOT_NULL(field__##_);                                      \
-    PADDLE_ENFORCE(Has(#field__));                                            \
-    return *static_cast<type__*>(field__##_.get());                           \
-  }                                                                           \
-  void Set##Field(type__* x) {                                                \
-    field__##_ =                                                              \
-        unique_ptr_t(x, [](void* x) { delete static_cast<type__*>(x); });     \
-    valid_fields_.insert(#field__);                                           \
-  }                                                                           \
-  void Set##Field##NotOwned(type__* x) {                                      \
-    valid_fields_.insert(#field__);                                           \
-    field__##_ = unique_ptr_t(x, [](void* x) {});                             \
-  }                                                                           \
-  DECL_ARGUMENT_FIELD_VALID(field__);                                         \
-  type__* field__##_ptr() {                                                   \
-    PADDLE_ENFORCE(Has(#field__));                                            \
-    return static_cast<type__*>(field__##_.get());                            \
-  }                                                                           \
-  type__* Release##Field() {                                                  \
-    PADDLE_ENFORCE(Has(#field__));                                            \
-    valid_fields_.erase(#field__);                                            \
-    return static_cast<type__*>(field__##_.release());                        \
-  }                                                                           \
-                                                                              \
- private:                                                                     \
+    PADDLE_ENFORCE_NOT_NULL(field__##_, platform::errors::PreconditionNotMet( \
+                                            "filed should not be null."));    \
+    PADDLE_ENFORCE_EQ(                                                         \
+        Has(#field__), true,                                                   \
+        platform::errors::PreconditionNotMet("There is no such field"));       \
+    return *static_cast<type__*>(field__##_.get());                           \
+  }                                                                           \
+  void Set##Field(type__* x) {                                                \
+    field__##_ =                                                              \
+        unique_ptr_t(x, [](void* x) { delete static_cast<type__*>(x); });     \
+    valid_fields_.insert(#field__);                                           \
+  }                                                                           \
+  void Set##Field##NotOwned(type__* x) {                                      \
+    valid_fields_.insert(#field__);                                           \
+    field__##_ = unique_ptr_t(x, [](void* x) {});                             \
+  }                                                                           \
+  DECL_ARGUMENT_FIELD_VALID(field__);                                         \
+  type__* field__##_ptr() {                                                   \
+    PADDLE_ENFORCE_EQ(                                                        \
+        Has(#field__), true,                                                  \
+        platform::errors::PreconditionNotMet("There is no such field"));      \
+    return static_cast<type__*>(field__##_.get());                            \
+  }                                                                           \
+  type__* Release##Field() {                                                  \
+    PADDLE_ENFORCE_EQ(                                                        \
+        Has(#field__), true,                                                  \
+        platform::errors::PreconditionNotMet("There is no such field"));      \
+    valid_fields_.erase(#field__);                                            \
+    return static_cast<type__*>(field__##_.release());                        \
+  }                                                                           \
+                                                                              \
+ private:                                                                     \
   unique_ptr_t field__##_;

 DECL_ARGUMENT_FIELD(predictor_id, PredictorID, int);
@@ -227,8 +236,10 @@ struct Argument {
 };

 #define ARGUMENT_CHECK_FIELD(argument__, fieldname__) \
-  PADDLE_ENFORCE(argument__->Has(#fieldname__),       \
-                 "the argument field [%s] should be set", #fieldname__);
+  PADDLE_ENFORCE_EQ(                                  \
+      argument__->Has(#fieldname__), true,            \
+      platform::errors::PreconditionNotMet(           \
+          "the argument field [%s] should be set", #fieldname__));

 }  // namespace analysis
 }  // namespace inference
...
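Editor's note: for readers new to these token-pasting macros, here is a hand-written (not compiler-generated) illustration of what DECL_ARGUMENT_FIELD(predictor_id, PredictorID, int) expands to after this change. The accessor now raises a PreconditionNotMet error instead of the old bare-string enforce:

     public:
      int& predictor_id() {
        PADDLE_ENFORCE_EQ(
            Has("predictor_id"), true,
            platform::errors::PreconditionNotMet("There is no such field"));
        return predictor_id_;
      }
      void SetPredictorID(const int& x) {
        predictor_id_ = x;
        valid_fields_.insert("predictor_id");
      }
      bool predictor_id_valid() { return Has("predictor_id"); }  // from DECL_ARGUMENT_FIELD_VALID
      int* predictor_id_ptr() { return &predictor_id_; }

     private:
      int predictor_id_;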
@@ -73,12 +73,15 @@ struct DataTypeNamer {
   template <typename T>
   const std::string &repr() const {
     auto x = std::type_index(typeid(T));
-    PADDLE_ENFORCE(dic_.count(x), "unknown type for representation");
+    PADDLE_ENFORCE_GT(dic_.count(x), 0, platform::errors::PreconditionNotMet(
+                                            "unknown type for representation"));
     return dic_.at(x);
   }

   const std::string &repr(const std::type_index &type) const {  // NOLINT
-    PADDLE_ENFORCE(dic_.count(type), "unknown type for representation");
+    PADDLE_ENFORCE_GT(dic_.count(type), 0,
+                      platform::errors::PreconditionNotMet(
+                          "unknown type for representation"));
     return dic_.at(type);
   }
@@ -116,7 +119,9 @@ template <typename T>
 class OrderedRegistry {
  public:
   T *Register(const std::string &name, T *x) {
-    PADDLE_ENFORCE(!dic_.count(name), "duplicate key [%s]", name);
+    PADDLE_ENFORCE_EQ(dic_.count(name), 0,
+                      platform::errors::PreconditionNotMet(
+                          "There exists duplicate key [%s]", name));
     dic_[name] = elements_.size();
     elements_.emplace_back(std::unique_ptr<T>(x));
     return elements_.back().get();
@@ -136,14 +141,20 @@ class OrderedRegistry {
 template <typename T>
 T &GetFromScope(const framework::Scope &scope, const std::string &name) {
   framework::Variable *var = scope.FindVar(name);
-  PADDLE_ENFORCE(var != nullptr);
+  PADDLE_ENFORCE_NOT_NULL(
+      var, platform::errors::PreconditionNotMet(
+               "The var which name is %s should not be nullptr.", name));
   return *var->GetMutable<T>();
 }

 static framework::proto::ProgramDesc LoadProgramDesc(
     const std::string &model_path) {
   std::ifstream fin(model_path, std::ios::in | std::ios::binary);
-  PADDLE_ENFORCE(fin.is_open(), "Cannot open file %s", model_path);
+  PADDLE_ENFORCE_EQ(
+      fin.is_open(), true,
+      platform::errors::NotFound(
+          "Cannot open file %s, please confirm whether the file exists",
+          model_path));
   fin.seekg(0, std::ios::end);
   std::string buffer(fin.tellg(), ' ');
   fin.seekg(0, std::ios::beg);
@@ -188,10 +199,12 @@ static std::string GetDirRoot(const std::string &path) {
 static std::string GetOrCreateModelOptCacheDir(const std::string &model_root) {
   std::string opt_cache_dir = model_root + "/_opt_cache/";
   if (!PathExists(opt_cache_dir)) {
-    PADDLE_ENFORCE(MKDIR(opt_cache_dir.c_str()) != -1,
-                   "Can not create optimize cache directory: %s, Make sure you "
-                   "have permission to write",
-                   opt_cache_dir);
+    PADDLE_ENFORCE_NE(
+        MKDIR(opt_cache_dir.c_str()), -1,
+        platform::errors::PreconditionNotMet(
+            "Can not create optimize cache directory: %s, Make sure you "
+            "have permission to write",
+            opt_cache_dir));
   }
   return opt_cache_dir;
 }
...
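Editor's note: the replacement is not purely mechanical; each check also picks an error category. As a rough guide inferred from the hunks themselves (not an official taxonomy), NotFound is used for missing files, InvalidArgument for unusable caller input, PreconditionNotMet for internal state that should already hold, and Unavailable for features compiled out of the build. A condensed sketch using statements taken from this commit:

    PADDLE_ENFORCE_EQ(fin.is_open(), true,
                      platform::errors::NotFound(
                          "Cannot open file %s, please confirm whether the file exists",
                          model_path));                                  // missing file
    PADDLE_ENFORCE_GT(batch_size, 0, platform::errors::InvalidArgument(
                                         "Non-positive batch size."));  // bad caller input
    PADDLE_ENFORCE_EQ(argument->scope_valid(), true,
                      platform::errors::PreconditionNotMet(
                          "The scope field should be valid"));          // broken internal invariant
    PADDLE_THROW(platform::errors::Unavailable(
        "Not compile with CUDA, should not reach here."));              // feature compiled out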
@@ -38,7 +38,9 @@ IRPassManager::IRPassManager(Argument *argument) {
   graph_ = std::unique_ptr<Graph>(new Graph(argument->main_program()));
   if (argument->Has("scope")) {
     auto *scope_ptr = argument->scope_ptr();
-    PADDLE_ENFORCE(scope_ptr);
+    PADDLE_ENFORCE_NOT_NULL(scope_ptr,
+                            platform::errors::PreconditionNotMet(
+                                "The scope ptr should not be nullptr."));
     graph_->SetNotOwned(framework::ir::kParamScopeAttr, scope_ptr);
   }
@@ -101,13 +103,17 @@ void IRPassManager::CreatePasses(Argument *argument,
       std::string optim_cache_dir = argument->optim_cache_dir();
       bool int8_valid =
           !(model_from_memory && optim_cache_dir.empty() && enable_int8);
-      PADDLE_ENFORCE(int8_valid,
-                     "When you are in TRT INT8 mode, and load model from "
-                     "memory, you should set optim_cache_dir using "
-                     "config.SetOptimCacheDir()");
-      PADDLE_ENFORCE(!(model_from_memory && use_static_engine),
-                     "When you are using Paddle-TRT, and also using load model "
-                     "from memory, you should set the use_static to false.");
+      PADDLE_ENFORCE_EQ(
+          int8_valid, true,
+          platform::errors::PreconditionNotMet(
+              "When you are in TRT INT8 mode, and load model from "
+              "memory, you should set optim_cache_dir using "
+              "config.SetOptimCacheDir()"));
+      PADDLE_ENFORCE_EQ(
+          !(model_from_memory && use_static_engine), true,
+          platform::errors::PreconditionNotMet(
+              "When you are using Paddle-TRT, and also using load model "
+              "from memory, you should set the use_static to false."));

       if (!optim_cache_dir.empty()) {
         pass->Set("model_opt_cache_dir", new std::string(optim_cache_dir));
...
@@ -123,7 +123,9 @@ void RenameAndGetOutputs(
   auto add_block_var = [&](const std::string &graph_arg,
                            const std::string &block_arg) {
     auto arg_var_node = graph_var_map.find(graph_arg);
-    PADDLE_ENFORCE(arg_var_node != graph_var_map.end());
+    PADDLE_ENFORCE_NE(arg_var_node, graph_var_map.end(),
+                      platform::errors::InvalidArgument(
+                          "Can not find %s in graph_var_map", graph_arg));
     auto *var_t = block_desc->Var(block_arg);
     var_t->SetShape(arg_var_node->second->Var()->GetShape());
     var_t->SetDataType(arg_var_node->second->Var()->GetDataType());
@@ -133,7 +135,10 @@ void RenameAndGetOutputs(
     framework::proto::OpDesc *op = block_desc->Op(index)->Proto();
     framework::OpDesc op_desc(*op, nullptr);
     auto correspond_node = subgraph_nodes[index];
-    PADDLE_ENFORCE_EQ(correspond_node->Name(), op->type());
+    PADDLE_ENFORCE_EQ(correspond_node->Name(), op->type(),
+                      platform::errors::PreconditionNotMet(
+                          "We should get %s, but get %s", op->type(),
+                          correspond_node->Name()));

     std::unordered_map<std::string, size_t> var2id;
     std::unordered_map<std::string, framework::ir::Node *> in_vars;
...
@@ -97,7 +97,9 @@ void TensorRtSubgraphPass::CreateTensorRTOp(
     std::vector<std::string> *repetitive_params) const {
   auto *op_desc = node->Op();
   auto &subgraph = *framework::ir::Agent(node).subgraph();
-  PADDLE_ENFORCE(!subgraph.empty());
+  PADDLE_ENFORCE_EQ(subgraph.empty(), false,
+                    platform::errors::PreconditionNotMet(
+                        "The subgraph should not be empty."));

   framework::ProgramDesc *program_desc =
       Get<framework::ProgramDesc *>("program");
@@ -194,12 +196,17 @@ void TensorRtSubgraphPass::CreateTensorRTOp(
   // to Tensor.
   std::vector<std::string> output_mapping;
   for (auto name : output_names) {
-    PADDLE_ENFORCE(output_name_map.count(name) != 0);
+    PADDLE_ENFORCE_NE(output_name_map.count(name), 0,
+                      platform::errors::PreconditionNotMet(
+                          "The output_name_map should have %s", name));
     output_mapping.push_back(output_name_map[name]);
   }
-  PADDLE_ENFORCE(!output_mapping.empty());
-  PADDLE_ENFORCE(!block_desc.Proto()->vars().empty(),
-                 "the block has no var-desc");
+  PADDLE_ENFORCE_EQ(output_mapping.empty(), false,
+                    platform::errors::PreconditionNotMet(
+                        "The output_mapping should not be empty."));
+  PADDLE_ENFORCE_EQ(
+      !block_desc.Proto()->vars().empty(), true,
+      platform::errors::PreconditionNotMet("the block has no var-desc"));

   // Set attrs
   op_desc->SetType("tensorrt_engine");
...
@@ -13,6 +13,8 @@
 // limitations under the License.

 #include "paddle/fluid/inference/analysis/passes/ir_analysis_pass.h"
+#include <memory>
+#include <utility>
 #include "paddle/fluid/framework/ir/fuse_pass_base.h"
 #include "paddle/fluid/inference/analysis/ir_pass_manager.h"
@@ -31,7 +33,10 @@ void IrAnalysisPass::RunImpl(Argument* argument) {
   // Apply passes.
   IRPassManager the_ir_manager(argument);
   graph = the_ir_manager.Apply(std::move(graph));
-  PADDLE_ENFORCE_GT(graph->Nodes().size(), 0);
+  PADDLE_ENFORCE_GT(
+      graph->Nodes().size(), 0,
+      platform::errors::PreconditionNotMet(
+          "The graph nodes size should be greater than 0, but got 0"));
   argument->SetMainGraph(graph.release());
   CollectFusionStatis(argument);
 }
...
@@ -31,7 +31,9 @@ void IrGraphBuildPass::RunImpl(Argument *argument) {
   if (!argument->scope_valid()) {
     argument->SetScope(new framework::Scope);
   }
-  PADDLE_ENFORCE(argument->use_gpu_valid());
+  PADDLE_ENFORCE_EQ(argument->use_gpu_valid(), true,
+                    platform::errors::PreconditionNotMet(
+                        "The use_gpu field should be valid"));

   // The load program should run on the same device with the inference program,
   // so that the parameters will on the same device, or they will keep copying
@@ -51,14 +53,17 @@ void IrGraphBuildPass::RunImpl(Argument *argument) {
         argument->model_from_memory_valid() && argument->model_from_memory());
     argument->SetMainProgram(program.release());
   } else {
-    PADDLE_THROW(
-        "either model_dir or (program path and parameter path) should be set.");
+    PADDLE_THROW(platform::errors::PreconditionNotMet(
+        "either model_dir or (program path and parameter path) should be "
+        "set."));
   }

   auto graph = std::unique_ptr<Graph>(new Graph(argument->main_program()));
   argument->SetMainGraph(graph.release());
   auto *scope_ptr = argument->scope_ptr();
-  PADDLE_ENFORCE(scope_ptr);
+  PADDLE_ENFORCE_NOT_NULL(scope_ptr,
+                          platform::errors::PreconditionNotMet(
+                              "The scope ptr should not be nullptr."));
   argument->main_graph().SetNotOwned(framework::ir::kParamScopeAttr, scope_ptr);
 }
...
@@ -31,7 +31,8 @@ void IrInferCleanGraphPass::RunImpl(Argument* argument) {
   std::unordered_set<const framework::ir::Node*> invalid_nodes;
   int valid_op = 0;
   for (auto* node : graph.Nodes()) {
-    PADDLE_ENFORCE_NOT_NULL(node);
+    PADDLE_ENFORCE_NOT_NULL(node, platform::errors::PreconditionNotMet(
+                                      "The node should not be nullptr."));
     if (is_valid_node(node)) {
       invalid_nodes.insert(node);
     } else if (node->IsOp()) {
...
@@ -23,8 +23,12 @@ namespace inference {
 namespace analysis {

 void IrParamsSyncAmongDevicesPass::RunImpl(Argument *argument) {
-  PADDLE_ENFORCE(argument->scope_valid());
-  PADDLE_ENFORCE(argument->use_gpu_valid());
+  PADDLE_ENFORCE_EQ(
+      argument->scope_valid(), true,
+      platform::errors::PreconditionNotMet("The scope field should be valid"));
+  PADDLE_ENFORCE_EQ(argument->use_gpu_valid(), true,
+                    platform::errors::PreconditionNotMet(
+                        "The use_gpu field should be valid"));

   platform::Place place;
@@ -40,7 +44,9 @@ void IrParamsSyncAmongDevicesPass::RunImpl(Argument *argument) {
   LOG(INFO) << "Sync params from CPU to GPU";

-  PADDLE_ENFORCE(argument->gpu_device_id_valid());
+  PADDLE_ENFORCE_EQ(argument->gpu_device_id_valid(), true,
+                    platform::errors::PreconditionNotMet(
+                        "The gpu_device_id field should be valid"));
   place = platform::CUDAPlace(argument->gpu_device_id());

   auto *scope = argument->scope_ptr();
@@ -56,7 +62,8 @@ void IrParamsSyncAmongDevicesPass::RunImpl(Argument *argument) {
       continue;
     }
     auto *var = scope->FindLocalVar(var_name);
-    PADDLE_ENFORCE(var != nullptr);
+    PADDLE_ENFORCE_NOT_NULL(var, platform::errors::PreconditionNotMet(
+                                     "The var should not be nullptr"));
     if (var->IsType<framework::LoDTensor>() ||
         var->IsType<framework::Tensor>()) {
       auto *t = var->GetMutable<framework::LoDTensor>();
...
@@ -224,7 +224,9 @@ void UpdateOpDescsByReuse(
       // modify the graph
       for (auto input_node : node->inputs) {
-        PADDLE_ENFORCE(input_node->IsVar());
+        PADDLE_ENFORCE_EQ(input_node->IsVar(), true,
+                          platform::errors::PreconditionNotMet(
+                              "The input node should be a variable."));
         std::string input_node_name = input_node->Name();
         if (reuse_table.count(input_node_name) &&
             reuse_table.at(input_node_name) != input_node_name) {
@@ -246,7 +248,9 @@ void UpdateOpDescsByReuse(
       // modify the graph
       for (auto out_node : node->outputs) {
-        PADDLE_ENFORCE(out_node->IsVar());
+        PADDLE_ENFORCE_EQ(out_node->IsVar(), true,
+                          platform::errors::PreconditionNotMet(
+                              "The output node should be a variable."));
         std::string out_node_name = out_node->Name();
         if (reuse_table.count(out_node_name) &&
             reuse_table.at(out_node_name) != out_node_name) {
...
@@ -230,7 +230,8 @@ void AnalysisConfig::EnableMkldnnBfloat16() {
 MkldnnQuantizerConfig *AnalysisConfig::mkldnn_quantizer_config() const {
   PADDLE_ENFORCE_NOT_NULL(mkldnn_quantizer_config_,
-                          "MkldnnQuantizer was not enabled yet.");
+                          platform::errors::PreconditionNotMet(
+                              "MkldnnQuantizer was not enabled yet."));
   return mkldnn_quantizer_config_.get();
 }
...
@@ -169,7 +169,8 @@ bool AnalysisPredictor::PrepareScope(
   if (parent_scope) {
     PADDLE_ENFORCE_NOT_NULL(
         parent_scope,
-        "Both program and parent_scope should be set in Clone mode.");
+        platform::errors::PreconditionNotMet(
+            "Both program and parent_scope should be set in Clone mode."));
     scope_ = parent_scope;
     status_is_cloned_ = true;
   } else {
@@ -235,7 +236,9 @@ bool AnalysisPredictor::PrepareExecutor() {
   executor_->Prepare(sub_scope_, *inference_program_, 0,
                      config_.use_feed_fetch_ops_);

-  PADDLE_ENFORCE_NOT_NULL(sub_scope_);
+  PADDLE_ENFORCE_NOT_NULL(sub_scope_,
+                          platform::errors::PreconditionNotMet(
+                              "The sub_scope should not be nullptr."));

   return true;
 }
@@ -297,7 +300,8 @@ bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
   timer.tic();
   // set feed variable
   framework::Scope *scope = sub_scope_ ? sub_scope_ : scope_.get();
-  PADDLE_ENFORCE_NOT_NULL(scope, "The scope should not be nullptr.");
+  PADDLE_ENFORCE_NOT_NULL(scope, platform::errors::PreconditionNotMet(
+                                     "The scope should not be nullptr."));
   if (!SetFeed(inputs, scope)) {
     LOG(ERROR) << "fail to set feed";
     return false;
@@ -399,7 +403,11 @@ bool AnalysisPredictor::GetFetch(std::vector<PaddleTensor> *outputs,
   outputs->resize(fetches_.size());
   for (size_t i = 0; i < fetches_.size(); ++i) {
     int idx = BOOST_GET_CONST(int, fetches_[i]->GetAttr("col"));
-    PADDLE_ENFORCE((size_t)idx == i);
+    PADDLE_ENFORCE_EQ(
+        static_cast<size_t>(idx), i,
+        platform::errors::InvalidArgument(
+            "Fetch op's col attr(%d) should be equal to the index(%d)", idx,
+            i));
     framework::FetchType &fetch_var =
         framework::GetFetchVariable(*scope, "fetch", idx);
     auto &fetch = BOOST_GET(framework::LoDTensor, fetch_var);
@@ -435,10 +443,12 @@ void AnalysisPredictor::PrepareArgument() {
   if (!config_.model_dir().empty()) {
     argument_.SetModelDir(config_.model_dir());
   } else {
-    PADDLE_ENFORCE(
-        !config_.params_file().empty(),
-        "Either model_dir or (param_file, prog_file) should be set.");
-    PADDLE_ENFORCE(!config_.prog_file().empty());
+    PADDLE_ENFORCE_EQ(config_.params_file().empty(), false,
+                      platform::errors::PreconditionNotMet(
+                          "Either model_dir or param_file should be set."));
+    PADDLE_ENFORCE_EQ(config_.prog_file().empty(), false,
+                      platform::errors::PreconditionNotMet(
+                          "Either model_dir or prog_file should be set."));
     std::string dir = inference::analysis::GetDirRoot(config_.prog_file());

     argument_.SetModelProgramPath(config_.prog_file());
@@ -503,7 +513,9 @@ void AnalysisPredictor::OptimizeInferenceProgram() {
   PrepareArgument();
   Analyzer().Run(&argument_);

-  PADDLE_ENFORCE(argument_.scope_valid());
+  PADDLE_ENFORCE_EQ(
+      argument_.scope_valid(), true,
+      platform::errors::InvalidArgument("The argument scope should be valid."));
   VLOG(5) << "to prepare executor";
   ARGUMENT_CHECK_FIELD((&argument_), ir_analyzed_program);
   inference_program_.reset(
@@ -525,8 +537,10 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
     FLAGS_minloglevel = 2;  // GLOG_ERROR
   }
   VLOG(3) << "create AnalysisConfig";
-  PADDLE_ENFORCE(config.is_valid(),
-                 "Note: Each config can only be used for one predictor.");
+  PADDLE_ENFORCE_EQ(
+      config.is_valid(), true,
+      platform::errors::InvalidArgument(
+          "Note: Each config can only be used for one predictor."));

   if (config.use_gpu()) {
     static std::once_flag gflags_initialized;
@@ -623,7 +637,9 @@ bool AnalysisPredictor::MkldnnQuantize() {
 }

 void AnalysisPredictor::PrepareFeedFetch() {
-  PADDLE_ENFORCE_NOT_NULL(sub_scope_);
+  PADDLE_ENFORCE_NOT_NULL(sub_scope_,
+                          platform::errors::InvalidArgument(
+                              "The sub_scope should not be nullptr."));
   CreateFeedFetchVar(sub_scope_);
   for (auto *op : inference_program_->Block(0).AllOps()) {
     if (op->Type() == "feed") {
@@ -646,7 +662,8 @@ void AnalysisPredictor::PrepareFeedFetch() {
 }

 void AnalysisPredictor::CreateFeedFetchVar(framework::Scope *scope) {
-  PADDLE_ENFORCE_NOT_NULL(scope);
+  PADDLE_ENFORCE_NOT_NULL(scope, platform::errors::InvalidArgument(
+                                     "The scope should not be nullptr."));
   auto *var = scope->Var("feed");
   var->GetMutable<framework::FeedList>();
   var = scope->Var("fetch");
@@ -667,7 +684,8 @@ AnalysisPredictor::GetInputTensorShape() {
   std::vector<std::string> names = GetInputNames();
   for (std::string name : names) {
     auto *var = inference_program_->Block(0).FindVar(name);
-    PADDLE_ENFORCE_NOT_NULL(var, "input %s does not exist.", name);
+    PADDLE_ENFORCE_NOT_NULL(var, platform::errors::PreconditionNotMet(
+                                     "Input %s does not exist.", name));
     input_shapes[name] = var->GetShape();
   }
   return input_shapes;
@@ -683,7 +701,11 @@ std::vector<std::string> AnalysisPredictor::GetOutputNames() {
 std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetInputTensor(
     const std::string &name) {
-  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
+  PADDLE_ENFORCE_NOT_NULL(
+      executor_->scope()->FindVar(name),
+      platform::errors::PreconditionNotMet(
+          "The variable named %s is not found in the scope of the exector.",
+          name));
   std::unique_ptr<ZeroCopyTensor> res(
       new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
   res->input_or_output_ = true;
@@ -700,7 +722,11 @@ std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetInputTensor(
 std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetOutputTensor(
     const std::string &name) {
-  PADDLE_ENFORCE(executor_->scope()->FindVar(name), "no name called %s", name);
+  PADDLE_ENFORCE_NOT_NULL(
+      executor_->scope()->FindVar(name),
+      platform::errors::PreconditionNotMet(
+          "he variable named %s is not found in the scope of the exector.",
+          name));
   std::unique_ptr<ZeroCopyTensor> res(
       new ZeroCopyTensor(static_cast<void *>(executor_->scope())));
   res->input_or_output_ = false;
@@ -761,8 +787,11 @@ bool AnalysisPredictor::LoadProgramDesc() {
   std::string pb_content;
   // Read binary
   std::ifstream fin(filename, std::ios::in | std::ios::binary);
-  PADDLE_ENFORCE(static_cast<bool>(fin.is_open()), "Cannot open file %s",
-                 filename);
+  PADDLE_ENFORCE_EQ(
+      static_cast<bool>(fin.is_open()), true,
+      platform::errors::NotFound(
+          "Cannot open file %s, please confirm whether the file is normal.",
+          filename));
   fin.seekg(0, std::ios::end);
   pb_content.resize(fin.tellg());
   fin.seekg(0, std::ios::beg);
@@ -779,7 +808,8 @@ bool AnalysisPredictor::LoadProgramDesc() {
 bool AnalysisPredictor::LoadParameters() {
   PADDLE_ENFORCE_NOT_NULL(inference_program_.get(),
-                          "The inference program should be loaded first.");
+                          platform::errors::PreconditionNotMet(
+                              "The inference program should be loaded first."));

   const auto &global_block = inference_program_->MutableBlock(0);
@@ -855,8 +885,9 @@ void AnalysisPredictor::ClearIntermediateTensor() {
 #if PADDLE_WITH_TENSORRT
 bool AnalysisPredictor::SaveTrtCalibToDisk() {
-  PADDLE_ENFORCE(config_.tensorrt_engine_enabled(),
-                 "This func can be invoked only in trt mode");
+  PADDLE_ENFORCE_EQ(config_.tensorrt_engine_enabled(), true,
+                    platform::errors::PreconditionNotMet(
+                        "This func can be invoked only in trt mode"));
   auto &block = inference_program_->Block(0);
   for (auto &op_desc : block.AllOps()) {
     if (op_desc->Type() == "tensorrt_engine") {
...
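Editor's note for API users reading the predictor hunks above: these checks still fail by throwing, so the richer messages surface through the usual exception path. A minimal usage sketch, assuming the paddle::platform::EnforceNotMet exception type raised by PADDLE_ENFORCE and a deliberately wrong tensor name ("wrong_input" is made up for illustration):

    auto predictor = paddle::CreatePaddlePredictor(config);
    try {
      // Asking for a tensor that is not in the scope now reports
      // "The variable named wrong_input is not found in the scope of the exector."
      auto input = predictor->GetInputTensor("wrong_input");
    } catch (const paddle::platform::EnforceNotMet &e) {
      LOG(ERROR) << e.what();
    }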
@@ -62,9 +62,9 @@ PaddleBuf &PaddleBuf::operator=(const PaddleBuf &other) {
     if (other.length() && other.data())
       memcpy(data_, other.data(), other.length());
     else if (other.length())
-      PADDLE_THROW(
-          "Invalid argument, null pointer data with length %u is passed",
-          other.length());
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "Invalid argument, null pointer data with length %u is passed",
+          other.length()));

     length_ = other.length();
     memory_owned_ = true;
@@ -92,7 +92,8 @@ void PaddleBuf::Resize(size_t length) {
     length_ = length;
     memory_owned_ = true;
   } else {
-    PADDLE_THROW("The memory is allocated externally, can not Resized");
+    PADDLE_THROW(platform::errors::PreconditionNotMet(
+        "The memory is allocated externally, can not Resized"));
   }
 }
@@ -105,7 +106,11 @@ void PaddleBuf::Reset(void *data, size_t length) {
 void PaddleBuf::Free() {
   if (memory_owned_ && data_) {
-    PADDLE_ENFORCE_GT(length_, 0UL);
+    PADDLE_ENFORCE_GT(
+        length_, 0UL,
+        platform::errors::PreconditionNotMet(
+            "The memory used in PaddleBuf %d should be greater than 0",
+            length_));
     delete[] static_cast<char *>(data_);
     data_ = nullptr;
     length_ = 0;
...
@@ -87,7 +87,9 @@ bool NativePaddlePredictor::Init(
   if (parent_scope) {
     scope_ = parent_scope;
     sub_scope_ = &(parent_scope->NewScope());
-    PADDLE_ENFORCE_NOT_NULL(sub_scope_, "create sub scope fail");
+    PADDLE_ENFORCE_NOT_NULL(sub_scope_,
+                            platform::errors::PreconditionNotMet(
+                                "The sub_scope should not be nullptr."));
   } else {
     paddle::framework::InitDevices(false);
     scope_.reset(new paddle::framework::Scope());
@@ -182,7 +184,10 @@ std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
   std::unique_ptr<PaddlePredictor> cls(new NativePaddlePredictor(config_));
   // Hot fix the bug that result diff in multi-thread.
   // TODO(Superjomn) re-implement a real clone here.
-  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<NativePaddlePredictor *>(cls.get()));
+  PADDLE_ENFORCE_NOT_NULL(
+      dynamic_cast<NativePaddlePredictor *>(cls.get()),
+      platform::errors::PreconditionNotMet(
+          "Dynamic_cast from PaddlePredictor to NativePaddlePredictor failed"));
   if (!dynamic_cast<NativePaddlePredictor *>(cls.get())->Init(nullptr)) {
     LOG(ERROR) << "fail to call Init";
     return nullptr;
@@ -224,8 +229,13 @@ bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
       return false;
     }

-    PADDLE_ENFORCE_NOT_NULL(input_ptr);
-    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());
+    PADDLE_ENFORCE_NOT_NULL(input_ptr,
+                            platform::errors::InvalidArgument(
+                                "The input_ptr should not be nullptr."));
+    PADDLE_ENFORCE_NOT_NULL(
+        inputs[i].data.data(),
+        platform::errors::InvalidArgument(
+            "The data of input tensor should not be null."));
     if (platform::is_cpu_place(place_)) {
       // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
       std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
@@ -241,7 +251,8 @@ bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                    platform::CPUPlace(), inputs[i].data.data(),
                    inputs[i].data.length(), dev_ctx->stream());
 #else
-      PADDLE_THROW("Not compile with CUDA, should not reach here.");
+      PADDLE_THROW(platform::errors::Unavailable(
+          "Not compile with CUDA, should not reach here."));
 #endif
     }
@@ -287,7 +298,11 @@ bool NativePaddlePredictor::GetFetch(std::vector<PaddleTensor> *outputs,
   outputs->resize(fetchs_.size());
   for (size_t i = 0; i < fetchs_.size(); ++i) {
     int idx = BOOST_GET_CONST(int, fetchs_[i]->GetAttr("col"));
-    PADDLE_ENFORCE((size_t)idx == i);
+    PADDLE_ENFORCE_EQ(
+        static_cast<size_t>(idx), i,
+        platform::errors::InvalidArgument(
+            "Fetch op's col attr(%d) should be equal to the index(%d)", idx,
+            i));
     framework::FetchType &fetch_var =
         framework::GetFetchVariable(*scope, "fetch", idx);
     auto fetch = BOOST_GET_CONST(framework::LoDTensor, fetch_var);
@@ -318,10 +333,15 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
   VLOG(3) << "create NativePaddlePredictor";
   if (config.use_gpu) {
     // 1. GPU memory
-    PADDLE_ENFORCE_GE(
-        config.fraction_of_gpu_memory, 0.f,
-        "fraction_of_gpu_memory in the config should be set to range (0., 1.]");
-    PADDLE_ENFORCE_GE(config.device, 0, "Invalid device id %d", config.device);
+    PADDLE_ENFORCE_GE(config.fraction_of_gpu_memory, 0.f,
+                      platform::errors::InvalidArgument(
+                          "fraction_of_gpu_memory in the config should be set "
+                          "to range (0., 1.]"));
+    PADDLE_ENFORCE_GE(config.device, 0,
+                      platform::errors::PreconditionNotMet(
+                          "Invalid device id %d, the device id should be "
+                          "greater than or equal to 0.",
+                          config.device));
     std::vector<std::string> flags;
     if (config.fraction_of_gpu_memory >= 0.0f ||
         config.fraction_of_gpu_memory <= 0.95f) {
@@ -336,7 +356,9 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
   std::unique_ptr<PaddlePredictor> predictor(new NativePaddlePredictor(config));
   PADDLE_ENFORCE_NOT_NULL(
-      dynamic_cast<NativePaddlePredictor *>(predictor.get()));
+      dynamic_cast<NativePaddlePredictor *>(predictor.get()),
+      platform::errors::PreconditionNotMet(
+          "Dynamic_cast from PaddlePredictor to NativePaddlePredictor failed"));
   if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init(nullptr)) {
     return nullptr;
   }
...
@@ -112,16 +112,19 @@ static T convert(const std::string &item,
     std::string message =
         "invalid_argument exception when try to convert : " + item;
     LOG(ERROR) << message;
-    PADDLE_THROW(message);
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "invalid_argument exception when try to convert %s.", item));
   } catch (std::out_of_range &e) {
     std::string message =
         "out_of_range exception when try to convert : " + item;
     LOG(ERROR) << message;
-    PADDLE_THROW(message);
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "out_of_range exception when try to convert %s.", item));
   } catch (...) {
     std::string message = "unexpected exception when try to convert " + item;
     LOG(ERROR) << message;
-    PADDLE_THROW(message);
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "unexpected exception when try to convert %s.", item));
   }
   return res;
 }
@@ -353,7 +356,8 @@ static void PrintTime(int batch_size, int repeat, int num_threads, int tid,
                       double batch_latency, int epoch = 1,
                       const framework::proto::VarType::Type data_type =
                           framework::proto::VarType::FP32) {
-  PADDLE_ENFORCE_GT(batch_size, 0, "Non-positive batch size.");
+  PADDLE_ENFORCE_GT(batch_size, 0, platform::errors::InvalidArgument(
+                                       "Non-positive batch size."));
   double sample_latency = batch_latency / batch_size;
   LOG(INFO) << "====== threads: " << num_threads << ", thread id: " << tid
             << " ======";
...
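Editor's note: one detail in the convert() hunk above that is easy to miss is that the old code built the message by string concatenation and handed the resulting std::string straight to PADDLE_THROW, while the new code passes a typed error object and lets it do printf-style formatting. A condensed before/after sketch of just that difference, using the lines from the hunk:

    // Before: concatenate, then throw the bare string.
    std::string message = "out_of_range exception when try to convert : " + item;
    PADDLE_THROW(message);

    // After: the error object formats the message itself.
    PADDLE_THROW(platform::errors::InvalidArgument(
        "out_of_range exception when try to convert %s.", item));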
@@ -62,9 +62,12 @@ bool AnalysisPredictor::MkldnnQuantizer::CalculateScales() {
         if (scales_.find(var_name) != scales_.end()) continue;

         auto* var = predictor_.sub_scope_->FindVar(var_name);
-        PADDLE_ENFORCE(var, "%s is not in the scope", var_name);
-        PADDLE_ENFORCE(var->IsType<LoDTensor>(),
-                       "Only support lod tensor now.");
+        PADDLE_ENFORCE_NOT_NULL(var,
+                                platform::errors::PreconditionNotMet(
+                                    "%s is not in the scope", var_name));
+        PADDLE_ENFORCE_EQ(var->IsType<LoDTensor>(), true,
+                          platform::errors::PreconditionNotMet(
+                              "Only support lod tensor now."));
         LoDTensor* var_tensor = var->GetMutable<LoDTensor>();

         // force unsigned type if already know it
@@ -82,9 +85,11 @@ bool AnalysisPredictor::MkldnnQuantizer::CalculateScales() {
         } else if (op->Type() == "transpose2" ||
                    op->Type() == "reshape2" || op->Type() == "pool2d") {
           auto input_var_name = op->Input("X")[0];
-          PADDLE_ENFORCE(scales_.find(input_var_name) != scales_.end(),
-                         "Input scales must be calculated before the "
-                         "output scales to infer if output is unsigned.");
+          PADDLE_ENFORCE_NE(
+              scales_.find(input_var_name), scales_.end(),
+              platform::errors::PreconditionNotMet(
+                  "Input scales must be calculated before the "
+                  "output scales to infer if output is unsigned."));
           if (scales_.find(input_var_name) != scales_.end()) {
             scales_[var_name] = scales_[input_var_name];
           }
@@ -94,10 +99,11 @@ bool AnalysisPredictor::MkldnnQuantizer::CalculateScales() {
           is_unsigned = true;
           double min_scale = std::numeric_limits<double>::max();
           for (auto input_var_name : op->Input("X")) {
-            PADDLE_ENFORCE(
-                scales_.find(input_var_name) != scales_.end(),
-                "Input scales must be calculated before the "
-                "output scales to infer if output is unsigned.");
+            PADDLE_ENFORCE_NE(
+                scales_.find(input_var_name), scales_.end(),
+                platform::errors::PreconditionNotMet(
+                    "Input scales must be calculated before the "
+                    "output scales to infer if output is unsigned."));
             is_unsigned = is_unsigned && scales_[input_var_name].first;
             min_scale = std::min(
                 min_scale,
@@ -132,11 +138,12 @@ void AnalysisPredictor::MkldnnQuantizer::CalculateSingleScale(
   auto rule = qconfig_->scale_algo(op_type_name, conn_name);
   if (rule == ScaleAlgo::NONE) return;

-  PADDLE_ENFORCE(
-      var_tensor.numel() > 0,
-      "MkldnnQuantizer: LoDTensor of variable %s for quantization of op "
-      "%s of connection %s should not be empty.",
-      var_name, op_type_name, conn_name);
+  PADDLE_ENFORCE_GT(
+      var_tensor.numel(), 0,
+      platform::errors::InvalidArgument(
+          "MkldnnQuantizer: LoDTensor of variable %s for quantization of op "
+          "%s of connection %s should not be empty.",
+          var_name, op_type_name, conn_name));

   switch (rule) {
     case ScaleAlgo::MAX:
@@ -205,10 +212,11 @@ AnalysisPredictor::MkldnnQuantizer::GetKLScalingFactor(
   float min_val = eigen_tensor.minCoeff();
   bool is_positive = min_val >= 0.0f;
   if (is_unsigned)
-    PADDLE_ENFORCE(
-        is_positive,
-        "Tensor is claimed to be unsigned, but its min value (%f) is < 0.0",
-        min_val);
+    PADDLE_ENFORCE_EQ(
+        is_positive, true,
+        platform::errors::InvalidArgument(
+            "Tensor is claimed to be unsigned, but its min value (%f) is < 0.0",
+            min_val));

   int num_quantized_bins = 255;
@@ -316,10 +324,11 @@ AnalysisPredictor::MkldnnQuantizer::GetMaxScalingFactor(
   float max_abs = eigen_tensor.abs().maxCoeff();
   float min_val = eigen_tensor.minCoeff();
   if (is_unsigned)
-    PADDLE_ENFORCE(
-        min_val >= 0.0f,
-        "Tensor is claimed to be unsigned, but its min value (%f) is < 0.0",
-        min_val);
+    PADDLE_ENFORCE_GE(
+        min_val, 0.0f,
+        platform::errors::InvalidArgument(
+            "Tensor is claimed to be unsigned, but its min value (%f) is < 0.0",
+            min_val));

   LoDTensor scale_tensor = CreateScaleTensor();
   scale_tensor.data<double>()[0] = 1.0 / max_abs;
@@ -330,16 +339,19 @@ AnalysisPredictor::MkldnnQuantizer::GetMaxScalingFactor(
 std::pair<bool, LoDTensor>
 AnalysisPredictor::MkldnnQuantizer::GetMaxChScalingFactor(
     const LoDTensor& var_tensor, bool is_unsigned, bool is_transposed) const {
-  PADDLE_ENFORCE(var_tensor.dims().size() > 0, "Tensor dimension is empty.");
+  PADDLE_ENFORCE_GT(
+      var_tensor.dims().size(), 0,
+      platform::errors::InvalidArgument("Tensor dimension is empty."));

   ConstEigenVectorArrayMap eigen_tensor{var_tensor.data<float>(),
                                         var_tensor.numel(), 1};
   float min_val = eigen_tensor.minCoeff();
   if (is_unsigned)
-    PADDLE_ENFORCE(
-        min_val >= 0.0f,
-        "Tensor is claimed to be unsigned, but its min value (%f) is < 0.0",
-        min_val);
+    PADDLE_ENFORCE_GE(
+        min_val, 0.0f,
+        platform::errors::InvalidArgument(
+            "Tensor is claimed to be unsigned, but its min value (%f) is < 0.0",
+            min_val));

   auto dims = var_tensor.dims();
   constexpr int num_col_dims = 1;
@@ -367,17 +379,19 @@ AnalysisPredictor::MkldnnQuantizer::Histogram(
     const framework::LoDTensor& var_tensor, float min_val, float max_val,
     size_t num_bins) const {
   PADDLE_ENFORCE_GT(num_bins, 0,
-                    "MkldnnQuantizer: To calculate Histogram, num_bins (" +
-                        std::to_string(num_bins) + ") must be positive.");
-  PADDLE_ENFORCE_GT(
-      var_tensor.numel(), 0,
-      "MkldnnQuantizer: To calculate Histogram, the tensor must not be empty.");
-  PADDLE_ENFORCE(max_val >= min_val,
-                 "MkldnnQuantizer: To calculate Histogram, max_val (" +
-                     std::to_string(max_val) +
-                     ") must be greater or equal"
-                     "to min_val (" +
-                     std::to_string(min_val) + ").");
+                    platform::errors::InvalidArgument(
+                        "MkldnnQuantizer: To calculate Histogram, num_bins (" +
+                        std::to_string(num_bins) + ") must be positive."));
+  PADDLE_ENFORCE_GT(var_tensor.numel(), 0,
+                    platform::errors::InvalidArgument(
+                        "MkldnnQuantizer: To calculate Histogram, the tensor "
+                        "must not be empty."));
+  PADDLE_ENFORCE_GE(max_val, min_val,
+                    platform::errors::InvalidArgument(
+                        "MkldnnQuantizer: To calculate Histogram, max_val (" +
+                        std::to_string(max_val) + ") must be greater or equal"
+                        "to min_val (" +
+                        std::to_string(min_val) + ")."));

   ConstEigenVectorArrayMap eigen_tensor{var_tensor.data<float>(),
                                         var_tensor.numel(), 1};
   auto bin_width = std::abs(max_val - min_val) / num_bins;
@@ -407,7 +421,8 @@ void AnalysisPredictor::MkldnnQuantizer::PrepareArgument() const {
   auto graph = std::unique_ptr<Graph>(new Graph(arg.main_program()));
   arg.SetMainGraph(graph.release());
   auto* scope_ptr = arg.scope_ptr();
-  PADDLE_ENFORCE(scope_ptr);
+  PADDLE_ENFORCE_NOT_NULL(scope_ptr, platform::errors::PreconditionNotMet(
+                                         "The scope should not be nullptr."));
   arg.main_graph().SetNotOwned(framework::ir::kParamScopeAttr, scope_ptr);

   auto* builder = predictor_.config_.pass_builder();
@@ -441,7 +456,9 @@ bool AnalysisPredictor::MkldnnQuantizer::RunQuantizePasses() const {
   PrepareArgument();
   auto& arg = predictor_.argument_;
   Analyzer().Run(&arg);
-  PADDLE_ENFORCE(arg.scope_valid());
+  PADDLE_ENFORCE_EQ(
+      arg.scope_valid(), true,
+      platform::errors::PreconditionNotMet("The scope should be valid."));
   VLOG(5) << "to prepare executor";
   ARGUMENT_CHECK_FIELD((&arg), ir_analyzed_program);
   predictor_.inference_program_.reset(
@@ -456,7 +473,8 @@ bool AnalysisPredictor::MkldnnQuantizer::RunWarmup() const {
   VLOG(3) << "Predictor: run a quantization warmup iteration";
   auto warmup_data = qconfig_->warmup_data();
   PADDLE_ENFORCE_NOT_NULL(warmup_data,
-                          "Warmup data cannot be NULL in the config.");
+                          platform::errors::PreconditionNotMet(
+                              "Warmup data cannot be NULL in the config."));
   PrettyLogH1("--- Running warmup iteration for quantization");

   // Run the inference program
@@ -469,7 +487,10 @@ bool AnalysisPredictor::MkldnnQuantizer::RunWarmup() const {
 float AnalysisPredictor::MkldnnQuantizer::SafeEntropy(
     std::vector<int> reference_distr_P, int P_sum,
     std::vector<int> candidate_distr_Q, int Q_sum) const {
-  PADDLE_ENFORCE_EQ(reference_distr_P.size(), candidate_distr_Q.size());
+  PADDLE_ENFORCE_EQ(reference_distr_P.size(), candidate_distr_Q.size(),
+                    platform::errors::InvalidArgument(
+                        "The P size %d should be equal to Q size %d",
+                        reference_distr_P.size(), candidate_distr_Q.size()));
   float tmp_sum1 = 0;
   float tmp_sum2 = 0;
   for (size_t idx = 0; idx < reference_distr_P.size(); idx++) {
@@ -479,10 +500,11 @@ float AnalysisPredictor::MkldnnQuantizer::SafeEntropy(
         tmp_sum1 += 0;
         tmp_sum2 += 0;
       } else {
-        PADDLE_ENFORCE(q_idx != 0, "MkldnnQuantizer: Fatal error!, idx = " +
-                                       std::to_string(idx) +
-                                       " qindex = 0! p_idx = " +
-                                       std::to_string(p_idx));
+        PADDLE_ENFORCE_NE(
+            q_idx, 0,
+            platform::errors::PreconditionNotMet(
+                "MkldnnQuantizer: Fatal error!, idx = " + std::to_string(idx) +
+                " qindex = 0! p_idx = " + std::to_string(p_idx)));
       }
       tmp_sum1 += p_idx * (log(Q_sum * p_idx));
       tmp_sum2 += p_idx * (log(P_sum * q_idx));
...
@@ -163,7 +163,8 @@ void TestInference(const std::string& dirname,
     // int device_id = place.GetDeviceId();
     paddle::platform::SetDeviceId(0);
 #else
-    PADDLE_THROW("'CUDAPlace' is not supported in CPU only device.");
+    PADDLE_THROW(paddle::platform::errors::Unavailable(
+        "'CUDAPlace' is not supported in CPU only device."));
 #endif
   }
...
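Editor's note: the last hunk spells the error type fully qualified, paddle::platform::errors::Unavailable, rather than platform::errors::Unavailable as in the other files. A likely reason (an assumption, since the surrounding file is not shown here) is that the test helper's code sits outside a namespace paddle block, so the shorter spelling would not resolve there. A sketch of the two spellings side by side:

    // Inside namespace paddle { ... } (most files in this commit):
    PADDLE_THROW(platform::errors::Unavailable(
        "Not compile with CUDA, should not reach here."));

    // Outside namespace paddle (e.g. the test helper above):
    PADDLE_THROW(paddle::platform::errors::Unavailable(
        "'CUDAPlace' is not supported in CPU only device."));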