Commit e5b4643a authored by Tao Luo

add profile_mkldnn test

test=develop
Parent 7d680be5
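The diff below removes the _use_mkldnn default argument from SetConfig and moves the switch into small profile()/compare() helpers that take use_mkldnn explicitly, so each TEST body becomes a one-liner and the MKLDNN variants sit behind a PADDLE_WITH_MKLDNN guard. A minimal, self-contained sketch of that pattern, using plain googletest and a hypothetical Config struct standing in for Paddle's AnalysisConfig:

#include <gtest/gtest.h>

struct Config {
  bool use_mkldnn = false;  // stands in for AnalysisConfig::_use_mkldnn
};

void SetConfig(Config *cfg) {
  cfg->use_mkldnn = false;  // common defaults live in one place
}

// Shared test body: configure once, flip the MKLDNN switch per call site.
void profile(bool use_mkldnn = false) {
  Config cfg;
  SetConfig(&cfg);
  cfg.use_mkldnn = use_mkldnn;
  // ... run inference with cfg and record timings here ...
  EXPECT_EQ(cfg.use_mkldnn, use_mkldnn);
}

TEST(Example, profile) { profile(); }

#ifdef PADDLE_WITH_MKLDNN  // variant compiled only when MKLDNN is built in
TEST(Example, profile_mkldnn) { profile(true /* use_mkldnn */); }
#endif

Keeping the flag out of SetConfig means one config routine serves every test, and each variant states its MKLDNN choice at the call site.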
@@ -20,14 +20,13 @@ namespace paddle {
 namespace inference {
 namespace analysis {
 
-void SetConfig(AnalysisConfig *cfg, bool _use_mkldnn = FLAGS_use_MKLDNN) {
+void SetConfig(AnalysisConfig *cfg) {
   cfg->param_file = FLAGS_infer_model + "/params";
   cfg->prog_file = FLAGS_infer_model + "/model";
   cfg->use_gpu = false;
   cfg->device = 0;
   cfg->enable_ir_optim = true;
   cfg->specify_input_name = true;
-  cfg->_use_mkldnn = _use_mkldnn;
 }
 
 void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
@@ -53,9 +52,10 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
 }
 
 // Easy for profiling independently.
-TEST(Analyzer_resnet50, profile) {
+void profile(bool use_mkldnn = false) {
   AnalysisConfig cfg;
   SetConfig(&cfg);
+  cfg._use_mkldnn = use_mkldnn;
 
   std::vector<PaddleTensor> outputs;
   std::vector<std::vector<PaddleTensor>> input_slots_all;
@@ -70,6 +70,11 @@ TEST(Analyzer_resnet50, profile) {
 }
 }
 
+TEST(Analyzer_resnet50, profile) { profile(); }
+
+#ifdef PADDLE_WITH_MKLDNN
+TEST(Analyzer_resnet50, profile_mkldnn) { profile(true /* use_mkldnn */); }
+#endif
 
 // Check the fuse status
 TEST(Analyzer_resnet50, fuse_statis) {
   AnalysisConfig cfg;
@@ -83,25 +88,19 @@ TEST(Analyzer_resnet50, fuse_statis) {
 }
 
 // Compare result of NativeConfig and AnalysisConfig
-TEST(Analyzer_resnet50, compare) {
+void compare(bool use_mkldnn = false) {
   AnalysisConfig cfg;
   SetConfig(&cfg);
+  cfg._use_mkldnn = use_mkldnn;
 
   std::vector<std::vector<PaddleTensor>> input_slots_all;
   SetInput(&input_slots_all);
   CompareNativeAndAnalysis(cfg, input_slots_all);
 }
 
-// Compare result of NativeConfig and AnalysisConfig with MKLDNN
+TEST(Analyzer_resnet50, compare) { compare(); }
+
 #ifdef PADDLE_WITH_MKLDNN
-TEST(Analyzer_resnet50, compare_mkldnn) {
-  AnalysisConfig cfg;
-  SetConfig(&cfg, true);
-
-  std::vector<std::vector<PaddleTensor>> input_slots_all;
-  SetInput(&input_slots_all);
-  CompareNativeAndAnalysis(cfg, input_slots_all);
-}
+TEST(Analyzer_resnet50, compare_mkldnn) { compare(true /* use_mkldnn */); }
 #endif
 
 }  // namespace analysis
...
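The compare tests keep the same contract as before: identical inputs must produce identical outputs under NativeConfig and AnalysisConfig. A hedged sketch of that element-wise check, with stub runners standing in for the two engines (these are not Paddle's APIs):

#include <cassert>
#include <cmath>
#include <vector>

// Stub runners; in the real test these are the native and analysis engines.
std::vector<float> RunNative(const std::vector<float> &in) { return in; }
std::vector<float> RunAnalysis(const std::vector<float> &in) { return in; }

int main() {
  std::vector<float> input = {0.f, 1.f, 2.f};
  auto a = RunNative(input);
  auto b = RunAnalysis(input);
  assert(a.size() == b.size());
  for (size_t i = 0; i < a.size(); ++i) {
    assert(std::fabs(a[i] - b[i]) <= 1e-5f);  // tolerance for float math
  }
  return 0;
}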
@@ -50,7 +50,7 @@ Record ProcessALine(const std::string &line) {
   return record;
 }
 
-void SetConfig(AnalysisConfig *cfg, bool _use_mkldnn = FLAGS_use_MKLDNN) {
+void SetConfig(AnalysisConfig *cfg) {
   cfg->param_file = FLAGS_infer_model + "/__params__";
   cfg->prog_file = FLAGS_infer_model + "/__model__";
   cfg->use_gpu = false;
@@ -59,7 +59,6 @@ void SetConfig(AnalysisConfig *cfg, bool _use_mkldnn = FLAGS_use_MKLDNN) {
   cfg->specify_input_name = true;
   // TODO(TJ): fix fusion gru
   cfg->ir_passes.push_back("fc_gru_fuse_pass");
-  cfg->_use_mkldnn = _use_mkldnn;
 }
 
 void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
@@ -82,9 +81,10 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
 // Easy for profiling independently.
 // ocr, mobilenet and se_resnext50
-TEST(Analyzer_vis, profile) {
+void profile(bool use_mkldnn = false) {
   AnalysisConfig cfg;
   SetConfig(&cfg);
+  cfg._use_mkldnn = use_mkldnn;
 
   std::vector<PaddleTensor> outputs;
   std::vector<std::vector<PaddleTensor>> input_slots_all;
@@ -106,6 +106,12 @@ TEST(Analyzer_vis, profile) {
 }
 }
 
+TEST(Analyzer_vis, profile) { profile(); }
+
+#ifdef PADDLE_WITH_MKLDNN
+TEST(Analyzer_vis, profile_mkldnn) { profile(true /* use_mkldnn */); }
+#endif
+
 // Check the fuse status
 TEST(Analyzer_vis, fuse_statis) {
   AnalysisConfig cfg;
@@ -116,25 +122,19 @@ TEST(Analyzer_vis, fuse_statis) {
 }
 
 // Compare result of NativeConfig and AnalysisConfig
-TEST(Analyzer_vis, compare) {
+void compare(bool use_mkldnn = false) {
   AnalysisConfig cfg;
   SetConfig(&cfg);
+  cfg._use_mkldnn = use_mkldnn;
 
   std::vector<std::vector<PaddleTensor>> input_slots_all;
   SetInput(&input_slots_all);
   CompareNativeAndAnalysis(cfg, input_slots_all);
 }
 
-// Compare result of NativeConfig and AnalysisConfig with MKLDNN
+TEST(Analyzer_vis, compare) { compare(); }
+
 #ifdef PADDLE_WITH_MKLDNN
-TEST(Analyzer_vis, compare_mkldnn) {
-  AnalysisConfig cfg;
-  SetConfig(&cfg, true);
-
-  std::vector<std::vector<PaddleTensor>> input_slots_all;
-  SetInput(&input_slots_all);
-  CompareNativeAndAnalysis(cfg, input_slots_all);
-}
+TEST(Analyzer_vis, compare_mkldnn) { compare(true /* use_mkldnn */); }
 #endif
 
 }  // namespace analysis
...
@@ -35,8 +35,6 @@ DEFINE_bool(test_all_data, false, "Test the all dataset in data file.");
 DEFINE_int32(num_threads, 1, "Running the inference program in multi-threads.");
 DEFINE_bool(use_analysis, true,
             "Running the inference program in analysis mode.");
-DEFINE_bool(use_MKLDNN, false,
-            "Running the inference program with mkldnn library.");
 
 namespace paddle {
 namespace inference {
...
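With MKLDNN now toggled per test through cfg._use_mkldnn, the global use_MKLDNN gflag is removed from the shared helper. For reference, a minimal standalone sketch of how such a gflags switch is defined and read (hypothetical binary, not part of this test suite):

#include <gflags/gflags.h>
#include <iostream>

// Mirrors the flag this commit deletes.
DEFINE_bool(use_MKLDNN, false,
            "Running the inference program with mkldnn library.");

int main(int argc, char **argv) {
  // Parse --use_MKLDNN=true/false from the command line (gflags >= 2.1).
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  std::cout << "use_MKLDNN = " << FLAGS_use_MKLDNN << std::endl;
  return 0;
}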