Commit b8196843 authored by Tao Luo

add compare_mkldnn test

test=develop
Parent e47f4186
@@ -20,16 +20,14 @@ namespace paddle {
 namespace inference {
 namespace analysis {
 
-void SetConfig(AnalysisConfig *cfg) {
+void SetConfig(AnalysisConfig *cfg, bool _use_mkldnn = FLAGS_use_MKLDNN) {
   cfg->param_file = FLAGS_infer_model + "/params";
   cfg->prog_file = FLAGS_infer_model + "/model";
   cfg->use_gpu = false;
   cfg->device = 0;
   cfg->enable_ir_optim = true;
   cfg->specify_input_name = true;
-#ifdef PADDLE_WITH_MKLDNN
-  cfg->_use_mkldnn = FLAGS_use_MKLDNN;
-#endif
+  cfg->_use_mkldnn = _use_mkldnn;
 }
 
 void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
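The reworked SetConfig turns the MKLDNN switch into an explicit parameter instead of a compile-time-guarded read of the global flag. A minimal sketch (not part of the diff) of the two call patterns the new signature supports:

AnalysisConfig cfg;
SetConfig(&cfg);  // _use_mkldnn follows FLAGS_use_MKLDNN, which this commit defaults to false

AnalysisConfig cfg_mkldnn;  // hypothetical name, for illustration only
SetConfig(&cfg_mkldnn, true);  // force the MKLDNN path, as the new compare_mkldnn tests do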
@@ -92,17 +90,19 @@ TEST(Analyzer_resnet50, compare) {
   std::vector<std::vector<PaddleTensor>> input_slots_all;
   SetInput(&input_slots_all);
   CompareNativeAndAnalysis(cfg, input_slots_all);
+}
 
 // Compare result of NativeConfig and AnalysisConfig with MKLDNN
 #ifdef PADDLE_WITH_MKLDNN
-  // since default config._use_mkldnn=true in this case,
-  // we should compare analysis_outputs in config._use_mkldnn=false
-  // with native_outputs as well.
-  FLAGS_use_MKLDNN = false;
-  AnalysisConfig cfg1;
-  SetConfig(&cfg1);
-  CompareNativeAndAnalysis(cfg1, input_slots_all);
-  FLAGS_use_MKLDNN = true;
-#endif
+TEST(Analyzer_resnet50, compare_mkldnn) {
+  AnalysisConfig cfg;
+  SetConfig(&cfg, true);
+  std::vector<std::vector<PaddleTensor>> input_slots_all;
+  SetInput(&input_slots_all);
+  CompareNativeAndAnalysis(cfg, input_slots_all);
 }
+#endif
 
 }  // namespace analysis
 }  // namespace inference
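Both the plain and the MKLDNN comparison tests funnel into CompareNativeAndAnalysis from the shared tester helper. Its implementation is not part of this commit; as a rough sketch of the contract it provides (signature inferred from the call sites above, body assumed):

// Assumed shape of the shared helper; not the actual implementation.
void CompareNativeAndAnalysis(
    const AnalysisConfig &config,
    const std::vector<std::vector<PaddleTensor>> &inputs) {
  // 1. run the model through the plain NativeConfig baseline
  // 2. run the same inputs through the analysis path
  //    (IR passes, plus MKLDNN when config._use_mkldnn is set)
  // 3. check that both sets of output tensors agree within tolerance
}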
@@ -50,7 +50,7 @@ Record ProcessALine(const std::string &line) {
   return record;
 }
 
-void SetConfig(AnalysisConfig *cfg) {
+void SetConfig(AnalysisConfig *cfg, bool _use_mkldnn = FLAGS_use_MKLDNN) {
   cfg->param_file = FLAGS_infer_model + "/__params__";
   cfg->prog_file = FLAGS_infer_model + "/__model__";
   cfg->use_gpu = false;
@@ -59,9 +59,7 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->specify_input_name = true;
   // TODO(TJ): fix fusion gru
   cfg->ir_passes.push_back("fc_gru_fuse_pass");
-#ifdef PADDLE_WITH_MKLDNN
-  cfg->_use_mkldnn = FLAGS_use_MKLDNN;
-#endif
+  cfg->_use_mkldnn = _use_mkldnn;
 }
 
 void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
@@ -125,17 +123,19 @@ TEST(Analyzer_vis, compare) {
   std::vector<std::vector<PaddleTensor>> input_slots_all;
   SetInput(&input_slots_all);
   CompareNativeAndAnalysis(cfg, input_slots_all);
+}
 
 // Compare result of NativeConfig and AnalysisConfig with MKLDNN
 #ifdef PADDLE_WITH_MKLDNN
-  // since default config._use_mkldnn=true in this case,
-  // we should compare analysis_outputs in config._use_mkldnn=false
-  // with native_outputs as well.
-  FLAGS_use_MKLDNN = false;
-  AnalysisConfig cfg1;
-  SetConfig(&cfg1);
-  CompareNativeAndAnalysis(cfg1, input_slots_all);
-  FLAGS_use_MKLDNN = true;
-#endif
+TEST(Analyzer_vis, compare_mkldnn) {
+  AnalysisConfig cfg;
+  SetConfig(&cfg, true);
+  std::vector<std::vector<PaddleTensor>> input_slots_all;
+  SetInput(&input_slots_all);
+  CompareNativeAndAnalysis(cfg, input_slots_all);
 }
+#endif
 
 }  // namespace analysis
 }  // namespace inference
@@ -35,7 +35,7 @@ DEFINE_bool(test_all_data, false, "Test the all dataset in data file.");
 DEFINE_int32(num_threads, 1, "Running the inference program in multi-threads.");
 DEFINE_bool(use_analysis, true,
             "Running the inference program in analysis mode.");
-DEFINE_bool(use_MKLDNN, true,
+DEFINE_bool(use_MKLDNN, false,
             "Running the inference program with mkldnn library.");
 
 namespace paddle {
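With the default flipped to false, MKLDNN no longer runs implicitly inside every comparison test; it is exercised by the dedicated compare_mkldnn tests above, or by passing the flag at run time. A minimal sketch of how a gflags+gtest test binary typically wires this up (assumed; the real main lives in the shared test harness, not in this diff):

#include <gflags/gflags.h>
#include <gtest/gtest.h>

int main(int argc, char **argv) {
  // populates FLAGS_use_MKLDNN, FLAGS_infer_model, ... from the command line
  google::ParseCommandLineFlags(&argc, &argv, true);
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();  // e.g. passing --use_MKLDNN=true re-enables the MKLDNN path
}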