Unverified commit 5c61eeef, authored by joanna.wozna.intel, committed by GitHub

Enable all image classification models (#29155)

Parent d8eef4e4
@@ -376,6 +376,15 @@ if(WITH_MKLDNN)
     # resnet50 bfloat16
     inference_analysis_api_bfloat16_test_run(test_analyzer_bfloat16_resnet50 ${BF16_IMG_CLASS_TEST_APP} ${INT8_RESNET50_MODEL_DIR} ${IMAGENET_DATA_PATH})

+    # googlenet bfloat16
+    inference_analysis_api_bfloat16_test_run(test_analyzer_bfloat16_googlenet ${BF16_IMG_CLASS_TEST_APP} ${INT8_GOOGLENET_MODEL_DIR} ${IMAGENET_DATA_PATH})
+
+    # mobilenetv1 bfloat16
+    inference_analysis_api_bfloat16_test_run(test_analyzer_bfloat16_mobilenetv1 ${BF16_IMG_CLASS_TEST_APP} ${INT8_MOBILENETV1_MODEL_DIR} ${IMAGENET_DATA_PATH})
+
+    # mobilenetv2 bfloat16
+    inference_analysis_api_bfloat16_test_run(test_analyzer_bfloat16_mobilenetv2 ${BF16_IMG_CLASS_TEST_APP} ${INT8_MOBILENETV2_MODEL_DIR} ${IMAGENET_DATA_PATH})
+
     ### Object detection models
     set(PASCALVOC_DATA_PATH "${INT8_DATA_DIR}/pascalvoc_val_head_300.bin")
     set(INT8_OBJ_DETECT_TEST_APP "test_analyzer_int8_object_detection")
@@ -28,20 +28,18 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->EnableMKLDNN();
 }

-TEST(Analyzer_int8_image_classification, bfloat16) {
+TEST(Analyzer_bfloat16_image_classification, bfloat16) {
   AnalysisConfig cfg;
   SetConfig(&cfg);

-  AnalysisConfig q_cfg;
-  SetConfig(&q_cfg);
+  AnalysisConfig b_cfg;
+  SetConfig(&b_cfg);

   // read data from file and prepare batches with test data
   std::vector<std::vector<PaddleTensor>> input_slots_all;
   SetInputs(&input_slots_all);

-  q_cfg.SwitchIrDebug();
-  q_cfg.EnableMkldnnBfloat16();
-  q_cfg.SetBfloat16Op({"conv2d"});
-  CompareBFloat16AndAnalysis(&cfg, &q_cfg, input_slots_all);
+  b_cfg.EnableMkldnnBfloat16();
+  CompareBFloat16AndAnalysis(&cfg, &b_cfg, input_slots_all);
 }

 }  // namespace analysis
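For context, here is a minimal standalone sketch of enabling MKL-DNN bfloat16 inference through the same AnalysisConfig calls the test above exercises (EnableMKLDNN, EnableMkldnnBfloat16). The model directory, input tensor name, and input shape are illustrative assumptions and are not part of this commit.

#include <vector>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  paddle::AnalysisConfig cfg;
  cfg.SetModel("./mobilenetv2_model");  // hypothetical model directory
  cfg.DisableGpu();
  cfg.SwitchIrOptim();
  cfg.EnableMKLDNN();
  cfg.EnableMkldnnBfloat16();  // run supported ops in bfloat16 on CPU

  auto predictor = paddle::CreatePaddlePredictor(cfg);

  // Prepare a dummy NCHW image batch; a real application would fill the
  // buffer with preprocessed image data.
  paddle::PaddleTensor input;
  input.name = "data";  // hypothetical input variable name
  input.shape = {1, 3, 224, 224};
  input.dtype = paddle::PaddleDType::FLOAT32;
  std::vector<float> buf(1 * 3 * 224 * 224, 0.f);
  input.data = paddle::PaddleBuf(buf.data(), buf.size() * sizeof(float));

  std::vector<paddle::PaddleTensor> outputs;
  predictor->Run({input}, &outputs, /*batch_size=*/1);
  return 0;
}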