From b7a64e8698f61ddd82f6a8718e722d3309fd5aa7 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Fri, 14 Sep 2018 10:59:48 +0800 Subject: [PATCH] fix conflicts --- paddle/fluid/inference/tests/api/CMakeLists.txt | 7 ++++++- paddle/fluid/inference/tests/api/analyzer_lac_tester.cc | 7 +++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt index f1075ea708c..3eba3755148 100644 --- a/paddle/fluid/inference/tests/api/CMakeLists.txt +++ b/paddle/fluid/inference/tests/api/CMakeLists.txt @@ -60,7 +60,12 @@ inference_analysis_test(test_analyzer_text_classification SRCS analyzer_text_cla set(OCR_MODEL_URL "http://paddlemodels.cdn.bcebos.com/inference-vis-demos%2Focr.tar.gz") set(OCR_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo/ocr") if (NOT EXISTS ${OCR_INSTALL_DIR} AND WITH_INFERENCE) - inference_download_and_uncompress(${OCR_INSTALL_DIR} ${OCR_MODEL_URL}) + get_filename_component(filename ${OCR_MODEL_URL} NAME) + message(STATUS "Download inference test stuff ${filename} from ${OCR_MODEL_URL}") + execute_process(COMMAND bash -c "mkdir -p ${OCR_INSTALL_DIR}") + execute_process(COMMAND bash -c "cd ${OCR_INSTALL_DIR} && wget -q ${OCR_MODEL_URL}") + execute_process(COMMAND bash -c "cd ${OCR_INSTALL_DIR} && tar xzf ${filename}") + message(STATUS "finish downloading ${filename}") endif() inference_analysis_test(test_analyzer_ocr SRCS analyzer_vis_tester.cc EXTRA_DEPS paddle_inference_api paddle_fluid_api ir_pass_manager analysis_predictor diff --git a/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc b/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc index 45c19af520d..bf893e32569 100644 --- a/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc @@ -110,8 +110,7 @@ const int64_t lac_ref_data[] = {24, 25, 25, 25, 38, 30, 31, 14, 15, 44, 24, 25, void TestLACPrediction(const std::string 
&model_path, const std::string &data_file, const int batch_size, - const int repeat, bool test_all_data, - bool use_analysis = false) { + const int repeat, bool use_analysis = false) { AnalysisConfig cfg; cfg.model_dir = model_path; cfg.use_gpu = false; @@ -199,13 +198,13 @@ void TestLACPrediction(const std::string &model_path, TEST(Analyzer_LAC, native) { LOG(INFO) << "LAC with native"; TestLACPrediction(FLAGS_infer_model, FLAGS_infer_data, FLAGS_batch_size, - FLAGS_repeat, FLAGS_test_all_data); + FLAGS_repeat); } TEST(Analyzer_LAC, analysis) { LOG(INFO) << "LAC with analysis"; TestLACPrediction(FLAGS_infer_model, FLAGS_infer_data, FLAGS_batch_size, - FLAGS_repeat, FLAGS_test_all_data, true); + FLAGS_repeat, true); } } // namespace analysis -- GitLab