Unverified · Commit f8e370ac · authored by Wilber, committed by GitHub

[Inference] [unittest] Inference unit tests rely on dynamic libraries (#24743)
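Link the inference unit tests against the dynamic library paddle_fluid_shared instead of a long list of static components (Windows and macOS keep the static dependencies). Because the shared library strips its symbol table, tests can no longer DECLARE flags defined inside the library, so they define their own copies under new names (paddle_num_threads becomes cpu_num_threads, profile becomes enable_profile), and paddle_gtest_main.cc now parses the command line twice: once for flags registered in the test binary, and once, through the new paddle::platform::ParseCommandLineFlags wrapper, for flags owned by the library.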

Parent 8a9f06e6
......
@@ -380,8 +380,7 @@ function(cc_test_run TARGET_NAME)
     set(multiValueArgs COMMAND ARGS)
     cmake_parse_arguments(cc_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
     add_test(NAME ${TARGET_NAME}
-             COMMAND ${cc_test_COMMAND}
-             ARGS ${cc_test_ARGS}
+             COMMAND ${cc_test_COMMAND} ${cc_test_ARGS}
              WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
     set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_cpu_deterministic=true)
     set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_init_allocated_mem=true)
......
......
@@ -63,10 +63,6 @@ if(WITH_TESTING AND WITH_INFERENCE_API_TEST)
   add_subdirectory(tests/api)
 endif()

-if(NOT ON_INFER)
-  return()
-endif()
-
 set(SHARED_INFERENCE_SRCS
     io.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/../framework/data_feed.cc
......
......
@@ -49,10 +49,21 @@ cc_library(analysis_predictor SRCS analysis_predictor.cc ${mkldnn_quantizer_src}
 cc_test(test_paddle_inference_api SRCS api_tester.cc DEPS paddle_inference_api)

 if(WITH_TESTING)
-  inference_base_test(test_api_impl SRCS api_impl_tester.cc DEPS ${inference_deps}
-      ARGS --word2vec_dirname=${WORD2VEC_MODEL_DIR} --book_dirname=${PYTHON_TESTS_DIR}/book)
+  if (NOT APPLE AND NOT WIN32)
+    inference_base_test(test_api_impl SRCS api_impl_tester.cc DEPS paddle_fluid_shared
+        ARGS --word2vec_dirname=${WORD2VEC_MODEL_DIR} --book_dirname=${PYTHON_TESTS_DIR}/book)
+  else()
+    inference_base_test(test_api_impl SRCS api_impl_tester.cc DEPS ${inference_deps}
+        ARGS --word2vec_dirname=${WORD2VEC_MODEL_DIR} --book_dirname=${PYTHON_TESTS_DIR}/book)
+  endif()
   set_tests_properties(test_api_impl PROPERTIES DEPENDS test_image_classification)
   set_tests_properties(test_api_impl PROPERTIES LABELS "RUN_TYPE=DIST")
 endif()
-cc_test(test_analysis_predictor SRCS analysis_predictor_tester.cc DEPS analysis_predictor benchmark ${inference_deps}
-    ARGS --dirname=${WORD2VEC_MODEL_DIR})
+if (NOT APPLE AND NOT WIN32)
+  cc_test(test_analysis_predictor SRCS analysis_predictor_tester.cc DEPS paddle_fluid_shared
+      ARGS --dirname=${WORD2VEC_MODEL_DIR})
+else()
+  cc_test(test_analysis_predictor SRCS analysis_predictor_tester.cc DEPS analysis_predictor benchmark ${inference_deps}
+      ARGS --dirname=${WORD2VEC_MODEL_DIR})
+endif()
......
-set(INFERENCE_EXTRA_DEPS paddle_inference_api paddle_fluid_api ir_pass_manager analysis_predictor benchmark)
+if (NOT APPLE AND NOT WIN32)
+  set(INFERENCE_EXTRA_DEPS paddle_fluid_shared)
+else()
+  set(INFERENCE_EXTRA_DEPS paddle_inference_api paddle_fluid_api ir_pass_manager analysis_predictor benchmark)
+endif()
 if(WITH_GPU AND TENSORRT_FOUND)
-  set(INFERENCE_EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} analysis ${analysis_deps} ir_pass_manager analysis_predictor)
+  set(INFERENCE_EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} analysis ${analysis_deps})
 endif()

 function(download_data install_dir data_file)
......
@@ -33,13 +37,13 @@ endfunction()

 function(inference_analysis_api_test target install_dir filename)
   inference_analysis_test(${target} SRCS ${filename}
-        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} benchmark
+        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
         ARGS --infer_model=${install_dir}/model --infer_data=${install_dir}/data.txt --refer_result=${install_dir}/result.txt)
 endfunction()

 function(inference_analysis_api_test_build TARGET_NAME filename)
   inference_analysis_test_build(${TARGET_NAME} SRCS ${filename}
-        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} benchmark)
+        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS})
 endfunction()

 function(inference_analysis_api_int8_test_run TARGET_NAME test_binary model_dir data_path)
......
@@ -49,7 +53,7 @@ function(inference_analysis_api_int8_test_run TARGET_NAME test_binary model_dir
            --infer_data=${data_path}
            --warmup_batch_size=${WARMUP_BATCH_SIZE}
            --batch_size=50
-           --paddle_num_threads=${CPU_NUM_THREADS_ON_CI}
+           --cpu_num_threads=${CPU_NUM_THREADS_ON_CI}
            --iterations=2)
 endfunction()
......
@@ -65,7 +69,7 @@ function(inference_analysis_api_object_dection_int8_test_run TARGET_NAME test_bi
            --infer_data=${data_path}
            --warmup_batch_size=10
            --batch_size=300
-           --paddle_num_threads=${CPU_NUM_THREADS_ON_CI}
+           --cpu_num_threads=${CPU_NUM_THREADS_ON_CI}
            --iterations=1)
 endfunction()
......
@@ -88,7 +92,7 @@ function(inference_analysis_api_qat_test_run TARGET_NAME test_binary fp32_model_
            --int8_model=${int8_model_dir}
            --infer_data=${data_path}
            --batch_size=50
-           --paddle_num_threads=${CPU_NUM_THREADS_ON_CI}
+           --cpu_num_threads=${CPU_NUM_THREADS_ON_CI}
            --with_accuracy_layer=false
            --iterations=2)
 endfunction()
......
@@ -167,7 +171,7 @@ set(ERNIE_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/Ernie_Large")
 download_model_and_data(${ERNIE_INSTALL_DIR} "Ernie_large_model.tar.gz" "Ernie_large_data.txt.tar.gz" "Ernie_large_result.txt.tar.gz")
 download_result(${ERNIE_INSTALL_DIR} "Ernie_large_result.txt.tar.gz")
 inference_analysis_test(test_analyzer_ernie_large SRCS analyzer_ernie_tester.cc
-        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} benchmark
+        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
         ARGS --infer_model=${ERNIE_INSTALL_DIR}/model --infer_data=${ERNIE_INSTALL_DIR}/data.txt --refer_result=${ERNIE_INSTALL_DIR}/result.txt --ernie_large=true)

 # text_classification
......
@@ -186,7 +190,7 @@ download_model_and_data(${TRANSFORMER_INSTALL_DIR} "temp%2Ftransformer_model.tar
 inference_analysis_test(test_analyzer_transformer SRCS analyzer_transformer_tester.cc
         EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
         ARGS --infer_model=${TRANSFORMER_INSTALL_DIR}/model --infer_data=${TRANSFORMER_INSTALL_DIR}/data.txt --batch_size=8
-        --paddle_num_threads=${CPU_NUM_THREADS_ON_CI})
+        --cpu_num_threads=${CPU_NUM_THREADS_ON_CI})

 # ocr
 set(OCR_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/ocr")
......
......
@@ -53,7 +53,7 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->DisableGpu();
   cfg->SwitchIrDebug();
   cfg->SwitchSpecifyInputNames(false);
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
 }

 void SetInput(std::vector<std::vector<PaddleTensor>> *inputs,
......
......
@@ -143,7 +143,7 @@ void SetConfig(AnalysisConfig *cfg, bool use_mkldnn = false,
   }
   cfg->SwitchSpecifyInputNames();
   cfg->SwitchIrOptim();
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
 }

 void profile(bool use_mkldnn = false, bool use_gpu = false) {
......
......
@@ -27,7 +27,7 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->DisableGpu();
   cfg->SwitchIrOptim();
   cfg->SwitchSpecifyInputNames();
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
 }

 void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
......
@@ -40,7 +40,7 @@ void SetOptimConfig(AnalysisConfig *cfg) {
   cfg->DisableGpu();
   cfg->SwitchIrOptim();
   cfg->SwitchSpecifyInputNames();
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
 }

 // Easy for profiling independently.
......
......
@@ -26,7 +26,7 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->DisableGpu();
   cfg->SwitchIrOptim();
   cfg->SwitchSpecifyInputNames();
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
   cfg->EnableMKLDNN();
 }
......
......
@@ -27,7 +27,7 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->DisableGpu();
   cfg->SwitchIrOptim(true);
   cfg->SwitchSpecifyInputNames(false);
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
   cfg->EnableMKLDNN();
 }
......
......
@@ -107,7 +107,7 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->DisableGpu();
   cfg->SwitchSpecifyInputNames();
   cfg->SwitchIrOptim();
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
   if (FLAGS_zero_copy) {
     cfg->SwitchUseFeedFetchOps(false);
   }
......
......
@@ -26,7 +26,7 @@ void SetConfig(AnalysisConfig *cfg, std::string model_path) {
   cfg->DisableGpu();
   cfg->SwitchIrOptim(false);
   cfg->SwitchSpecifyInputNames();
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
   cfg->EnableMKLDNN();
 }
......
......
@@ -143,7 +143,7 @@ void SetConfig(AnalysisConfig *cfg, bool use_mkldnn = false) {
   cfg->DisableGpu();
   cfg->SwitchSpecifyInputNames();
   cfg->SwitchIrDebug();
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
   if (FLAGS_zero_copy) {
     cfg->SwitchUseFeedFetchOps(false);
   }
......
......
@@ -165,7 +165,7 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->DisableGpu();
   cfg->SwitchSpecifyInputNames();
   cfg->SwitchIrOptim();
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
 }

 void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
......
......
@@ -66,8 +66,8 @@ DEFINE_bool(warmup, false,
             "Use warmup to calculate elapsed_time more accurately. "
             "To reduce CI time, it sets false in default.");
-DECLARE_bool(profile);
-DECLARE_int32(paddle_num_threads);
+DEFINE_bool(enable_profile, false, "Turn on profiler for fluid");
+DEFINE_int32(cpu_num_threads, 1, "Number of threads for each paddle instance.");

 namespace paddle {
 namespace inference {
......
@@ -373,7 +373,7 @@ void PredictionWarmUp(PaddlePredictor *predictor,
     predictor->ZeroCopyRun();
   }
   PrintTime(batch_size, 1, num_threads, tid, warmup_timer.toc(), 1, data_type);
-  if (FLAGS_profile) {
+  if (FLAGS_enable_profile) {
     paddle::platform::ResetProfiler();
   }
 }
......
......
@@ -38,6 +38,16 @@ DEFINE_int32(multiple_of_cupti_buffer_size, 1,
             "Multiple of the CUPTI device buffer size. If the timestamps have "
             "been dropped when you are profiling, try increasing this value.");

+namespace paddle {
+namespace platform {
+
+void ParseCommandLineFlags(int argc, char **argv, bool remove) {
+  google::ParseCommandLineFlags(&argc, &argv, remove);
+}
+
+}  // namespace platform
+}  // namespace paddle
+
 namespace paddle {
 namespace framework {
......
......
@@ -19,6 +19,14 @@ limitations under the License. */
 #include "gflags/gflags.h"
 #include "glog/logging.h"

+namespace paddle {
+namespace platform {
+
+void ParseCommandLineFlags(int argc, char **argv, bool remove);
+
+}  // namespace platform
+}  // namespace paddle
+
 namespace paddle {
 namespace framework {
......
......
@@ -23,10 +23,41 @@ limitations under the License. */
 int main(int argc, char** argv) {
   paddle::memory::allocation::UseAllocatorStrategyGFlag();
   testing::InitGoogleTest(&argc, argv);
-  std::vector<char*> new_argv;
   std::string gflags_env;
+
+  // Because the dynamic library libpaddle_fluid.so strips its symbol table,
+  // the external program cannot see the flags defined inside the .so, and the
+  // flags defined by the external program cannot be accessed inside the .so.
+  // Therefore, ParseCommandLineFlags needs to be called separately inside and
+  // outside the library.
+  std::vector<char*> external_argv;
+  std::vector<char*> internal_argv;
+
+  // ParseNewCommandLineFlags in gflags.cc starts processing command-line
+  // strings from index 1, because it assumes the first entry (index 0) is the
+  // filename of the executable.
+  external_argv.push_back(argv[0]);
+  internal_argv.push_back(argv[0]);
+
+  std::vector<google::CommandLineFlagInfo> all_flags;
+  std::vector<std::string> external_flags_name;
+  google::GetAllFlags(&all_flags);
+  for (size_t i = 0; i < all_flags.size(); ++i) {
+    external_flags_name.push_back(all_flags[i].name);
+  }
+
   for (int i = 0; i < argc; ++i) {
-    new_argv.push_back(argv[i]);
+    bool flag = true;
+    std::string tmp(argv[i]);
+    for (size_t j = 0; j < external_flags_name.size(); ++j) {
+      if (tmp.find(external_flags_name[j]) != std::string::npos) {
+        external_argv.push_back(argv[i]);
+        flag = false;
+        break;
+      }
+    }
+    if (flag) {
+      internal_argv.push_back(argv[i]);
+    }
   }
+
   std::vector<std::string> envs;
......
@@ -70,7 +101,7 @@ int main(int argc, char** argv) {
     }
     env_string = env_string.substr(0, env_string.length() - 1);
     env_str = strdup(env_string.c_str());
-    new_argv.push_back(env_str);
+    internal_argv.push_back(env_str);
     VLOG(1) << "gtest env_string:" << env_string;
   }
......
@@ -82,13 +113,17 @@ int main(int argc, char** argv) {
     }
     undefok_string = undefok_string.substr(0, undefok_string.length() - 1);
     undefok_str = strdup(undefok_string.c_str());
-    new_argv.push_back(undefok_str);
+    internal_argv.push_back(undefok_str);
     VLOG(1) << "gtest undefok_string:" << undefok_string;
   }

-  int new_argc = static_cast<int>(new_argv.size());
-  char** new_argv_address = new_argv.data();
-  google::ParseCommandLineFlags(&new_argc, &new_argv_address, false);
+  int new_argc = static_cast<int>(external_argv.size());
+  char** external_argv_address = external_argv.data();
+  google::ParseCommandLineFlags(&new_argc, &external_argv_address, false);
+
+  int internal_argc = internal_argv.size();
+  char** arr = internal_argv.data();
+  paddle::platform::ParseCommandLineFlags(internal_argc, arr, true);
+
   paddle::framework::InitDevices(true);
   int ret = RUN_ALL_TESTS();
......
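For readers following the paddle_gtest_main.cc change above, here is a minimal, self-contained sketch of the argv-splitting technique, assuming only gflags: arguments that match a flag registered in the current binary are kept for the local parser, and everything else is set aside for the shared library's parser. The helper name SplitFlags and the demo flag are illustrative, not part of the Paddle API.

#include <string>
#include <vector>

#include "gflags/gflags.h"

// Demo flag, so this binary has at least one flag of its own to recognize.
DEFINE_int32(cpu_num_threads, 1, "Number of threads for each paddle instance.");

// Split argv into flags registered in this binary (external) and everything
// else (internal), which would be forwarded to the shared library's parser.
void SplitFlags(int argc, char** argv, std::vector<char*>* external,
                std::vector<char*>* internal) {
  // gflags starts parsing at index 1 and assumes index 0 is the executable
  // name, so argv[0] must lead both lists.
  external->push_back(argv[0]);
  internal->push_back(argv[0]);

  std::vector<google::CommandLineFlagInfo> all_flags;
  google::GetAllFlags(&all_flags);  // every flag registered in this binary

  for (int i = 1; i < argc; ++i) {
    std::string arg(argv[i]);
    bool known = false;
    for (const auto& info : all_flags) {
      if (arg.find(info.name) != std::string::npos) {
        known = true;
        break;
      }
    }
    (known ? external : internal)->push_back(argv[i]);
  }
}

int main(int argc, char** argv) {
  std::vector<char*> external_argv;
  std::vector<char*> internal_argv;
  SplitFlags(argc, argv, &external_argv, &internal_argv);

  // Parse only the flags this binary owns; the unknown ones were filtered
  // out above, so gflags will not abort on them.
  int external_argc = static_cast<int>(external_argv.size());
  char** external_data = external_argv.data();
  google::ParseCommandLineFlags(&external_argc, &external_data, false);

  // internal_argv would now be handed to the library-side parser, e.g. the
  // paddle::platform::ParseCommandLineFlags wrapper added in this commit.
  return 0;
}

Note that, as in the commit itself, substring matching on the flag name is a heuristic: an argument such as --cpu_num_threads_extra would also be routed to the external list.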