diff --git a/cmake/generic.cmake b/cmake/generic.cmake
index 2c65554e48ba946faec84eb826fbf93b315f519f..fe834604bec8ef9d28f1778a3362a613c7cb2be0 100644
--- a/cmake/generic.cmake
+++ b/cmake/generic.cmake
@@ -382,8 +382,7 @@ function(cc_test_run TARGET_NAME)
     set(multiValueArgs COMMAND ARGS)
     cmake_parse_arguments(cc_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
     add_test(NAME ${TARGET_NAME}
-             COMMAND ${cc_test_COMMAND}
-             ARGS ${cc_test_ARGS}
+             COMMAND ${cc_test_COMMAND} ${cc_test_ARGS}
              WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
     set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_cpu_deterministic=true)
     set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_init_allocated_mem=true)
diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt
index 88723e24184cce240d4bccd1b4c15a1ea8f14ed6..e0244a876f2cfc6b77db105d94d3262bed4f4014 100644
--- a/paddle/fluid/inference/CMakeLists.txt
+++ b/paddle/fluid/inference/CMakeLists.txt
@@ -63,10 +63,6 @@ if(WITH_TESTING)
   endif()
 endif()
 
-if(NOT ON_INFER)
-  return()
-endif()
-
 set(SHARED_INFERENCE_SRCS
     io.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/../framework/data_feed.cc
diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt
index d375a4443e8c8b71830093b9ab1941e749c99d5b..3433e3d759aef9c1bae643697d727f74aa571f58 100755
--- a/paddle/fluid/inference/api/CMakeLists.txt
+++ b/paddle/fluid/inference/api/CMakeLists.txt
@@ -45,10 +45,21 @@ cc_library(analysis_predictor SRCS analysis_predictor.cc ${mkldnn_quantizer_src}
 cc_test(test_paddle_inference_api SRCS api_tester.cc DEPS paddle_inference_api)
 
 if(WITH_TESTING)
-  inference_base_test(test_api_impl SRCS api_impl_tester.cc DEPS ${inference_deps}
-      ARGS --word2vec_dirname=${WORD2VEC_MODEL_DIR} --book_dirname=${PYTHON_TESTS_DIR}/book)
+  if (NOT APPLE AND NOT WIN32)
+    inference_base_test(test_api_impl SRCS api_impl_tester.cc DEPS paddle_fluid_shared
+        ARGS --word2vec_dirname=${WORD2VEC_MODEL_DIR} --book_dirname=${PYTHON_TESTS_DIR}/book)
+  else()
+    inference_base_test(test_api_impl SRCS api_impl_tester.cc DEPS ${inference_deps}
+        ARGS --word2vec_dirname=${WORD2VEC_MODEL_DIR} --book_dirname=${PYTHON_TESTS_DIR}/book)
+  endif()
   set_tests_properties(test_api_impl PROPERTIES DEPENDS test_image_classification)
   set_tests_properties(test_api_impl PROPERTIES LABELS "RUN_TYPE=DIST")
 endif()
-cc_test(test_analysis_predictor SRCS analysis_predictor_tester.cc DEPS analysis_predictor benchmark ${inference_deps}
-    ARGS --dirname=${WORD2VEC_MODEL_DIR})
+
+if (NOT APPLE AND NOT WIN32)
+  cc_test(test_analysis_predictor SRCS analysis_predictor_tester.cc DEPS paddle_fluid_shared
+      ARGS --dirname=${WORD2VEC_MODEL_DIR})
+else()
+  cc_test(test_analysis_predictor SRCS analysis_predictor_tester.cc DEPS analysis_predictor benchmark ${inference_deps}
+      ARGS --dirname=${WORD2VEC_MODEL_DIR})
+endif()
diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt
index 8274ca86dc4225946848778cbac689674b789843..d23ef93b922a048722e802543cf2b24324acaaaf 100644
--- a/paddle/fluid/inference/tests/api/CMakeLists.txt
+++ b/paddle/fluid/inference/tests/api/CMakeLists.txt
@@ -1,7 +1,11 @@
-set(INFERENCE_EXTRA_DEPS paddle_inference_api paddle_fluid_api ir_pass_manager analysis_predictor benchmark)
+if (NOT APPLE AND NOT WIN32)
+  set(INFERENCE_EXTRA_DEPS paddle_fluid_shared)
+else()
+  set(INFERENCE_EXTRA_DEPS paddle_inference_api paddle_fluid_api ir_pass_manager analysis_predictor benchmark)
+endif()
 
 if(WITH_GPU AND TENSORRT_FOUND)
-  set(INFERENCE_EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} analysis ${analysis_deps} ir_pass_manager analysis_predictor)
+  set(INFERENCE_EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} analysis ${analysis_deps})
 endif()
 
 function(download_data install_dir data_file)
@@ -33,13 +37,13 @@ endfunction()
 
 function(inference_analysis_api_test target install_dir filename)
   inference_analysis_test(${target} SRCS ${filename}
-      EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} benchmark
+      EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
       ARGS --infer_model=${install_dir}/model --infer_data=${install_dir}/data.txt --refer_result=${install_dir}/result.txt)
 endfunction()
 
 function(inference_analysis_api_test_build TARGET_NAME filename)
   inference_analysis_test_build(${TARGET_NAME} SRCS ${filename}
-      EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} benchmark)
+      EXTRA_DEPS ${INFERENCE_EXTRA_DEPS})
 endfunction()
 
 function(inference_analysis_api_int8_test_run TARGET_NAME test_binary model_dir data_path)
@@ -49,7 +53,7 @@ function(inference_analysis_api_int8_test_run TARGET_NAME test_binary model_dir
               --infer_data=${data_path}
               --warmup_batch_size=${WARMUP_BATCH_SIZE}
               --batch_size=50
-              --paddle_num_threads=${CPU_NUM_THREADS_ON_CI}
+              --cpu_num_threads=${CPU_NUM_THREADS_ON_CI}
               --iterations=2)
 endfunction()
 
@@ -65,7 +69,7 @@ function(inference_analysis_api_object_dection_int8_test_run TARGET_NAME test_bi
               --infer_data=${data_path}
               --warmup_batch_size=10
               --batch_size=300
-              --paddle_num_threads=${CPU_NUM_THREADS_ON_CI}
+              --cpu_num_threads=${CPU_NUM_THREADS_ON_CI}
               --iterations=1)
 endfunction()
 
@@ -88,7 +92,7 @@ function(inference_analysis_api_qat_test_run TARGET_NAME test_binary fp32_model_
               --int8_model=${int8_model_dir}
               --infer_data=${data_path}
               --batch_size=50
-              --paddle_num_threads=${CPU_NUM_THREADS_ON_CI}
+              --cpu_num_threads=${CPU_NUM_THREADS_ON_CI}
               --with_accuracy_layer=false
               --iterations=2)
 endfunction()
@@ -167,7 +171,7 @@ set(ERNIE_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/Ernie_Large")
 download_model_and_data(${ERNIE_INSTALL_DIR} "Ernie_large_model.tar.gz" "Ernie_large_data.txt.tar.gz" "Ernie_large_result.txt.tar.gz")
 download_result(${ERNIE_INSTALL_DIR} "Ernie_large_result.txt.tar.gz")
 inference_analysis_test(test_analyzer_ernie_large SRCS analyzer_ernie_tester.cc
-    EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} benchmark
+    EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
     ARGS --infer_model=${ERNIE_INSTALL_DIR}/model --infer_data=${ERNIE_INSTALL_DIR}/data.txt --refer_result=${ERNIE_INSTALL_DIR}/result.txt --ernie_large=true)
 
 # text_classification
@@ -186,7 +190,7 @@ download_model_and_data(${TRANSFORMER_INSTALL_DIR} "temp%2Ftransformer_model.tar
 inference_analysis_test(test_analyzer_transformer SRCS analyzer_transformer_tester.cc
     EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
     ARGS --infer_model=${TRANSFORMER_INSTALL_DIR}/model --infer_data=${TRANSFORMER_INSTALL_DIR}/data.txt --batch_size=8
-    --paddle_num_threads=${CPU_NUM_THREADS_ON_CI})
+    --cpu_num_threads=${CPU_NUM_THREADS_ON_CI})
 
 # ocr
 set(OCR_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/ocr")
diff --git a/paddle/fluid/inference/tests/api/analyzer_detect_tester.cc b/paddle/fluid/inference/tests/api/analyzer_detect_tester.cc
new file mode 100644
index 0000000000000000000000000000000000000000..5333f0052d74250204daa09117602bf8bfe2aae1
--- /dev/null
+++ b/paddle/fluid/inference/tests/api/analyzer_detect_tester.cc
@@ -0,0 +1,129 @@
+/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <fstream>
+#include <iostream>
+#include <thread>
+#include "paddle/fluid/inference/tests/api/tester_helper.h"
+
+DEFINE_string(infer_shape, "", "data shape file");
+DEFINE_int32(sample, 20, "number of sample");
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+struct Record {
+  std::vector<float> data;
+  std::vector<int32_t> shape;
+};
+
+Record ProcessALine(const std::string &line, const std::string &shape_line) {
+  VLOG(3) << "process a line";
+  std::vector<std::string> columns;
+
+  Record record;
+  std::vector<std::string> data_strs;
+  split(line, ' ', &data_strs);
+  for (auto &d : data_strs) {
+    record.data.push_back(std::stof(d));
+  }
+
+  std::vector<std::string> shape_strs;
+  split(shape_line, ' ', &shape_strs);
+  for (auto &s : shape_strs) {
+    record.shape.push_back(std::stoi(s));
+  }
+  return record;
+}
+
+void SetConfig(AnalysisConfig *cfg) {
+  cfg->SetModel(FLAGS_infer_model + "/model", FLAGS_infer_model + "/params");
+  cfg->DisableGpu();
+  cfg->SwitchIrDebug();
+  cfg->SwitchSpecifyInputNames(false);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
+}
+
+void SetInput(std::vector<std::vector<PaddleTensor>> *inputs,
+              const std::string &line, const std::string &shape_line) {
+  auto record = ProcessALine(line, shape_line);
+
+  PaddleTensor input;
+  input.shape = record.shape;
+  input.dtype = PaddleDType::FLOAT32;
+  size_t input_size = record.data.size() * sizeof(float);
+  input.data.Resize(input_size);
+  memcpy(input.data.data(), record.data.data(), input_size);
+  std::vector<PaddleTensor> input_slots;
+  input_slots.assign({input});
+  (*inputs).emplace_back(input_slots);
+}
+
+void profile(int cache_capacity = 1) {
+  AnalysisConfig cfg;
+  SetConfig(&cfg);
+  cfg.EnableMKLDNN();
+  cfg.SetMkldnnCacheCapacity(cache_capacity);
+
+  std::vector<std::vector<PaddleTensor>> outputs;
+  std::vector<std::vector<PaddleTensor>> input_slots_all;
+
+  Timer run_timer;
+  double elapsed_time = 0;
+
+  int num_times = FLAGS_repeat;
+  int sample = FLAGS_sample;
+  auto predictor = CreatePaddlePredictor(cfg);
+  outputs.resize(sample);
+
+  std::vector<std::thread> threads;
+
+  std::ifstream file(FLAGS_infer_data);
+  std::ifstream infer_file(FLAGS_infer_shape);
+  std::string line;
+  std::string shape_line;
+
+  for (int i = 0; i < sample; i++) {
+    threads.emplace_back([&, i]() {
+      std::getline(file, line);
+      std::getline(infer_file, shape_line);
+      SetInput(&input_slots_all, line, shape_line);
+
+      run_timer.tic();
+      predictor->Run(input_slots_all[0], &outputs[0], FLAGS_batch_size);
+      elapsed_time += run_timer.toc();
+    });
+    threads[0].join();
+    threads.clear();
+    std::vector<std::vector<PaddleTensor>>().swap(input_slots_all);
+  }
+  file.close();
+  infer_file.close();
+
+  auto batch_latency = elapsed_time / (sample * num_times);
+  PrintTime(FLAGS_batch_size, num_times, FLAGS_num_threads, 0, batch_latency,
+            sample, VarType::FP32);
+}
+
+#ifdef PADDLE_WITH_MKLDNN
+TEST(Analyzer_detect, profile_mkldnn) {
+  profile(5 /* cache_capacity */);
+  profile(10 /* cache_capacity */);
+}
+#endif
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/tests/api/analyzer_ernie_tester.cc b/paddle/fluid/inference/tests/api/analyzer_ernie_tester.cc
index 199eee02d75c1ec0998fd2b657bd567d01d17435..87c8d783160218db2f791816ffbc5a161a66b6a1 100644
--- a/paddle/fluid/inference/tests/api/analyzer_ernie_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_ernie_tester.cc
@@ -143,7 +143,7 @@ void SetConfig(AnalysisConfig *cfg, bool use_mkldnn = false,
   }
   cfg->SwitchSpecifyInputNames();
   cfg->SwitchIrOptim();
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
 }
 
 void profile(bool use_mkldnn = false, bool use_gpu = false) {
diff --git a/paddle/fluid/inference/tests/api/analyzer_image_classification_tester.cc b/paddle/fluid/inference/tests/api/analyzer_image_classification_tester.cc
index 07934f96dc55ef1e80b54472c74975ff62b6add3..1faffacebcfdb173b96815a6ad223f06ea69c07f 100644
--- a/paddle/fluid/inference/tests/api/analyzer_image_classification_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_image_classification_tester.cc
@@ -27,7 +27,7 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->DisableGpu();
   cfg->SwitchIrOptim();
   cfg->SwitchSpecifyInputNames();
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
 }
 
 void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
@@ -40,7 +40,7 @@ void SetOptimConfig(AnalysisConfig *cfg) {
   cfg->DisableGpu();
   cfg->SwitchIrOptim();
   cfg->SwitchSpecifyInputNames();
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
 }
 
 // Easy for profiling independently.
diff --git a/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc b/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc
index eb2c7935026a2705adb5fd9e21d639536fdf0fa7..5f2c879fe0a0c755d192a6be34ac6a1173412b06 100644
--- a/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc
@@ -26,7 +26,7 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->DisableGpu();
   cfg->SwitchIrOptim();
   cfg->SwitchSpecifyInputNames();
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
   cfg->EnableMKLDNN();
 }
 
diff --git a/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc b/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc
index 8dcc611bb64557dd49fc3b7599aad5886ce4bfb8..7f06a3b9023ba3e907c9731d576f014a3e451113 100644
--- a/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc
@@ -27,7 +27,7 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->DisableGpu();
   cfg->SwitchIrOptim(true);
   cfg->SwitchSpecifyInputNames(false);
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
   cfg->EnableMKLDNN();
 }
 
diff --git a/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc b/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc
index 11a49ed2914ae22c2ddb4cfe384900adfce4f21d..06a8e01b10c6eb70fe2cbac19725d96281863c29 100644
--- a/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc
@@ -107,7 +107,7 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->DisableGpu();
   cfg->SwitchSpecifyInputNames();
   cfg->SwitchIrOptim();
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
   if (FLAGS_zero_copy) {
     cfg->SwitchUseFeedFetchOps(false);
   }
diff --git a/paddle/fluid/inference/tests/api/analyzer_qat_image_classification_tester.cc b/paddle/fluid/inference/tests/api/analyzer_qat_image_classification_tester.cc
index fd3210c3384e3f3acbdcf22b6afdacfacc68c9ba..7b2b1c31cc5a7ee84fefc5abc37c342155151d94 100644
--- a/paddle/fluid/inference/tests/api/analyzer_qat_image_classification_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_qat_image_classification_tester.cc
@@ -26,7 +26,7 @@ void SetConfig(AnalysisConfig *cfg, std::string model_path) {
   cfg->DisableGpu();
   cfg->SwitchIrOptim(false);
   cfg->SwitchSpecifyInputNames();
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
   cfg->EnableMKLDNN();
 }
 
diff --git a/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc
index b88607717036515bbb6b836e89cbf8d7bac9bc51..56f706ae56bda8b06eba5dd9e080552aa9785c6e 100644
--- a/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc
@@ -143,7 +143,7 @@ void SetConfig(AnalysisConfig *cfg, bool use_mkldnn = false) {
   cfg->DisableGpu();
   cfg->SwitchSpecifyInputNames();
   cfg->SwitchIrDebug();
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
   if (FLAGS_zero_copy) {
     cfg->SwitchUseFeedFetchOps(false);
   }
diff --git a/paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc b/paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc
index f2195966add8c4c159d26682c9578c95301a345f..9726109bf89ac0d5e1048f6cae0483248696f3e2 100644
--- a/paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc
@@ -165,7 +165,7 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->DisableGpu();
   cfg->SwitchSpecifyInputNames();
   cfg->SwitchIrOptim();
-  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
 }
 
 void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h
index c8e5c826a1333f6443c8bbad2ae00d890b823c0a..1aaac6b09d820dece6118dde5655297a938287bc 100644
--- a/paddle/fluid/inference/tests/api/tester_helper.h
+++ b/paddle/fluid/inference/tests/api/tester_helper.h
@@ -66,8 +66,8 @@ DEFINE_bool(warmup, false,
             "Use warmup to calculate elapsed_time more accurately. "
             "To reduce CI time, it sets false in default.");
 
-DECLARE_bool(profile);
-DECLARE_int32(paddle_num_threads);
+DEFINE_bool(enable_profile, false, "Turn on profiler for fluid");
+DEFINE_int32(cpu_num_threads, 1, "Number of threads for each paddle instance.");
 
 namespace paddle {
 namespace inference {
@@ -355,7 +355,7 @@ void PredictionWarmUp(PaddlePredictor *predictor,
     predictor->ZeroCopyRun();
   }
   PrintTime(batch_size, 1, num_threads, tid, warmup_timer.toc(), 1, data_type);
-  if (FLAGS_profile) {
+  if (FLAGS_enable_profile) {
     paddle::platform::ResetProfiler();
   }
 }
diff --git a/paddle/fluid/platform/init.cc b/paddle/fluid/platform/init.cc
index c6c84f8b9f0e56165ea07a75c4cf22c6dad3fcd4..3ce3f6d1e1eda58b78d58ece6f0cbd4cbd30fde9 100644
--- a/paddle/fluid/platform/init.cc
+++ b/paddle/fluid/platform/init.cc
@@ -38,6 +38,16 @@ DEFINE_int32(multiple_of_cupti_buffer_size, 1,
             "Multiple of the CUPTI device buffer size. If the timestamps have "
             "been dropped when you are profiling, try increasing this value.");
 
+namespace paddle {
+namespace platform {
+
+void ParseCommandLineFlags(int argc, char **argv, bool remove) {
+  google::ParseCommandLineFlags(&argc, &argv, remove);
+}
+
+}  // namespace platform
+}  // namespace paddle
+
 namespace paddle {
 namespace framework {
diff --git a/paddle/fluid/platform/init.h b/paddle/fluid/platform/init.h
index d189f0022bf6360bd5a55116c786e5375836e645..e087bb3f37c802ba654fd1e2706bf7258a8b4829 100644
--- a/paddle/fluid/platform/init.h
+++ b/paddle/fluid/platform/init.h
@@ -19,6 +19,14 @@ limitations under the License. */
 #include "gflags/gflags.h"
 #include "glog/logging.h"
 
+namespace paddle {
+namespace platform {
+
+void ParseCommandLineFlags(int argc, char **argv, bool remove);
+
+}  // namespace platform
+}  // namespace paddle
+
 namespace paddle {
 namespace framework {
diff --git a/paddle/testing/paddle_gtest_main.cc b/paddle/testing/paddle_gtest_main.cc
index d5acff56a9aa9136b84e216f6f8b0f28b528dbc5..c19bd56fbbf4de343abea198674eaf8b545321f5 100644
--- a/paddle/testing/paddle_gtest_main.cc
+++ b/paddle/testing/paddle_gtest_main.cc
@@ -23,10 +23,41 @@ limitations under the License. */
 int main(int argc, char** argv) {
   paddle::memory::allocation::UseAllocatorStrategyGFlag();
   testing::InitGoogleTest(&argc, argv);
-  std::vector<char*> new_argv;
-  std::string gflags_env;
+  // Because the dynamic library libpaddle_fluid.so clips the symbol table, the
+  // external program cannot recognize the flag inside the so, and the flag
+  // defined by the external program cannot be accessed inside the so.
+  // Therefore, the ParseCommandLine function needs to be called separately
+  // inside and outside.
+  std::vector<char*> external_argv;
+  std::vector<char*> internal_argv;
+
+  // ParseNewCommandLineFlags in gflags.cc starts processing
+  // commandline strings from idx 1.
+  // The reason is, it assumes that the first one (idx 0) is
+  // the filename of executable file.
+  external_argv.push_back(argv[0]);
+  internal_argv.push_back(argv[0]);
+
+  std::vector<google::CommandLineFlagInfo> all_flags;
+  std::vector<std::string> external_flags_name;
+  google::GetAllFlags(&all_flags);
+  for (size_t i = 0; i < all_flags.size(); ++i) {
+    external_flags_name.push_back(all_flags[i].name);
+  }
+
   for (int i = 0; i < argc; ++i) {
-    new_argv.push_back(argv[i]);
+    bool flag = true;
+    std::string tmp(argv[i]);
+    for (size_t j = 0; j < external_flags_name.size(); ++j) {
+      if (tmp.find(external_flags_name[j]) != std::string::npos) {
+        external_argv.push_back(argv[i]);
+        flag = false;
+        break;
+      }
+    }
+    if (flag) {
+      internal_argv.push_back(argv[i]);
+    }
   }
 
   std::vector<std::string> envs;
@@ -70,7 +101,7 @@
     }
     env_string = env_string.substr(0, env_string.length() - 1);
     env_str = strdup(env_string.c_str());
-    new_argv.push_back(env_str);
+    internal_argv.push_back(env_str);
     VLOG(1) << "gtest env_string:" << env_string;
   }
 
@@ -82,13 +113,17 @@
     }
     undefok_string = undefok_string.substr(0, undefok_string.length() - 1);
     undefok_str = strdup(undefok_string.c_str());
-    new_argv.push_back(undefok_str);
+    internal_argv.push_back(undefok_str);
     VLOG(1) << "gtest undefok_string:" << undefok_string;
   }
 
-  int new_argc = static_cast<int>(new_argv.size());
-  char** new_argv_address = new_argv.data();
-  google::ParseCommandLineFlags(&new_argc, &new_argv_address, false);
+  int new_argc = static_cast<int>(external_argv.size());
+  char** external_argv_address = external_argv.data();
+  google::ParseCommandLineFlags(&new_argc, &external_argv_address, false);
+
+  int internal_argc = internal_argv.size();
+  char** arr = internal_argv.data();
+  paddle::platform::ParseCommandLineFlags(internal_argc, arr, true);
 
   paddle::framework::InitDevices(true);
   int ret = RUN_ALL_TESTS();
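
For readers following the paddle_gtest_main.cc change: below is a minimal standalone sketch of the argv-splitting idea, for illustration only and not part of the patch. The file name, flag, and variable names (demo_flag_split.cc, known_argv, foreign_argv) are invented; only the gflags calls (google::GetAllFlags, google::ParseCommandLineFlags) are the real APIs the patch relies on.

// demo_flag_split.cc -- hypothetical demo, not part of the patch.
// Flags registered in this binary are parsed here; anything unrecognized is
// collected so it could be forwarded to a second gflags parser (in the patch,
// the one compiled into libpaddle_fluid.so).
// Build (assumption): g++ demo_flag_split.cc -lgflags -o demo_flag_split
#include <string>
#include <vector>

#include "gflags/gflags.h"

DEFINE_bool(enable_profile, false, "Example flag known to this binary.");

int main(int argc, char** argv) {
  // Names of every flag gflags registered in this binary.
  std::vector<google::CommandLineFlagInfo> all_flags;
  google::GetAllFlags(&all_flags);

  std::vector<char*> known_argv;    // parsed by this binary
  std::vector<char*> foreign_argv;  // would go to the shared library's parser
  known_argv.push_back(argv[0]);    // gflags skips index 0 (program name)
  foreign_argv.push_back(argv[0]);

  for (int i = 1; i < argc; ++i) {
    bool known = false;
    const std::string arg(argv[i]);
    for (const auto& info : all_flags) {
      // Same substring test the patch uses; a stricter match would compare
      // only the text between "--" and "=".
      if (arg.find(info.name) != std::string::npos) {
        known = true;
        break;
      }
    }
    (known ? known_argv : foreign_argv).push_back(argv[i]);
  }

  int known_argc = static_cast<int>(known_argv.size());
  char** known_ptr = known_argv.data();
  google::ParseCommandLineFlags(&known_argc, &known_ptr, false);
  // foreign_argv would now be handed to the parser inside the .so,
  // mirroring paddle::platform::ParseCommandLineFlags in the patch.
  return 0;
}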