Unverified · Commit 5023530a, authored by Yan Chunwei, committed by GitHub

Refactor/remove sensitive (#13314)

Parent: 478a4e85
@@ -48,18 +48,18 @@ function (inference_download_and_uncompress install_dir url gz_filename)
   message(STATUS "finish downloading ${gz_filename}")
 endfunction(inference_download_and_uncompress)
 
-set(DITU_RNN_MODEL_URL "http://paddle-inference-dist.bj.bcebos.com/ditu_rnn_fluid%2Fmodel.tar.gz")
-set(DITU_RNN_DATA_URL "http://paddle-inference-dist.bj.bcebos.com/ditu_rnn_fluid%2Fdata.txt.tar.gz")
-set(DITU_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo/ditu_rnn" CACHE PATH "Ditu RNN model and data root." FORCE)
-if (NOT EXISTS ${DITU_INSTALL_DIR} AND WITH_TESTING)
-  inference_download_and_uncompress(${DITU_INSTALL_DIR} ${DITU_RNN_MODEL_URL} "ditu_rnn_fluid%2Fmodel.tar.gz")
-  inference_download_and_uncompress(${DITU_INSTALL_DIR} ${DITU_RNN_DATA_URL} "ditu_rnn_fluid%2Fdata.txt.tar.gz")
+set(RNN1_MODEL_URL "http://paddle-inference-dist.bj.bcebos.com/rnn1%2Fmodel.tar.gz")
+set(RNN1_DATA_URL "http://paddle-inference-dist.bj.bcebos.com/rnn1%2Fdata.txt.tar.gz")
+set(RNN1_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo/rnn1" CACHE PATH "RNN1 model and data root." FORCE)
+if (NOT EXISTS ${RNN1_INSTALL_DIR} AND WITH_TESTING)
+  inference_download_and_uncompress(${RNN1_INSTALL_DIR} ${RNN1_MODEL_URL} "rnn1%2Fmodel.tar.gz")
+  inference_download_and_uncompress(${RNN1_INSTALL_DIR} ${RNN1_DATA_URL} "rnn1%2Fdata.txt.tar.gz")
 endif()
 
 inference_analysis_test(test_analyzer SRCS analyzer_tester.cc
   EXTRA_DEPS paddle_inference_api paddle_fluid_api ir_pass_manager analysis_predictor
-  ARGS --infer_ditu_rnn_model=${DITU_INSTALL_DIR}/model
-       --infer_ditu_rnn_data=${DITU_INSTALL_DIR}/data.txt)
+  ARGS --infer_model=${RNN1_INSTALL_DIR}/model
+       --infer_data=${RNN1_INSTALL_DIR}/data.txt)
 
 inference_analysis_test(test_data_flow_graph SRCS data_flow_graph_tester.cc)
 inference_analysis_test(test_data_flow_graph_to_fluid_pass SRCS data_flow_graph_to_fluid_pass_tester.cc)
...
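A note on the URLs above: the `%2F` in names like `rnn1%2Fmodel.tar.gz` is a URL-encoded `/`, so the archives live under the `rnn1/` prefix on the BOS bucket. The same encoded string is passed as `gz_filename`, presumably so the download step and the uncompress step agree on the local file name.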
@@ -26,8 +26,8 @@
 #include "paddle/fluid/inference/api/paddle_inference_pass.h"
 #include "paddle/fluid/inference/utils/singleton.h"
 
-DEFINE_string(infer_ditu_rnn_model, "", "model path for ditu RNN");
-DEFINE_string(infer_ditu_rnn_data, "", "data path for ditu RNN");
+DEFINE_string(infer_model, "", "model path");
+DEFINE_string(infer_data, "", "data path");
 DEFINE_int32(batch_size, 10, "batch size.");
 DEFINE_int32(repeat, 1, "Running the inference program repeat times.");
 DEFINE_int32(num_threads, 1, "Running the inference program in multi-threads.");
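These are plain gflags definitions, so the rename changes the generated globals from `FLAGS_infer_ditu_rnn_model`/`FLAGS_infer_ditu_rnn_data` to `FLAGS_infer_model`/`FLAGS_infer_data`, which is why every use site below has to change too. A minimal, self-contained sketch of how such flags are declared and consumed (a standalone gflags demo, not Paddle code):

```cpp
#include <iostream>
#include <gflags/gflags.h>

// Mirrors the renamed flags from the diff; DEFINE_string(name, ...) generates
// a global std::string named FLAGS_name.
DEFINE_string(infer_model, "", "model path");
DEFINE_string(infer_data, "", "data path");

int main(int argc, char* argv[]) {
  // Parses --infer_model=... and --infer_data=... from the command line.
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
  std::cout << "model dir: " << FLAGS_infer_model << "\n"
            << "data file: " << FLAGS_infer_data << "\n";
  return 0;
}
```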
@@ -223,17 +223,6 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
 
 }  // namespace
 
-const float ditu_rnn_target_data[] = {
-    104.711, 11.2431, 1.35422, 0,       0,       0,       0,       0,
-    27.7039, 1.41486, 7.09526, 0,       0,       0,       0,       0,
-    7.6481,  6.5324,  56.383,  2.88018, 8.92918, 132.007, 4.27429, 2.02934,
-    14.1727, 10.7461, 25.0616, 16.0197, 14.4163, 16.9199, 6.75517, 0,
-    80.0249, 4.77739, 0,       0,       0,       0,       0,       0,
-    47.5643, 2.67029, 8.76252, 0,       0,       0,       0,       0,
-    51.8822, 4.4411,  0,       0,       0,       0,       0,       0,
-    10.7286, 12.0595, 10.6672, 0,       0,       0,       0,       0,
-    93.5771, 3.84641, 0,       0,       0,       0,       0,       0,
-    169.426, 0,       0,       0,       0,       0,       0,       0};
 void CompareResult(const std::vector<PaddleTensor> &outputs,
                    const std::vector<PaddleTensor> &base_outputs) {
   PADDLE_ENFORCE_GT(outputs.size(), 0);
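With the hard-coded `ditu_rnn_target_data` golden array deleted, correctness now rests entirely on `CompareResult`, which checks the optimized run's outputs against `base_outputs` produced by a baseline predictor run. A hedged sketch of that element-wise comparison pattern on raw float buffers (simplified stand-in types, not the real `PaddleTensor`):

```cpp
#include <cassert>
#include <cmath>
#include <vector>

// Compares an optimized run against a baseline run element by element,
// within an absolute tolerance, mirroring the CompareResult idea above.
void CompareFloats(const std::vector<float>& outputs,
                   const std::vector<float>& base_outputs,
                   float abs_tol = 1e-3f) {
  assert(!outputs.empty());                       // analogous to PADDLE_ENFORCE_GT
  assert(outputs.size() == base_outputs.size());  // output counts must agree
  for (size_t i = 0; i < outputs.size(); ++i) {
    assert(std::fabs(outputs[i] - base_outputs[i]) < abs_tol);
  }
}
```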
@@ -255,11 +244,10 @@ void CompareResult(const std::vector<PaddleTensor> &outputs,
   }
 }
 
 // Test with a really complicate model.
-void TestDituRNNPrediction(bool use_analysis, bool activate_ir,
-                           int num_threads) {
+void TestRNN1Prediction(bool use_analysis, bool activate_ir, int num_threads) {
   AnalysisConfig config;
-  config.prog_file = FLAGS_infer_ditu_rnn_model + "/__model__";
-  config.param_file = FLAGS_infer_ditu_rnn_model + "/param";
+  config.prog_file = FLAGS_infer_model + "/__model__";
+  config.param_file = FLAGS_infer_model + "/param";
   config.use_gpu = false;
   config.device = 0;
   config.specify_input_name = true;
@@ -277,7 +265,7 @@ void TestDituRNNPrediction(bool use_analysis, bool activate_ir,
       CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
           config);
   std::vector<PaddleTensor> input_slots;
-  DataRecord data(FLAGS_infer_ditu_rnn_data, batch_size);
+  DataRecord data(FLAGS_infer_data, batch_size);
   // Prepare inputs.
   PrepareInputs(&input_slots, &data, batch_size);
   std::vector<PaddleTensor> outputs, base_outputs;
@@ -307,7 +295,7 @@ void TestDituRNNPrediction(bool use_analysis, bool activate_ir,
     threads.emplace_back([&, tid]() {
       // Each thread should have local input_slots and outputs.
       std::vector<PaddleTensor> input_slots;
-      DataRecord data(FLAGS_infer_ditu_rnn_data, batch_size);
+      DataRecord data(FLAGS_infer_data, batch_size);
       PrepareInputs(&input_slots, &data, batch_size);
       std::vector<PaddleTensor> outputs;
       Timer timer;
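The comment in the hunk above states the key constraint of the multi-threaded benchmark: `input_slots`, `data`, and `outputs` are constructed inside the lambda so each worker owns its buffers, while `tid` is captured by value and the shared state by reference. A minimal sketch of the same fan-out pattern, with hypothetical helper names standing in for the predictor call:

```cpp
#include <thread>
#include <utility>
#include <vector>

struct Tensor { std::vector<float> data; };

// Hypothetical stand-in for one predictor->Run(...) call.
Tensor RunOnce(int tid) { return Tensor{{static_cast<float>(tid)}}; }

int main() {
  const int num_threads = 4;
  std::vector<std::thread> threads;
  std::vector<Tensor> results(num_threads);  // one slot per thread: no data race
  for (int tid = 0; tid < num_threads; ++tid) {
    threads.emplace_back([&, tid] {          // tid by value, the rest by reference
      Tensor outputs = RunOnce(tid);         // thread-local working set
      results[tid] = std::move(outputs);
    });
  }
  for (auto& t : threads) t.join();
  return 0;
}
```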
@@ -354,24 +342,22 @@ void TestDituRNNPrediction(bool use_analysis, bool activate_ir,
 }
 
 // Inference with analysis and IR, easy for profiling independently.
-TEST(Analyzer, DituRNN) {
-  TestDituRNNPrediction(true, true, FLAGS_num_threads);
-}
+TEST(Analyzer, rnn1) { TestRNN1Prediction(true, true, FLAGS_num_threads); }
 
-// Other unit-tests of DituRNN, test different options of use_analysis,
+// Other unit-tests of RNN1, test different options of use_analysis,
 // activate_ir and multi-threads.
-TEST(Analyzer, DituRNN_tests) {
+TEST(Analyzer, RNN_tests) {
   int num_threads[2] = {1, 4};
   for (auto i : num_threads) {
     // Directly infer with the original model.
-    TestDituRNNPrediction(false, false, i);
+    TestRNN1Prediction(false, false, i);
     // Inference with the original model with the analysis turned on, the
     // analysis
     // module will transform the program to a data flow graph.
-    TestDituRNNPrediction(true, false, i);
+    TestRNN1Prediction(true, false, i);
     // Inference with analysis and IR. The IR module will fuse some large
     // kernels.
-    TestDituRNNPrediction(true, true, i);
+    TestRNN1Prediction(true, true, i);
   }
 }
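Given the CMake ARGS earlier in this commit, the renamed binary flags line up with the test registration: `test_analyzer` is invoked with `--infer_model=${RNN1_INSTALL_DIR}/model --infer_data=${RNN1_INSTALL_DIR}/data.txt`, and since these are standard gtest cases, a single one can presumably be selected for profiling with the usual filter, e.g. `--gtest_filter=Analyzer.rnn1`.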
...