From 2f4aee361a7bacbac375ea082b1a1a646c6b3b40 Mon Sep 17 00:00:00 2001
From: nhzlx
Date: Tue, 22 Jan 2019 07:20:52 +0000
Subject: [PATCH] fix comments

test=develop
---
 .../fluid/inference/tests/api/tester_helper.h | 19 +++++++++++-
 .../inference/tests/api/trt_models_tester.cc  | 31 +++++++++++++++++++
 .../tensorrt/tensorrt_engine_op_test.cc       |  4 +--
 3 files changed, 51 insertions(+), 3 deletions(-)

diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h
index ac964dc0c86..8ee89c34f0b 100644
--- a/paddle/fluid/inference/tests/api/tester_helper.h
+++ b/paddle/fluid/inference/tests/api/tester_helper.h
@@ -56,6 +56,13 @@ DECLARE_int32(paddle_num_threads);
 namespace paddle {
 namespace inference {
 
+float Random(float low, float high) {
+  static std::random_device rd;
+  static std::mt19937 mt(rd());
+  std::uniform_real_distribution<float> dist(low, high);
+  return dist(mt);
+}
+
 void PrintConfig(const PaddlePredictor::Config *config, bool use_analysis) {
   const auto *analysis_config =
       reinterpret_cast<const contrib::AnalysisConfig *>(config);
@@ -176,7 +183,7 @@ void SetFakeImageInput(std::vector<std::vector<PaddleTensor>> *inputs,
     float *input_data = static_cast<float *>(input.data.data());
     // fill input data, for profile easily, do not use random data here.
     for (size_t j = 0; j < len; ++j) {
-      *(input_data + j) = static_cast<float>(j) / len;
+      *(input_data + j) = Random(0, 10.);
     }
   }
   (*inputs).emplace_back(input_slots);
@@ -344,6 +351,16 @@ void CompareNativeAndAnalysis(
   CompareResult(analysis_outputs, native_outputs);
 }
 
+void CompareNativeAndAnalysis(
+    PaddlePredictor *native_pred, PaddlePredictor *analysis_pred,
+    const std::vector<std::vector<PaddleTensor>> &inputs) {
+  int batch_size = FLAGS_batch_size;
+  std::vector<PaddleTensor> native_outputs, analysis_outputs;
+  native_pred->Run(inputs[0], &native_outputs, batch_size);
+  analysis_pred->Run(inputs[0], &analysis_outputs, batch_size);
+  CompareResult(analysis_outputs, native_outputs);
+}
+
 template <typename T>
 std::string LoDTensorSummary(const framework::LoDTensor &tensor) {
   std::stringstream ss;
diff --git a/paddle/fluid/inference/tests/api/trt_models_tester.cc b/paddle/fluid/inference/tests/api/trt_models_tester.cc
index 9725c190329..8d177542934 100644
--- a/paddle/fluid/inference/tests/api/trt_models_tester.cc
+++ b/paddle/fluid/inference/tests/api/trt_models_tester.cc
@@ -107,6 +107,27 @@ void compare(std::string model_dir, bool use_tensorrt) {
       inputs_all);
 }
 
+void compare_continuous_input(std::string model_dir, bool use_tensorrt) {
+  contrib::AnalysisConfig analysis_config;
+  SetConfig<contrib::AnalysisConfig>(&analysis_config, model_dir, true,
+                                     use_tensorrt, FLAGS_batch_size);
+  auto config =
+      reinterpret_cast<const PaddlePredictor::Config *>(&analysis_config);
+  auto native_pred = CreateTestPredictor(config, false);
+  auto analysis_pred = CreateTestPredictor(config, true);
+  for (int i = 0; i < 100; i++) {
+    std::vector<std::vector<PaddleTensor>> inputs_all;
+    if (!FLAGS_prog_filename.empty() && !FLAGS_param_filename.empty()) {
+      SetFakeImageInput(&inputs_all, model_dir, true, FLAGS_prog_filename,
+                        FLAGS_param_filename);
+    } else {
+      SetFakeImageInput(&inputs_all, model_dir, false, "__model__", "");
+    }
+    CompareNativeAndAnalysis(native_pred.get(), analysis_pred.get(),
+                             inputs_all);
+  }
+}
+
 TEST(TensorRT_mobilenet, compare) {
   std::string model_dir = FLAGS_infer_model + "/mobilenet";
   compare(model_dir, /* use_tensorrt */ true);
@@ -157,5 +178,15 @@ TEST(AnalysisPredictor, use_gpu) {
   }
 }
 
+TEST(resnet50, compare_continuous_input) {
+  std::string model_dir = FLAGS_infer_model + "/resnet50";
+  compare_continuous_input(model_dir, true);
+}
+
+TEST(resnet50, compare_continuous_input_native) {
+  std::string model_dir = FLAGS_infer_model + "/resnet50";
+  compare_continuous_input(model_dir, false);
+}
+
 }  // namespace inference
 }  // namespace paddle
diff --git a/paddle/fluid/operators/tensorrt/tensorrt_engine_op_test.cc b/paddle/fluid/operators/tensorrt/tensorrt_engine_op_test.cc
index bb25a37584e..391e7a1c070 100644
--- a/paddle/fluid/operators/tensorrt/tensorrt_engine_op_test.cc
+++ b/paddle/fluid/operators/tensorrt/tensorrt_engine_op_test.cc
@@ -99,7 +99,7 @@ TEST(TensorRTEngineOp, manual) {
   SetAttr<std::string>(engine_op_desc.Proto(), "subgraph",
                        block_->SerializeAsString());
   SetAttr<int>(engine_op_desc.Proto(), "max_batch_size", 2);
-  SetAttr<int>(engine_op_desc.Proto(), "workspace_size", 2 << 20);
+  SetAttr<int>(engine_op_desc.Proto(), "workspace_size", 1 << 20);
   SetAttr<std::string>(engine_op_desc.Proto(), "engine_uniq_key", "a_engine");
   SetAttr<std::vector<std::string>>(engine_op_desc.Proto(), "parameters",
                                     std::vector<std::string>({}));
@@ -193,7 +193,7 @@ void Execute(int batch_size, int input_dim, int output_dim, int nlayers = 1) {
   SetAttr<std::string>(engine_op_desc.Proto(), "subgraph",
                        block_->SerializeAsString());
   SetAttr<int>(engine_op_desc.Proto(), "max_batch_size", batch_size);
-  SetAttr<int>(engine_op_desc.Proto(), "workspace_size", 2 << 20);
+  SetAttr<int>(engine_op_desc.Proto(), "workspace_size", 1 << 20);
   SetAttr<std::vector<std::string>>(
       engine_op_desc.Proto(), "parameters",
       std::vector<std::string>({"y0", "y1", "y2", "y3"}));
-- 
GitLab
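
For reference, a minimal standalone sketch of the Random helper this patch adds to tester_helper.h. The helper body matches the patch; the main() driver and the buffer around it are illustrative assumptions only, mirroring how the patched fill loop in SetFakeImageInput now draws uniform floats in [0, 10) instead of the old deterministic ramp.

// Standalone sketch; only Random() comes from the patch, the rest is an
// assumed driver for illustration.
#include <iostream>
#include <random>
#include <vector>

// One lazily seeded Mersenne Twister engine shared across calls (note: not
// synchronized, same as the patched helper); a fresh uniform distribution
// is constructed per call, which is cheap for uniform_real_distribution.
float Random(float low, float high) {
  static std::random_device rd;
  static std::mt19937 mt(rd());
  std::uniform_real_distribution<float> dist(low, high);
  return dist(mt);
}

int main() {
  // Mirrors the patched fill loop: every element gets a uniform float
  // in [0, 10), replacing the old static_cast<float>(j) / len ramp.
  std::vector<float> input(8);
  for (size_t j = 0; j < input.size(); ++j) {
    input[j] = Random(0.f, 10.f);
  }
  for (float v : input) {
    std::cout << v << "\n";
  }
  return 0;
}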