diff --git a/paddle/fluid/inference/analysis/analyzer_tester.cc b/paddle/fluid/inference/analysis/analyzer_tester.cc
index f90910ac0d0a897ef01d4ca2bd0bca575baf4c40..5430e5c1ef1c70d27295ebc1a9bd427cd95f006a 100644
--- a/paddle/fluid/inference/analysis/analyzer_tester.cc
+++ b/paddle/fluid/inference/analysis/analyzer_tester.cc
@@ -51,9 +51,7 @@ void TestWord2vecPrediction(const std::string& model_path) {
   config.model_dir = model_path;
   config.use_gpu = false;
   config.device = 0;
-  auto predictor =
-      ::paddle::CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(
-          config);
+  auto predictor = ::paddle::CreatePaddlePredictor<NativeConfig>(config);
 
   // One single batch
 
diff --git a/paddle/fluid/inference/api/analysis_predictor_tester.cc b/paddle/fluid/inference/api/analysis_predictor_tester.cc
index 1d25f55b3188a684fe38df1417d114348cfa2e8a..13c25da1b52742e6114b294847c21ce735b9fc21 100644
--- a/paddle/fluid/inference/api/analysis_predictor_tester.cc
+++ b/paddle/fluid/inference/api/analysis_predictor_tester.cc
@@ -27,9 +27,7 @@ TEST(AnalysisPredictor, ZeroCopy) {
   config.model_dir = FLAGS_dirname + "/word2vec.inference.model";
   config.use_feed_fetch_ops = false;
 
-  auto predictor =
-      CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
-          config);
+  auto predictor = CreatePaddlePredictor<AnalysisConfig>(config);
 
   auto w0 = predictor->GetInputTensor("firstw");
   auto w1 = predictor->GetInputTensor("secondw");
diff --git a/paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc b/paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc
index fc6310e90b0257bc84742fb617a00f5778bb1866..702158ea3bcab854eece3ccd40724d92efcbae67 100644
--- a/paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc
+++ b/paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc
@@ -41,11 +41,8 @@ void CompareTensorRTWithFluid(bool enable_tensorrt) {
   config1.device = 0;
   config1.max_batch_size = 10;
 
-  auto predictor0 =
-      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config0);
-  auto predictor1 =
-      CreatePaddlePredictor<MixedRTConfig,
-                            PaddleEngineKind::kAutoMixedTensorRT>(config1);
+  auto predictor0 = CreatePaddlePredictor<NativeConfig>(config0);
+  auto predictor1 = CreatePaddlePredictor<MixedRTConfig>(config1);
 
   for (int batch_id = 0; batch_id < 1; batch_id++) {
     //# 2. Prepare input.
diff --git a/paddle/fluid/inference/tests/api/anakin_mobilenet_tester.cc b/paddle/fluid/inference/tests/api/anakin_mobilenet_tester.cc
index cf97f064beddb6ede1d4716f323b4c5b46cb266d..f391583812615bafa257003a45d5f31fdbe324c0 100644
--- a/paddle/fluid/inference/tests/api/anakin_mobilenet_tester.cc
+++ b/paddle/fluid/inference/tests/api/anakin_mobilenet_tester.cc
@@ -34,9 +34,7 @@ contrib::AnakinConfig GetConfig() {
 
 TEST(inference, anakin) {
   auto config = GetConfig();
-  auto predictor =
-      CreatePaddlePredictor<contrib::AnakinConfig, PaddleEngineKind::kAnakin>(
-          config);
+  auto predictor = CreatePaddlePredictor<contrib::AnakinConfig>(config);
 
   float data[1 * 3 * 224 * 224] = {1.0f};
   PaddleTensor tensor;
diff --git a/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc
index c76d72ccd99649913aefcb2aa57fe6061db8ca6d..5b6c922f95cf6d2d0683a1e9328463fe21f6bc38 100644
--- a/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc
@@ -308,18 +308,14 @@ TEST(Analyzer_rnn1, ZeroCopy) {
 
   PaddlePlace place;
   int output_size{0};
 
-  auto predictor =
-      CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
-          config);
+  auto predictor = CreatePaddlePredictor<AnalysisConfig>(config);
 
   config.use_feed_fetch_ops = true;
   auto native_predictor = CreatePaddlePredictor<NativeConfig>(config);
 
   config.use_feed_fetch_ops = true;  // the analysis predictor needs feed/fetch.
-  auto analysis_predictor =
-      CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
-          config);
+  auto analysis_predictor = CreatePaddlePredictor<AnalysisConfig>(config);
 
 #define NEW_TENSOR(name__) \
   auto name__##_tensor = predictor->GetInputTensor(#name__);
diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h
index 8603d09cbd09e4cee254faf52c2ccbe8d661994d..04e338653d6946041d83891676fe844e1de3ab10 100644
--- a/paddle/fluid/inference/tests/api/tester_helper.h
+++ b/paddle/fluid/inference/tests/api/tester_helper.h
@@ -77,8 +77,7 @@ void CompareResult(const std::vector<PaddleTensor> &outputs,
 std::unique_ptr<PaddlePredictor> CreateTestPredictor(
     const AnalysisConfig &config, bool use_analysis = true) {
   if (use_analysis) {
-    return CreatePaddlePredictor<contrib::AnalysisConfig,
-                                 PaddleEngineKind::kAnalysis>(config);
+    return CreatePaddlePredictor<contrib::AnalysisConfig>(config);
   } else {
     return CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(
         config);
diff --git a/paddle/fluid/inference/tests/api/trt_models_tester.cc b/paddle/fluid/inference/tests/api/trt_models_tester.cc
index bf320a0cbc2fff5f973c48768281e26d0fde232b..91111f2af56065bbf57ba3a41bddd55ecced1060 100644
--- a/paddle/fluid/inference/tests/api/trt_models_tester.cc
+++ b/paddle/fluid/inference/tests/api/trt_models_tester.cc
@@ -51,11 +51,8 @@ void CompareTensorRTWithFluid(int batch_size, std::string model_dirname) {
   config1.model_dir = model_dirname;
   config1.max_batch_size = batch_size;
 
-  auto predictor0 =
-      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config0);
-  auto predictor1 =
-      CreatePaddlePredictor<MixedRTConfig,
-                            PaddleEngineKind::kAutoMixedTensorRT>(config1);
+  auto predictor0 = CreatePaddlePredictor<NativeConfig>(config0);
+  auto predictor1 = CreatePaddlePredictor<MixedRTConfig>(config1);
   // Prepare inputs
   int height = 224;
   int width = 224;
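
Note: every call site above drops the explicit PaddleEngineKind template argument. A change like this only compiles if the engine kind has a default in the primary template and the short spelling is redirected to the intended engine, presumably via explicit specializations like the one sketched here. What follows is a minimal self-contained sketch of that pattern, not the actual Paddle declarations; EngineKind, Predictor, and CreatePredictor are hypothetical stand-ins.

#include <iostream>
#include <memory>

enum class EngineKind { kNative, kAnalysis };

struct NativeConfig {};
struct AnalysisConfig {};

struct Predictor {
  explicit Predictor(const char* engine) { std::cout << engine << "\n"; }
};

// Primary template: the engine kind defaults to kNative, so callers may write
// CreatePredictor<SomeConfig>(config) and omit the second argument.
template <typename ConfigT, EngineKind engine = EngineKind::kNative>
std::unique_ptr<Predictor> CreatePredictor(const ConfigT& config);

// Engine-specific definitions (in Paddle these live in separate .cc files).
template <>
std::unique_ptr<Predictor> CreatePredictor<NativeConfig, EngineKind::kNative>(
    const NativeConfig&) {
  return std::make_unique<Predictor>("native engine");
}

template <>
std::unique_ptr<Predictor>
CreatePredictor<AnalysisConfig, EngineKind::kAnalysis>(const AnalysisConfig&) {
  return std::make_unique<Predictor>("analysis engine");
}

// Convenience specialization: <AnalysisConfig> alone names
// <AnalysisConfig, EngineKind::kNative>; redirecting it to the kAnalysis
// instantiation is what lets call sites drop the second template argument
// without silently falling back to the native engine.
template <>
std::unique_ptr<Predictor> CreatePredictor<AnalysisConfig>(
    const AnalysisConfig& config) {
  return CreatePredictor<AnalysisConfig, EngineKind::kAnalysis>(config);
}

int main() {
  NativeConfig nc;
  AnalysisConfig ac;
  auto p0 = CreatePredictor(nc);                  // prints "native engine"
  auto p1 = CreatePredictor<AnalysisConfig>(ac);  // prints "analysis engine"
}

The same reasoning explains why the tests keep the two-argument form where the engine differs from the config's natural one: an explicit second argument always wins over both the default and the convenience redirection.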