// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "test_suite.h"  // NOLINT

DEFINE_string(modeldir, "", "Directory of the inference model.");
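// Typical invocation (assumed; the binary name depends on the build target):
//   ./test_ppyolo_mbv3 --modeldir=/path/to/ppyolo_mbv3_model
// where modeldir contains model.pdmodel and model.pdiparams.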

namespace paddle_infer {

std::map<std::string, paddle::test::Record> PrepareInput(int batch_size) {
  // init input data
  int channel = 3;
  int width = 320;
  int height = 320;
  paddle::test::Record image, im_shape, scale_factor;
  int input_num = batch_size * channel * width * height;
  int shape_num = batch_size * 2;
  std::vector<float> image_data(input_num, 1);
  // fill the fake image with a repeating 0.0 .. 4.5 pattern
  for (int i = 0; i < input_num; ++i) {
    image_data[i] = i % 10 * 0.5;
  }
  std::vector<float> im_shape_data(shape_num, 1);
  std::vector<float> scale_factor_data(shape_num, 1);

  image.data = std::vector<float>(image_data.begin(), image_data.end());
  image.shape = std::vector<int>{batch_size, channel, width, height};
  image.type = paddle::PaddleDType::FLOAT32;

  im_shape.data =
      std::vector<float>(im_shape_data.begin(), im_shape_data.end());
  im_shape.shape = std::vector<int>{batch_size, 2};
  im_shape.type = paddle::PaddleDType::FLOAT32;

  scale_factor.data =
      std::vector<float>(scale_factor_data.begin(), scale_factor_data.end());
  scale_factor.shape = std::vector<int>{batch_size, 2};
  scale_factor.type = paddle::PaddleDType::FLOAT32;

  std::map<std::string, paddle::test::Record> input_data_map;
  input_data_map.insert({"image", image});
  input_data_map.insert({"im_shape", im_shape});
  input_data_map.insert({"scale_factor", scale_factor});

  return input_data_map;
}

TEST(tensorrt_tester_ppyolo_mbv3, multi_thread4_trt_fp32_bz2) {
  int thread_num = 4;
  // init input data
  auto input_data_map = PrepareInput(2);
  // init output data
  std::map<std::string, paddle::test::Record> infer_output_data,
      truth_output_data;
  // prepare ground-truth config
  paddle_infer::Config config, config_no_ir;
  config_no_ir.SetModel(FLAGS_modeldir + "/model.pdmodel",
                        FLAGS_modeldir + "/model.pdiparams");
  config_no_ir.EnableUseGpu(100, 0);
  config_no_ir.SwitchIrOptim(false);
  // prepare inference config
  config.SetModel(FLAGS_modeldir + "/model.pdmodel",
                  FLAGS_modeldir + "/model.pdiparams");
  config.EnableUseGpu(100, 0);
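  // 1 << 25 bytes (32 MB) of TensorRT workspace, max batch size 2,
  // min subgraph size 3, FP32 precision, use_static=false (no serialized
  // engine cache), use_calib_mode=false (no INT8 calibration)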
  config.EnableTensorRtEngine(
      1 << 25, 2, 3, paddle_infer::PrecisionType::kFloat32, false, false);
  LOG(INFO) << config.Summary();
  // get the ground truth by disabling IR optimization
  paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
  SingleThreadPrediction(
      pred_pool_no_ir.Retrive(0), &input_data_map, &truth_output_data, 1);

  // get inference results from multiple threads
  std::vector<std::thread> threads;
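  // one predictor per thread, each drawn from the same pool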
  services::PredictorPool pred_pool(config, thread_num);
  for (int i = 0; i < thread_num; ++i) {
    threads.emplace_back(paddle::test::SingleThreadPrediction,
                         pred_pool.Retrive(i),
                         &input_data_map,
                         &infer_output_data,
                         2);
  }

  // thread join & check outputs
  for (int i = 0; i < thread_num; ++i) {
    LOG(INFO) << "join tid : " << i;
    threads[i].join();
    // TODO(OliverLPH): tolerance is loose (0.18) because the input is fake;
    // tighten it once real input data is used
    CompareRecord(&truth_output_data, &infer_output_data, 0.18);
  }

  std::cout << "finish multi-thread test" << std::endl;
}

TEST(DISABLED_mkldnn_tester_ppyolo_mbv3, multi_thread4_mkl_bz2) {
  // TODO(OliverLPH): mkldnn multi thread will fail
  int thread_num = 4;
  // init input data
  auto input_data_map = PrepareInput(2);
  // init output data
  std::map<std::string, paddle::test::Record> infer_output_data,
      truth_output_data;
  // prepare ground-truth config
  paddle_infer::Config config, config_no_ir;
  config_no_ir.SetModel(FLAGS_modeldir + "/model.pdmodel",
                        FLAGS_modeldir + "/model.pdiparams");
  config_no_ir.DisableGpu();
  config_no_ir.SwitchIrOptim(false);
  // prepare inference config
  config.SetModel(FLAGS_modeldir + "/model.pdmodel",
                  FLAGS_modeldir + "/model.pdiparams");
  config.DisableGpu();
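  // oneDNN (MKL-DNN) settings: cache primitives for up to 10 input shapes
  // and use 10 CPU math library threads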
  config.EnableMKLDNN();
  config.SetMkldnnCacheCapacity(10);
  config.SetCpuMathLibraryNumThreads(10);
  LOG(INFO) << config.Summary();
  // get the ground truth by disabling IR optimization
  paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
  SingleThreadPrediction(
      pred_pool_no_ir.Retrive(0), &input_data_map, &truth_output_data, 1);

  // get inference results from multiple threads
  std::vector<std::thread> threads;
  services::PredictorPool pred_pool(config, thread_num);
  for (int i = 0; i < thread_num; ++i) {
    threads.emplace_back(paddle::test::SingleThreadPrediction,
                         pred_pool.Retrive(i),
                         &input_data_map,
                         &infer_output_data,
                         2);
  }

  // thread join & check outputs
  for (int i = 0; i < thread_num; ++i) {
    LOG(INFO) << "join tid : " << i;
    threads[i].join();
    CompareRecord(&truth_output_data, &infer_output_data, 1e-4);
  }

  std::cout << "finish multi-thread test" << std::endl;
}

}  // namespace paddle_infer

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  ::GFLAGS_NAMESPACE::ParseCommandLineFlags(&argc, &argv, true);
  return RUN_ALL_TESTS();
}