// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "test_suite.h"  // NOLINT

DEFINE_string(modeldir, "", "Directory of the inference model.");
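// Example invocation (the binary name and model path are illustrative):
//   ./test_LeViT --modeldir=/path/to/LeViT_inference_model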

namespace paddle_infer {

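// Builds an input Record: an NCHW float32 tensor of shape
// [batch_size, 3, 224, 224] filled with ones.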
paddle::test::Record PrepareInput(int batch_size) {
  // init input data
  int channel = 3;
  int width = 224;
  int height = 224;
  paddle::test::Record image_Record;
  int input_num = batch_size * channel * width * height;
  std::vector<float> input_data(input_num, 1);
  image_Record.data = input_data;
  image_Record.shape = std::vector<int>{batch_size, channel, width, height};
  image_Record.type = paddle::PaddleDType::FLOAT32;
  return image_Record;
}

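// Each test below builds two configs for the same model: config_no_ir with IR
// optimization switched off to produce the ground-truth baseline, and config
// with the optimizations under test; CompareRecord checks that both produce
// matching outputs.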
TEST(gpu_tester_LeViT, analysis_gpu_bz1) {
  // init input data
  std::map<std::string, paddle::test::Record> my_input_data_map;
  my_input_data_map["x"] = PrepareInput(1);
  // init output data
  std::map<std::string, paddle::test::Record> infer_output_data,
      truth_output_data;
  // prepare ground-truth config
  paddle_infer::Config config, config_no_ir;
  config_no_ir.SetModel(FLAGS_modeldir + "/inference.pdmodel",
                        FLAGS_modeldir + "/inference.pdiparams");
  config_no_ir.SwitchIrOptim(false);
  // prepare inference config
  config.SetModel(FLAGS_modeldir + "/inference.pdmodel",
                  FLAGS_modeldir + "/inference.pdiparams");
  // get ground truth by disabling IR optimization
  paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
  SingleThreadPrediction(pred_pool_no_ir.Retrive(0), &my_input_data_map,
                         &truth_output_data, 1);
  // get infer results
  paddle_infer::services::PredictorPool pred_pool(config, 1);
  SingleThreadPrediction(pred_pool.Retrive(0), &my_input_data_map,
                         &infer_output_data);
  // check outputs
  CompareRecord(&truth_output_data, &infer_output_data);
  std::cout << "finish test" << std::endl;
}

TEST(tensorrt_tester_LeViT, trt_fp32_bz2) {
  // init input data
  std::map<std::string, paddle::test::Record> my_input_data_map;
  my_input_data_map["x"] = PrepareInput(2);
  // init output data
  std::map<std::string, paddle::test::Record> infer_output_data,
      truth_output_data;
  // prepare ground-truth config
  paddle_infer::Config config, config_no_ir;
  config_no_ir.SetModel(FLAGS_modeldir + "/inference.pdmodel",
                        FLAGS_modeldir + "/inference.pdiparams");
  config_no_ir.SwitchIrOptim(false);
  // prepare inference config
  config.SetModel(FLAGS_modeldir + "/inference.pdmodel",
                  FLAGS_modeldir + "/inference.pdiparams");
  config.EnableUseGpu(100, 0);
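  // EnableUseGpu(memory_pool_init_size_mb, device_id); the TensorRT arguments
  // below are: workspace size, max batch size, min subgraph size, precision,
  // use_static, use_calib_mode.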
  config.EnableTensorRtEngine(
      1 << 20, 2, 50, paddle_infer::PrecisionType::kFloat32, false, false);
  // get ground truth by disabling IR optimization
  paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
  SingleThreadPrediction(pred_pool_no_ir.Retrive(0), &my_input_data_map,
                         &truth_output_data, 1);
  // get infer results
  paddle_infer::services::PredictorPool pred_pool(config, 1);
  SingleThreadPrediction(pred_pool.Retrive(0), &my_input_data_map,
                         &infer_output_data);
  // check outputs
  CompareRecord(&truth_output_data, &infer_output_data);
  std::cout << "finish test" << std::endl;
}

TEST(tensorrt_tester_LeViT, serial_diff_batch_trt_fp32) {
  int max_batch_size = 5;
  // prepare ground-truth config
  paddle_infer::Config config, config_no_ir;
  config_no_ir.SetModel(FLAGS_modeldir + "/inference.pdmodel",
                        FLAGS_modeldir + "/inference.pdiparams");
  config_no_ir.SwitchIrOptim(false);
  paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
  // prepare inference config
  config.SetModel(FLAGS_modeldir + "/inference.pdmodel",
                  FLAGS_modeldir + "/inference.pdiparams");
  config.EnableUseGpu(100, 0);
  config.EnableTensorRtEngine(1 << 20, max_batch_size, 50,
                              paddle_infer::PrecisionType::kFloat32, false,
                              false);
  paddle_infer::services::PredictorPool pred_pool(config, 1);

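  // run every batch size from 1 to max_batch_size - 1 through the same
  // predictors, comparing each against the no-IR baseline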
  for (int i = 1; i < max_batch_size; i++) {
    // init input data
    std::map<std::string, paddle::test::Record> my_input_data_map;
    my_input_data_map["x"] = PrepareInput(i);
    // init output data
    std::map<std::string, paddle::test::Record> infer_output_data,
        truth_output_data;
    // get ground truth by disabling IR optimization
    SingleThreadPrediction(pred_pool_no_ir.Retrive(0), &my_input_data_map,
                           &truth_output_data, 1);
    // get infer results
    SingleThreadPrediction(pred_pool.Retrive(0), &my_input_data_map,
                           &infer_output_data);
    // check outputs
    CompareRecord(&truth_output_data, &infer_output_data);
  }
  std::cout << "finish test" << std::endl;
}

TEST(tensorrt_tester_LeViT, multi_thread4_trt_fp32_bz2) {
  int thread_num = 4;
  // init input data
  std::map<std::string, paddle::test::Record> my_input_data_map;
  my_input_data_map["x"] = PrepareInput(2);
  // init output data
  std::map<std::string, paddle::test::Record> infer_output_data,
      truth_output_data;
  // prepare ground-truth config
  paddle_infer::Config config, config_no_ir;
  config_no_ir.SetModel(FLAGS_modeldir + "/inference.pdmodel",
                        FLAGS_modeldir + "/inference.pdiparams");
  config_no_ir.SwitchIrOptim(false);
  // prepare inference config
  config.SetModel(FLAGS_modeldir + "/inference.pdmodel",
                  FLAGS_modeldir + "/inference.pdiparams");
  config.EnableUseGpu(100, 0);
  config.EnableTensorRtEngine(
      1 << 20, 2, 50, paddle_infer::PrecisionType::kFloat32, false, false);
  // get ground truth by disabling IR optimization
  paddle_infer::services::PredictorPool pred_pool_no_ir(config_no_ir, 1);
  SingleThreadPrediction(pred_pool_no_ir.Retrive(0), &my_input_data_map,
                         &truth_output_data, 1);

  // get infer results from multi threads
  std::vector<std::thread> threads;
  services::PredictorPool pred_pool(config, thread_num);
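  // one predictor per thread, retrieved from the pool by index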
  for (int i = 0; i < thread_num; ++i) {
    threads.emplace_back(paddle::test::SingleThreadPrediction,
                         pred_pool.Retrive(i), &my_input_data_map,
                         &infer_output_data, 10);
  }

  // thread join & check outputs
  for (int i = 0; i < thread_num; ++i) {
    LOG(INFO) << "join tid : " << i;
    threads[i].join();
    CompareRecord(&truth_output_data, &infer_output_data);
  }

  std::cout << "finish multi-thread test" << std::endl;
}

}  // namespace paddle_infer

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  ::GFLAGS_NAMESPACE::ParseCommandLineFlags(&argc, &argv, true);
  return RUN_ALL_TESTS();
}