/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gflags/gflags.h>
#include <glog/logging.h>
#include <gtest/gtest.h>

#include "paddle/fluid/inference/tests/api/tester_helper.h"

namespace paddle {
namespace inference {

DEFINE_bool(use_tensorrt, true, "Test the performance of the TensorRT engine.");
DEFINE_string(prog_filename, "", "Name of model file.");
DEFINE_string(param_filename, "", "Name of parameters file.");

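// Fills in a NativeConfig-style config. If both --prog_filename and
// --param_filename are given, the model is loaded from those combined
// files; otherwise it is loaded from the directory layout in model_dir.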
template <typename ConfigType>
void SetConfig(ConfigType* config, std::string model_dir, bool use_gpu,
               bool use_tensorrt = false, int batch_size = -1) {
  if (!FLAGS_prog_filename.empty() && !FLAGS_param_filename.empty()) {
    config->prog_file = model_dir + "/" + FLAGS_prog_filename;
    config->param_file = model_dir + "/" + FLAGS_param_filename;
  } else {
    config->model_dir = model_dir;
  }
  if (use_gpu) {
    config->use_gpu = true;
    config->device = 0;
    config->fraction_of_gpu_memory = 0.15;
  }
}

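// Specialization for contrib::AnalysisConfig. Besides selecting the model
// files, it enables the GPU and, when use_tensorrt is set, the TensorRT
// subgraph engine; the conv_bn and fc fusion passes are deleted in that
// mode, which would otherwise overlap with TensorRT's own fusions.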
template <>
void SetConfig<contrib::AnalysisConfig>(contrib::AnalysisConfig* config,
                                        std::string model_dir, bool use_gpu,
                                        bool use_tensorrt, int batch_size) {
  if (!FLAGS_prog_filename.empty() && !FLAGS_param_filename.empty()) {
    config->SetModel(model_dir + "/" + FLAGS_prog_filename,
                     model_dir + "/" + FLAGS_param_filename);
  } else {
    config->SetModel(model_dir);
  }
  if (use_gpu) {
    config->EnableUseGpu(100, 0);
    if (use_tensorrt) {
      config->EnableTensorRtEngine(1 << 10, batch_size);
      config->pass_builder()->DeletePass("conv_bn_fuse_pass");
      config->pass_builder()->DeletePass("fc_fuse_pass");
      config->pass_builder()->TurnOnDebug();
    } else {
      config->SwitchIrOptim();
    }
  }
}

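// Measures prediction time over fake image inputs, using either the
// analysis predictor (optionally with the TensorRT engine) or the native
// predictor.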
void profile(std::string model_dir, bool use_analysis, bool use_tensorrt) {
  std::vector<std::vector<PaddleTensor>> inputs_all;
  if (!FLAGS_prog_filename.empty() && !FLAGS_param_filename.empty()) {
    SetFakeImageInput(&inputs_all, model_dir, true, FLAGS_prog_filename,
                      FLAGS_param_filename);
  } else {
    SetFakeImageInput(&inputs_all, model_dir, false, "__model__", "");
  }

  std::vector<PaddleTensor> outputs;
  if (use_analysis || use_tensorrt) {
    contrib::AnalysisConfig config;
    config.EnableUseGpu(100, 0);
    config.pass_builder()->TurnOnDebug();
    SetConfig<contrib::AnalysisConfig>(&config, model_dir, true, use_tensorrt,
                                       FLAGS_batch_size);
    TestPrediction(reinterpret_cast<PaddlePredictor::Config*>(&config),
                   inputs_all, &outputs, FLAGS_num_threads, true);
  } else {
    NativeConfig config;
    SetConfig<NativeConfig>(&config, model_dir, true, false);
    TestPrediction(reinterpret_cast<PaddlePredictor::Config*>(&config),
                   inputs_all, &outputs, FLAGS_num_threads, false);
  }
}

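// Verifies that the analysis predictor (optionally with the TensorRT
// engine) and the native predictor produce matching outputs for the same
// fake image inputs.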
void compare(std::string model_dir, bool use_tensorrt) {
  std::vector<std::vector<PaddleTensor>> inputs_all;
  if (!FLAGS_prog_filename.empty() && !FLAGS_param_filename.empty()) {
    SetFakeImageInput(&inputs_all, model_dir, true, FLAGS_prog_filename,
                      FLAGS_param_filename);
  } else {
    SetFakeImageInput(&inputs_all, model_dir, false, "__model__", "");
  }

  contrib::AnalysisConfig analysis_config;
  SetConfig<contrib::AnalysisConfig>(&analysis_config, model_dir, true,
                                     use_tensorrt, FLAGS_batch_size);
  CompareNativeAndAnalysis(
      reinterpret_cast<const PaddlePredictor::Config*>(&analysis_config),
      inputs_all);
}

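// Runs 100 rounds of freshly generated fake inputs through one native and
// one analysis predictor to check that their outputs stay consistent
// across repeated Run() calls.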
void compare_continuous_input(std::string model_dir, bool use_tensorrt) {
  contrib::AnalysisConfig analysis_config;
  SetConfig<contrib::AnalysisConfig>(&analysis_config, model_dir, true,
                                     use_tensorrt, FLAGS_batch_size);
  auto config =
      reinterpret_cast<const PaddlePredictor::Config*>(&analysis_config);
  auto native_pred = CreateTestPredictor(config, false);
  auto analysis_pred = CreateTestPredictor(config, true);
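  // Each iteration regenerates the fake inputs, so both predictors see a
  // fresh batch every round.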
  for (int i = 0; i < 100; i++) {
    std::vector<std::vector<PaddleTensor>> inputs_all;
    if (!FLAGS_prog_filename.empty() && !FLAGS_param_filename.empty()) {
      SetFakeImageInput(&inputs_all, model_dir, true, FLAGS_prog_filename,
                        FLAGS_param_filename);
    } else {
      SetFakeImageInput(&inputs_all, model_dir, false, "__model__", "");
    }
    CompareNativeAndAnalysis(native_pred.get(), analysis_pred.get(),
                             inputs_all);
  }
}

TEST(TensorRT_mobilenet, compare) {
  std::string model_dir = FLAGS_infer_model + "/mobilenet";
  compare(model_dir, /* use_tensorrt */ true);
}

TEST(TensorRT_resnet50, compare) {
  std::string model_dir = FLAGS_infer_model + "/resnet50";
  compare(model_dir, /* use_tensorrt */ true);
}

TEST(TensorRT_resnext50, compare) {
  std::string model_dir = FLAGS_infer_model + "/resnext50";
  compare(model_dir, /* use_tensorrt */ true);
}

TEST(TensorRT_resnext50, profile) {
  std::string model_dir = FLAGS_infer_model + "/resnext50";
  // Set FLAGS_record_benchmark to true to record the benchmark to a file.
  // FLAGS_record_benchmark = true;
  FLAGS_model_name = "resnext50";
  profile(model_dir, /* use_analysis */ true, FLAGS_use_tensorrt);
}

TEST(resnext50, compare_analysis_native) {
  std::string model_dir = FLAGS_infer_model + "/resnext50";
  compare(model_dir, false /* use_tensorrt */);
}

TEST(TensorRT_mobilenet, analysis) {
  std::string model_dir = FLAGS_infer_model + "/mobilenet";
  compare(model_dir, false /* use_tensorrt */);
}

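// Smoke test: run the analysis predictor on the GPU directly, without the
// compare/profile helpers.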
TEST(AnalysisPredictor, use_gpu) {
  std::string model_dir = FLAGS_infer_model + "/mobilenet";
  contrib::AnalysisConfig config;
  config.EnableUseGpu(100, 0);
  config.SetModel(model_dir);
  config.pass_builder()->TurnOnDebug();

  std::vector<std::vector<PaddleTensor>> inputs_all;
  auto predictor = CreatePaddlePredictor(config);
  SetFakeImageInput(&inputs_all, model_dir, false, "__model__", "");

  std::vector<PaddleTensor> outputs;
  for (auto& input : inputs_all) {
    ASSERT_TRUE(predictor->Run(input, &outputs));
  }
}

TEST(resnet50, compare_continuous_input) {
  std::string model_dir = FLAGS_infer_model + "/resnet50";
  compare_continuous_input(model_dir, true);
}

TEST(resnet50, compare_continuous_input_native) {
  std::string model_dir = FLAGS_infer_model + "/resnet50";
  compare_continuous_input(model_dir, false);
}

}  // namespace inference
}  // namespace paddle