/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gflags/gflags.h>
#include <glog/logging.h>
#include <gtest/gtest.h>

#include "paddle/fluid/inference/tests/api/tester_helper.h"

namespace paddle {
namespace inference {

DEFINE_bool(use_tensorrt, true, "Use the TensorRT engine in the tests.");
DEFINE_string(prog_filename, "", "Name of model file.");
DEFINE_string(param_filename, "", "Name of parameters file.");

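// Generic config setup shared by the tests: point the config at the model
// (either explicit program/parameter files or a model directory) and, if
// requested, run on GPU device 0 with a capped GPU memory fraction.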
template <typename ConfigType>
void SetConfig(ConfigType* config, std::string model_dir, bool use_gpu,
               bool use_tensorrt = false, int batch_size = -1) {
  if (!FLAGS_prog_filename.empty() && !FLAGS_param_filename.empty()) {
    config->prog_file = model_dir + "/" + FLAGS_prog_filename;
    config->param_file = model_dir + "/" + FLAGS_param_filename;
  } else {
    config->model_dir = model_dir;
  }
  if (use_gpu) {
    config->use_gpu = true;
    config->device = 0;
    config->fraction_of_gpu_memory = 0.15;
  }
}

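// Specialization for contrib::AnalysisConfig: in addition to the generic
// setup, either enables the TensorRT engine (removing the conv_bn and fc
// fuse passes, whose fused ops would otherwise not reach the TensorRT
// subgraph) or falls back to plain IR optimization.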
template <>
void SetConfig<contrib::AnalysisConfig>(contrib::AnalysisConfig* config,
                                        std::string model_dir, bool use_gpu,
                                        bool use_tensorrt, int batch_size) {
  if (!FLAGS_prog_filename.empty() && !FLAGS_param_filename.empty()) {
    config->prog_file = model_dir + "/" + FLAGS_prog_filename;
    config->param_file = model_dir + "/" + FLAGS_param_filename;
  } else {
    config->model_dir = model_dir;
  }
  if (use_gpu) {
    config->use_gpu = true;
    config->device = 0;
    config->fraction_of_gpu_memory = 0.15;
    if (use_tensorrt) {
      config->EnableTensorRtEngine(1 << 10, batch_size);
      config->pass_builder()->DeletePass("conv_bn_fuse_pass");
      config->pass_builder()->DeletePass("fc_fuse_pass");
      config->pass_builder()->TurnOnDebug();
    } else {
      config->enable_ir_optim = true;
    }
  }
}

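// Measures prediction time on fake image inputs, going through either the
// analysis predictor (optionally with TensorRT) or the native predictor.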
void profile(std::string model_dir, bool use_analysis, bool use_tensorrt) {
  std::vector<std::vector<PaddleTensor>> inputs_all;
  if (!FLAGS_prog_filename.empty() && !FLAGS_param_filename.empty()) {
    SetFakeImageInput(&inputs_all, model_dir, true, FLAGS_prog_filename,
                      FLAGS_param_filename);
  } else {
    SetFakeImageInput(&inputs_all, model_dir, false, "__model__", "");
  }

  std::vector<PaddleTensor> outputs;
  if (use_analysis || use_tensorrt) {
    contrib::AnalysisConfig config(true);
    config.pass_builder()->TurnOnDebug();
    SetConfig<contrib::AnalysisConfig>(&config, model_dir, true, use_tensorrt,
                                       FLAGS_batch_size);
    TestPrediction(reinterpret_cast<PaddlePredictor::Config*>(&config),
                   inputs_all, &outputs, FLAGS_num_threads, true);
  } else {
    NativeConfig config;
    SetConfig<NativeConfig>(&config, model_dir, true, false);
    TestPrediction(reinterpret_cast<PaddlePredictor::Config*>(&config),
                   inputs_all, &outputs, FLAGS_num_threads, false);
  }
}

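// Runs the same fake inputs through the native predictor and the analysis
// predictor (optionally with TensorRT), then checks that the outputs match.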
void compare(std::string model_dir, bool use_tensorrt) {
  std::vector<std::vector<PaddleTensor>> inputs_all;
  if (!FLAGS_prog_filename.empty() && !FLAGS_param_filename.empty()) {
    SetFakeImageInput(&inputs_all, model_dir, true, FLAGS_prog_filename,
                      FLAGS_param_filename);
  } else {
    SetFakeImageInput(&inputs_all, model_dir, false, "__model__", "");
  }

  std::vector<PaddleTensor> native_outputs;
  NativeConfig native_config;
  SetConfig<NativeConfig>(&native_config, model_dir, true, false,
                          FLAGS_batch_size);
  TestOneThreadPrediction(
      reinterpret_cast<PaddlePredictor::Config*>(&native_config), inputs_all,
      &native_outputs, false);

  std::vector<PaddleTensor> analysis_outputs;
  contrib::AnalysisConfig analysis_config(true);
  SetConfig<contrib::AnalysisConfig>(&analysis_config, model_dir, true,
                                     use_tensorrt, FLAGS_batch_size);
  TestOneThreadPrediction(
      reinterpret_cast<PaddlePredictor::Config*>(&analysis_config), inputs_all,
      &analysis_outputs, true);

  CompareResult(native_outputs, analysis_outputs);
}

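// End-to-end tests: compare or profile several classification models using
// the helpers above.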
TEST(TensorRT_mobilenet, compare) {
  std::string model_dir = FLAGS_infer_model + "/mobilenet";
  compare(model_dir, /* use_tensorrt */ true);
}

TEST(TensorRT_resnet50, compare) {
  std::string model_dir = FLAGS_infer_model + "/resnet50";
  compare(model_dir, /* use_tensorrt */ true);
}

TEST(TensorRT_resnext50, compare) {
  std::string model_dir = FLAGS_infer_model + "/resnext50";
  compare(model_dir, /* use_tensorrt */ true);
}

TEST(TensorRT_resnext50, profile) {
  std::string model_dir = FLAGS_infer_model + "/resnext50";
  // Set FLAGS_record_benchmark to true (e.g. pass --record_benchmark=true on
  // the command line) to record the benchmark results to a file.
  // FLAGS_record_benchmark = true;
  FLAGS_model_name = "resnext50";
  profile(model_dir, /* use_analysis */ true, FLAGS_use_tensorrt);
}

TEST(resnext50, compare_analysis_native) {
  std::string model_dir = FLAGS_infer_model + "/resnext50";
  compare(model_dir, /* use_tensorrt */ false);
}

TEST(TensorRT_mobilenet, analysis) {
  std::string model_dir = FLAGS_infer_model + "/mobilenet";
  compare(model_dir, /* use_tensorrt */ false);
}

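// Smoke test: build an analysis predictor directly on GPU and run each fake
// input batch through it.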
TEST(AnalysisPredictor, use_gpu) {
  std::string model_dir = FLAGS_infer_model + "/mobilenet";
  contrib::AnalysisConfig config(true);
  config.model_dir = model_dir;
  config.fraction_of_gpu_memory = 0.15;
  config.pass_builder()->TurnOnDebug();

  std::vector<std::vector<PaddleTensor>> inputs_all;
  auto predictor = CreatePaddlePredictor(config);
  SetFakeImageInput(&inputs_all, model_dir, false, "__model__", "");

  std::vector<PaddleTensor> outputs;
  for (auto& input : inputs_all) {
    ASSERT_TRUE(predictor->Run(input, &outputs));
  }
}

}  // namespace inference
}  // namespace paddle