// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <gflags/gflags.h>
#include <glog/logging.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <string>
#include <vector>
#include "paddle/fluid/inference/analysis/analyzer.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"

namespace paddle {
using paddle::contrib::MixedRTConfig;

DEFINE_string(dirname, "", "Directory of the inference model.");
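
// Example invocation (the path below is illustrative):
//   ./trt_models_tester --dirname=/path/to/inference/models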

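// Baseline configuration: run the model on GPU with the plain Fluid
// executor, without TensorRT.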
NativeConfig GetConfigNative() {
  NativeConfig config;
  config.model_dir = FLAGS_dirname;
  // LOG(INFO) << "dirname  " << config.model_dir;
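  // Keep the memory pool below half of the GPU; together with the 0.2
  // fraction in GetConfigTRT() both predictors can fit on one device.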
  config.fraction_of_gpu_memory = 0.45;
  config.use_gpu = true;
  config.device = 0;
  return config;
}

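// TensorRT configuration: the same model, but subgraphs supported by
// TensorRT are offloaded to a TensorRT engine (MixedRTConfig).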
MixedRTConfig GetConfigTRT() {
  MixedRTConfig config;
  config.model_dir = FLAGS_dirname;
  config.use_gpu = true;
  config.fraction_of_gpu_memory = 0.2;
  config.device = 0;
  config.max_batch_size = 3;
  return config;
}

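// Run the same model once with the native executor and once through the
// TensorRT mixed engine, then compare the outputs element-wise.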
void CompareTensorRTWithFluid(int batch_size, const std::string &model_dirname) {
  NativeConfig config0 = GetConfigNative();
  config0.model_dir = model_dirname;

  MixedRTConfig config1 = GetConfigTRT();
  config1.model_dir = model_dirname;
  config1.max_batch_size = batch_size;

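  // Create one predictor per configuration; both load the same model.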
  auto predictor0 = CreatePaddlePredictor<NativeConfig>(config0);
  auto predictor1 = CreatePaddlePredictor<MixedRTConfig>(config1);
  // Prepare input: a zero-filled NCHW batch with a single nonzero element.
  const int height = 224;
  const int width = 224;
  const size_t input_len =
      static_cast<size_t>(batch_size) * 3 * height * width;
  std::vector<float> input(input_len, 0.0f);  // owns the data; no leak
  input[0] = 1.0f;

  // Wrap the input in a PaddleTensor. PaddleBuf here refers to external
  // memory, so `input` must outlive the Run() calls below (it does).
  PaddleTensor tensor;
  tensor.name = "input_0";
  tensor.shape = std::vector<int>({batch_size, 3, height, width});
  tensor.data = PaddleBuf(static_cast<void *>(input.data()),
                          sizeof(float) * input_len);
  tensor.dtype = PaddleDType::FLOAT32;
  std::vector<PaddleTensor> paddle_tensor_feeds(1, tensor);

  // Prepare outputs
  std::vector<PaddleTensor> outputs0;
  std::vector<PaddleTensor> outputs1;
  CHECK(predictor0->Run(paddle_tensor_feeds, &outputs0));

  CHECK(predictor1->Run(paddle_tensor_feeds, &outputs1, batch_size));

  // Get output.
  ASSERT_EQ(outputs0.size(), 1UL);
  ASSERT_EQ(outputs1.size(), 1UL);

  const size_t num_elements = outputs0.front().data.length() / sizeof(float);
  const size_t num_elements1 = outputs1.front().data.length() / sizeof(float);
  EXPECT_EQ(num_elements, num_elements1);

  auto *data0 = static_cast<float *>(outputs0.front().data.data());
  auto *data1 = static_cast<float *>(outputs1.front().data.data());

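  // TensorRT may pick different kernels and fusions than the native
  // executor, so allow a small absolute tolerance instead of exact equality.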
  ASSERT_GT(num_elements, 0UL);
  for (size_t i = 0; i < std::min(num_elements, num_elements1); i++) {
    EXPECT_NEAR(data0[i], data1[i], 1e-3);
  }
}

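// Each test below compares native and TensorRT execution for one model,
// expected under FLAGS_dirname (e.g. <dirname>/mobilenet).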
TEST(trt_models_test, mobilenet) {
  CompareTensorRTWithFluid(1, FLAGS_dirname + "/mobilenet");
}

TEST(trt_models_test, resnet50) {
  CompareTensorRTWithFluid(1, FLAGS_dirname + "/resnet50");
}

TEST(trt_models_test, resnext50) {
  CompareTensorRTWithFluid(1, FLAGS_dirname + "/resnext50");
}

}  // namespace paddle