/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <glog/logging.h>
#include <gtest/gtest.h>

#include <algorithm>
#include <thread>  // NOLINT
#include <vector>

#include "gflags/gflags.h"
#include "paddle/fluid/inference/api/api_impl.h"
#include "paddle/fluid/inference/tests/test_helper.h"

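// Maximum absolute difference tolerated when comparing floating-point
// outputs against the reference; clang builds use a looser bound.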
#ifdef __clang__
#define ACC_DIFF 4e-3
#else
#define ACC_DIFF 1e-3
#endif

DEFINE_string(word2vec_dirname, "",
              "Directory of the word2vec inference model.");
DEFINE_string(book_dirname, "", "Directory of the book inference model.");

namespace paddle {

PaddleTensor LodTensorToPaddleTensor(framework::LoDTensor* t) {
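  // Wrap the LoDTensor's buffer in a PaddleTensor. PaddleBuf::Reset points
  // at the existing memory rather than copying it, so the LoDTensor must
  // outlive the returned PaddleTensor.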
  PaddleTensor pt;

  if (t->type() == framework::proto::VarType::INT64) {
    pt.data.Reset(t->data<void>(), t->numel() * sizeof(int64_t));
    pt.dtype = PaddleDType::INT64;
  } else if (t->type() == framework::proto::VarType::FP32) {
    pt.data.Reset(t->data<void>(), t->numel() * sizeof(float));
    pt.dtype = PaddleDType::FLOAT32;
  } else {
    LOG(FATAL) << "unsupported type.";
  }
  pt.shape = framework::vectorize2int(t->dims());
  return pt;
}

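// Builds a NativeConfig pointing at the word2vec model; individual tests
// override model_dir and use_gpu as needed.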
NativeConfig GetConfig() {
  NativeConfig config;
  config.model_dir = FLAGS_word2vec_dirname;
  LOG(INFO) << "model_dir: " << config.model_dir;
  config.fraction_of_gpu_memory = 0.15;
#ifdef PADDLE_WITH_CUDA
  config.use_gpu = true;
#else
  config.use_gpu = false;
#endif
  config.device = 0;
  return config;
}

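// Runs word2vec through the native predictor, sanity-checks the output
// range, then compares it element-wise against a CPU reference computed
// by TestInference.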
void MainWord2Vec(bool use_gpu) {
  NativeConfig config = GetConfig();
  // Apply the requested device before constructing the predictor; setting
  // use_gpu afterwards would have no effect on the predictor.
  config.use_gpu = use_gpu;
  auto predictor = CreatePaddlePredictor<NativeConfig>(config);

  framework::LoDTensor first_word, second_word, third_word, fourth_word;
  framework::LoD lod{{0, 1}};
  int64_t dict_size = 2073;  // Size of the word2vec dictionary.

  SetupLoDTensor(&first_word, lod, static_cast<int64_t>(0), dict_size - 1);
  SetupLoDTensor(&second_word, lod, static_cast<int64_t>(0), dict_size - 1);
  SetupLoDTensor(&third_word, lod, static_cast<int64_t>(0), dict_size - 1);
  SetupLoDTensor(&fourth_word, lod, static_cast<int64_t>(0), dict_size - 1);

  std::vector<PaddleTensor> paddle_tensor_feeds;
  paddle_tensor_feeds.push_back(LodTensorToPaddleTensor(&first_word));
  paddle_tensor_feeds.push_back(LodTensorToPaddleTensor(&second_word));
  paddle_tensor_feeds.push_back(LodTensorToPaddleTensor(&third_word));
  paddle_tensor_feeds.push_back(LodTensorToPaddleTensor(&fourth_word));

  std::vector<PaddleTensor> outputs;
  ASSERT_TRUE(predictor->Run(paddle_tensor_feeds, &outputs));
  ASSERT_EQ(outputs.size(), 1UL);
  size_t len = outputs[0].data.length();
  float* data = static_cast<float*>(outputs[0].data.data());
  for (size_t j = 0; j < len / sizeof(float); ++j) {
    ASSERT_LT(data[j], 1.0);
    ASSERT_GT(data[j], -1.0);
  }

  std::vector<paddle::framework::LoDTensor*> cpu_feeds;
  cpu_feeds.push_back(&first_word);
  cpu_feeds.push_back(&second_word);
  cpu_feeds.push_back(&third_word);
  cpu_feeds.push_back(&fourth_word);

  framework::LoDTensor output1;
  std::vector<paddle::framework::LoDTensor*> cpu_fetchs1;
  cpu_fetchs1.push_back(&output1);

  TestInference<platform::CPUPlace>(config.model_dir, cpu_feeds, cpu_fetchs1);

  float* lod_data = output1.data<float>();
  for (int i = 0; i < output1.numel(); ++i) {
    EXPECT_LT(lod_data[i] - data[i], ACC_DIFF);
    EXPECT_GT(lod_data[i] - data[i], -ACC_DIFF);
  }
}

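// Feeds one random image batch to the ResNet model and checks that the
// predictor output matches the TestInference reference within ACC_DIFF.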
void MainImageClassification(bool use_gpu) {
  int batch_size = 2;
  bool repeat = false;
  NativeConfig config = GetConfig();
  config.use_gpu = use_gpu;
  config.model_dir =
      FLAGS_book_dirname + "/image_classification_resnet.inference.model";

  const bool is_combined = false;
  std::vector<std::vector<int64_t>> feed_target_shapes =
      GetFeedTargetShapes(config.model_dir, is_combined);

  framework::LoDTensor input;
  // Use normalized image pixels as input data,
  // which should be in the range [0.0, 1.0].
  feed_target_shapes[0][0] = batch_size;
  framework::DDim input_dims = framework::make_ddim(feed_target_shapes[0]);
  SetupTensor<float>(&input, input_dims, static_cast<float>(0),
                     static_cast<float>(1));
  std::vector<framework::LoDTensor*> cpu_feeds;
  cpu_feeds.push_back(&input);

  framework::LoDTensor output1;
  std::vector<framework::LoDTensor*> cpu_fetchs1;
  cpu_fetchs1.push_back(&output1);

  TestInference<platform::CPUPlace, false, true>(
      config.model_dir, cpu_feeds, cpu_fetchs1, repeat, is_combined);

  auto predictor = CreatePaddlePredictor(config);
  std::vector<PaddleTensor> paddle_tensor_feeds;
  paddle_tensor_feeds.push_back(LodTensorToPaddleTensor(&input));

  std::vector<PaddleTensor> outputs;
  ASSERT_TRUE(predictor->Run(paddle_tensor_feeds, &outputs));
  ASSERT_EQ(outputs.size(), 1UL);
  size_t len = outputs[0].data.length();
  float* data = static_cast<float*>(outputs[0].data.data());
  float* lod_data = output1.data<float>();
  for (size_t j = 0; j < len / sizeof(float); ++j) {
    EXPECT_NEAR(lod_data[j], data[j], ACC_DIFF);
  }
}

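// Concurrency test: every thread builds its own predictor from the shared
// config and runs one pre-built word2vec job, comparing against a per-job
// reference computed up front.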
void MainThreadsWord2Vec(bool use_gpu) {
  NativeConfig config = GetConfig();
  config.use_gpu = use_gpu;
  auto main_predictor = CreatePaddlePredictor<NativeConfig>(config);

  // Prepare the input data and the per-job reference results.
  constexpr int num_jobs = 3;
  std::vector<std::vector<framework::LoDTensor>> jobs(num_jobs);
  std::vector<std::vector<PaddleTensor>> paddle_tensor_feeds(num_jobs);
  std::vector<framework::LoDTensor> refs(num_jobs);
  for (size_t i = 0; i < jobs.size(); ++i) {
    // each job has 4 words
    jobs[i].resize(4);
    for (size_t j = 0; j < 4; ++j) {
      framework::LoD lod{{0, 1}};
      int64_t dict_size = 2073;  // Size of the word2vec dictionary.
      SetupLoDTensor(&jobs[i][j], lod, static_cast<int64_t>(0), dict_size - 1);
      paddle_tensor_feeds[i].push_back(LodTensorToPaddleTensor(&jobs[i][j]));
    }

    // get reference result of each job
    std::vector<paddle::framework::LoDTensor*> ref_feeds;
    std::vector<paddle::framework::LoDTensor*> ref_fetches(1, &refs[i]);
    for (auto& word : jobs[i]) {
      ref_feeds.push_back(&word);
    }
    TestInference<platform::CPUPlace>(config.model_dir, ref_feeds, ref_fetches);
  }

  // Create the threads; each thread runs one job.
  std::vector<std::thread> threads;
  for (int tid = 0; tid < num_jobs; ++tid) {
    threads.emplace_back([&, tid]() {
      auto predictor = CreatePaddlePredictor(config);
      auto& local_inputs = paddle_tensor_feeds[tid];
      std::vector<PaddleTensor> local_outputs;
      ASSERT_TRUE(predictor->Run(local_inputs, &local_outputs));

      // Check the output range.
      ASSERT_EQ(local_outputs.size(), 1UL);
      const size_t len = local_outputs[0].data.length();
      float* data = static_cast<float*>(local_outputs[0].data.data());
      for (size_t j = 0; j < len / sizeof(float); ++j) {
        ASSERT_LT(data[j], 1.0);
        ASSERT_GT(data[j], -1.0);
      }

      // Check correctness against the precomputed reference.
      float* ref_data = refs[tid].data<float>();
      EXPECT_EQ(refs[tid].numel(), static_cast<int64_t>(len / sizeof(float)));
      for (int i = 0; i < refs[tid].numel(); ++i) {
        EXPECT_NEAR(ref_data[i], data[i], 2e-3);
      }
    });
  }
  for (int i = 0; i < num_jobs; ++i) {
    threads[i].join();
  }
}

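// Same threading pattern as MainThreadsWord2Vec, but with one ResNet batch
// per thread.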
void MainThreadsImageClassification(bool use_gpu) {
  constexpr int num_jobs = 4;  // each job runs one batch
  constexpr int batch_size = 1;
  NativeConfig config = GetConfig();
  config.use_gpu = use_gpu;
  config.model_dir =
      FLAGS_book_dirname + "/image_classification_resnet.inference.model";

  auto main_predictor = CreatePaddlePredictor<NativeConfig>(config);
  std::vector<framework::LoDTensor> jobs(num_jobs);
  std::vector<std::vector<PaddleTensor>> paddle_tensor_feeds(num_jobs);
  std::vector<framework::LoDTensor> refs(num_jobs);
  for (size_t i = 0; i < jobs.size(); ++i) {
    // prepare inputs
    std::vector<std::vector<int64_t>> feed_target_shapes =
        GetFeedTargetShapes(config.model_dir, /*is_combined*/ false);
    feed_target_shapes[0][0] = batch_size;
    framework::DDim input_dims = framework::make_ddim(feed_target_shapes[0]);
    SetupTensor<float>(&jobs[i], input_dims, 0.f, 1.f);
    paddle_tensor_feeds[i].push_back(LodTensorToPaddleTensor(&jobs[i]));

    // get reference result of each job
    std::vector<framework::LoDTensor*> ref_feeds(1, &jobs[i]);
    std::vector<framework::LoDTensor*> ref_fetches(1, &refs[i]);
    TestInference<platform::CPUPlace>(config.model_dir, ref_feeds, ref_fetches);
  }

  // Create the threads; each thread runs one job.
  std::vector<std::thread> threads;
  for (int tid = 0; tid < num_jobs; ++tid) {
    threads.emplace_back([&, tid]() {
      auto predictor = CreatePaddlePredictor(config);
      auto& local_inputs = paddle_tensor_feeds[tid];
      std::vector<PaddleTensor> local_outputs;
      ASSERT_TRUE(predictor->Run(local_inputs, &local_outputs));

      // Check correctness against the precomputed reference.
      ASSERT_EQ(local_outputs.size(), 1UL);
      const size_t len = local_outputs[0].data.length();
      float* data = static_cast<float*>(local_outputs[0].data.data());
      float* ref_data = refs[tid].data<float>();
      EXPECT_EQ(static_cast<size_t>(refs[tid].numel()), len / sizeof(float));
      for (int i = 0; i < refs[tid].numel(); ++i) {
        EXPECT_NEAR(ref_data[i], data[i], ACC_DIFF);
      }
    });
  }
  for (int i = 0; i < num_jobs; ++i) {
    threads[i].join();
  }
}

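// The CPU variants always run; the GPU variants below are compiled only
// when PADDLE_WITH_CUDA is defined.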
TEST(inference_api_native, word2vec_cpu) { MainWord2Vec(false /*use_gpu*/); }
TEST(inference_api_native, word2vec_cpu_threads) {
  MainThreadsWord2Vec(false /*use_gpu*/);
}
TEST(inference_api_native, image_classification_cpu) {
  MainImageClassification(false /*use_gpu*/);
}
TEST(inference_api_native, image_classification_cpu_threads) {
  MainThreadsImageClassification(false /*use_gpu*/);
}

#ifdef PADDLE_WITH_CUDA
TEST(inference_api_native, word2vec_gpu) { MainWord2Vec(true /*use_gpu*/); }
// Temporarily disabled because the result is unstable.
// TEST(inference_api_native, word2vec_gpu_threads) {
//   MainThreadsWord2Vec(true /*use_gpu*/);
// }
TEST(inference_api_native, image_classification_gpu) {
  MainImageClassification(true /*use_gpu*/);
}
// Temporarily disabled because the result is unstable.
// TEST(inference_api_native, image_classification_gpu_threads) {
//   MainThreadsImageClassification(true /*use_gpu*/);
// }
#endif

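// Verifies that PassBuilder::DeletePass() removes the named pass from the
// AnalysisConfig pass list.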
TEST(PassBuilder, Delete) {
  contrib::AnalysisConfig config;
  config.DisableGpu();
  config.pass_builder()->DeletePass("attention_lstm_fuse_pass");
  const auto& passes = config.pass_builder()->AllPasses();
  auto it = std::find(passes.begin(), passes.end(), "attention_lstm_fuse_pass");
  ASSERT_EQ(it, passes.end());
}

}  // namespace paddle