/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "gflags/gflags.h"
#include "gtest/gtest.h"
#include "paddle/fluid/inference/tests/test_helper.h"
#include "paddle/fluid/inference/tests/test_multi_thread_helper.h"

DEFINE_string(dirname, "", "Directory of the inference model.");

TEST(inference, fit_a_line) {
  if (FLAGS_dirname.empty()) {
    LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model";
  }

  LOG(INFO) << "FLAGS_dirname: " << FLAGS_dirname << std::endl;
  std::string dirname = FLAGS_dirname;

  // 0. Call `paddle::framework::InitDevices()` to initialize all the devices.
  // In unittests, this is done in paddle/testing/paddle_gtest_main.cc

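  // Run the whole scenario twice: once single-threaded and once with two threads.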
  for (int num_threads : {1, 2}) {
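    // Build one feed vector per thread, each holding a single 10x13 float input tensor.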
    std::vector<std::vector<paddle::framework::LoDTensor*>> cpu_feeds;
    cpu_feeds.resize(num_threads);
    for (int i = 0; i < num_threads; ++i) {
      auto* input = new paddle::framework::LoDTensor();
      // The second dim of the input tensor should be 13
      // The input data should be >= 0
      int64_t batch_size = 10;
      SetupTensor<float>(input, {batch_size, 13}, static_cast<float>(0),
                         static_cast<float>(10));
      cpu_feeds[i].push_back(input);
    }

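    // Allocate one fetch slot per thread to receive the CPU inference output.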
    std::vector<std::vector<paddle::framework::FetchType*>> cpu_fetchs1;
    cpu_fetchs1.resize(num_threads);
    for (int i = 0; i < num_threads; ++i) {
      auto* output = new paddle::framework::FetchType();
      cpu_fetchs1[i].push_back(output);
    }

    // Run inference on CPU
    LOG(INFO) << "--- CPU Runs (num_threads: " << num_threads << "): ---";
    if (num_threads == 1) {
      TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds[0],
                                                cpu_fetchs1[0]);
    } else {
      TestMultiThreadInference<paddle::platform::CPUPlace>(
          dirname, cpu_feeds, cpu_fetchs1, num_threads);
    }

#ifdef PADDLE_WITH_CUDA
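    // A second set of fetch slots collects the GPU outputs for comparison with the CPU run.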
    std::vector<std::vector<paddle::framework::FetchType*>> cpu_fetchs2;
    cpu_fetchs2.resize(num_threads);
    for (int i = 0; i < num_threads; ++i) {
      auto* output = new paddle::framework::FetchType();
      cpu_fetchs2[i].push_back(output);
    }

    // Run inference on CUDA GPU
    LOG(INFO) << "--- GPU Runs (num_threads: " << num_threads << "): ---";
    if (num_threads == 1) {
      TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds[0],
                                                 cpu_fetchs2[0]);
    } else {
      TestMultiThreadInference<paddle::platform::CUDAPlace>(
          dirname, cpu_feeds, cpu_fetchs2, num_threads);
    }

    for (int i = 0; i < num_threads; ++i) {
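      // Verify that CPU and GPU produce matching results for this thread's output.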
      CheckError<float>(
          BOOST_GET(paddle::framework::LoDTensor, *cpu_fetchs1[i][0]),
          BOOST_GET(paddle::framework::LoDTensor, *cpu_fetchs2[i][0]));
      delete cpu_fetchs2[i][0];
    }
#endif

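    // Release the per-thread input tensors and CPU output holders.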
    for (int i = 0; i < num_threads; ++i) {
      delete cpu_feeds[i][0];
      delete cpu_fetchs1[i][0];
    }
  }  // num_threads-loop
}