/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

/*
 * This file contains a simple demo showing how to load an inference model
 * and run predictions with it, both single- and multi-threaded.
 */
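
/*
 * Example invocation (main() below defaults --dirname to
 * ./word2vec.inference.model, so the flag is only needed when the model
 * lives elsewhere):
 *   ./simple_on_word2vec --dirname=path/to/your/model
 *   ./simple_on_word2vec --dirname=path/to/your/model --use_gpu
 */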

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>
#include <memory>
#include <thread>  //NOLINT
#include "paddle/fluid/inference/paddle_inference_api.h"
#include "paddle/fluid/platform/enforce.h"

DEFINE_string(dirname, "", "Directory of the inference model.");
DEFINE_bool(use_gpu, false, "Whether to use the GPU.");

namespace paddle {
namespace demo {

void Main(bool use_gpu) {
  // 1. Create PaddlePredictor with a config.
  NativeConfig config;
  if (FLAGS_dirname.empty()) {
    LOG(INFO) << "Usage: ./simple_on_word2vec --dirname=path/to/your/model";
    exit(1);
  }
  config.model_dir = FLAGS_dirname;
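  // The GPU settings below only take effect when use_gpu is true:
  // fraction_of_gpu_memory caps the share of device memory the predictor may
  // pre-allocate, and device selects the GPU id to run on.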
  config.use_gpu = use_gpu;
  config.fraction_of_gpu_memory = 0.15;
  config.device = 0;
  auto predictor =
      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
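  // PaddleEngineKind::kNative selects the plain fluid executor; the factory
  // returns a std::unique_ptr, so the predictor is released automatically.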

  for (int batch_id = 0; batch_id < 3; batch_id++) {
    // 2. Prepare input.
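    // The four int64 values are token ids; the word2vec demo model consumes
    // four input slots (context words), so the same tensor is reused for all
    // of them below.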
    int64_t data[4] = {1, 2, 3, 4};

    PaddleTensor tensor;
    tensor.shape = std::vector<int>({4, 1});
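    // Note: PaddleBuf(ptr, size) wraps the caller-owned buffer without
    // copying it, so `data` must stay alive until Run() returns.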
    tensor.data = PaddleBuf(data, sizeof(data));
    tensor.dtype = PaddleDType::INT64;

    // For simplicity, we fill all the slots with the same data.
    std::vector<PaddleTensor> slots(4, tensor);

    // 3. Run
    std::vector<PaddleTensor> outputs;
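    // Run() returns true on success and fills `outputs` with one tensor per
    // fetch target of the model.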
    CHECK(predictor->Run(slots, &outputs));

    // 4. Get output.
    PADDLE_ENFORCE_EQ(outputs.size(), 1UL);
    // Check the output buffer size and the first few result values.
    PADDLE_ENFORCE_EQ(outputs.front().data.length(), 33168UL);
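    // (33168 bytes = 4 batch rows * 2073 floats per row * sizeof(float);
    // 2073 presumably matches the demo model's output vocabulary size.)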
    float result[5] = {0.00129761, 0.00151112, 0.000423564, 0.00108815,
                       0.000932706};
    const size_t num_elements = outputs.front().data.length() / sizeof(float);
    // The outputs' buffers are in CPU memory.
    for (size_t i = 0; i < std::min(static_cast<size_t>(5), num_elements);
         i++) {
      // Exact float equality is brittle; compare with a small tolerance.
      CHECK_NEAR(static_cast<float*>(outputs.front().data.data())[i],
                 result[i], 0.001);
    }
  }
}

void MainThreads(int num_threads, bool use_gpu) {
  // Multi-threaded inference is only supported on the CPU.
  // 0. Create PaddlePredictor with a config.
  NativeConfig config;
  config.model_dir = FLAGS_dirname;
  config.use_gpu = use_gpu;
  config.fraction_of_gpu_memory = 0.15;
  config.device = 0;
  auto main_predictor =
      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);

  std::vector<std::thread> threads;
  for (int tid = 0; tid < num_threads; ++tid) {
    threads.emplace_back([&, tid]() {
      // 1. Clone a predictor that shares the same parameters.
      auto predictor = main_predictor->Clone();
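      // Clone() shares the immutable weights with main_predictor but gives
      // each thread its own execution state, so no extra locking is needed.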
      constexpr int num_batches = 3;
      for (int batch_id = 0; batch_id < num_batches; ++batch_id) {
        // 2. Prepare dummy input data.
        int64_t data[4] = {1, 2, 3, 4};
        PaddleTensor tensor;
        tensor.shape = std::vector<int>({4, 1});
        tensor.data = PaddleBuf(data, sizeof(data));
        tensor.dtype = PaddleDType::INT64;

        std::vector<PaddleTensor> inputs(4, tensor);
        std::vector<PaddleTensor> outputs;
        // 3. Run
        CHECK(predictor->Run(inputs, &outputs));

        // 4. Get output.
        PADDLE_ENFORCE_EQ(outputs.size(), 1UL);
        // Check the output buffer size and the results for each tid.
        PADDLE_ENFORCE_EQ(outputs.front().data.length(), 33168UL);
        float result[5] = {0.00129761, 0.00151112, 0.000423564, 0.00108815,
                           0.000932706};
        const size_t num_elements =
            outputs.front().data.length() / sizeof(float);
        // The outputs' buffers are in CPU memory.
        for (size_t i = 0; i < std::min(static_cast<size_t>(5), num_elements);
             i++) {
          // Exact float equality is brittle; compare with a small tolerance.
          CHECK_NEAR(static_cast<float*>(outputs.front().data.data())[i],
                     result[i], 0.001);
        }
      }
    });
  }
  for (int i = 0; i < num_threads; ++i) {
    threads[i].join();
  }
}

}  // namespace demo
}  // namespace paddle

int main(int argc, char** argv) {
  FLAGS_dirname = "./word2vec.inference.model";
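  // Assigning before ParseCommandLineFlags() only changes the default; a
  // --dirname passed on the command line still takes precedence.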
  google::ParseCommandLineFlags(&argc, &argv, true);
  paddle::demo::Main(false /*use_gpu*/);
  paddle::demo::MainThreads(1, false /*use_gpu*/);
  paddle::demo::MainThreads(4, false /*use_gpu*/);
  if (FLAGS_use_gpu) {
    paddle::demo::Main(true /*use_gpu*/);
    paddle::demo::MainThreads(1, true /*use_gpu*/);
    paddle::demo::MainThreads(4, true /*use_gpu*/);
  }
  return 0;
}