simple_on_word2vec.cc 4.8 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

/*
 * This file contains a simple demo for how to take a model for inference.
 */

19
#include <gflags/gflags.h>
20 21
#include <glog/logging.h>
#include <memory>
22
#include <thread>  //NOLINT
23
#include "paddle/fluid/inference/paddle_inference_api.h"
24 25 26 27
#include "paddle/fluid/platform/enforce.h"

DEFINE_string(dirname, "", "Directory of the inference model.");
DEFINE_bool(use_gpu, false, "Whether use gpu.");
28

29 30 31 32 33 34
namespace paddle {
namespace demo {

void Main(bool use_gpu) {
  //# 1. Create PaddlePredictor with a config.
  NativeConfig config;
35 36 37 38 39
  if (FLAGS_dirname.empty()) {
    LOG(INFO) << "Usage: ./simple_on_word2vec --dirname=path/to/your/model";
    exit(1);
  }
  config.model_dir = FLAGS_dirname;
40 41 42 43 44 45 46 47 48 49
  config.use_gpu = use_gpu;
  config.fraction_of_gpu_memory = 0.15;
  config.device = 0;
  auto predictor =
      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);

  for (int batch_id = 0; batch_id < 3; batch_id++) {
    //# 2. Prepare input.
    int64_t data[4] = {1, 2, 3, 4};

Y
Yan Chunwei 已提交
50 51 52 53
    PaddleTensor tensor;
    tensor.shape = std::vector<int>({4, 1});
    tensor.data = PaddleBuf(data, sizeof(data));
    tensor.dtype = PaddleDType::INT64;
54 55 56 57 58 59 60 61 62

    // For simplicity, we set all the slots with the same data.
    std::vector<PaddleTensor> slots(4, tensor);

    //# 3. Run
    std::vector<PaddleTensor> outputs;
    CHECK(predictor->Run(slots, &outputs));

    //# 4. Get output.
63
    PADDLE_ENFORCE(outputs.size(), 1UL);
L
Luo Tao 已提交
64 65
    // Check the output buffer size and result of each tid.
    PADDLE_ENFORCE(outputs.front().data.length(), 33168UL);
66 67
    float result[5] = {0.00129761, 0.00151112, 0.000423564, 0.00108815,
                       0.000932706};
68
    const size_t num_elements = outputs.front().data.length() / sizeof(float);
69 70
    // The outputs' buffers are in CPU memory.
    for (size_t i = 0; i < std::min(5UL, num_elements); i++) {
L
Luo Tao 已提交
71 72
      PADDLE_ENFORCE(static_cast<float*>(outputs.front().data.data())[i],
                     result[i]);
73 74 75 76
    }
  }
}

T
tensor-tang 已提交
77
void MainThreads(int num_threads, bool use_gpu) {
T
tensor-tang 已提交
78 79 80
  // Multi-threads only support on CPU
  // 0. Create PaddlePredictor with a config.
  NativeConfig config;
81
  config.model_dir = FLAGS_dirname;
T
tensor-tang 已提交
82
  config.use_gpu = use_gpu;
T
tensor-tang 已提交
83 84
  config.fraction_of_gpu_memory = 0.15;
  config.device = 0;
T
tensor-tang 已提交
85 86 87 88 89 90 91 92 93 94 95 96
  auto main_predictor =
      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);

  std::vector<std::thread> threads;
  for (int tid = 0; tid < num_threads; ++tid) {
    threads.emplace_back([&, tid]() {
      // 1. clone a predictor which shares the same parameters
      auto predictor = main_predictor->Clone();
      constexpr int num_batches = 3;
      for (int batch_id = 0; batch_id < num_batches; ++batch_id) {
        // 2. Dummy Input Data
        int64_t data[4] = {1, 2, 3, 4};
Y
Yan Chunwei 已提交
97 98 99 100 101
        PaddleTensor tensor;
        tensor.shape = std::vector<int>({4, 1});
        tensor.data = PaddleBuf(data, sizeof(data));
        tensor.dtype = PaddleDType::INT64;

T
tensor-tang 已提交
102 103 104 105 106 107
        std::vector<PaddleTensor> inputs(4, tensor);
        std::vector<PaddleTensor> outputs;
        // 3. Run
        CHECK(predictor->Run(inputs, &outputs));

        // 4. Get output.
108
        PADDLE_ENFORCE(outputs.size(), 1UL);
L
Luo Tao 已提交
109 110
        // Check the output buffer size and result of each tid.
        PADDLE_ENFORCE(outputs.front().data.length(), 33168UL);
111 112
        float result[5] = {0.00129761, 0.00151112, 0.000423564, 0.00108815,
                           0.000932706};
113 114
        const size_t num_elements =
            outputs.front().data.length() / sizeof(float);
T
tensor-tang 已提交
115 116
        // The outputs' buffers are in CPU memory.
        for (size_t i = 0; i < std::min(5UL, num_elements); i++) {
L
Luo Tao 已提交
117 118
          PADDLE_ENFORCE(static_cast<float*>(outputs.front().data.data())[i],
                         result[i]);
T
tensor-tang 已提交
119 120 121 122 123 124 125 126 127
        }
      }
    });
  }
  for (int i = 0; i < num_threads; ++i) {
    threads[i].join();
  }
}

128 129
}  // namespace demo
}  // namespace paddle
130 131 132 133 134 135 136 137 138 139 140 141 142

int main(int argc, char** argv) {
  google::ParseCommandLineFlags(&argc, &argv, true);
  paddle::demo::Main(false /* use_gpu*/);
  paddle::demo::MainThreads(1, false /* use_gpu*/);
  paddle::demo::MainThreads(4, false /* use_gpu*/);
  if (FLAGS_use_gpu) {
    paddle::demo::Main(true /*use_gpu*/);
    paddle::demo::MainThreads(1, true /*use_gpu*/);
    paddle::demo::MainThreads(4, true /*use_gpu*/);
  }
  return 0;
}